Commit e3cf69511a2c5369c58f6fd6a065de152c3d4b22

Authored by Bjorn Helgaas
Committed by Dave Airlie
1 parent 55814b74c9

agp: use dev_printk when possible

Convert printks to use dev_printk().

Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Airlie <airlied@redhat.com>

Showing 11 changed files with 141 additions and 134 deletions Inline Diff

drivers/char/agp/ali-agp.c
1 /* 1 /*
2 * ALi AGPGART routines. 2 * ALi AGPGART routines.
3 */ 3 */
4 4
5 #include <linux/types.h> 5 #include <linux/types.h>
6 #include <linux/module.h> 6 #include <linux/module.h>
7 #include <linux/pci.h> 7 #include <linux/pci.h>
8 #include <linux/init.h> 8 #include <linux/init.h>
9 #include <linux/agp_backend.h> 9 #include <linux/agp_backend.h>
10 #include <asm/page.h> /* PAGE_SIZE */ 10 #include <asm/page.h> /* PAGE_SIZE */
11 #include "agp.h" 11 #include "agp.h"
12 12
13 #define ALI_AGPCTRL 0xb8 13 #define ALI_AGPCTRL 0xb8
14 #define ALI_ATTBASE 0xbc 14 #define ALI_ATTBASE 0xbc
15 #define ALI_TLBCTRL 0xc0 15 #define ALI_TLBCTRL 0xc0
16 #define ALI_TAGCTRL 0xc4 16 #define ALI_TAGCTRL 0xc4
17 #define ALI_CACHE_FLUSH_CTRL 0xD0 17 #define ALI_CACHE_FLUSH_CTRL 0xD0
18 #define ALI_CACHE_FLUSH_ADDR_MASK 0xFFFFF000 18 #define ALI_CACHE_FLUSH_ADDR_MASK 0xFFFFF000
19 #define ALI_CACHE_FLUSH_EN 0x100 19 #define ALI_CACHE_FLUSH_EN 0x100
20 20
21 static int ali_fetch_size(void) 21 static int ali_fetch_size(void)
22 { 22 {
23 int i; 23 int i;
24 u32 temp; 24 u32 temp;
25 struct aper_size_info_32 *values; 25 struct aper_size_info_32 *values;
26 26
27 pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp); 27 pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
28 temp &= ~(0xfffffff0); 28 temp &= ~(0xfffffff0);
29 values = A_SIZE_32(agp_bridge->driver->aperture_sizes); 29 values = A_SIZE_32(agp_bridge->driver->aperture_sizes);
30 30
31 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { 31 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
32 if (temp == values[i].size_value) { 32 if (temp == values[i].size_value) {
33 agp_bridge->previous_size = 33 agp_bridge->previous_size =
34 agp_bridge->current_size = (void *) (values + i); 34 agp_bridge->current_size = (void *) (values + i);
35 agp_bridge->aperture_size_idx = i; 35 agp_bridge->aperture_size_idx = i;
36 return values[i].size; 36 return values[i].size;
37 } 37 }
38 } 38 }
39 39
40 return 0; 40 return 0;
41 } 41 }
42 42
43 static void ali_tlbflush(struct agp_memory *mem) 43 static void ali_tlbflush(struct agp_memory *mem)
44 { 44 {
45 u32 temp; 45 u32 temp;
46 46
47 pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); 47 pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
48 temp &= 0xfffffff0; 48 temp &= 0xfffffff0;
49 temp |= (1<<0 | 1<<1); 49 temp |= (1<<0 | 1<<1);
50 pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, temp); 50 pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, temp);
51 } 51 }
52 52
53 static void ali_cleanup(void) 53 static void ali_cleanup(void)
54 { 54 {
55 struct aper_size_info_32 *previous_size; 55 struct aper_size_info_32 *previous_size;
56 u32 temp; 56 u32 temp;
57 57
58 previous_size = A_SIZE_32(agp_bridge->previous_size); 58 previous_size = A_SIZE_32(agp_bridge->previous_size);
59 59
60 pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); 60 pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
61 // clear tag 61 // clear tag
62 pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, 62 pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL,
63 ((temp & 0xffffff00) | 0x00000001|0x00000002)); 63 ((temp & 0xffffff00) | 0x00000001|0x00000002));
64 64
65 pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp); 65 pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
66 pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, 66 pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE,
67 ((temp & 0x00000ff0) | previous_size->size_value)); 67 ((temp & 0x00000ff0) | previous_size->size_value));
68 } 68 }
69 69
70 static int ali_configure(void) 70 static int ali_configure(void)
71 { 71 {
72 u32 temp; 72 u32 temp;
73 struct aper_size_info_32 *current_size; 73 struct aper_size_info_32 *current_size;
74 74
75 current_size = A_SIZE_32(agp_bridge->current_size); 75 current_size = A_SIZE_32(agp_bridge->current_size);
76 76
77 /* aperture size and gatt addr */ 77 /* aperture size and gatt addr */
78 pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp); 78 pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
79 temp = (((temp & 0x00000ff0) | (agp_bridge->gatt_bus_addr & 0xfffff000)) 79 temp = (((temp & 0x00000ff0) | (agp_bridge->gatt_bus_addr & 0xfffff000))
80 | (current_size->size_value & 0xf)); 80 | (current_size->size_value & 0xf));
81 pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, temp); 81 pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, temp);
82 82
83 /* tlb control */ 83 /* tlb control */
84 pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); 84 pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
85 pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010)); 85 pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010));
86 86
87 /* address to map to */ 87 /* address to map to */
88 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); 88 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
89 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); 89 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
90 90
91 #if 0 91 #if 0
92 if (agp_bridge->type == ALI_M1541) { 92 if (agp_bridge->type == ALI_M1541) {
93 u32 nlvm_addr = 0; 93 u32 nlvm_addr = 0;
94 94
95 switch (current_size->size_value) { 95 switch (current_size->size_value) {
96 case 0: break; 96 case 0: break;
97 case 1: nlvm_addr = 0x100000;break; 97 case 1: nlvm_addr = 0x100000;break;
98 case 2: nlvm_addr = 0x200000;break; 98 case 2: nlvm_addr = 0x200000;break;
99 case 3: nlvm_addr = 0x400000;break; 99 case 3: nlvm_addr = 0x400000;break;
100 case 4: nlvm_addr = 0x800000;break; 100 case 4: nlvm_addr = 0x800000;break;
101 case 6: nlvm_addr = 0x1000000;break; 101 case 6: nlvm_addr = 0x1000000;break;
102 case 7: nlvm_addr = 0x2000000;break; 102 case 7: nlvm_addr = 0x2000000;break;
103 case 8: nlvm_addr = 0x4000000;break; 103 case 8: nlvm_addr = 0x4000000;break;
104 case 9: nlvm_addr = 0x8000000;break; 104 case 9: nlvm_addr = 0x8000000;break;
105 case 10: nlvm_addr = 0x10000000;break; 105 case 10: nlvm_addr = 0x10000000;break;
106 default: break; 106 default: break;
107 } 107 }
108 nlvm_addr--; 108 nlvm_addr--;
109 nlvm_addr&=0xfff00000; 109 nlvm_addr&=0xfff00000;
110 110
111 nlvm_addr+= agp_bridge->gart_bus_addr; 111 nlvm_addr+= agp_bridge->gart_bus_addr;
112 nlvm_addr|=(agp_bridge->gart_bus_addr>>12); 112 nlvm_addr|=(agp_bridge->gart_bus_addr>>12);
113 printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr); 113 dev_info(&agp_bridge->dev->dev, "nlvm top &base = %8x\n",
114 nlvm_addr);
114 } 115 }
115 #endif 116 #endif
116 117
117 pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); 118 pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
118 temp &= 0xffffff7f; //enable TLB 119 temp &= 0xffffff7f; //enable TLB
119 pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, temp); 120 pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, temp);
120 121
121 return 0; 122 return 0;
122 } 123 }
123 124
124 125
125 static void m1541_cache_flush(void) 126 static void m1541_cache_flush(void)
126 { 127 {
127 int i, page_count; 128 int i, page_count;
128 u32 temp; 129 u32 temp;
129 130
130 global_cache_flush(); 131 global_cache_flush();
131 132
132 page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order; 133 page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order;
133 for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) { 134 for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {
134 pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, 135 pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
135 &temp); 136 &temp);
136 pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, 137 pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
137 (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | 138 (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
138 (agp_bridge->gatt_bus_addr + i)) | 139 (agp_bridge->gatt_bus_addr + i)) |
139 ALI_CACHE_FLUSH_EN)); 140 ALI_CACHE_FLUSH_EN));
140 } 141 }
141 } 142 }
142 143
143 static void *m1541_alloc_page(struct agp_bridge_data *bridge) 144 static void *m1541_alloc_page(struct agp_bridge_data *bridge)
144 { 145 {
145 void *addr = agp_generic_alloc_page(agp_bridge); 146 void *addr = agp_generic_alloc_page(agp_bridge);
146 u32 temp; 147 u32 temp;
147 148
148 if (!addr) 149 if (!addr)
149 return NULL; 150 return NULL;
150 151
151 pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); 152 pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
152 pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, 153 pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
153 (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | 154 (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
154 virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN )); 155 virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN ));
155 return addr; 156 return addr;
156 } 157 }
157 158
158 static void ali_destroy_page(void * addr, int flags) 159 static void ali_destroy_page(void * addr, int flags)
159 { 160 {
160 if (addr) { 161 if (addr) {
161 if (flags & AGP_PAGE_DESTROY_UNMAP) { 162 if (flags & AGP_PAGE_DESTROY_UNMAP) {
162 global_cache_flush(); /* is this really needed? --hch */ 163 global_cache_flush(); /* is this really needed? --hch */
163 agp_generic_destroy_page(addr, flags); 164 agp_generic_destroy_page(addr, flags);
164 } else 165 } else
165 agp_generic_destroy_page(addr, flags); 166 agp_generic_destroy_page(addr, flags);
166 } 167 }
167 } 168 }
168 169
169 static void m1541_destroy_page(void * addr, int flags) 170 static void m1541_destroy_page(void * addr, int flags)
170 { 171 {
171 u32 temp; 172 u32 temp;
172 173
173 if (addr == NULL) 174 if (addr == NULL)
174 return; 175 return;
175 176
176 if (flags & AGP_PAGE_DESTROY_UNMAP) { 177 if (flags & AGP_PAGE_DESTROY_UNMAP) {
177 global_cache_flush(); 178 global_cache_flush();
178 179
179 pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); 180 pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
180 pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, 181 pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
181 (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | 182 (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
182 virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN)); 183 virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN));
183 } 184 }
184 agp_generic_destroy_page(addr, flags); 185 agp_generic_destroy_page(addr, flags);
185 } 186 }
186 187
187 188
188 /* Setup function */ 189 /* Setup function */
189 190
190 static const struct aper_size_info_32 ali_generic_sizes[7] = 191 static const struct aper_size_info_32 ali_generic_sizes[7] =
191 { 192 {
192 {256, 65536, 6, 10}, 193 {256, 65536, 6, 10},
193 {128, 32768, 5, 9}, 194 {128, 32768, 5, 9},
194 {64, 16384, 4, 8}, 195 {64, 16384, 4, 8},
195 {32, 8192, 3, 7}, 196 {32, 8192, 3, 7},
196 {16, 4096, 2, 6}, 197 {16, 4096, 2, 6},
197 {8, 2048, 1, 4}, 198 {8, 2048, 1, 4},
198 {4, 1024, 0, 3} 199 {4, 1024, 0, 3}
199 }; 200 };
200 201
201 static const struct agp_bridge_driver ali_generic_bridge = { 202 static const struct agp_bridge_driver ali_generic_bridge = {
202 .owner = THIS_MODULE, 203 .owner = THIS_MODULE,
203 .aperture_sizes = ali_generic_sizes, 204 .aperture_sizes = ali_generic_sizes,
204 .size_type = U32_APER_SIZE, 205 .size_type = U32_APER_SIZE,
205 .num_aperture_sizes = 7, 206 .num_aperture_sizes = 7,
206 .configure = ali_configure, 207 .configure = ali_configure,
207 .fetch_size = ali_fetch_size, 208 .fetch_size = ali_fetch_size,
208 .cleanup = ali_cleanup, 209 .cleanup = ali_cleanup,
209 .tlb_flush = ali_tlbflush, 210 .tlb_flush = ali_tlbflush,
210 .mask_memory = agp_generic_mask_memory, 211 .mask_memory = agp_generic_mask_memory,
211 .masks = NULL, 212 .masks = NULL,
212 .agp_enable = agp_generic_enable, 213 .agp_enable = agp_generic_enable,
213 .cache_flush = global_cache_flush, 214 .cache_flush = global_cache_flush,
214 .create_gatt_table = agp_generic_create_gatt_table, 215 .create_gatt_table = agp_generic_create_gatt_table,
215 .free_gatt_table = agp_generic_free_gatt_table, 216 .free_gatt_table = agp_generic_free_gatt_table,
216 .insert_memory = agp_generic_insert_memory, 217 .insert_memory = agp_generic_insert_memory,
217 .remove_memory = agp_generic_remove_memory, 218 .remove_memory = agp_generic_remove_memory,
218 .alloc_by_type = agp_generic_alloc_by_type, 219 .alloc_by_type = agp_generic_alloc_by_type,
219 .free_by_type = agp_generic_free_by_type, 220 .free_by_type = agp_generic_free_by_type,
220 .agp_alloc_page = agp_generic_alloc_page, 221 .agp_alloc_page = agp_generic_alloc_page,
221 .agp_destroy_page = ali_destroy_page, 222 .agp_destroy_page = ali_destroy_page,
222 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 223 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
223 }; 224 };
224 225
225 static const struct agp_bridge_driver ali_m1541_bridge = { 226 static const struct agp_bridge_driver ali_m1541_bridge = {
226 .owner = THIS_MODULE, 227 .owner = THIS_MODULE,
227 .aperture_sizes = ali_generic_sizes, 228 .aperture_sizes = ali_generic_sizes,
228 .size_type = U32_APER_SIZE, 229 .size_type = U32_APER_SIZE,
229 .num_aperture_sizes = 7, 230 .num_aperture_sizes = 7,
230 .configure = ali_configure, 231 .configure = ali_configure,
231 .fetch_size = ali_fetch_size, 232 .fetch_size = ali_fetch_size,
232 .cleanup = ali_cleanup, 233 .cleanup = ali_cleanup,
233 .tlb_flush = ali_tlbflush, 234 .tlb_flush = ali_tlbflush,
234 .mask_memory = agp_generic_mask_memory, 235 .mask_memory = agp_generic_mask_memory,
235 .masks = NULL, 236 .masks = NULL,
236 .agp_enable = agp_generic_enable, 237 .agp_enable = agp_generic_enable,
237 .cache_flush = m1541_cache_flush, 238 .cache_flush = m1541_cache_flush,
238 .create_gatt_table = agp_generic_create_gatt_table, 239 .create_gatt_table = agp_generic_create_gatt_table,
239 .free_gatt_table = agp_generic_free_gatt_table, 240 .free_gatt_table = agp_generic_free_gatt_table,
240 .insert_memory = agp_generic_insert_memory, 241 .insert_memory = agp_generic_insert_memory,
241 .remove_memory = agp_generic_remove_memory, 242 .remove_memory = agp_generic_remove_memory,
242 .alloc_by_type = agp_generic_alloc_by_type, 243 .alloc_by_type = agp_generic_alloc_by_type,
243 .free_by_type = agp_generic_free_by_type, 244 .free_by_type = agp_generic_free_by_type,
244 .agp_alloc_page = m1541_alloc_page, 245 .agp_alloc_page = m1541_alloc_page,
245 .agp_destroy_page = m1541_destroy_page, 246 .agp_destroy_page = m1541_destroy_page,
246 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 247 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
247 }; 248 };
248 249
249 250
250 static struct agp_device_ids ali_agp_device_ids[] __devinitdata = 251 static struct agp_device_ids ali_agp_device_ids[] __devinitdata =
251 { 252 {
252 { 253 {
253 .device_id = PCI_DEVICE_ID_AL_M1541, 254 .device_id = PCI_DEVICE_ID_AL_M1541,
254 .chipset_name = "M1541", 255 .chipset_name = "M1541",
255 }, 256 },
256 { 257 {
257 .device_id = PCI_DEVICE_ID_AL_M1621, 258 .device_id = PCI_DEVICE_ID_AL_M1621,
258 .chipset_name = "M1621", 259 .chipset_name = "M1621",
259 }, 260 },
260 { 261 {
261 .device_id = PCI_DEVICE_ID_AL_M1631, 262 .device_id = PCI_DEVICE_ID_AL_M1631,
262 .chipset_name = "M1631", 263 .chipset_name = "M1631",
263 }, 264 },
264 { 265 {
265 .device_id = PCI_DEVICE_ID_AL_M1632, 266 .device_id = PCI_DEVICE_ID_AL_M1632,
266 .chipset_name = "M1632", 267 .chipset_name = "M1632",
267 }, 268 },
268 { 269 {
269 .device_id = PCI_DEVICE_ID_AL_M1641, 270 .device_id = PCI_DEVICE_ID_AL_M1641,
270 .chipset_name = "M1641", 271 .chipset_name = "M1641",
271 }, 272 },
272 { 273 {
273 .device_id = PCI_DEVICE_ID_AL_M1644, 274 .device_id = PCI_DEVICE_ID_AL_M1644,
274 .chipset_name = "M1644", 275 .chipset_name = "M1644",
275 }, 276 },
276 { 277 {
277 .device_id = PCI_DEVICE_ID_AL_M1647, 278 .device_id = PCI_DEVICE_ID_AL_M1647,
278 .chipset_name = "M1647", 279 .chipset_name = "M1647",
279 }, 280 },
280 { 281 {
281 .device_id = PCI_DEVICE_ID_AL_M1651, 282 .device_id = PCI_DEVICE_ID_AL_M1651,
282 .chipset_name = "M1651", 283 .chipset_name = "M1651",
283 }, 284 },
284 { 285 {
285 .device_id = PCI_DEVICE_ID_AL_M1671, 286 .device_id = PCI_DEVICE_ID_AL_M1671,
286 .chipset_name = "M1671", 287 .chipset_name = "M1671",
287 }, 288 },
288 { 289 {
289 .device_id = PCI_DEVICE_ID_AL_M1681, 290 .device_id = PCI_DEVICE_ID_AL_M1681,
290 .chipset_name = "M1681", 291 .chipset_name = "M1681",
291 }, 292 },
292 { 293 {
293 .device_id = PCI_DEVICE_ID_AL_M1683, 294 .device_id = PCI_DEVICE_ID_AL_M1683,
294 .chipset_name = "M1683", 295 .chipset_name = "M1683",
295 }, 296 },
296 297
297 { }, /* dummy final entry, always present */ 298 { }, /* dummy final entry, always present */
298 }; 299 };
299 300
300 static int __devinit agp_ali_probe(struct pci_dev *pdev, 301 static int __devinit agp_ali_probe(struct pci_dev *pdev,
301 const struct pci_device_id *ent) 302 const struct pci_device_id *ent)
302 { 303 {
303 struct agp_device_ids *devs = ali_agp_device_ids; 304 struct agp_device_ids *devs = ali_agp_device_ids;
304 struct agp_bridge_data *bridge; 305 struct agp_bridge_data *bridge;
305 u8 hidden_1621_id, cap_ptr; 306 u8 hidden_1621_id, cap_ptr;
306 int j; 307 int j;
307 308
308 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); 309 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
309 if (!cap_ptr) 310 if (!cap_ptr)
310 return -ENODEV; 311 return -ENODEV;
311 312
312 /* probe for known chipsets */ 313 /* probe for known chipsets */
313 for (j = 0; devs[j].chipset_name; j++) { 314 for (j = 0; devs[j].chipset_name; j++) {
314 if (pdev->device == devs[j].device_id) 315 if (pdev->device == devs[j].device_id)
315 goto found; 316 goto found;
316 } 317 }
317 318
318 printk(KERN_ERR PFX "Unsupported ALi chipset (device id: %04x)\n", 319 dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x]\n",
319 pdev->device); 320 pdev->vendor, pdev->device);
320 return -ENODEV; 321 return -ENODEV;
321 322
322 323
323 found: 324 found:
324 bridge = agp_alloc_bridge(); 325 bridge = agp_alloc_bridge();
325 if (!bridge) 326 if (!bridge)
326 return -ENOMEM; 327 return -ENOMEM;
327 328
328 bridge->dev = pdev; 329 bridge->dev = pdev;
329 bridge->capndx = cap_ptr; 330 bridge->capndx = cap_ptr;
330 331
331 switch (pdev->device) { 332 switch (pdev->device) {
332 case PCI_DEVICE_ID_AL_M1541: 333 case PCI_DEVICE_ID_AL_M1541:
333 bridge->driver = &ali_m1541_bridge; 334 bridge->driver = &ali_m1541_bridge;
334 break; 335 break;
335 case PCI_DEVICE_ID_AL_M1621: 336 case PCI_DEVICE_ID_AL_M1621:
336 pci_read_config_byte(pdev, 0xFB, &hidden_1621_id); 337 pci_read_config_byte(pdev, 0xFB, &hidden_1621_id);
337 switch (hidden_1621_id) { 338 switch (hidden_1621_id) {
338 case 0x31: 339 case 0x31:
339 devs[j].chipset_name = "M1631"; 340 devs[j].chipset_name = "M1631";
340 break; 341 break;
341 case 0x32: 342 case 0x32:
342 devs[j].chipset_name = "M1632"; 343 devs[j].chipset_name = "M1632";
343 break; 344 break;
344 case 0x41: 345 case 0x41:
345 devs[j].chipset_name = "M1641"; 346 devs[j].chipset_name = "M1641";
346 break; 347 break;
347 case 0x43: 348 case 0x43:
348 devs[j].chipset_name = "M????"; 349 devs[j].chipset_name = "M????";
349 break; 350 break;
350 case 0x47: 351 case 0x47:
351 devs[j].chipset_name = "M1647"; 352 devs[j].chipset_name = "M1647";
352 break; 353 break;
353 case 0x51: 354 case 0x51:
354 devs[j].chipset_name = "M1651"; 355 devs[j].chipset_name = "M1651";
355 break; 356 break;
356 default: 357 default:
357 break; 358 break;
358 } 359 }
359 /*FALLTHROUGH*/ 360 /*FALLTHROUGH*/
360 default: 361 default:
361 bridge->driver = &ali_generic_bridge; 362 bridge->driver = &ali_generic_bridge;
362 } 363 }
363 364
364 printk(KERN_INFO PFX "Detected ALi %s chipset\n", 365 dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name);
365 devs[j].chipset_name);
366 366
367 /* Fill in the mode register */ 367 /* Fill in the mode register */
368 pci_read_config_dword(pdev, 368 pci_read_config_dword(pdev,
369 bridge->capndx+PCI_AGP_STATUS, 369 bridge->capndx+PCI_AGP_STATUS,
370 &bridge->mode); 370 &bridge->mode);
371 371
372 pci_set_drvdata(pdev, bridge); 372 pci_set_drvdata(pdev, bridge);
373 return agp_add_bridge(bridge); 373 return agp_add_bridge(bridge);
374 } 374 }
375 375
376 static void __devexit agp_ali_remove(struct pci_dev *pdev) 376 static void __devexit agp_ali_remove(struct pci_dev *pdev)
377 { 377 {
378 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 378 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
379 379
380 agp_remove_bridge(bridge); 380 agp_remove_bridge(bridge);
381 agp_put_bridge(bridge); 381 agp_put_bridge(bridge);
382 } 382 }
383 383
384 static struct pci_device_id agp_ali_pci_table[] = { 384 static struct pci_device_id agp_ali_pci_table[] = {
385 { 385 {
386 .class = (PCI_CLASS_BRIDGE_HOST << 8), 386 .class = (PCI_CLASS_BRIDGE_HOST << 8),
387 .class_mask = ~0, 387 .class_mask = ~0,
388 .vendor = PCI_VENDOR_ID_AL, 388 .vendor = PCI_VENDOR_ID_AL,
389 .device = PCI_ANY_ID, 389 .device = PCI_ANY_ID,
390 .subvendor = PCI_ANY_ID, 390 .subvendor = PCI_ANY_ID,
391 .subdevice = PCI_ANY_ID, 391 .subdevice = PCI_ANY_ID,
392 }, 392 },
393 { } 393 { }
394 }; 394 };
395 395
396 MODULE_DEVICE_TABLE(pci, agp_ali_pci_table); 396 MODULE_DEVICE_TABLE(pci, agp_ali_pci_table);
397 397
398 static struct pci_driver agp_ali_pci_driver = { 398 static struct pci_driver agp_ali_pci_driver = {
399 .name = "agpgart-ali", 399 .name = "agpgart-ali",
400 .id_table = agp_ali_pci_table, 400 .id_table = agp_ali_pci_table,
401 .probe = agp_ali_probe, 401 .probe = agp_ali_probe,
402 .remove = agp_ali_remove, 402 .remove = agp_ali_remove,
403 }; 403 };
404 404
405 static int __init agp_ali_init(void) 405 static int __init agp_ali_init(void)
406 { 406 {
407 if (agp_off) 407 if (agp_off)
408 return -EINVAL; 408 return -EINVAL;
409 return pci_register_driver(&agp_ali_pci_driver); 409 return pci_register_driver(&agp_ali_pci_driver);
410 } 410 }
411 411
412 static void __exit agp_ali_cleanup(void) 412 static void __exit agp_ali_cleanup(void)
413 { 413 {
414 pci_unregister_driver(&agp_ali_pci_driver); 414 pci_unregister_driver(&agp_ali_pci_driver);
415 } 415 }
416 416
417 module_init(agp_ali_init); 417 module_init(agp_ali_init);
418 module_exit(agp_ali_cleanup); 418 module_exit(agp_ali_cleanup);
419 419
420 MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>"); 420 MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
421 MODULE_LICENSE("GPL and additional rights"); 421 MODULE_LICENSE("GPL and additional rights");
422 422
drivers/char/agp/amd-k7-agp.c
1 /* 1 /*
2 * AMD K7 AGPGART routines. 2 * AMD K7 AGPGART routines.
3 */ 3 */
4 4
5 #include <linux/module.h> 5 #include <linux/module.h>
6 #include <linux/pci.h> 6 #include <linux/pci.h>
7 #include <linux/init.h> 7 #include <linux/init.h>
8 #include <linux/agp_backend.h> 8 #include <linux/agp_backend.h>
9 #include <linux/gfp.h> 9 #include <linux/gfp.h>
10 #include <linux/page-flags.h> 10 #include <linux/page-flags.h>
11 #include <linux/mm.h> 11 #include <linux/mm.h>
12 #include "agp.h" 12 #include "agp.h"
13 13
14 #define AMD_MMBASE 0x14 14 #define AMD_MMBASE 0x14
15 #define AMD_APSIZE 0xac 15 #define AMD_APSIZE 0xac
16 #define AMD_MODECNTL 0xb0 16 #define AMD_MODECNTL 0xb0
17 #define AMD_MODECNTL2 0xb2 17 #define AMD_MODECNTL2 0xb2
18 #define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */ 18 #define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */
19 #define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */ 19 #define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */
20 #define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */ 20 #define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */
21 #define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */ 21 #define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */
22 22
23 static struct pci_device_id agp_amdk7_pci_table[]; 23 static struct pci_device_id agp_amdk7_pci_table[];
24 24
25 struct amd_page_map { 25 struct amd_page_map {
26 unsigned long *real; 26 unsigned long *real;
27 unsigned long __iomem *remapped; 27 unsigned long __iomem *remapped;
28 }; 28 };
29 29
30 static struct _amd_irongate_private { 30 static struct _amd_irongate_private {
31 volatile u8 __iomem *registers; 31 volatile u8 __iomem *registers;
32 struct amd_page_map **gatt_pages; 32 struct amd_page_map **gatt_pages;
33 int num_tables; 33 int num_tables;
34 } amd_irongate_private; 34 } amd_irongate_private;
35 35
36 static int amd_create_page_map(struct amd_page_map *page_map) 36 static int amd_create_page_map(struct amd_page_map *page_map)
37 { 37 {
38 int i; 38 int i;
39 39
40 page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); 40 page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
41 if (page_map->real == NULL) 41 if (page_map->real == NULL)
42 return -ENOMEM; 42 return -ENOMEM;
43 43
44 #ifndef CONFIG_X86 44 #ifndef CONFIG_X86
45 SetPageReserved(virt_to_page(page_map->real)); 45 SetPageReserved(virt_to_page(page_map->real));
46 global_cache_flush(); 46 global_cache_flush();
47 page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real), 47 page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
48 PAGE_SIZE); 48 PAGE_SIZE);
49 if (page_map->remapped == NULL) { 49 if (page_map->remapped == NULL) {
50 ClearPageReserved(virt_to_page(page_map->real)); 50 ClearPageReserved(virt_to_page(page_map->real));
51 free_page((unsigned long) page_map->real); 51 free_page((unsigned long) page_map->real);
52 page_map->real = NULL; 52 page_map->real = NULL;
53 return -ENOMEM; 53 return -ENOMEM;
54 } 54 }
55 global_cache_flush(); 55 global_cache_flush();
56 #else 56 #else
57 set_memory_uc((unsigned long)page_map->real, 1); 57 set_memory_uc((unsigned long)page_map->real, 1);
58 page_map->remapped = page_map->real; 58 page_map->remapped = page_map->real;
59 #endif 59 #endif
60 60
61 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { 61 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
62 writel(agp_bridge->scratch_page, page_map->remapped+i); 62 writel(agp_bridge->scratch_page, page_map->remapped+i);
63 readl(page_map->remapped+i); /* PCI Posting. */ 63 readl(page_map->remapped+i); /* PCI Posting. */
64 } 64 }
65 65
66 return 0; 66 return 0;
67 } 67 }
68 68
69 static void amd_free_page_map(struct amd_page_map *page_map) 69 static void amd_free_page_map(struct amd_page_map *page_map)
70 { 70 {
71 #ifndef CONFIG_X86 71 #ifndef CONFIG_X86
72 iounmap(page_map->remapped); 72 iounmap(page_map->remapped);
73 ClearPageReserved(virt_to_page(page_map->real)); 73 ClearPageReserved(virt_to_page(page_map->real));
74 #else 74 #else
75 set_memory_wb((unsigned long)page_map->real, 1); 75 set_memory_wb((unsigned long)page_map->real, 1);
76 #endif 76 #endif
77 free_page((unsigned long) page_map->real); 77 free_page((unsigned long) page_map->real);
78 } 78 }
79 79
80 static void amd_free_gatt_pages(void) 80 static void amd_free_gatt_pages(void)
81 { 81 {
82 int i; 82 int i;
83 struct amd_page_map **tables; 83 struct amd_page_map **tables;
84 struct amd_page_map *entry; 84 struct amd_page_map *entry;
85 85
86 tables = amd_irongate_private.gatt_pages; 86 tables = amd_irongate_private.gatt_pages;
87 for (i = 0; i < amd_irongate_private.num_tables; i++) { 87 for (i = 0; i < amd_irongate_private.num_tables; i++) {
88 entry = tables[i]; 88 entry = tables[i];
89 if (entry != NULL) { 89 if (entry != NULL) {
90 if (entry->real != NULL) 90 if (entry->real != NULL)
91 amd_free_page_map(entry); 91 amd_free_page_map(entry);
92 kfree(entry); 92 kfree(entry);
93 } 93 }
94 } 94 }
95 kfree(tables); 95 kfree(tables);
96 amd_irongate_private.gatt_pages = NULL; 96 amd_irongate_private.gatt_pages = NULL;
97 } 97 }
98 98
99 static int amd_create_gatt_pages(int nr_tables) 99 static int amd_create_gatt_pages(int nr_tables)
100 { 100 {
101 struct amd_page_map **tables; 101 struct amd_page_map **tables;
102 struct amd_page_map *entry; 102 struct amd_page_map *entry;
103 int retval = 0; 103 int retval = 0;
104 int i; 104 int i;
105 105
106 tables = kzalloc((nr_tables + 1) * sizeof(struct amd_page_map *),GFP_KERNEL); 106 tables = kzalloc((nr_tables + 1) * sizeof(struct amd_page_map *),GFP_KERNEL);
107 if (tables == NULL) 107 if (tables == NULL)
108 return -ENOMEM; 108 return -ENOMEM;
109 109
110 for (i = 0; i < nr_tables; i++) { 110 for (i = 0; i < nr_tables; i++) {
111 entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL); 111 entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL);
112 tables[i] = entry; 112 tables[i] = entry;
113 if (entry == NULL) { 113 if (entry == NULL) {
114 retval = -ENOMEM; 114 retval = -ENOMEM;
115 break; 115 break;
116 } 116 }
117 retval = amd_create_page_map(entry); 117 retval = amd_create_page_map(entry);
118 if (retval != 0) 118 if (retval != 0)
119 break; 119 break;
120 } 120 }
121 amd_irongate_private.num_tables = i; 121 amd_irongate_private.num_tables = i;
122 amd_irongate_private.gatt_pages = tables; 122 amd_irongate_private.gatt_pages = tables;
123 123
124 if (retval != 0) 124 if (retval != 0)
125 amd_free_gatt_pages(); 125 amd_free_gatt_pages();
126 126
127 return retval; 127 return retval;
128 } 128 }
129 129
130 /* Since we don't need contiguous memory we just try 130 /* Since we don't need contiguous memory we just try
131 * to get the gatt table once 131 * to get the gatt table once
132 */ 132 */
133 133
134 #define GET_PAGE_DIR_OFF(addr) (addr >> 22) 134 #define GET_PAGE_DIR_OFF(addr) (addr >> 22)
135 #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ 135 #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
136 GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) 136 GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
137 #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) 137 #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
138 #define GET_GATT(addr) (amd_irongate_private.gatt_pages[\ 138 #define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
139 GET_PAGE_DIR_IDX(addr)]->remapped) 139 GET_PAGE_DIR_IDX(addr)]->remapped)
140 140
/*
 * Build the two-level Irongate GATT: a top-level page directory whose
 * entries each point at one GATT page covering 4 MB of aperture
 * (1024 entries * 4 KB pages).
 *
 * Returns 0 on success or a negative errno from the page allocators.
 */
static int amd_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	struct amd_page_map page_dir;
	unsigned long addr;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = amd_create_page_map(&page_dir);
	if (retval != 0)
		return retval;

	/* One GATT page per 1024 aperture entries (4 MB of aperture). */
	retval = amd_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		amd_free_page_map(&page_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, b/c its
	 * used to program the agp master not the cpu
	 */

	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	agp_bridge->gart_bus_addr = addr;

	/* Calculate the agp offset */
	/* Each directory entry: bus address of a GATT page, bit 0 = valid. */
	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
		writel(virt_to_gart(amd_irongate_private.gatt_pages[i]->real) | 1,
			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
	}

	return 0;
}
183 183
184 static int amd_free_gatt_table(struct agp_bridge_data *bridge) 184 static int amd_free_gatt_table(struct agp_bridge_data *bridge)
185 { 185 {
186 struct amd_page_map page_dir; 186 struct amd_page_map page_dir;
187 187
188 page_dir.real = (unsigned long *)agp_bridge->gatt_table_real; 188 page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
189 page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table; 189 page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
190 190
191 amd_free_gatt_pages(); 191 amd_free_gatt_pages();
192 amd_free_page_map(&page_dir); 192 amd_free_page_map(&page_dir);
193 return 0; 193 return 0;
194 } 194 }
195 195
196 static int amd_irongate_fetch_size(void) 196 static int amd_irongate_fetch_size(void)
197 { 197 {
198 int i; 198 int i;
199 u32 temp; 199 u32 temp;
200 struct aper_size_info_lvl2 *values; 200 struct aper_size_info_lvl2 *values;
201 201
202 pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp); 202 pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
203 temp = (temp & 0x0000000e); 203 temp = (temp & 0x0000000e);
204 values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); 204 values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
205 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { 205 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
206 if (temp == values[i].size_value) { 206 if (temp == values[i].size_value) {
207 agp_bridge->previous_size = 207 agp_bridge->previous_size =
208 agp_bridge->current_size = (void *) (values + i); 208 agp_bridge->current_size = (void *) (values + i);
209 209
210 agp_bridge->aperture_size_idx = i; 210 agp_bridge->aperture_size_idx = i;
211 return values[i].size; 211 return values[i].size;
212 } 212 }
213 } 213 }
214 214
215 return 0; 215 return 0;
216 } 216 }
217 217
/*
 * Program the Irongate for AGP operation: map its MMIO register window,
 * point it at the GATT, enable GART translation, program the aperture
 * size and flush the TLB.  Each MMIO write is followed by a read of the
 * same register to force PCI posting.
 *
 * Returns 0 on success, -ENOMEM if the register window cannot be mapped.
 */
static int amd_irongate_configure(void)
{
	struct aper_size_info_lvl2 *current_size;
	u32 temp;
	u16 enable_reg;

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge->dev, AMD_MMBASE, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
	if (!amd_irongate_private.registers)
		return -ENOMEM;

	/* Write out the address of the gatt table */
	writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE);
	readl(amd_irongate_private.registers+AMD_ATTBASE);	/* PCI Posting. */

	/* Write the Sync register */
	pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80);

	/* Set indexing mode */
	pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00);

	/* Write the enable register */
	enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
	enable_reg = (enable_reg | 0x0004);	/* GART enable bit */
	writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
	readw(amd_irongate_private.registers+AMD_GARTENABLE);	/* PCI Posting. */

	/* Write out the size register */
	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
	temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1);
	pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);

	/* Flush the tlb */
	writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
	readl(amd_irongate_private.registers+AMD_TLBFLUSH);	/* PCI Posting.*/
	return 0;
}
259 259
/*
 * Undo amd_irongate_configure(): disable GART translation, restore the
 * previous aperture size, and unmap the MMIO register window.
 */
static void amd_irongate_cleanup(void)
{
	struct aper_size_info_lvl2 *previous_size;
	u32 temp;
	u16 enable_reg;

	previous_size = A_SIZE_LVL2(agp_bridge->previous_size);

	/* Clear the GART enable bit. */
	enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
	enable_reg = (enable_reg & ~(0x0004));
	writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
	readw(amd_irongate_private.registers+AMD_GARTENABLE);	/* PCI Posting. */

	/* Write back the previous size and disable gart translation */
	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
	temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
	pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
	iounmap((void __iomem *) amd_irongate_private.registers);
}
279 279
280 /* 280 /*
281 * This routine could be implemented by taking the addresses 281 * This routine could be implemented by taking the addresses
282 * written to the GATT, and flushing them individually. However 282 * written to the GATT, and flushing them individually. However
283 * currently it just flushes the whole table. Which is probably 283 * currently it just flushes the whole table. Which is probably
284 * more efficent, since agp_memory blocks can be a large number of 284 * more efficent, since agp_memory blocks can be a large number of
285 * entries. 285 * entries.
286 */ 286 */
287 287
/* Flush the whole GART TLB; the read forces PCI posting of the write. */
static void amd_irongate_tlbflush(struct agp_memory *temp)
{
	writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
	readl(amd_irongate_private.registers+AMD_TLBFLUSH);	/* PCI Posting. */
}
293 293
294 static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type) 294 static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
295 { 295 {
296 int i, j, num_entries; 296 int i, j, num_entries;
297 unsigned long __iomem *cur_gatt; 297 unsigned long __iomem *cur_gatt;
298 unsigned long addr; 298 unsigned long addr;
299 299
300 num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; 300 num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
301 301
302 if (type != 0 || mem->type != 0) 302 if (type != 0 || mem->type != 0)
303 return -EINVAL; 303 return -EINVAL;
304 304
305 if ((pg_start + mem->page_count) > num_entries) 305 if ((pg_start + mem->page_count) > num_entries)
306 return -EINVAL; 306 return -EINVAL;
307 307
308 j = pg_start; 308 j = pg_start;
309 while (j < (pg_start + mem->page_count)) { 309 while (j < (pg_start + mem->page_count)) {
310 addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; 310 addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
311 cur_gatt = GET_GATT(addr); 311 cur_gatt = GET_GATT(addr);
312 if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr)))) 312 if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
313 return -EBUSY; 313 return -EBUSY;
314 j++; 314 j++;
315 } 315 }
316 316
317 if (!mem->is_flushed) { 317 if (!mem->is_flushed) {
318 global_cache_flush(); 318 global_cache_flush();
319 mem->is_flushed = true; 319 mem->is_flushed = true;
320 } 320 }
321 321
322 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 322 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
323 addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; 323 addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
324 cur_gatt = GET_GATT(addr); 324 cur_gatt = GET_GATT(addr);
325 writel(agp_generic_mask_memory(agp_bridge, 325 writel(agp_generic_mask_memory(agp_bridge,
326 mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr)); 326 mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
327 readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */ 327 readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
328 } 328 }
329 amd_irongate_tlbflush(mem); 329 amd_irongate_tlbflush(mem);
330 return 0; 330 return 0;
331 } 331 }
332 332
333 static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type) 333 static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
334 { 334 {
335 int i; 335 int i;
336 unsigned long __iomem *cur_gatt; 336 unsigned long __iomem *cur_gatt;
337 unsigned long addr; 337 unsigned long addr;
338 338
339 if (type != 0 || mem->type != 0) 339 if (type != 0 || mem->type != 0)
340 return -EINVAL; 340 return -EINVAL;
341 341
342 for (i = pg_start; i < (mem->page_count + pg_start); i++) { 342 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
343 addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; 343 addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
344 cur_gatt = GET_GATT(addr); 344 cur_gatt = GET_GATT(addr);
345 writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); 345 writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
346 readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */ 346 readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
347 } 347 }
348 348
349 amd_irongate_tlbflush(mem); 349 amd_irongate_tlbflush(mem);
350 return 0; 350 return 0;
351 } 351 }
352 352
/* Aperture settings: {size in MB, number of entries, APSIZE size bits}. */
static const struct aper_size_info_lvl2 amd_irongate_sizes[7] =
{
	{2048, 524288, 0x0000000c},
	{1024, 262144, 0x0000000a},
	{512, 131072, 0x00000008},
	{256, 65536, 0x00000006},
	{128, 32768, 0x00000004},
	{64, 16384, 0x00000002},
	{32, 8192, 0x00000000}
};
363 363
/* GATT entry mask: bit 0 marks an entry valid (only type 0 supported). */
static const struct gatt_mask amd_irongate_masks[] =
{
	{.mask = 1, .type = 0}
};
368 368
/* AGP backend operations for the AMD Irongate family (751/760/761). */
static const struct agp_bridge_driver amd_irongate_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= amd_irongate_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= amd_irongate_configure,
	.fetch_size		= amd_irongate_fetch_size,
	.cleanup		= amd_irongate_cleanup,
	.tlb_flush		= amd_irongate_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= amd_irongate_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= amd_create_gatt_table,
	.free_gatt_table	= amd_free_gatt_table,
	.insert_memory		= amd_insert_memory,
	.remove_memory		= amd_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};
392 392
/*
 * Human-readable chipset names, indexed in the SAME order as
 * agp_amdk7_pci_table below (the probe routine relies on this).
 */
static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
{
	{
		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_7006,
		.chipset_name	= "Irongate",
	},
	{
		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_700E,
		.chipset_name	= "761",
	},
	{
		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_700C,
		.chipset_name	= "760MP",
	},
	{ },				/* dummy final entry, always present */
};
409 409
410 static int __devinit agp_amdk7_probe(struct pci_dev *pdev, 410 static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
411 const struct pci_device_id *ent) 411 const struct pci_device_id *ent)
412 { 412 {
413 struct agp_bridge_data *bridge; 413 struct agp_bridge_data *bridge;
414 u8 cap_ptr; 414 u8 cap_ptr;
415 int j; 415 int j;
416 416
417 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); 417 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
418 if (!cap_ptr) 418 if (!cap_ptr)
419 return -ENODEV; 419 return -ENODEV;
420 420
421 j = ent - agp_amdk7_pci_table; 421 j = ent - agp_amdk7_pci_table;
422 printk(KERN_INFO PFX "Detected AMD %s chipset\n", 422 dev_info(&pdev->dev, "AMD %s chipset\n",
423 amd_agp_device_ids[j].chipset_name); 423 amd_agp_device_ids[j].chipset_name);
424 424
425 bridge = agp_alloc_bridge(); 425 bridge = agp_alloc_bridge();
426 if (!bridge) 426 if (!bridge)
427 return -ENOMEM; 427 return -ENOMEM;
428 428
429 bridge->driver = &amd_irongate_driver; 429 bridge->driver = &amd_irongate_driver;
430 bridge->dev_private_data = &amd_irongate_private, 430 bridge->dev_private_data = &amd_irongate_private,
431 bridge->dev = pdev; 431 bridge->dev = pdev;
432 bridge->capndx = cap_ptr; 432 bridge->capndx = cap_ptr;
433 433
434 /* 751 Errata (22564_B-1.PDF) 434 /* 751 Errata (22564_B-1.PDF)
435 erratum 20: strobe glitch with Nvidia NV10 GeForce cards. 435 erratum 20: strobe glitch with Nvidia NV10 GeForce cards.
436 system controller may experience noise due to strong drive strengths 436 system controller may experience noise due to strong drive strengths
437 */ 437 */
438 if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) { 438 if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
439 struct pci_dev *gfxcard=NULL; 439 struct pci_dev *gfxcard=NULL;
440 440
441 cap_ptr = 0; 441 cap_ptr = 0;
442 while (!cap_ptr) { 442 while (!cap_ptr) {
443 gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard); 443 gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
444 if (!gfxcard) { 444 if (!gfxcard) {
445 printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n"); 445 dev_info(&pdev->dev, "no AGP VGA controller\n");
446 return -ENODEV; 446 return -ENODEV;
447 } 447 }
448 cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP); 448 cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
449 } 449 }
450 450
451 /* With so many variants of NVidia cards, it's simpler just 451 /* With so many variants of NVidia cards, it's simpler just
452 to blacklist them all, and then whitelist them as needed 452 to blacklist them all, and then whitelist them as needed
453 (if necessary at all). */ 453 (if necessary at all). */
454 if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) { 454 if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
455 agp_bridge->flags |= AGP_ERRATA_1X; 455 agp_bridge->flags |= AGP_ERRATA_1X;
456 printk (KERN_INFO PFX "AMD 751 chipset with NVidia GeForce detected. Forcing to 1X due to errata.\n"); 456 dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n");
457 } 457 }
458 pci_dev_put(gfxcard); 458 pci_dev_put(gfxcard);
459 } 459 }
460 460
461 /* 761 Errata (23613_F.pdf) 461 /* 761 Errata (23613_F.pdf)
462 * Revisions B0/B1 were a disaster. 462 * Revisions B0/B1 were a disaster.
463 * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X 463 * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X
464 * erratum 45: Timing problem prevents fast writes -- Disable fast write. 464 * erratum 45: Timing problem prevents fast writes -- Disable fast write.
465 * erratum 46: Setup violation on AGP SBA pins - Disable side band addressing. 465 * erratum 46: Setup violation on AGP SBA pins - Disable side band addressing.
466 * With this lot disabled, we should prevent lockups. */ 466 * With this lot disabled, we should prevent lockups. */
467 if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) { 467 if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
468 if (pdev->revision == 0x10 || pdev->revision == 0x11) { 468 if (pdev->revision == 0x10 || pdev->revision == 0x11) {
469 agp_bridge->flags = AGP_ERRATA_FASTWRITES; 469 agp_bridge->flags = AGP_ERRATA_FASTWRITES;
470 agp_bridge->flags |= AGP_ERRATA_SBA; 470 agp_bridge->flags |= AGP_ERRATA_SBA;
471 agp_bridge->flags |= AGP_ERRATA_1X; 471 agp_bridge->flags |= AGP_ERRATA_1X;
472 printk (KERN_INFO PFX "AMD 761 chipset with errata detected - disabling AGP fast writes & SBA and forcing to 1X.\n"); 472 dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n");
473 } 473 }
474 } 474 }
475 475
476 /* Fill in the mode register */ 476 /* Fill in the mode register */
477 pci_read_config_dword(pdev, 477 pci_read_config_dword(pdev,
478 bridge->capndx+PCI_AGP_STATUS, 478 bridge->capndx+PCI_AGP_STATUS,
479 &bridge->mode); 479 &bridge->mode);
480 480
481 pci_set_drvdata(pdev, bridge); 481 pci_set_drvdata(pdev, bridge);
482 return agp_add_bridge(bridge); 482 return agp_add_bridge(bridge);
483 } 483 }
484 484
/* Unregister the bridge saved in drvdata and drop our reference. */
static void __devexit agp_amdk7_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}
492 492
/* must be the same order as name table above */
static struct pci_device_id agp_amdk7_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_FE_GATE_7006,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_FE_GATE_700E,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_FE_GATE_700C,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }			/* terminating entry */
};
521 521
MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table);

/* PCI driver glue: bind probe/remove to the device table above. */
static struct pci_driver agp_amdk7_pci_driver = {
	.name		= "agpgart-amdk7",
	.id_table	= agp_amdk7_pci_table,
	.probe		= agp_amdk7_probe,
	.remove		= agp_amdk7_remove,
};
530 530
531 static int __init agp_amdk7_init(void) 531 static int __init agp_amdk7_init(void)
532 { 532 {
533 if (agp_off) 533 if (agp_off)
534 return -EINVAL; 534 return -EINVAL;
535 return pci_register_driver(&agp_amdk7_pci_driver); 535 return pci_register_driver(&agp_amdk7_pci_driver);
536 } 536 }
537 537
/* Module exit: unregister the PCI driver (unbinds all bridges). */
static void __exit agp_amdk7_cleanup(void)
{
	pci_unregister_driver(&agp_amdk7_pci_driver);
}
542 542
543 module_init(agp_amdk7_init); 543 module_init(agp_amdk7_init);
544 module_exit(agp_amdk7_cleanup); 544 module_exit(agp_amdk7_cleanup);
545 545
546 MODULE_LICENSE("GPL and additional rights"); 546 MODULE_LICENSE("GPL and additional rights");
547 547
drivers/char/agp/amd64-agp.c
1 /* 1 /*
2 * Copyright 2001-2003 SuSE Labs. 2 * Copyright 2001-2003 SuSE Labs.
3 * Distributed under the GNU public license, v2. 3 * Distributed under the GNU public license, v2.
4 * 4 *
5 * This is a GART driver for the AMD Opteron/Athlon64 on-CPU northbridge. 5 * This is a GART driver for the AMD Opteron/Athlon64 on-CPU northbridge.
6 * It also includes support for the AMD 8151 AGP bridge, 6 * It also includes support for the AMD 8151 AGP bridge,
7 * although it doesn't actually do much, as all the real 7 * although it doesn't actually do much, as all the real
8 * work is done in the northbridge(s). 8 * work is done in the northbridge(s).
9 */ 9 */
10 10
11 #include <linux/module.h> 11 #include <linux/module.h>
12 #include <linux/pci.h> 12 #include <linux/pci.h>
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/agp_backend.h> 14 #include <linux/agp_backend.h>
15 #include <linux/mmzone.h> 15 #include <linux/mmzone.h>
16 #include <asm/page.h> /* PAGE_SIZE */ 16 #include <asm/page.h> /* PAGE_SIZE */
17 #include <asm/e820.h> 17 #include <asm/e820.h>
18 #include <asm/k8.h> 18 #include <asm/k8.h>
19 #include <asm/gart.h> 19 #include <asm/gart.h>
20 #include "agp.h" 20 #include "agp.h"
21 21
22 /* NVIDIA K8 registers */ 22 /* NVIDIA K8 registers */
23 #define NVIDIA_X86_64_0_APBASE 0x10 23 #define NVIDIA_X86_64_0_APBASE 0x10
24 #define NVIDIA_X86_64_1_APBASE1 0x50 24 #define NVIDIA_X86_64_1_APBASE1 0x50
25 #define NVIDIA_X86_64_1_APLIMIT1 0x54 25 #define NVIDIA_X86_64_1_APLIMIT1 0x54
26 #define NVIDIA_X86_64_1_APSIZE 0xa8 26 #define NVIDIA_X86_64_1_APSIZE 0xa8
27 #define NVIDIA_X86_64_1_APBASE2 0xd8 27 #define NVIDIA_X86_64_1_APBASE2 0xd8
28 #define NVIDIA_X86_64_1_APLIMIT2 0xdc 28 #define NVIDIA_X86_64_1_APLIMIT2 0xdc
29 29
30 /* ULi K8 registers */ 30 /* ULi K8 registers */
31 #define ULI_X86_64_BASE_ADDR 0x10 31 #define ULI_X86_64_BASE_ADDR 0x10
32 #define ULI_X86_64_HTT_FEA_REG 0x50 32 #define ULI_X86_64_HTT_FEA_REG 0x50
33 #define ULI_X86_64_ENU_SCR_REG 0x54 33 #define ULI_X86_64_ENU_SCR_REG 0x54
34 34
35 static struct resource *aperture_resource; 35 static struct resource *aperture_resource;
36 static int __initdata agp_try_unsupported = 1; 36 static int __initdata agp_try_unsupported = 1;
37 static int agp_bridges_found; 37 static int agp_bridges_found;
38 38
/* Flush the GART TLB on every K8 northbridge in the system. */
static void amd64_tlbflush(struct agp_memory *temp)
{
	k8_flush_garts();
}
43 43
/*
 * Map @mem's pages into the on-CPU GART starting at entry @pg_start.
 * Only mask type 0 is supported.  Returns 0 on success, -EINVAL for a
 * bad type/range, -EBUSY if any target entry is already in use.
 */
static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j, num_entries;
	long long tmp;
	int mask_type;
	struct agp_bridge_data *bridge = mem->bridge;
	u32 pte;

	num_entries = agp_num_entries();

	if (type != mem->type)
		return -EINVAL;
	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0)
		return -EINVAL;


	/* Make sure we can fit the range in the gatt table. */
	/* FIXME: could wrap */
	if (((unsigned long)pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	/* gatt table should be empty. */
	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		global_cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		tmp = agp_bridge->driver->mask_memory(agp_bridge,
						      mem->memory[i], mask_type);

		/* Masked address must be 4K-aligned and below 2^40. */
		BUG_ON(tmp & 0xffffff0000000ffcULL);
		/* Address bits 39:32 are folded into PTE bits 11:4. */
		pte = (tmp & 0x000000ff00000000ULL) >> 28;
		pte |=(tmp & 0x00000000fffff000ULL);
		pte |= GPTE_VALID | GPTE_COHERENT;

		writel(pte, agp_bridge->gatt_table+j);
		readl(agp_bridge->gatt_table+j);	/* PCI Posting. */
	}
	amd64_tlbflush(mem);
	return 0;
}
95 95
96 /* 96 /*
97 * This hack alters the order element according 97 * This hack alters the order element according
98 * to the size of a long. It sucks. I totally disown this, even 98 * to the size of a long. It sucks. I totally disown this, even
99 * though it does appear to work for the most part. 99 * though it does appear to work for the most part.
100 */ 100 */
/* Aperture settings: {size MB, entries, page order (long-size biased), ctl bits}. */
static struct aper_size_info_32 amd64_aperture_sizes[7] =
{
	{32,   8192,   3+(sizeof(long)/8), 0 },
	{64,   16384,  4+(sizeof(long)/8), 1<<1 },
	{128,  32768,  5+(sizeof(long)/8), 1<<2 },
	{256,  65536,  6+(sizeof(long)/8), 1<<1 | 1<<2 },
	{512,  131072, 7+(sizeof(long)/8), 1<<3 },
	{1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3},
	{2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3}
};
111 111
112 112
113 /* 113 /*
114 * Get the current Aperture size from the x86-64. 114 * Get the current Aperture size from the x86-64.
115 * Note, that there may be multiple x86-64's, but we just return 115 * Note, that there may be multiple x86-64's, but we just return
116 * the value from the first one we find. The set_size functions 116 * the value from the first one we find. The set_size functions
117 * keep the rest coherent anyway. Or at least should do. 117 * keep the rest coherent anyway. Or at least should do.
118 */ 118 */
119 static int amd64_fetch_size(void) 119 static int amd64_fetch_size(void)
120 { 120 {
121 struct pci_dev *dev; 121 struct pci_dev *dev;
122 int i; 122 int i;
123 u32 temp; 123 u32 temp;
124 struct aper_size_info_32 *values; 124 struct aper_size_info_32 *values;
125 125
126 dev = k8_northbridges[0]; 126 dev = k8_northbridges[0];
127 if (dev==NULL) 127 if (dev==NULL)
128 return 0; 128 return 0;
129 129
130 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp); 130 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp);
131 temp = (temp & 0xe); 131 temp = (temp & 0xe);
132 values = A_SIZE_32(amd64_aperture_sizes); 132 values = A_SIZE_32(amd64_aperture_sizes);
133 133
134 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { 134 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
135 if (temp == values[i].size_value) { 135 if (temp == values[i].size_value) {
136 agp_bridge->previous_size = 136 agp_bridge->previous_size =
137 agp_bridge->current_size = (void *) (values + i); 137 agp_bridge->current_size = (void *) (values + i);
138 138
139 agp_bridge->aperture_size_idx = i; 139 agp_bridge->aperture_size_idx = i;
140 return values[i].size; 140 return values[i].size;
141 } 141 }
142 } 142 }
143 return 0; 143 return 0;
144 } 144 }
145 145
146 /* 146 /*
147 * In a multiprocessor x86-64 system, this function gets 147 * In a multiprocessor x86-64 system, this function gets
148 * called once for each CPU. 148 * called once for each CPU.
149 */ 149 */
150 static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table) 150 static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
151 { 151 {
152 u64 aperturebase; 152 u64 aperturebase;
153 u32 tmp; 153 u32 tmp;
154 u64 aper_base; 154 u64 aper_base;
155 155
156 /* Address to map to */ 156 /* Address to map to */
157 pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp); 157 pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
158 aperturebase = tmp << 25; 158 aperturebase = tmp << 25;
159 aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK); 159 aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
160 160
161 enable_gart_translation(hammer, gatt_table); 161 enable_gart_translation(hammer, gatt_table);
162 162
163 return aper_base; 163 return aper_base;
164 } 164 }
165 165
166 166
/*
 * Aperture sizes supported by the AMD 8151, largest first.  The fourth
 * field is the GARTAPERTURECTL size encoding; the trailing comment shows
 * the individual encoding bits.
 */
static const struct aper_size_info_32 amd_8151_sizes[7] =
{
	{2048, 524288, 9, 0x00000000 },	/* 0 0 0 0 0 0 */
	{1024, 262144, 8, 0x00000400 },	/* 1 0 0 0 0 0 */
	{512,  131072, 7, 0x00000600 },	/* 1 1 0 0 0 0 */
	{256,  65536,  6, 0x00000700 },	/* 1 1 1 0 0 0 */
	{128,  32768,  5, 0x00000720 },	/* 1 1 1 1 0 0 */
	{64,   16384,  4, 0x00000730 },	/* 1 1 1 1 1 0 */
	{32,   8192,   3, 0x00000738 }	/* 1 1 1 1 1 1 */
};
177 177
178 static int amd_8151_configure(void) 178 static int amd_8151_configure(void)
179 { 179 {
180 unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real); 180 unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real);
181 int i; 181 int i;
182 182
183 /* Configure AGP regs in each x86-64 host bridge. */ 183 /* Configure AGP regs in each x86-64 host bridge. */
184 for (i = 0; i < num_k8_northbridges; i++) { 184 for (i = 0; i < num_k8_northbridges; i++) {
185 agp_bridge->gart_bus_addr = 185 agp_bridge->gart_bus_addr =
186 amd64_configure(k8_northbridges[i], gatt_bus); 186 amd64_configure(k8_northbridges[i], gatt_bus);
187 } 187 }
188 k8_flush_garts(); 188 k8_flush_garts();
189 return 0; 189 return 0;
190 } 190 }
191 191
192 192
193 static void amd64_cleanup(void) 193 static void amd64_cleanup(void)
194 { 194 {
195 u32 tmp; 195 u32 tmp;
196 int i; 196 int i;
197 for (i = 0; i < num_k8_northbridges; i++) { 197 for (i = 0; i < num_k8_northbridges; i++) {
198 struct pci_dev *dev = k8_northbridges[i]; 198 struct pci_dev *dev = k8_northbridges[i];
199 /* disable gart translation */ 199 /* disable gart translation */
200 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp); 200 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
201 tmp &= ~AMD64_GARTEN; 201 tmp &= ~AMD64_GARTEN;
202 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp); 202 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
203 } 203 }
204 } 204 }
205 205
206 206
/*
 * Bridge driver ops for the AMD 8151 (and compatible) AGP bridges.
 * Only the aperture/GATT handling is chipset-specific; memory
 * management falls through to the agp_generic_* helpers.
 */
static const struct agp_bridge_driver amd_8151_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= amd_8151_sizes,
	.size_type		= U32_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= amd_8151_configure,
	.fetch_size		= amd64_fetch_size,
	.cleanup		= amd64_cleanup,
	.tlb_flush		= amd64_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= NULL,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= amd64_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};
230 230
231 /* Some basic sanity checks for the aperture. */ 231 /* Some basic sanity checks for the aperture. */
/*
 * Some basic sanity checks for the aperture.  Returns 1 when the
 * aperture looks usable, 0 otherwise.  On first success this also
 * claims the aperture range in the resource tree (cached in the
 * file-scope aperture_resource — released elsewhere).
 */
static int __devinit agp_aperture_valid(u64 aper, u32 size)
{
	/* Reject apertures smaller than 32MB or otherwise malformed. */
	if (!aperture_valid(aper, size, 32*1024*1024))
		return 0;

	/* Request the Aperture. This catches cases when someone else
	   already put a mapping in there - happens with some very broken BIOS

	   Maybe better to use pci_assign_resource/pci_enable_device instead
	   trusting the bridges? */
	if (!aperture_resource &&
	    !(aperture_resource = request_mem_region(aper, size, "aperture"))) {
		printk(KERN_ERR PFX "Aperture conflicts with PCI mapping.\n");
		return 0;
	}
	return 1;
}
249 249
250 /* 250 /*
251 * W*s centric BIOS sometimes only set up the aperture in the AGP 251 * W*s centric BIOS sometimes only set up the aperture in the AGP
252 * bridge, not the northbridge. On AMD64 this is handled early 252 * bridge, not the northbridge. On AMD64 this is handled early
253 * in aperture.c, but when IOMMU is not enabled or we run 253 * in aperture.c, but when IOMMU is not enabled or we run
254 * on a 32bit kernel this needs to be redone. 254 * on a 32bit kernel this needs to be redone.
255 * Unfortunately it is impossible to fix the aperture here because it's too late 255 * Unfortunately it is impossible to fix the aperture here because it's too late
256 * to allocate that much memory. But at least error out cleanly instead of 256 * to allocate that much memory. But at least error out cleanly instead of
257 * crashing. 257 * crashing.
258 */ 258 */
259 static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, 259 static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
260 u16 cap) 260 u16 cap)
261 { 261 {
262 u32 aper_low, aper_hi; 262 u32 aper_low, aper_hi;
263 u64 aper, nb_aper; 263 u64 aper, nb_aper;
264 int order = 0; 264 int order = 0;
265 u32 nb_order, nb_base; 265 u32 nb_order, nb_base;
266 u16 apsize; 266 u16 apsize;
267 267
268 pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order); 268 pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
269 nb_order = (nb_order >> 1) & 7; 269 nb_order = (nb_order >> 1) & 7;
270 pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base); 270 pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
271 nb_aper = nb_base << 25; 271 nb_aper = nb_base << 25;
272 if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) { 272 if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) {
273 return 0; 273 return 0;
274 } 274 }
275 275
276 /* Northbridge seems to contain crap. Try the AGP bridge. */ 276 /* Northbridge seems to contain crap. Try the AGP bridge. */
277 277
278 pci_read_config_word(agp, cap+0x14, &apsize); 278 pci_read_config_word(agp, cap+0x14, &apsize);
279 if (apsize == 0xffff) 279 if (apsize == 0xffff)
280 return -1; 280 return -1;
281 281
282 apsize &= 0xfff; 282 apsize &= 0xfff;
283 /* Some BIOS use weird encodings not in the AGPv3 table. */ 283 /* Some BIOS use weird encodings not in the AGPv3 table. */
284 if (apsize & 0xff) 284 if (apsize & 0xff)
285 apsize |= 0xf00; 285 apsize |= 0xf00;
286 order = 7 - hweight16(apsize); 286 order = 7 - hweight16(apsize);
287 287
288 pci_read_config_dword(agp, 0x10, &aper_low); 288 pci_read_config_dword(agp, 0x10, &aper_low);
289 pci_read_config_dword(agp, 0x14, &aper_hi); 289 pci_read_config_dword(agp, 0x14, &aper_hi);
290 aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32); 290 aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32);
291 291
292 /* 292 /*
293 * On some sick chips APSIZE is 0. This means it wants 4G 293 * On some sick chips APSIZE is 0. This means it wants 4G
294 * so let double check that order, and lets trust the AMD NB settings 294 * so let double check that order, and lets trust the AMD NB settings
295 */ 295 */
296 if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) { 296 if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
297 printk(KERN_INFO "Aperture size %u MB is not right, using settings from NB\n", 297 dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n",
298 32 << order); 298 32 << order);
299 order = nb_order; 299 order = nb_order;
300 } 300 }
301 301
302 printk(KERN_INFO PFX "Aperture from AGP @ %Lx size %u MB\n", aper, 32 << order); 302 dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
303 aper, 32 << order);
303 if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order)) 304 if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
304 return -1; 305 return -1;
305 306
306 pci_write_config_dword(nb, AMD64_GARTAPERTURECTL, order << 1); 307 pci_write_config_dword(nb, AMD64_GARTAPERTURECTL, order << 1);
307 pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25); 308 pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);
308 309
309 return 0; 310 return 0;
310 } 311 }
311 312
312 static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr) 313 static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
313 { 314 {
314 int i; 315 int i;
315 316
316 if (cache_k8_northbridges() < 0) 317 if (cache_k8_northbridges() < 0)
317 return -ENODEV; 318 return -ENODEV;
318 319
319 i = 0; 320 i = 0;
320 for (i = 0; i < num_k8_northbridges; i++) { 321 for (i = 0; i < num_k8_northbridges; i++) {
321 struct pci_dev *dev = k8_northbridges[i]; 322 struct pci_dev *dev = k8_northbridges[i];
322 if (fix_northbridge(dev, pdev, cap_ptr) < 0) { 323 if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
323 printk(KERN_ERR PFX "No usable aperture found.\n"); 324 dev_err(&dev->dev, "no usable aperture found\n");
324 #ifdef __x86_64__ 325 #ifdef __x86_64__
325 /* should port this to i386 */ 326 /* should port this to i386 */
326 printk(KERN_ERR PFX "Consider rebooting with iommu=memaper=2 to get a good aperture.\n"); 327 dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n");
327 #endif 328 #endif
328 return -1; 329 return -1;
329 } 330 }
330 } 331 }
331 return 0; 332 return 0;
332 } 333 }
333 334
334 /* Handle AMD 8151 quirks */ 335 /* Handle AMD 8151 quirks */
335 static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge) 336 static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
336 { 337 {
337 char *revstring; 338 char *revstring;
338 339
339 switch (pdev->revision) { 340 switch (pdev->revision) {
340 case 0x01: revstring="A0"; break; 341 case 0x01: revstring="A0"; break;
341 case 0x02: revstring="A1"; break; 342 case 0x02: revstring="A1"; break;
342 case 0x11: revstring="B0"; break; 343 case 0x11: revstring="B0"; break;
343 case 0x12: revstring="B1"; break; 344 case 0x12: revstring="B1"; break;
344 case 0x13: revstring="B2"; break; 345 case 0x13: revstring="B2"; break;
345 case 0x14: revstring="B3"; break; 346 case 0x14: revstring="B3"; break;
346 default: revstring="??"; break; 347 default: revstring="??"; break;
347 } 348 }
348 349
349 printk (KERN_INFO PFX "Detected AMD 8151 AGP Bridge rev %s\n", revstring); 350 dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring);
350 351
351 /* 352 /*
352 * Work around errata. 353 * Work around errata.
353 * Chips before B2 stepping incorrectly reporting v3.5 354 * Chips before B2 stepping incorrectly reporting v3.5
354 */ 355 */
355 if (pdev->revision < 0x13) { 356 if (pdev->revision < 0x13) {
356 printk (KERN_INFO PFX "Correcting AGP revision (reports 3.5, is really 3.0)\n"); 357 dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n");
357 bridge->major_version = 3; 358 bridge->major_version = 3;
358 bridge->minor_version = 0; 359 bridge->minor_version = 0;
359 } 360 }
360 } 361 }
361 362
362 363
/* Aperture sizes (MB) and their ULi register encodings, largest first. */
static const struct aper_size_info_32 uli_sizes[7] =
{
	{256, 65536, 6, 10},
	{128, 32768, 5, 9},
	{64,  16384, 4, 8},
	{32,  8192,  3, 7},
	{16,  4096,  2, 6},
	{8,   2048,  1, 4},
	{4,   1024,  0, 3}
};
373 static int __devinit uli_agp_init(struct pci_dev *pdev) 374 static int __devinit uli_agp_init(struct pci_dev *pdev)
374 { 375 {
375 u32 httfea,baseaddr,enuscr; 376 u32 httfea,baseaddr,enuscr;
376 struct pci_dev *dev1; 377 struct pci_dev *dev1;
377 int i; 378 int i;
378 unsigned size = amd64_fetch_size(); 379 unsigned size = amd64_fetch_size();
379 printk(KERN_INFO "Setting up ULi AGP.\n"); 380
381 dev_info(&pdev->dev, "setting up ULi AGP\n");
380 dev1 = pci_get_slot (pdev->bus,PCI_DEVFN(0,0)); 382 dev1 = pci_get_slot (pdev->bus,PCI_DEVFN(0,0));
381 if (dev1 == NULL) { 383 if (dev1 == NULL) {
382 printk(KERN_INFO PFX "Detected a ULi chipset, " 384 dev_info(&pdev->dev, "can't find ULi secondary device\n");
383 "but could not fine the secondary device.\n");
384 return -ENODEV; 385 return -ENODEV;
385 } 386 }
386 387
387 for (i = 0; i < ARRAY_SIZE(uli_sizes); i++) 388 for (i = 0; i < ARRAY_SIZE(uli_sizes); i++)
388 if (uli_sizes[i].size == size) 389 if (uli_sizes[i].size == size)
389 break; 390 break;
390 391
391 if (i == ARRAY_SIZE(uli_sizes)) { 392 if (i == ARRAY_SIZE(uli_sizes)) {
392 printk(KERN_INFO PFX "No ULi size found for %d\n", size); 393 dev_info(&pdev->dev, "no ULi size found for %d\n", size);
393 return -ENODEV; 394 return -ENODEV;
394 } 395 }
395 396
396 /* shadow x86-64 registers into ULi registers */ 397 /* shadow x86-64 registers into ULi registers */
397 pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea); 398 pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
398 399
399 /* if x86-64 aperture base is beyond 4G, exit here */ 400 /* if x86-64 aperture base is beyond 4G, exit here */
400 if ((httfea & 0x7fff) >> (32 - 25)) 401 if ((httfea & 0x7fff) >> (32 - 25))
401 return -ENODEV; 402 return -ENODEV;
402 403
403 httfea = (httfea& 0x7fff) << 25; 404 httfea = (httfea& 0x7fff) << 25;
404 405
405 pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr); 406 pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr);
406 baseaddr&= ~PCI_BASE_ADDRESS_MEM_MASK; 407 baseaddr&= ~PCI_BASE_ADDRESS_MEM_MASK;
407 baseaddr|= httfea; 408 baseaddr|= httfea;
408 pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr); 409 pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr);
409 410
410 enuscr= httfea+ (size * 1024 * 1024) - 1; 411 enuscr= httfea+ (size * 1024 * 1024) - 1;
411 pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea); 412 pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
412 pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr); 413 pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
413 414
414 pci_dev_put(dev1); 415 pci_dev_put(dev1);
415 return 0; 416 return 0;
416 } 417 }
417 418
418 419
/* Aperture sizes (MB) and their NForce3 APSIZE encodings, largest first. */
static const struct aper_size_info_32 nforce3_sizes[5] =
{
	{512, 131072, 7, 0x00000000 },
	{256, 65536,  6, 0x00000008 },
	{128, 32768,  5, 0x0000000C },
	{64,  16384,  4, 0x0000000E },
	{32,  8192,   3, 0x0000000F }
};
427 428
428 /* Handle shadow device of the Nvidia NForce3 */ 429 /* Handle shadow device of the Nvidia NForce3 */
429 /* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */ 430 /* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */
430 static int nforce3_agp_init(struct pci_dev *pdev) 431 static int nforce3_agp_init(struct pci_dev *pdev)
431 { 432 {
432 u32 tmp, apbase, apbar, aplimit; 433 u32 tmp, apbase, apbar, aplimit;
433 struct pci_dev *dev1; 434 struct pci_dev *dev1;
434 int i; 435 int i;
435 unsigned size = amd64_fetch_size(); 436 unsigned size = amd64_fetch_size();
436 437
437 printk(KERN_INFO PFX "Setting up Nforce3 AGP.\n"); 438 dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
438 439
439 dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0)); 440 dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0));
440 if (dev1 == NULL) { 441 if (dev1 == NULL) {
441 printk(KERN_INFO PFX "agpgart: Detected an NVIDIA " 442 dev_info(&pdev->dev, "can't find Nforce3 secondary device\n");
442 "nForce3 chipset, but could not find "
443 "the secondary device.\n");
444 return -ENODEV; 443 return -ENODEV;
445 } 444 }
446 445
447 for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++) 446 for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++)
448 if (nforce3_sizes[i].size == size) 447 if (nforce3_sizes[i].size == size)
449 break; 448 break;
450 449
451 if (i == ARRAY_SIZE(nforce3_sizes)) { 450 if (i == ARRAY_SIZE(nforce3_sizes)) {
452 printk(KERN_INFO PFX "No NForce3 size found for %d\n", size); 451 dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
453 return -ENODEV; 452 return -ENODEV;
454 } 453 }
455 454
456 pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp); 455 pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
457 tmp &= ~(0xf); 456 tmp &= ~(0xf);
458 tmp |= nforce3_sizes[i].size_value; 457 tmp |= nforce3_sizes[i].size_value;
459 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp); 458 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
460 459
461 /* shadow x86-64 registers into NVIDIA registers */ 460 /* shadow x86-64 registers into NVIDIA registers */
462 pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &apbase); 461 pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &apbase);
463 462
464 /* if x86-64 aperture base is beyond 4G, exit here */ 463 /* if x86-64 aperture base is beyond 4G, exit here */
465 if ( (apbase & 0x7fff) >> (32 - 25) ) { 464 if ( (apbase & 0x7fff) >> (32 - 25) ) {
466 printk(KERN_INFO PFX "aperture base > 4G\n"); 465 dev_info(&pdev->dev, "aperture base > 4G\n");
467 return -ENODEV; 466 return -ENODEV;
468 } 467 }
469 468
470 apbase = (apbase & 0x7fff) << 25; 469 apbase = (apbase & 0x7fff) << 25;
471 470
472 pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar); 471 pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar);
473 apbar &= ~PCI_BASE_ADDRESS_MEM_MASK; 472 apbar &= ~PCI_BASE_ADDRESS_MEM_MASK;
474 apbar |= apbase; 473 apbar |= apbase;
475 pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar); 474 pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar);
476 475
477 aplimit = apbase + (size * 1024 * 1024) - 1; 476 aplimit = apbase + (size * 1024 * 1024) - 1;
478 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase); 477 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase);
479 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit); 478 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit);
480 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase); 479 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
481 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit); 480 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
482 481
483 pci_dev_put(dev1); 482 pci_dev_put(dev1);
484 483
485 return 0; 484 return 0;
486 } 485 }
487 486
/*
 * PCI probe for AMD64 AGP bridges: allocate a bridge, apply chipset
 * quirks, verify/repair the northbridge apertures, then run the
 * vendor-specific shadow-register setup before registering with agpgart.
 * Returns 0 on success or a negative errno.
 */
static int __devinit agp_amd64_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;
	int err;

	/* Only devices exposing an AGP capability are handled. */
	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	/* Could check for AGPv3 here */

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
	    pdev->device == PCI_DEVICE_ID_AMD_8151_0) {
		/* AMD 8151 needs revision-specific workarounds. */
		amd8151_init(pdev, bridge);
	} else {
		dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n",
			 pdev->vendor, pdev->device);
	}

	bridge->driver = &amd_8151_driver;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	/* Fill in the mode register */
	pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);

	/* Aperture must be sane on every northbridge before proceeding. */
	if (cache_nbs(pdev, cap_ptr) == -1) {
		agp_put_bridge(bridge);
		return -ENODEV;
	}

	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
		int ret = nforce3_agp_init(pdev);
		if (ret) {
			agp_put_bridge(bridge);
			return ret;
		}
	}

	if (pdev->vendor == PCI_VENDOR_ID_AL) {
		int ret = uli_agp_init(pdev);
		if (ret) {
			agp_put_bridge(bridge);
			return ret;
		}
	}

	pci_set_drvdata(pdev, bridge);
	err = agp_add_bridge(bridge);
	if (err < 0)
		return err;

	/* Used by agp_amd64_init() to decide whether to try unsupported HW. */
	agp_bridges_found++;
	return 0;
}
548 548
/*
 * PCI remove: release the GATT memory region claimed at init time,
 * then unregister and free the bridge.
 */
static void __devexit agp_amd64_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	release_mem_region(virt_to_gart(bridge->gatt_table_real),
			   amd64_aperture_sizes[bridge->aperture_size_idx].size);
	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}
558 558
559 #ifdef CONFIG_PM 559 #ifdef CONFIG_PM
560 560
/* Save PCI config space and enter the state chosen for this device. */
static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
568 568
/*
 * Restore power/config state and reprogram the GART.  The NForce3
 * shadow registers do not survive suspend and must be redone first.
 */
static int agp_amd64_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA)
		nforce3_agp_init(pdev);

	return amd_8151_configure();
}
579 579
580 #endif /* CONFIG_PM */ 580 #endif /* CONFIG_PM */
581 581
/*
 * Host-bridge IDs this driver binds to.  Every entry matches on the
 * PCI_CLASS_BRIDGE_HOST class plus an explicit vendor/device pair;
 * the chipset name is noted above each entry.
 */
static struct pci_device_id agp_amd64_pci_table[] = {
	/* AMD 8151 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_8151_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* ULi M1689 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AL,
	.device		= PCI_DEVICE_ID_AL_M1689,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8T800Pro */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_K8T800PRO_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8T800 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_8385_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8M800 / K8N800 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_8380_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8M890 / K8N890 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_VT3336,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8T890 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_3238_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8T800/K8M800/K8N800 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_838X_1,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* NForce3 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_NVIDIA,
	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE3,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_NVIDIA,
	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE3S,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* SIS 755 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_SI,
	.device		= PCI_DEVICE_ID_SI_755,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* SIS 760 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_SI,
	.device		= PCI_DEVICE_ID_SI_760,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* ALI/ULI M1695 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AL,
	.device		= 0x1695,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},

	{ }	/* terminator */
};
701 701
MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);

/* PCI driver glue for the AMD64 GART; PM hooks only when CONFIG_PM is set. */
static struct pci_driver agp_amd64_pci_driver = {
	.name		= "agpgart-amd64",
	.id_table	= agp_amd64_pci_table,
	.probe		= agp_amd64_probe,
	.remove		= agp_amd64_remove,
#ifdef CONFIG_PM
	.suspend	= agp_amd64_suspend,
	.resume		= agp_amd64_resume,
#endif
};
714 714
715 715
716 /* Not static due to IOMMU code calling it early. */ 716 /* Not static due to IOMMU code calling it early. */
717 int __init agp_amd64_init(void) 717 int __init agp_amd64_init(void)
718 { 718 {
719 int err = 0; 719 int err = 0;
720 720
721 if (agp_off) 721 if (agp_off)
722 return -EINVAL; 722 return -EINVAL;
723 err = pci_register_driver(&agp_amd64_pci_driver); 723 err = pci_register_driver(&agp_amd64_pci_driver);
724 if (err < 0) 724 if (err < 0)
725 return err; 725 return err;
726 726
727 if (agp_bridges_found == 0) { 727 if (agp_bridges_found == 0) {
728 struct pci_dev *dev; 728 struct pci_dev *dev;
729 if (!agp_try_unsupported && !agp_try_unsupported_boot) { 729 if (!agp_try_unsupported && !agp_try_unsupported_boot) {
730 printk(KERN_INFO PFX "No supported AGP bridge found.\n"); 730 printk(KERN_INFO PFX "No supported AGP bridge found.\n");
731 #ifdef MODULE 731 #ifdef MODULE
732 printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n"); 732 printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n");
733 #else 733 #else
734 printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n"); 734 printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
735 #endif 735 #endif
736 return -ENODEV; 736 return -ENODEV;
737 } 737 }
738 738
739 /* First check that we have at least one AMD64 NB */ 739 /* First check that we have at least one AMD64 NB */
740 if (!pci_dev_present(k8_nb_ids)) 740 if (!pci_dev_present(k8_nb_ids))
741 return -ENODEV; 741 return -ENODEV;
742 742
743 /* Look for any AGP bridge */ 743 /* Look for any AGP bridge */
744 dev = NULL; 744 dev = NULL;
745 err = -ENODEV; 745 err = -ENODEV;
746 for_each_pci_dev(dev) { 746 for_each_pci_dev(dev) {
747 if (!pci_find_capability(dev, PCI_CAP_ID_AGP)) 747 if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
748 continue; 748 continue;
749 /* Only one bridge supported right now */ 749 /* Only one bridge supported right now */
750 if (agp_amd64_probe(dev, NULL) == 0) { 750 if (agp_amd64_probe(dev, NULL) == 0) {
751 err = 0; 751 err = 0;
752 break; 752 break;
753 } 753 }
754 } 754 }
755 } 755 }
756 return err; 756 return err;
757 } 757 }
758 758
/* Module teardown: release the claimed aperture, then unregister the driver. */
static void __exit agp_amd64_cleanup(void)
{
	/* aperture_resource is only set on some probe paths — guard it */
	if (aperture_resource)
		release_resource(aperture_resource);
	pci_unregister_driver(&agp_amd64_pci_driver);
}
765 765
/* On AMD64 the PCI driver needs to initialize this driver early
   for the IOMMU, so it has to be called via a backdoor. */
#ifndef CONFIG_GART_IOMMU
module_init(agp_amd64_init);
module_exit(agp_amd64_cleanup);
#endif

MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>, Andi Kleen");
drivers/char/agp/ati-agp.c
/*
 * ATi AGPGART routines.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/agp_backend.h>
#include <asm/agp.h>
#include "agp.h"

/* PCI config-space offsets on the bridge */
#define ATI_GART_MMBASE_ADDR	0x14	/* BAR for the GART MMIO register window */
#define ATI_RS100_APSIZE	0xac	/* aperture size reg (RS100-class chips) */
#define ATI_RS100_IG_AGPMODE	0xb0
#define ATI_RS300_APSIZE	0xf8	/* aperture size reg (RS300 and later) */
#define ATI_RS300_IG_AGPMODE	0xfc
/* Offsets inside the memory-mapped GART register window */
#define ATI_GART_FEATURE_ID		0x00
#define ATI_GART_BASE			0x04
#define ATI_GART_CACHE_SZBASE		0x08
#define ATI_GART_CACHE_CNTRL		0x0c
#define ATI_GART_CACHE_ENTRY_CNTRL	0x10


/* Supported apertures: {size, num_entries, hardware size_value} */
static const struct aper_size_info_lvl2 ati_generic_sizes[7] =
{
	{2048, 524288, 0x0000000c},
	{1024, 262144, 0x0000000a},
	{512, 131072, 0x00000008},
	{256, 65536, 0x00000006},
	{128, 32768, 0x00000004},
	{64, 16384, 0x00000002},
	{32, 8192, 0x00000000}
};

static struct gatt_mask ati_generic_masks[] =
{
	{ .mask = 1, .type = 0}
};


/* One GATT leaf page: kernel virtual address plus its remapped alias */
struct ati_page_map {
	unsigned long *real;
	unsigned long __iomem *remapped;
};

/* Driver-wide state: MMIO window and the array of GATT leaf pages */
static struct _ati_generic_private {
	volatile u8 __iomem *registers;
	struct ati_page_map **gatt_pages;
	int num_tables;
} ati_generic_private;
55 static int ati_create_page_map(struct ati_page_map *page_map) 55 static int ati_create_page_map(struct ati_page_map *page_map)
56 { 56 {
57 int i, err = 0; 57 int i, err = 0;
58 58
59 page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); 59 page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
60 if (page_map->real == NULL) 60 if (page_map->real == NULL)
61 return -ENOMEM; 61 return -ENOMEM;
62 62
63 set_memory_uc((unsigned long)page_map->real, 1); 63 set_memory_uc((unsigned long)page_map->real, 1);
64 err = map_page_into_agp(virt_to_page(page_map->real)); 64 err = map_page_into_agp(virt_to_page(page_map->real));
65 page_map->remapped = page_map->real; 65 page_map->remapped = page_map->real;
66 66
67 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { 67 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
68 writel(agp_bridge->scratch_page, page_map->remapped+i); 68 writel(agp_bridge->scratch_page, page_map->remapped+i);
69 readl(page_map->remapped+i); /* PCI Posting. */ 69 readl(page_map->remapped+i); /* PCI Posting. */
70 } 70 }
71 71
72 return 0; 72 return 0;
73 } 73 }
74 74
75 75
/* Undo ati_create_page_map(): detach from AGP, restore caching, free. */
static void ati_free_page_map(struct ati_page_map *page_map)
{
	/* NOTE(review): original order kept (unmap, write-back, free) —
	 * presumably the attributes must be restored before freeing. */
	unmap_page_from_agp(virt_to_page(page_map->real));
	set_memory_wb((unsigned long)page_map->real, 1);
	free_page((unsigned long) page_map->real);
}
82 82
83 83
84 static void ati_free_gatt_pages(void) 84 static void ati_free_gatt_pages(void)
85 { 85 {
86 int i; 86 int i;
87 struct ati_page_map **tables; 87 struct ati_page_map **tables;
88 struct ati_page_map *entry; 88 struct ati_page_map *entry;
89 89
90 tables = ati_generic_private.gatt_pages; 90 tables = ati_generic_private.gatt_pages;
91 for (i = 0; i < ati_generic_private.num_tables; i++) { 91 for (i = 0; i < ati_generic_private.num_tables; i++) {
92 entry = tables[i]; 92 entry = tables[i];
93 if (entry != NULL) { 93 if (entry != NULL) {
94 if (entry->real != NULL) 94 if (entry->real != NULL)
95 ati_free_page_map(entry); 95 ati_free_page_map(entry);
96 kfree(entry); 96 kfree(entry);
97 } 97 }
98 } 98 }
99 kfree(tables); 99 kfree(tables);
100 } 100 }
101 101
102 102
103 static int ati_create_gatt_pages(int nr_tables) 103 static int ati_create_gatt_pages(int nr_tables)
104 { 104 {
105 struct ati_page_map **tables; 105 struct ati_page_map **tables;
106 struct ati_page_map *entry; 106 struct ati_page_map *entry;
107 int retval = 0; 107 int retval = 0;
108 int i; 108 int i;
109 109
110 tables = kzalloc((nr_tables + 1) * sizeof(struct ati_page_map *),GFP_KERNEL); 110 tables = kzalloc((nr_tables + 1) * sizeof(struct ati_page_map *),GFP_KERNEL);
111 if (tables == NULL) 111 if (tables == NULL)
112 return -ENOMEM; 112 return -ENOMEM;
113 113
114 for (i = 0; i < nr_tables; i++) { 114 for (i = 0; i < nr_tables; i++) {
115 entry = kzalloc(sizeof(struct ati_page_map), GFP_KERNEL); 115 entry = kzalloc(sizeof(struct ati_page_map), GFP_KERNEL);
116 tables[i] = entry; 116 tables[i] = entry;
117 if (entry == NULL) { 117 if (entry == NULL) {
118 retval = -ENOMEM; 118 retval = -ENOMEM;
119 break; 119 break;
120 } 120 }
121 retval = ati_create_page_map(entry); 121 retval = ati_create_page_map(entry);
122 if (retval != 0) 122 if (retval != 0)
123 break; 123 break;
124 } 124 }
125 ati_generic_private.num_tables = i; 125 ati_generic_private.num_tables = i;
126 ati_generic_private.gatt_pages = tables; 126 ati_generic_private.gatt_pages = tables;
127 127
128 if (retval != 0) 128 if (retval != 0)
129 ati_free_gatt_pages(); 129 ati_free_gatt_pages();
130 130
131 return retval; 131 return retval;
132 } 132 }
133 133
134 static int is_r200(void) 134 static int is_r200(void)
135 { 135 {
136 if ((agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS100) || 136 if ((agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS100) ||
137 (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200) || 137 (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200) ||
138 (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200_B) || 138 (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200_B) ||
139 (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS250)) 139 (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS250))
140 return 1; 140 return 1;
141 return 0; 141 return 0;
142 } 142 }
143 143
144 static int ati_fetch_size(void) 144 static int ati_fetch_size(void)
145 { 145 {
146 int i; 146 int i;
147 u32 temp; 147 u32 temp;
148 struct aper_size_info_lvl2 *values; 148 struct aper_size_info_lvl2 *values;
149 149
150 if (is_r200()) 150 if (is_r200())
151 pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); 151 pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
152 else 152 else
153 pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); 153 pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
154 154
155 temp = (temp & 0x0000000e); 155 temp = (temp & 0x0000000e);
156 values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); 156 values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
157 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { 157 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
158 if (temp == values[i].size_value) { 158 if (temp == values[i].size_value) {
159 agp_bridge->previous_size = 159 agp_bridge->previous_size =
160 agp_bridge->current_size = (void *) (values + i); 160 agp_bridge->current_size = (void *) (values + i);
161 161
162 agp_bridge->aperture_size_idx = i; 162 agp_bridge->aperture_size_idx = i;
163 return values[i].size; 163 return values[i].size;
164 } 164 }
165 } 165 }
166 166
167 return 0; 167 return 0;
168 } 168 }
169 169
/* Flush the GART cache; the readback forces PCI posting of the write. */
static void ati_tlbflush(struct agp_memory * mem)
{
	writel(1, ati_generic_private.registers+ATI_GART_CACHE_CNTRL);
	readl(ati_generic_private.registers+ATI_GART_CACHE_CNTRL);	/* PCI Posting. */
}
175 175
176 static void ati_cleanup(void) 176 static void ati_cleanup(void)
177 { 177 {
178 struct aper_size_info_lvl2 *previous_size; 178 struct aper_size_info_lvl2 *previous_size;
179 u32 temp; 179 u32 temp;
180 180
181 previous_size = A_SIZE_LVL2(agp_bridge->previous_size); 181 previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
182 182
183 /* Write back the previous size and disable gart translation */ 183 /* Write back the previous size and disable gart translation */
184 if (is_r200()) { 184 if (is_r200()) {
185 pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); 185 pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
186 temp = ((temp & ~(0x0000000f)) | previous_size->size_value); 186 temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
187 pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp); 187 pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
188 } else { 188 } else {
189 pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); 189 pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
190 temp = ((temp & ~(0x0000000f)) | previous_size->size_value); 190 temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
191 pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp); 191 pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
192 } 192 }
193 iounmap((volatile u8 __iomem *)ati_generic_private.registers); 193 iounmap((volatile u8 __iomem *)ati_generic_private.registers);
194 } 194 }
195 195
196 196
/*
 * Bring up the GART hardware: map the MMIO register window, put the
 * bridge into AGP mode, and point the hardware at the GATT directory.
 * Returns 0 on success or -ENOMEM if the window cannot be mapped.
 */
static int ati_configure(void)
{
	u32 temp;

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge->dev, ATI_GART_MMBASE_ADDR, &temp);
	temp = (temp & 0xfffff000);
	ati_generic_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);

	if (!ati_generic_private.registers)
		return -ENOMEM;

	/* 0x20000 selects AGP mode — magic value, undocumented here */
	if (is_r200())
		pci_write_config_dword(agp_bridge->dev, ATI_RS100_IG_AGPMODE, 0x20000);
	else
		pci_write_config_dword(agp_bridge->dev, ATI_RS300_IG_AGPMODE, 0x20000);

	/* address to map too */
	/*
	pci_read_config_dword(agp_bridge.dev, AGP_APBASE, &temp);
	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr);
	*/
	writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID);
	readl(ati_generic_private.registers+ATI_GART_FEATURE_ID);	/* PCI Posting.*/

	/* SIGNALED_SYSTEM_ERROR @ NB_STATUS */
	pci_read_config_dword(agp_bridge->dev, 4, &temp);
	pci_write_config_dword(agp_bridge->dev, 4, temp | (1<<14));

	/* Write out the address of the gatt table */
	writel(agp_bridge->gatt_bus_addr, ati_generic_private.registers+ATI_GART_BASE);
	readl(ati_generic_private.registers+ATI_GART_BASE);	/* PCI Posting. */

	return 0;
}
233 233
234 234
#ifdef CONFIG_PM
/* Save config space and drop to D3hot on suspend. */
static int agp_ati_suspend(struct pci_dev *dev, pm_message_t state)
{
	pci_save_state(dev);
	/* symbolic pci_power_t instead of the bare magic number 3 */
	pci_set_power_state(dev, PCI_D3hot);

	return 0;
}

/* Return to D0, restore config space, reprogram the GART registers. */
static int agp_ati_resume(struct pci_dev *dev)
{
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);

	/* GART state is lost across suspend; redo the full setup */
	return ati_configure();
}
#endif
252 252
/*
 *Since we don't need contiguous memory we just try
 * to get the gatt table once
 */

/* Two-level GATT walk helpers: each directory slot covers 4 MB
 * (addr >> 22), each leaf entry covers one 4 KB page. */
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#undef GET_GATT
#define GET_GATT(addr) (ati_generic_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)
265 265
266 static int ati_insert_memory(struct agp_memory * mem, 266 static int ati_insert_memory(struct agp_memory * mem,
267 off_t pg_start, int type) 267 off_t pg_start, int type)
268 { 268 {
269 int i, j, num_entries; 269 int i, j, num_entries;
270 unsigned long __iomem *cur_gatt; 270 unsigned long __iomem *cur_gatt;
271 unsigned long addr; 271 unsigned long addr;
272 272
273 num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; 273 num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
274 274
275 if (type != 0 || mem->type != 0) 275 if (type != 0 || mem->type != 0)
276 return -EINVAL; 276 return -EINVAL;
277 277
278 if ((pg_start + mem->page_count) > num_entries) 278 if ((pg_start + mem->page_count) > num_entries)
279 return -EINVAL; 279 return -EINVAL;
280 280
281 j = pg_start; 281 j = pg_start;
282 while (j < (pg_start + mem->page_count)) { 282 while (j < (pg_start + mem->page_count)) {
283 addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; 283 addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
284 cur_gatt = GET_GATT(addr); 284 cur_gatt = GET_GATT(addr);
285 if (!PGE_EMPTY(agp_bridge,readl(cur_gatt+GET_GATT_OFF(addr)))) 285 if (!PGE_EMPTY(agp_bridge,readl(cur_gatt+GET_GATT_OFF(addr))))
286 return -EBUSY; 286 return -EBUSY;
287 j++; 287 j++;
288 } 288 }
289 289
290 if (!mem->is_flushed) { 290 if (!mem->is_flushed) {
291 /*CACHE_FLUSH(); */ 291 /*CACHE_FLUSH(); */
292 global_cache_flush(); 292 global_cache_flush();
293 mem->is_flushed = true; 293 mem->is_flushed = true;
294 } 294 }
295 295
296 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 296 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
297 addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; 297 addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
298 cur_gatt = GET_GATT(addr); 298 cur_gatt = GET_GATT(addr);
299 writel(agp_bridge->driver->mask_memory(agp_bridge, 299 writel(agp_bridge->driver->mask_memory(agp_bridge,
300 mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr)); 300 mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
301 readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */ 301 readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
302 } 302 }
303 agp_bridge->driver->tlb_flush(mem); 303 agp_bridge->driver->tlb_flush(mem);
304 return 0; 304 return 0;
305 } 305 }
306 306
307 static int ati_remove_memory(struct agp_memory * mem, off_t pg_start, 307 static int ati_remove_memory(struct agp_memory * mem, off_t pg_start,
308 int type) 308 int type)
309 { 309 {
310 int i; 310 int i;
311 unsigned long __iomem *cur_gatt; 311 unsigned long __iomem *cur_gatt;
312 unsigned long addr; 312 unsigned long addr;
313 313
314 if (type != 0 || mem->type != 0) 314 if (type != 0 || mem->type != 0)
315 return -EINVAL; 315 return -EINVAL;
316 316
317 for (i = pg_start; i < (mem->page_count + pg_start); i++) { 317 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
318 addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; 318 addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
319 cur_gatt = GET_GATT(addr); 319 cur_gatt = GET_GATT(addr);
320 writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); 320 writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
321 readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */ 321 readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
322 } 322 }
323 323
324 agp_bridge->driver->tlb_flush(mem); 324 agp_bridge->driver->tlb_flush(mem);
325 return 0; 325 return 0;
326 } 326 }
327 327
/*
 * Build the two-level GATT: allocate the page directory and all leaf
 * pages, program the aperture size register (setting the enable bit),
 * and fill the directory with the bus addresses of the leaf pages.
 */
static int ati_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	struct ati_page_map page_dir;
	unsigned long addr;
	int retval;
	u32 temp;
	int i;
	struct aper_size_info_lvl2 *current_size;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = ati_create_page_map(&page_dir);
	if (retval != 0)
		return retval;

	/* one leaf page per 1024 entries */
	retval = ati_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		ati_free_page_map(&page_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);

	/* Write out the size register */
	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	if (is_r200()) {
		pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
		temp = (((temp & ~(0x0000000e)) | current_size->size_value)
			| 0x00000001);	/* bit 0 enables the aperture */
		pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
		/* readback flushes the write before we continue */
		pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
	} else {
		pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
		temp = (((temp & ~(0x0000000e)) | current_size->size_value)
			| 0x00000001);
		pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
		pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
	}

	/*
	 * Get the address for the gart region.
	 * This is a bus address even on the alpha, b/c its
	 * used to program the agp master not the cpu
	 */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	agp_bridge->gart_bus_addr = addr;

	/* Calculate the agp offset: fill the directory, 4 MB per slot;
	 * the low bit marks the entry valid. */
	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
		writel(virt_to_gart(ati_generic_private.gatt_pages[i]->real) | 1,
			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
	}

	return 0;
}
388 388
389 static int ati_free_gatt_table(struct agp_bridge_data *bridge) 389 static int ati_free_gatt_table(struct agp_bridge_data *bridge)
390 { 390 {
391 struct ati_page_map page_dir; 391 struct ati_page_map page_dir;
392 392
393 page_dir.real = (unsigned long *)agp_bridge->gatt_table_real; 393 page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
394 page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table; 394 page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
395 395
396 ati_free_gatt_pages(); 396 ati_free_gatt_pages();
397 ati_free_page_map(&page_dir); 397 ati_free_page_map(&page_dir);
398 return 0; 398 return 0;
399 } 399 }
400 400
/* Driver ops: ATi-specific GATT/TLB handling, generic everything else. */
static const struct agp_bridge_driver ati_generic_bridge = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= ati_generic_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= ati_configure,
	.fetch_size		= ati_fetch_size,
	.cleanup		= ati_cleanup,
	.tlb_flush		= ati_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= ati_generic_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= ati_create_gatt_table,
	.free_gatt_table	= ati_free_gatt_table,
	.insert_memory		= ati_insert_memory,
	.remove_memory		= ati_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};
424 424
425 425
/* Known ATi IGP bridges; probe matches pdev->device against this list. */
static struct agp_device_ids ati_agp_device_ids[] __devinitdata =
{
	{
		.device_id	= PCI_DEVICE_ID_ATI_RS100,
		.chipset_name	= "IGP320/M",
	},
	{
		.device_id	= PCI_DEVICE_ID_ATI_RS200,
		.chipset_name	= "IGP330/340/345/350/M",
	},
	{
		.device_id	= PCI_DEVICE_ID_ATI_RS200_B,
		.chipset_name	= "IGP345M",
	},
	{
		.device_id	= PCI_DEVICE_ID_ATI_RS250,
		.chipset_name	= "IGP7000/M",
	},
	{
		.device_id	= PCI_DEVICE_ID_ATI_RS300_100,
		.chipset_name	= "IGP9100/M",
	},
	{
		.device_id	= PCI_DEVICE_ID_ATI_RS300_133,
		.chipset_name	= "IGP9100/M",
	},
	{
		.device_id	= PCI_DEVICE_ID_ATI_RS300_166,
		.chipset_name	= "IGP9100/M",
	},
	{
		.device_id	= PCI_DEVICE_ID_ATI_RS300_200,
		.chipset_name	= "IGP9100/M",
	},
	{
		.device_id	= PCI_DEVICE_ID_ATI_RS350_133,
		.chipset_name	= "IGP9000/M",
	},
	{
		.device_id	= PCI_DEVICE_ID_ATI_RS350_200,
		.chipset_name	= "IGP9100/M",
	},
	{ }, /* dummy final entry, always present */
};
470 470
471 static int __devinit agp_ati_probe(struct pci_dev *pdev, 471 static int __devinit agp_ati_probe(struct pci_dev *pdev,
472 const struct pci_device_id *ent) 472 const struct pci_device_id *ent)
473 { 473 {
474 struct agp_device_ids *devs = ati_agp_device_ids; 474 struct agp_device_ids *devs = ati_agp_device_ids;
475 struct agp_bridge_data *bridge; 475 struct agp_bridge_data *bridge;
476 u8 cap_ptr; 476 u8 cap_ptr;
477 int j; 477 int j;
478 478
479 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); 479 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
480 if (!cap_ptr) 480 if (!cap_ptr)
481 return -ENODEV; 481 return -ENODEV;
482 482
483 /* probe for known chipsets */ 483 /* probe for known chipsets */
484 for (j = 0; devs[j].chipset_name; j++) { 484 for (j = 0; devs[j].chipset_name; j++) {
485 if (pdev->device == devs[j].device_id) 485 if (pdev->device == devs[j].device_id)
486 goto found; 486 goto found;
487 } 487 }
488 488
489 printk(KERN_ERR PFX 489 dev_err(&pdev->dev, "unsupported Ati chipset [%04x/%04x])\n",
490 "Unsupported Ati chipset (device id: %04x)\n", pdev->device); 490 pdev->vendor, pdev->device);
491 return -ENODEV; 491 return -ENODEV;
492 492
493 found: 493 found:
494 bridge = agp_alloc_bridge(); 494 bridge = agp_alloc_bridge();
495 if (!bridge) 495 if (!bridge)
496 return -ENOMEM; 496 return -ENOMEM;
497 497
498 bridge->dev = pdev; 498 bridge->dev = pdev;
499 bridge->capndx = cap_ptr; 499 bridge->capndx = cap_ptr;
500 500
501 bridge->driver = &ati_generic_bridge; 501 bridge->driver = &ati_generic_bridge;
502 502
503 printk(KERN_INFO PFX "Detected Ati %s chipset\n", 503 dev_info(&pdev->dev, "Ati %s chipset\n", devs[j].chipset_name);
504 devs[j].chipset_name);
505 504
506 /* Fill in the mode register */ 505 /* Fill in the mode register */
507 pci_read_config_dword(pdev, 506 pci_read_config_dword(pdev,
508 bridge->capndx+PCI_AGP_STATUS, 507 bridge->capndx+PCI_AGP_STATUS,
509 &bridge->mode); 508 &bridge->mode);
510 509
511 pci_set_drvdata(pdev, bridge); 510 pci_set_drvdata(pdev, bridge);
512 return agp_add_bridge(bridge); 511 return agp_add_bridge(bridge);
513 } 512 }
514 513
515 static void __devexit agp_ati_remove(struct pci_dev *pdev) 514 static void __devexit agp_ati_remove(struct pci_dev *pdev)
516 { 515 {
517 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 516 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
518 517
519 agp_remove_bridge(bridge); 518 agp_remove_bridge(bridge);
520 agp_put_bridge(bridge); 519 agp_put_bridge(bridge);
521 } 520 }
522 521
523 static struct pci_device_id agp_ati_pci_table[] = { 522 static struct pci_device_id agp_ati_pci_table[] = {
524 { 523 {
525 .class = (PCI_CLASS_BRIDGE_HOST << 8), 524 .class = (PCI_CLASS_BRIDGE_HOST << 8),
526 .class_mask = ~0, 525 .class_mask = ~0,
527 .vendor = PCI_VENDOR_ID_ATI, 526 .vendor = PCI_VENDOR_ID_ATI,
528 .device = PCI_ANY_ID, 527 .device = PCI_ANY_ID,
529 .subvendor = PCI_ANY_ID, 528 .subvendor = PCI_ANY_ID,
530 .subdevice = PCI_ANY_ID, 529 .subdevice = PCI_ANY_ID,
531 }, 530 },
532 { } 531 { }
533 }; 532 };
534 533
535 MODULE_DEVICE_TABLE(pci, agp_ati_pci_table); 534 MODULE_DEVICE_TABLE(pci, agp_ati_pci_table);
536 535
537 static struct pci_driver agp_ati_pci_driver = { 536 static struct pci_driver agp_ati_pci_driver = {
538 .name = "agpgart-ati", 537 .name = "agpgart-ati",
539 .id_table = agp_ati_pci_table, 538 .id_table = agp_ati_pci_table,
540 .probe = agp_ati_probe, 539 .probe = agp_ati_probe,
541 .remove = agp_ati_remove, 540 .remove = agp_ati_remove,
542 #ifdef CONFIG_PM 541 #ifdef CONFIG_PM
543 .suspend = agp_ati_suspend, 542 .suspend = agp_ati_suspend,
544 .resume = agp_ati_resume, 543 .resume = agp_ati_resume,
545 #endif 544 #endif
546 }; 545 };
547 546
548 static int __init agp_ati_init(void) 547 static int __init agp_ati_init(void)
549 { 548 {
550 if (agp_off) 549 if (agp_off)
551 return -EINVAL; 550 return -EINVAL;
552 return pci_register_driver(&agp_ati_pci_driver); 551 return pci_register_driver(&agp_ati_pci_driver);
553 } 552 }
554 553
555 static void __exit agp_ati_cleanup(void) 554 static void __exit agp_ati_cleanup(void)
556 { 555 {
557 pci_unregister_driver(&agp_ati_pci_driver); 556 pci_unregister_driver(&agp_ati_pci_driver);
558 } 557 }
559 558
560 module_init(agp_ati_init); 559 module_init(agp_ati_init);
561 module_exit(agp_ati_cleanup); 560 module_exit(agp_ati_cleanup);
562 561
563 MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>"); 562 MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
564 MODULE_LICENSE("GPL and additional rights"); 563 MODULE_LICENSE("GPL and additional rights");
565 564
566 565
drivers/char/agp/backend.c
1 /* 1 /*
2 * AGPGART driver backend routines. 2 * AGPGART driver backend routines.
3 * Copyright (C) 2004 Silicon Graphics, Inc. 3 * Copyright (C) 2004 Silicon Graphics, Inc.
4 * Copyright (C) 2002-2003 Dave Jones. 4 * Copyright (C) 2002-2003 Dave Jones.
5 * Copyright (C) 1999 Jeff Hartmann. 5 * Copyright (C) 1999 Jeff Hartmann.
6 * Copyright (C) 1999 Precision Insight, Inc. 6 * Copyright (C) 1999 Precision Insight, Inc.
7 * Copyright (C) 1999 Xi Graphics, Inc. 7 * Copyright (C) 1999 Xi Graphics, Inc.
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"), 10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation 11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the 13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions: 14 * Software is furnished to do so, subject to the following conditions:
15 * 15 *
16 * The above copyright notice and this permission notice shall be included 16 * The above copyright notice and this permission notice shall be included
17 * in all copies or substantial portions of the Software. 17 * in all copies or substantial portions of the Software.
18 * 18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * JEFF HARTMANN, DAVE JONES, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 22 * JEFF HARTMANN, DAVE JONES, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
25 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 * 26 *
27 * TODO: 27 * TODO:
28 * - Allocate more than order 0 pages to avoid too much linear map splitting. 28 * - Allocate more than order 0 pages to avoid too much linear map splitting.
29 */ 29 */
30 #include <linux/module.h> 30 #include <linux/module.h>
31 #include <linux/pci.h> 31 #include <linux/pci.h>
32 #include <linux/init.h> 32 #include <linux/init.h>
33 #include <linux/pagemap.h> 33 #include <linux/pagemap.h>
34 #include <linux/miscdevice.h> 34 #include <linux/miscdevice.h>
35 #include <linux/pm.h> 35 #include <linux/pm.h>
36 #include <linux/agp_backend.h> 36 #include <linux/agp_backend.h>
37 #include <linux/agpgart.h> 37 #include <linux/agpgart.h>
38 #include <linux/vmalloc.h> 38 #include <linux/vmalloc.h>
39 #include <asm/io.h> 39 #include <asm/io.h>
40 #include "agp.h" 40 #include "agp.h"
41 41
42 /* Due to XFree86 brain-damage, we can't go to 1.0 until they 42 /* Due to XFree86 brain-damage, we can't go to 1.0 until they
43 * fix some real stupidity. It's only by chance we can bump 43 * fix some real stupidity. It's only by chance we can bump
44 * past 0.99 at all due to some boolean logic error. */ 44 * past 0.99 at all due to some boolean logic error. */
45 #define AGPGART_VERSION_MAJOR 0 45 #define AGPGART_VERSION_MAJOR 0
46 #define AGPGART_VERSION_MINOR 103 46 #define AGPGART_VERSION_MINOR 103
47 static const struct agp_version agp_current_version = 47 static const struct agp_version agp_current_version =
48 { 48 {
49 .major = AGPGART_VERSION_MAJOR, 49 .major = AGPGART_VERSION_MAJOR,
50 .minor = AGPGART_VERSION_MINOR, 50 .minor = AGPGART_VERSION_MINOR,
51 }; 51 };
52 52
53 struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *) = 53 struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *) =
54 &agp_generic_find_bridge; 54 &agp_generic_find_bridge;
55 55
56 struct agp_bridge_data *agp_bridge; 56 struct agp_bridge_data *agp_bridge;
57 LIST_HEAD(agp_bridges); 57 LIST_HEAD(agp_bridges);
58 EXPORT_SYMBOL(agp_bridge); 58 EXPORT_SYMBOL(agp_bridge);
59 EXPORT_SYMBOL(agp_bridges); 59 EXPORT_SYMBOL(agp_bridges);
60 EXPORT_SYMBOL(agp_find_bridge); 60 EXPORT_SYMBOL(agp_find_bridge);
61 61
62 /** 62 /**
63 * agp_backend_acquire - attempt to acquire an agp backend. 63 * agp_backend_acquire - attempt to acquire an agp backend.
64 * 64 *
65 */ 65 */
66 struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev) 66 struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev)
67 { 67 {
68 struct agp_bridge_data *bridge; 68 struct agp_bridge_data *bridge;
69 69
70 bridge = agp_find_bridge(pdev); 70 bridge = agp_find_bridge(pdev);
71 71
72 if (!bridge) 72 if (!bridge)
73 return NULL; 73 return NULL;
74 74
75 if (atomic_read(&bridge->agp_in_use)) 75 if (atomic_read(&bridge->agp_in_use))
76 return NULL; 76 return NULL;
77 atomic_inc(&bridge->agp_in_use); 77 atomic_inc(&bridge->agp_in_use);
78 return bridge; 78 return bridge;
79 } 79 }
80 EXPORT_SYMBOL(agp_backend_acquire); 80 EXPORT_SYMBOL(agp_backend_acquire);
81 81
82 82
83 /** 83 /**
84 * agp_backend_release - release the lock on the agp backend. 84 * agp_backend_release - release the lock on the agp backend.
85 * 85 *
86 * The caller must insure that the graphics aperture translation table 86 * The caller must insure that the graphics aperture translation table
87 * is read for use by another entity. 87 * is read for use by another entity.
88 * 88 *
89 * (Ensure that all memory it bound is unbound.) 89 * (Ensure that all memory it bound is unbound.)
90 */ 90 */
91 void agp_backend_release(struct agp_bridge_data *bridge) 91 void agp_backend_release(struct agp_bridge_data *bridge)
92 { 92 {
93 93
94 if (bridge) 94 if (bridge)
95 atomic_dec(&bridge->agp_in_use); 95 atomic_dec(&bridge->agp_in_use);
96 } 96 }
97 EXPORT_SYMBOL(agp_backend_release); 97 EXPORT_SYMBOL(agp_backend_release);
98 98
99 99
/* Piecewise-linear map from system memory (MB) to max AGP memory (MB),
 * interpolated by agp_find_max(). */
static const struct { int mem, agp; } maxes_table[] = {
	{0, 0},
	{32, 4},
	{64, 28},
	{128, 96},
	{256, 204},
	{512, 440},
	{1024, 942},
	{2048, 1920},
	{4096, 3932}
};
111 111
112 static int agp_find_max(void) 112 static int agp_find_max(void)
113 { 113 {
114 long memory, index, result; 114 long memory, index, result;
115 115
116 #if PAGE_SHIFT < 20 116 #if PAGE_SHIFT < 20
117 memory = num_physpages >> (20 - PAGE_SHIFT); 117 memory = num_physpages >> (20 - PAGE_SHIFT);
118 #else 118 #else
119 memory = num_physpages << (PAGE_SHIFT - 20); 119 memory = num_physpages << (PAGE_SHIFT - 20);
120 #endif 120 #endif
121 index = 1; 121 index = 1;
122 122
123 while ((memory > maxes_table[index].mem) && (index < 8)) 123 while ((memory > maxes_table[index].mem) && (index < 8))
124 index++; 124 index++;
125 125
126 result = maxes_table[index - 1].agp + 126 result = maxes_table[index - 1].agp +
127 ( (memory - maxes_table[index - 1].mem) * 127 ( (memory - maxes_table[index - 1].mem) *
128 (maxes_table[index].agp - maxes_table[index - 1].agp)) / 128 (maxes_table[index].agp - maxes_table[index - 1].agp)) /
129 (maxes_table[index].mem - maxes_table[index - 1].mem); 129 (maxes_table[index].mem - maxes_table[index - 1].mem);
130 130
131 result = result << (20 - PAGE_SHIFT); 131 result = result << (20 - PAGE_SHIFT);
132 return result; 132 return result;
133 } 133 }
134 134
135 135
136 static int agp_backend_initialize(struct agp_bridge_data *bridge) 136 static int agp_backend_initialize(struct agp_bridge_data *bridge)
137 { 137 {
138 int size_value, rc, got_gatt=0, got_keylist=0; 138 int size_value, rc, got_gatt=0, got_keylist=0;
139 139
140 bridge->max_memory_agp = agp_find_max(); 140 bridge->max_memory_agp = agp_find_max();
141 bridge->version = &agp_current_version; 141 bridge->version = &agp_current_version;
142 142
143 if (bridge->driver->needs_scratch_page) { 143 if (bridge->driver->needs_scratch_page) {
144 void *addr = bridge->driver->agp_alloc_page(bridge); 144 void *addr = bridge->driver->agp_alloc_page(bridge);
145 145
146 if (!addr) { 146 if (!addr) {
147 printk(KERN_ERR PFX "unable to get memory for scratch page.\n"); 147 dev_err(&bridge->dev->dev,
148 "can't get memory for scratch page\n");
148 return -ENOMEM; 149 return -ENOMEM;
149 } 150 }
150 151
151 bridge->scratch_page_real = virt_to_gart(addr); 152 bridge->scratch_page_real = virt_to_gart(addr);
152 bridge->scratch_page = 153 bridge->scratch_page =
153 bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0); 154 bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0);
154 } 155 }
155 156
156 size_value = bridge->driver->fetch_size(); 157 size_value = bridge->driver->fetch_size();
157 if (size_value == 0) { 158 if (size_value == 0) {
158 printk(KERN_ERR PFX "unable to determine aperture size.\n"); 159 dev_err(&bridge->dev->dev, "can't determine aperture size\n");
159 rc = -EINVAL; 160 rc = -EINVAL;
160 goto err_out; 161 goto err_out;
161 } 162 }
162 if (bridge->driver->create_gatt_table(bridge)) { 163 if (bridge->driver->create_gatt_table(bridge)) {
163 printk(KERN_ERR PFX 164 dev_err(&bridge->dev->dev,
164 "unable to get memory for graphics translation table.\n"); 165 "can't get memory for graphics translation table\n");
165 rc = -ENOMEM; 166 rc = -ENOMEM;
166 goto err_out; 167 goto err_out;
167 } 168 }
168 got_gatt = 1; 169 got_gatt = 1;
169 170
170 bridge->key_list = vmalloc(PAGE_SIZE * 4); 171 bridge->key_list = vmalloc(PAGE_SIZE * 4);
171 if (bridge->key_list == NULL) { 172 if (bridge->key_list == NULL) {
172 printk(KERN_ERR PFX "error allocating memory for key lists.\n"); 173 dev_err(&bridge->dev->dev,
174 "can't allocate memory for key lists\n");
173 rc = -ENOMEM; 175 rc = -ENOMEM;
174 goto err_out; 176 goto err_out;
175 } 177 }
176 got_keylist = 1; 178 got_keylist = 1;
177 179
178 /* FIXME vmalloc'd memory not guaranteed contiguous */ 180 /* FIXME vmalloc'd memory not guaranteed contiguous */
179 memset(bridge->key_list, 0, PAGE_SIZE * 4); 181 memset(bridge->key_list, 0, PAGE_SIZE * 4);
180 182
181 if (bridge->driver->configure()) { 183 if (bridge->driver->configure()) {
182 printk(KERN_ERR PFX "error configuring host chipset.\n"); 184 dev_err(&bridge->dev->dev, "error configuring host chipset\n");
183 rc = -EINVAL; 185 rc = -EINVAL;
184 goto err_out; 186 goto err_out;
185 } 187 }
186 188
187 return 0; 189 return 0;
188 190
189 err_out: 191 err_out:
190 if (bridge->driver->needs_scratch_page) { 192 if (bridge->driver->needs_scratch_page) {
191 void *va = gart_to_virt(bridge->scratch_page_real); 193 void *va = gart_to_virt(bridge->scratch_page_real);
192 194
193 bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP); 195 bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
194 bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE); 196 bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
195 } 197 }
196 if (got_gatt) 198 if (got_gatt)
197 bridge->driver->free_gatt_table(bridge); 199 bridge->driver->free_gatt_table(bridge);
198 if (got_keylist) { 200 if (got_keylist) {
199 vfree(bridge->key_list); 201 vfree(bridge->key_list);
200 bridge->key_list = NULL; 202 bridge->key_list = NULL;
201 } 203 }
202 return rc; 204 return rc;
203 } 205 }
204 206
205 /* cannot be __exit b/c as it could be called from __init code */ 207 /* cannot be __exit b/c as it could be called from __init code */
206 static void agp_backend_cleanup(struct agp_bridge_data *bridge) 208 static void agp_backend_cleanup(struct agp_bridge_data *bridge)
207 { 209 {
208 if (bridge->driver->cleanup) 210 if (bridge->driver->cleanup)
209 bridge->driver->cleanup(); 211 bridge->driver->cleanup();
210 if (bridge->driver->free_gatt_table) 212 if (bridge->driver->free_gatt_table)
211 bridge->driver->free_gatt_table(bridge); 213 bridge->driver->free_gatt_table(bridge);
212 214
213 vfree(bridge->key_list); 215 vfree(bridge->key_list);
214 bridge->key_list = NULL; 216 bridge->key_list = NULL;
215 217
216 if (bridge->driver->agp_destroy_page && 218 if (bridge->driver->agp_destroy_page &&
217 bridge->driver->needs_scratch_page) { 219 bridge->driver->needs_scratch_page) {
218 void *va = gart_to_virt(bridge->scratch_page_real); 220 void *va = gart_to_virt(bridge->scratch_page_real);
219 221
220 bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP); 222 bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
221 bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE); 223 bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
222 } 224 }
223 } 225 }
224 226
225 /* When we remove the global variable agp_bridge from all drivers 227 /* When we remove the global variable agp_bridge from all drivers
226 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated 228 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
227 */ 229 */
228 230
229 struct agp_bridge_data *agp_alloc_bridge(void) 231 struct agp_bridge_data *agp_alloc_bridge(void)
230 { 232 {
231 struct agp_bridge_data *bridge; 233 struct agp_bridge_data *bridge;
232 234
233 bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); 235 bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
234 if (!bridge) 236 if (!bridge)
235 return NULL; 237 return NULL;
236 238
237 atomic_set(&bridge->agp_in_use, 0); 239 atomic_set(&bridge->agp_in_use, 0);
238 atomic_set(&bridge->current_memory_agp, 0); 240 atomic_set(&bridge->current_memory_agp, 0);
239 241
240 if (list_empty(&agp_bridges)) 242 if (list_empty(&agp_bridges))
241 agp_bridge = bridge; 243 agp_bridge = bridge;
242 244
243 return bridge; 245 return bridge;
244 } 246 }
245 EXPORT_SYMBOL(agp_alloc_bridge); 247 EXPORT_SYMBOL(agp_alloc_bridge);
246 248
247 249
248 void agp_put_bridge(struct agp_bridge_data *bridge) 250 void agp_put_bridge(struct agp_bridge_data *bridge)
249 { 251 {
250 kfree(bridge); 252 kfree(bridge);
251 253
252 if (list_empty(&agp_bridges)) 254 if (list_empty(&agp_bridges))
253 agp_bridge = NULL; 255 agp_bridge = NULL;
254 } 256 }
255 EXPORT_SYMBOL(agp_put_bridge); 257 EXPORT_SYMBOL(agp_put_bridge);
256 258
257 259
258 int agp_add_bridge(struct agp_bridge_data *bridge) 260 int agp_add_bridge(struct agp_bridge_data *bridge)
259 { 261 {
260 int error; 262 int error;
261 263
262 if (agp_off) 264 if (agp_off)
263 return -ENODEV; 265 return -ENODEV;
264 266
265 if (!bridge->dev) { 267 if (!bridge->dev) {
266 printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n"); 268 printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n");
267 return -EINVAL; 269 return -EINVAL;
268 } 270 }
269 271
270 /* Grab reference on the chipset driver. */ 272 /* Grab reference on the chipset driver. */
271 if (!try_module_get(bridge->driver->owner)) { 273 if (!try_module_get(bridge->driver->owner)) {
272 printk (KERN_INFO PFX "Couldn't lock chipset driver.\n"); 274 dev_info(&bridge->dev->dev, "can't lock chipset driver\n");
273 return -EINVAL; 275 return -EINVAL;
274 } 276 }
275 277
276 error = agp_backend_initialize(bridge); 278 error = agp_backend_initialize(bridge);
277 if (error) { 279 if (error) {
278 printk (KERN_INFO PFX "agp_backend_initialize() failed.\n"); 280 dev_info(&bridge->dev->dev,
281 "agp_backend_initialize() failed\n");
279 goto err_out; 282 goto err_out;
280 } 283 }
281 284
282 if (list_empty(&agp_bridges)) { 285 if (list_empty(&agp_bridges)) {
283 error = agp_frontend_initialize(); 286 error = agp_frontend_initialize();
284 if (error) { 287 if (error) {
285 printk (KERN_INFO PFX "agp_frontend_initialize() failed.\n"); 288 dev_info(&bridge->dev->dev,
289 "agp_frontend_initialize() failed\n");
286 goto frontend_err; 290 goto frontend_err;
287 } 291 }
288 292
289 printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n", 293 dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n",
290 bridge->driver->fetch_size(), bridge->gart_bus_addr); 294 bridge->driver->fetch_size(), bridge->gart_bus_addr);
291 295
292 } 296 }
293 297
294 list_add(&bridge->list, &agp_bridges); 298 list_add(&bridge->list, &agp_bridges);
295 return 0; 299 return 0;
296 300
297 frontend_err: 301 frontend_err:
298 agp_backend_cleanup(bridge); 302 agp_backend_cleanup(bridge);
299 err_out: 303 err_out:
300 module_put(bridge->driver->owner); 304 module_put(bridge->driver->owner);
301 agp_put_bridge(bridge); 305 agp_put_bridge(bridge);
302 return error; 306 return error;
303 } 307 }
304 EXPORT_SYMBOL_GPL(agp_add_bridge); 308 EXPORT_SYMBOL_GPL(agp_add_bridge);
305 309
306 310
307 void agp_remove_bridge(struct agp_bridge_data *bridge) 311 void agp_remove_bridge(struct agp_bridge_data *bridge)
308 { 312 {
309 agp_backend_cleanup(bridge); 313 agp_backend_cleanup(bridge);
310 list_del(&bridge->list); 314 list_del(&bridge->list);
311 if (list_empty(&agp_bridges)) 315 if (list_empty(&agp_bridges))
312 agp_frontend_cleanup(); 316 agp_frontend_cleanup();
313 module_put(bridge->driver->owner); 317 module_put(bridge->driver->owner);
314 } 318 }
315 EXPORT_SYMBOL_GPL(agp_remove_bridge); 319 EXPORT_SYMBOL_GPL(agp_remove_bridge);
316 320
int agp_off;
int agp_try_unsupported_boot;
EXPORT_SYMBOL(agp_off);
EXPORT_SYMBOL(agp_try_unsupported_boot);
321 325
322 static int __init agp_init(void) 326 static int __init agp_init(void)
323 { 327 {
324 if (!agp_off) 328 if (!agp_off)
325 printk(KERN_INFO "Linux agpgart interface v%d.%d\n", 329 printk(KERN_INFO "Linux agpgart interface v%d.%d\n",
326 AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR); 330 AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
327 return 0; 331 return 0;
328 } 332 }
329 333
330 static void __exit agp_exit(void) 334 static void __exit agp_exit(void)
331 { 335 {
332 } 336 }
333 337
334 #ifndef MODULE 338 #ifndef MODULE
335 static __init int agp_setup(char *s) 339 static __init int agp_setup(char *s)
336 { 340 {
337 if (!strcmp(s,"off")) 341 if (!strcmp(s,"off"))
338 agp_off = 1; 342 agp_off = 1;
339 if (!strcmp(s,"try_unsupported")) 343 if (!strcmp(s,"try_unsupported"))
340 agp_try_unsupported_boot = 1; 344 agp_try_unsupported_boot = 1;
341 return 1; 345 return 1;
342 } 346 }
343 __setup("agp=", agp_setup); 347 __setup("agp=", agp_setup);
344 #endif 348 #endif
345 349
346 MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>"); 350 MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
347 MODULE_DESCRIPTION("AGP GART driver"); 351 MODULE_DESCRIPTION("AGP GART driver");
348 MODULE_LICENSE("GPL and additional rights"); 352 MODULE_LICENSE("GPL and additional rights");
349 MODULE_ALIAS_MISCDEV(AGPGART_MINOR); 353 MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
350 354
351 module_init(agp_init); 355 module_init(agp_init);
352 module_exit(agp_exit); 356 module_exit(agp_exit);
353 357
354 358
drivers/char/agp/generic.c
1 /* 1 /*
2 * AGPGART driver. 2 * AGPGART driver.
3 * Copyright (C) 2004 Silicon Graphics, Inc. 3 * Copyright (C) 2004 Silicon Graphics, Inc.
4 * Copyright (C) 2002-2005 Dave Jones. 4 * Copyright (C) 2002-2005 Dave Jones.
5 * Copyright (C) 1999 Jeff Hartmann. 5 * Copyright (C) 1999 Jeff Hartmann.
6 * Copyright (C) 1999 Precision Insight, Inc. 6 * Copyright (C) 1999 Precision Insight, Inc.
7 * Copyright (C) 1999 Xi Graphics, Inc. 7 * Copyright (C) 1999 Xi Graphics, Inc.
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"), 10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation 11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the 13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions: 14 * Software is furnished to do so, subject to the following conditions:
15 * 15 *
16 * The above copyright notice and this permission notice shall be included 16 * The above copyright notice and this permission notice shall be included
17 * in all copies or substantial portions of the Software. 17 * in all copies or substantial portions of the Software.
18 * 18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 22 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
25 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 * 26 *
27 * TODO: 27 * TODO:
28 * - Allocate more than order 0 pages to avoid too much linear map splitting. 28 * - Allocate more than order 0 pages to avoid too much linear map splitting.
29 */ 29 */
30 #include <linux/module.h> 30 #include <linux/module.h>
31 #include <linux/pci.h> 31 #include <linux/pci.h>
32 #include <linux/init.h> 32 #include <linux/init.h>
33 #include <linux/pagemap.h> 33 #include <linux/pagemap.h>
34 #include <linux/miscdevice.h> 34 #include <linux/miscdevice.h>
35 #include <linux/pm.h> 35 #include <linux/pm.h>
36 #include <linux/agp_backend.h> 36 #include <linux/agp_backend.h>
37 #include <linux/vmalloc.h> 37 #include <linux/vmalloc.h>
38 #include <linux/dma-mapping.h> 38 #include <linux/dma-mapping.h>
39 #include <linux/mm.h> 39 #include <linux/mm.h>
40 #include <linux/sched.h> 40 #include <linux/sched.h>
41 #include <asm/io.h> 41 #include <asm/io.h>
42 #include <asm/cacheflush.h> 42 #include <asm/cacheflush.h>
43 #include <asm/pgtable.h> 43 #include <asm/pgtable.h>
44 #include "agp.h" 44 #include "agp.h"
45 45
46 __u32 *agp_gatt_table; 46 __u32 *agp_gatt_table;
47 int agp_memory_reserved; 47 int agp_memory_reserved;
48 48
49 /* 49 /*
50 * Needed by the Nforce GART driver for the time being. Would be 50 * Needed by the Nforce GART driver for the time being. Would be
51 * nice to do this some other way instead of needing this export. 51 * nice to do this some other way instead of needing this export.
52 */ 52 */
53 EXPORT_SYMBOL_GPL(agp_memory_reserved); 53 EXPORT_SYMBOL_GPL(agp_memory_reserved);
54 54
55 /* 55 /*
56 * Generic routines for handling agp_memory structures - 56 * Generic routines for handling agp_memory structures -
57 * They use the basic page allocation routines to do the brunt of the work. 57 * They use the basic page allocation routines to do the brunt of the work.
58 */ 58 */
59 59
60 void agp_free_key(int key) 60 void agp_free_key(int key)
61 { 61 {
62 if (key < 0) 62 if (key < 0)
63 return; 63 return;
64 64
65 if (key < MAXKEY) 65 if (key < MAXKEY)
66 clear_bit(key, agp_bridge->key_list); 66 clear_bit(key, agp_bridge->key_list);
67 } 67 }
68 EXPORT_SYMBOL(agp_free_key); 68 EXPORT_SYMBOL(agp_free_key);
69 69
70 70
71 static int agp_get_key(void) 71 static int agp_get_key(void)
72 { 72 {
73 int bit; 73 int bit;
74 74
75 bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY); 75 bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
76 if (bit < MAXKEY) { 76 if (bit < MAXKEY) {
77 set_bit(bit, agp_bridge->key_list); 77 set_bit(bit, agp_bridge->key_list);
78 return bit; 78 return bit;
79 } 79 }
80 return -1; 80 return -1;
81 } 81 }
82 82
83 void agp_flush_chipset(struct agp_bridge_data *bridge) 83 void agp_flush_chipset(struct agp_bridge_data *bridge)
84 { 84 {
85 if (bridge->driver->chipset_flush) 85 if (bridge->driver->chipset_flush)
86 bridge->driver->chipset_flush(bridge); 86 bridge->driver->chipset_flush(bridge);
87 } 87 }
88 EXPORT_SYMBOL(agp_flush_chipset); 88 EXPORT_SYMBOL(agp_flush_chipset);
89 89
90 /* 90 /*
91 * Use kmalloc if possible for the page list. Otherwise fall back to 91 * Use kmalloc if possible for the page list. Otherwise fall back to
92 * vmalloc. This speeds things up and also saves memory for small AGP 92 * vmalloc. This speeds things up and also saves memory for small AGP
93 * regions. 93 * regions.
94 */ 94 */
95 95
96 void agp_alloc_page_array(size_t size, struct agp_memory *mem) 96 void agp_alloc_page_array(size_t size, struct agp_memory *mem)
97 { 97 {
98 mem->memory = NULL; 98 mem->memory = NULL;
99 mem->vmalloc_flag = false; 99 mem->vmalloc_flag = false;
100 100
101 if (size <= 2*PAGE_SIZE) 101 if (size <= 2*PAGE_SIZE)
102 mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY); 102 mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
103 if (mem->memory == NULL) { 103 if (mem->memory == NULL) {
104 mem->memory = vmalloc(size); 104 mem->memory = vmalloc(size);
105 mem->vmalloc_flag = true; 105 mem->vmalloc_flag = true;
106 } 106 }
107 } 107 }
108 EXPORT_SYMBOL(agp_alloc_page_array); 108 EXPORT_SYMBOL(agp_alloc_page_array);
109 109
110 void agp_free_page_array(struct agp_memory *mem) 110 void agp_free_page_array(struct agp_memory *mem)
111 { 111 {
112 if (mem->vmalloc_flag) { 112 if (mem->vmalloc_flag) {
113 vfree(mem->memory); 113 vfree(mem->memory);
114 } else { 114 } else {
115 kfree(mem->memory); 115 kfree(mem->memory);
116 } 116 }
117 } 117 }
118 EXPORT_SYMBOL(agp_free_page_array); 118 EXPORT_SYMBOL(agp_free_page_array);
119 119
120 120
121 static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages) 121 static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
122 { 122 {
123 struct agp_memory *new; 123 struct agp_memory *new;
124 unsigned long alloc_size = num_agp_pages*sizeof(struct page *); 124 unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
125 125
126 new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); 126 new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
127 if (new == NULL) 127 if (new == NULL)
128 return NULL; 128 return NULL;
129 129
130 new->key = agp_get_key(); 130 new->key = agp_get_key();
131 131
132 if (new->key < 0) { 132 if (new->key < 0) {
133 kfree(new); 133 kfree(new);
134 return NULL; 134 return NULL;
135 } 135 }
136 136
137 agp_alloc_page_array(alloc_size, new); 137 agp_alloc_page_array(alloc_size, new);
138 138
139 if (new->memory == NULL) { 139 if (new->memory == NULL) {
140 agp_free_key(new->key); 140 agp_free_key(new->key);
141 kfree(new); 141 kfree(new);
142 return NULL; 142 return NULL;
143 } 143 }
144 new->num_scratch_pages = 0; 144 new->num_scratch_pages = 0;
145 return new; 145 return new;
146 } 146 }
147 147
148 struct agp_memory *agp_create_memory(int scratch_pages) 148 struct agp_memory *agp_create_memory(int scratch_pages)
149 { 149 {
150 struct agp_memory *new; 150 struct agp_memory *new;
151 151
152 new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); 152 new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
153 if (new == NULL) 153 if (new == NULL)
154 return NULL; 154 return NULL;
155 155
156 new->key = agp_get_key(); 156 new->key = agp_get_key();
157 157
158 if (new->key < 0) { 158 if (new->key < 0) {
159 kfree(new); 159 kfree(new);
160 return NULL; 160 return NULL;
161 } 161 }
162 162
163 agp_alloc_page_array(PAGE_SIZE * scratch_pages, new); 163 agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
164 164
165 if (new->memory == NULL) { 165 if (new->memory == NULL) {
166 agp_free_key(new->key); 166 agp_free_key(new->key);
167 kfree(new); 167 kfree(new);
168 return NULL; 168 return NULL;
169 } 169 }
170 new->num_scratch_pages = scratch_pages; 170 new->num_scratch_pages = scratch_pages;
171 new->type = AGP_NORMAL_MEMORY; 171 new->type = AGP_NORMAL_MEMORY;
172 return new; 172 return new;
173 } 173 }
174 EXPORT_SYMBOL(agp_create_memory); 174 EXPORT_SYMBOL(agp_create_memory);
175 175
176 /** 176 /**
177 * agp_free_memory - free memory associated with an agp_memory pointer. 177 * agp_free_memory - free memory associated with an agp_memory pointer.
178 * 178 *
179 * @curr: agp_memory pointer to be freed. 179 * @curr: agp_memory pointer to be freed.
180 * 180 *
181 * It is the only function that can be called when the backend is not owned 181 * It is the only function that can be called when the backend is not owned
182 * by the caller. (So it can free memory on client death.) 182 * by the caller. (So it can free memory on client death.)
183 */ 183 */
184 void agp_free_memory(struct agp_memory *curr) 184 void agp_free_memory(struct agp_memory *curr)
185 { 185 {
186 size_t i; 186 size_t i;
187 187
188 if (curr == NULL) 188 if (curr == NULL)
189 return; 189 return;
190 190
191 if (curr->is_bound) 191 if (curr->is_bound)
192 agp_unbind_memory(curr); 192 agp_unbind_memory(curr);
193 193
194 if (curr->type >= AGP_USER_TYPES) { 194 if (curr->type >= AGP_USER_TYPES) {
195 agp_generic_free_by_type(curr); 195 agp_generic_free_by_type(curr);
196 return; 196 return;
197 } 197 }
198 198
199 if (curr->type != 0) { 199 if (curr->type != 0) {
200 curr->bridge->driver->free_by_type(curr); 200 curr->bridge->driver->free_by_type(curr);
201 return; 201 return;
202 } 202 }
203 if (curr->page_count != 0) { 203 if (curr->page_count != 0) {
204 for (i = 0; i < curr->page_count; i++) { 204 for (i = 0; i < curr->page_count; i++) {
205 curr->memory[i] = (unsigned long)gart_to_virt(curr->memory[i]); 205 curr->memory[i] = (unsigned long)gart_to_virt(curr->memory[i]);
206 curr->bridge->driver->agp_destroy_page((void *)curr->memory[i], 206 curr->bridge->driver->agp_destroy_page((void *)curr->memory[i],
207 AGP_PAGE_DESTROY_UNMAP); 207 AGP_PAGE_DESTROY_UNMAP);
208 } 208 }
209 for (i = 0; i < curr->page_count; i++) { 209 for (i = 0; i < curr->page_count; i++) {
210 curr->bridge->driver->agp_destroy_page((void *)curr->memory[i], 210 curr->bridge->driver->agp_destroy_page((void *)curr->memory[i],
211 AGP_PAGE_DESTROY_FREE); 211 AGP_PAGE_DESTROY_FREE);
212 } 212 }
213 } 213 }
214 agp_free_key(curr->key); 214 agp_free_key(curr->key);
215 agp_free_page_array(curr); 215 agp_free_page_array(curr);
216 kfree(curr); 216 kfree(curr);
217 } 217 }
218 EXPORT_SYMBOL(agp_free_memory); 218 EXPORT_SYMBOL(agp_free_memory);
219 219
220 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) 220 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
221 221
222 /** 222 /**
223 * agp_allocate_memory - allocate a group of pages of a certain type. 223 * agp_allocate_memory - allocate a group of pages of a certain type.
224 * 224 *
225 * @page_count: size_t argument of the number of pages 225 * @page_count: size_t argument of the number of pages
226 * @type: u32 argument of the type of memory to be allocated. 226 * @type: u32 argument of the type of memory to be allocated.
227 * 227 *
228 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which 228 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
229 * maps to physical ram. Any other type is device dependent. 229 * maps to physical ram. Any other type is device dependent.
230 * 230 *
231 * It returns NULL whenever memory is unavailable. 231 * It returns NULL whenever memory is unavailable.
232 */ 232 */
233 struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge, 233 struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
234 size_t page_count, u32 type) 234 size_t page_count, u32 type)
235 { 235 {
236 int scratch_pages; 236 int scratch_pages;
237 struct agp_memory *new; 237 struct agp_memory *new;
238 size_t i; 238 size_t i;
239 239
240 if (!bridge) 240 if (!bridge)
241 return NULL; 241 return NULL;
242 242
243 if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp) 243 if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
244 return NULL; 244 return NULL;
245 245
246 if (type >= AGP_USER_TYPES) { 246 if (type >= AGP_USER_TYPES) {
247 new = agp_generic_alloc_user(page_count, type); 247 new = agp_generic_alloc_user(page_count, type);
248 if (new) 248 if (new)
249 new->bridge = bridge; 249 new->bridge = bridge;
250 return new; 250 return new;
251 } 251 }
252 252
253 if (type != 0) { 253 if (type != 0) {
254 new = bridge->driver->alloc_by_type(page_count, type); 254 new = bridge->driver->alloc_by_type(page_count, type);
255 if (new) 255 if (new)
256 new->bridge = bridge; 256 new->bridge = bridge;
257 return new; 257 return new;
258 } 258 }
259 259
260 scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; 260 scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
261 261
262 new = agp_create_memory(scratch_pages); 262 new = agp_create_memory(scratch_pages);
263 263
264 if (new == NULL) 264 if (new == NULL)
265 return NULL; 265 return NULL;
266 266
267 for (i = 0; i < page_count; i++) { 267 for (i = 0; i < page_count; i++) {
268 void *addr = bridge->driver->agp_alloc_page(bridge); 268 void *addr = bridge->driver->agp_alloc_page(bridge);
269 269
270 if (addr == NULL) { 270 if (addr == NULL) {
271 agp_free_memory(new); 271 agp_free_memory(new);
272 return NULL; 272 return NULL;
273 } 273 }
274 new->memory[i] = virt_to_gart(addr); 274 new->memory[i] = virt_to_gart(addr);
275 new->page_count++; 275 new->page_count++;
276 } 276 }
277 new->bridge = bridge; 277 new->bridge = bridge;
278 278
279 return new; 279 return new;
280 } 280 }
281 EXPORT_SYMBOL(agp_allocate_memory); 281 EXPORT_SYMBOL(agp_allocate_memory);
282 282
283 283
284 /* End - Generic routines for handling agp_memory structures */ 284 /* End - Generic routines for handling agp_memory structures */
285 285
286 286
287 static int agp_return_size(void) 287 static int agp_return_size(void)
288 { 288 {
289 int current_size; 289 int current_size;
290 void *temp; 290 void *temp;
291 291
292 temp = agp_bridge->current_size; 292 temp = agp_bridge->current_size;
293 293
294 switch (agp_bridge->driver->size_type) { 294 switch (agp_bridge->driver->size_type) {
295 case U8_APER_SIZE: 295 case U8_APER_SIZE:
296 current_size = A_SIZE_8(temp)->size; 296 current_size = A_SIZE_8(temp)->size;
297 break; 297 break;
298 case U16_APER_SIZE: 298 case U16_APER_SIZE:
299 current_size = A_SIZE_16(temp)->size; 299 current_size = A_SIZE_16(temp)->size;
300 break; 300 break;
301 case U32_APER_SIZE: 301 case U32_APER_SIZE:
302 current_size = A_SIZE_32(temp)->size; 302 current_size = A_SIZE_32(temp)->size;
303 break; 303 break;
304 case LVL2_APER_SIZE: 304 case LVL2_APER_SIZE:
305 current_size = A_SIZE_LVL2(temp)->size; 305 current_size = A_SIZE_LVL2(temp)->size;
306 break; 306 break;
307 case FIXED_APER_SIZE: 307 case FIXED_APER_SIZE:
308 current_size = A_SIZE_FIX(temp)->size; 308 current_size = A_SIZE_FIX(temp)->size;
309 break; 309 break;
310 default: 310 default:
311 current_size = 0; 311 current_size = 0;
312 break; 312 break;
313 } 313 }
314 314
315 current_size -= (agp_memory_reserved / (1024*1024)); 315 current_size -= (agp_memory_reserved / (1024*1024));
316 if (current_size <0) 316 if (current_size <0)
317 current_size = 0; 317 current_size = 0;
318 return current_size; 318 return current_size;
319 } 319 }
320 320
321 321
322 int agp_num_entries(void) 322 int agp_num_entries(void)
323 { 323 {
324 int num_entries; 324 int num_entries;
325 void *temp; 325 void *temp;
326 326
327 temp = agp_bridge->current_size; 327 temp = agp_bridge->current_size;
328 328
329 switch (agp_bridge->driver->size_type) { 329 switch (agp_bridge->driver->size_type) {
330 case U8_APER_SIZE: 330 case U8_APER_SIZE:
331 num_entries = A_SIZE_8(temp)->num_entries; 331 num_entries = A_SIZE_8(temp)->num_entries;
332 break; 332 break;
333 case U16_APER_SIZE: 333 case U16_APER_SIZE:
334 num_entries = A_SIZE_16(temp)->num_entries; 334 num_entries = A_SIZE_16(temp)->num_entries;
335 break; 335 break;
336 case U32_APER_SIZE: 336 case U32_APER_SIZE:
337 num_entries = A_SIZE_32(temp)->num_entries; 337 num_entries = A_SIZE_32(temp)->num_entries;
338 break; 338 break;
339 case LVL2_APER_SIZE: 339 case LVL2_APER_SIZE:
340 num_entries = A_SIZE_LVL2(temp)->num_entries; 340 num_entries = A_SIZE_LVL2(temp)->num_entries;
341 break; 341 break;
342 case FIXED_APER_SIZE: 342 case FIXED_APER_SIZE:
343 num_entries = A_SIZE_FIX(temp)->num_entries; 343 num_entries = A_SIZE_FIX(temp)->num_entries;
344 break; 344 break;
345 default: 345 default:
346 num_entries = 0; 346 num_entries = 0;
347 break; 347 break;
348 } 348 }
349 349
350 num_entries -= agp_memory_reserved>>PAGE_SHIFT; 350 num_entries -= agp_memory_reserved>>PAGE_SHIFT;
351 if (num_entries<0) 351 if (num_entries<0)
352 num_entries = 0; 352 num_entries = 0;
353 return num_entries; 353 return num_entries;
354 } 354 }
355 EXPORT_SYMBOL_GPL(agp_num_entries); 355 EXPORT_SYMBOL_GPL(agp_num_entries);
356 356
357 357
358 /** 358 /**
359 * agp_copy_info - copy bridge state information 359 * agp_copy_info - copy bridge state information
360 * 360 *
361 * @info: agp_kern_info pointer. The caller should insure that this pointer is valid. 361 * @info: agp_kern_info pointer. The caller should insure that this pointer is valid.
362 * 362 *
363 * This function copies information about the agp bridge device and the state of 363 * This function copies information about the agp bridge device and the state of
364 * the agp backend into an agp_kern_info pointer. 364 * the agp backend into an agp_kern_info pointer.
365 */ 365 */
366 int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info) 366 int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
367 { 367 {
368 memset(info, 0, sizeof(struct agp_kern_info)); 368 memset(info, 0, sizeof(struct agp_kern_info));
369 if (!bridge) { 369 if (!bridge) {
370 info->chipset = NOT_SUPPORTED; 370 info->chipset = NOT_SUPPORTED;
371 return -EIO; 371 return -EIO;
372 } 372 }
373 373
374 info->version.major = bridge->version->major; 374 info->version.major = bridge->version->major;
375 info->version.minor = bridge->version->minor; 375 info->version.minor = bridge->version->minor;
376 info->chipset = SUPPORTED; 376 info->chipset = SUPPORTED;
377 info->device = bridge->dev; 377 info->device = bridge->dev;
378 if (bridge->mode & AGPSTAT_MODE_3_0) 378 if (bridge->mode & AGPSTAT_MODE_3_0)
379 info->mode = bridge->mode & ~AGP3_RESERVED_MASK; 379 info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
380 else 380 else
381 info->mode = bridge->mode & ~AGP2_RESERVED_MASK; 381 info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
382 info->aper_base = bridge->gart_bus_addr; 382 info->aper_base = bridge->gart_bus_addr;
383 info->aper_size = agp_return_size(); 383 info->aper_size = agp_return_size();
384 info->max_memory = bridge->max_memory_agp; 384 info->max_memory = bridge->max_memory_agp;
385 info->current_memory = atomic_read(&bridge->current_memory_agp); 385 info->current_memory = atomic_read(&bridge->current_memory_agp);
386 info->cant_use_aperture = bridge->driver->cant_use_aperture; 386 info->cant_use_aperture = bridge->driver->cant_use_aperture;
387 info->vm_ops = bridge->vm_ops; 387 info->vm_ops = bridge->vm_ops;
388 info->page_mask = ~0UL; 388 info->page_mask = ~0UL;
389 return 0; 389 return 0;
390 } 390 }
391 EXPORT_SYMBOL(agp_copy_info); 391 EXPORT_SYMBOL(agp_copy_info);
392 392
393 /* End - Routine to copy over information structure */ 393 /* End - Routine to copy over information structure */
394 394
395 /* 395 /*
396 * Routines for handling swapping of agp_memory into the GATT - 396 * Routines for handling swapping of agp_memory into the GATT -
397 * These routines take agp_memory and insert them into the GATT. 397 * These routines take agp_memory and insert them into the GATT.
398 * They call device specific routines to actually write to the GATT. 398 * They call device specific routines to actually write to the GATT.
399 */ 399 */
400 400
401 /** 401 /**
402 * agp_bind_memory - Bind an agp_memory structure into the GATT. 402 * agp_bind_memory - Bind an agp_memory structure into the GATT.
403 * 403 *
404 * @curr: agp_memory pointer 404 * @curr: agp_memory pointer
405 * @pg_start: an offset into the graphics aperture translation table 405 * @pg_start: an offset into the graphics aperture translation table
406 * 406 *
407 * It returns -EINVAL if the pointer == NULL. 407 * It returns -EINVAL if the pointer == NULL.
408 * It returns -EBUSY if the area of the table requested is already in use. 408 * It returns -EBUSY if the area of the table requested is already in use.
409 */ 409 */
410 int agp_bind_memory(struct agp_memory *curr, off_t pg_start) 410 int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
411 { 411 {
412 int ret_val; 412 int ret_val;
413 413
414 if (curr == NULL) 414 if (curr == NULL)
415 return -EINVAL; 415 return -EINVAL;
416 416
417 if (curr->is_bound) { 417 if (curr->is_bound) {
418 printk(KERN_INFO PFX "memory %p is already bound!\n", curr); 418 printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
419 return -EINVAL; 419 return -EINVAL;
420 } 420 }
421 if (!curr->is_flushed) { 421 if (!curr->is_flushed) {
422 curr->bridge->driver->cache_flush(); 422 curr->bridge->driver->cache_flush();
423 curr->is_flushed = true; 423 curr->is_flushed = true;
424 } 424 }
425 ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type); 425 ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
426 426
427 if (ret_val != 0) 427 if (ret_val != 0)
428 return ret_val; 428 return ret_val;
429 429
430 curr->is_bound = true; 430 curr->is_bound = true;
431 curr->pg_start = pg_start; 431 curr->pg_start = pg_start;
432 return 0; 432 return 0;
433 } 433 }
434 EXPORT_SYMBOL(agp_bind_memory); 434 EXPORT_SYMBOL(agp_bind_memory);
435 435
436 436
437 /** 437 /**
438 * agp_unbind_memory - Removes an agp_memory structure from the GATT 438 * agp_unbind_memory - Removes an agp_memory structure from the GATT
439 * 439 *
440 * @curr: agp_memory pointer to be removed from the GATT. 440 * @curr: agp_memory pointer to be removed from the GATT.
441 * 441 *
442 * It returns -EINVAL if this piece of agp_memory is not currently bound to 442 * It returns -EINVAL if this piece of agp_memory is not currently bound to
443 * the graphics aperture translation table or if the agp_memory pointer == NULL 443 * the graphics aperture translation table or if the agp_memory pointer == NULL
444 */ 444 */
445 int agp_unbind_memory(struct agp_memory *curr) 445 int agp_unbind_memory(struct agp_memory *curr)
446 { 446 {
447 int ret_val; 447 int ret_val;
448 448
449 if (curr == NULL) 449 if (curr == NULL)
450 return -EINVAL; 450 return -EINVAL;
451 451
452 if (!curr->is_bound) { 452 if (!curr->is_bound) {
453 printk(KERN_INFO PFX "memory %p was not bound!\n", curr); 453 printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
454 return -EINVAL; 454 return -EINVAL;
455 } 455 }
456 456
457 ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type); 457 ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
458 458
459 if (ret_val != 0) 459 if (ret_val != 0)
460 return ret_val; 460 return ret_val;
461 461
462 curr->is_bound = false; 462 curr->is_bound = false;
463 curr->pg_start = 0; 463 curr->pg_start = 0;
464 return 0; 464 return 0;
465 } 465 }
466 EXPORT_SYMBOL(agp_unbind_memory); 466 EXPORT_SYMBOL(agp_unbind_memory);
467 467
468 /* End - Routines for handling swapping of agp_memory into the GATT */ 468 /* End - Routines for handling swapping of agp_memory into the GATT */
469 469
470 470
471 /* Generic Agp routines - Start */ 471 /* Generic Agp routines - Start */
472 static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat) 472 static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
473 { 473 {
474 u32 tmp; 474 u32 tmp;
475 475
476 if (*requested_mode & AGP2_RESERVED_MASK) { 476 if (*requested_mode & AGP2_RESERVED_MASK) {
477 printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n", 477 printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
478 *requested_mode & AGP2_RESERVED_MASK, *requested_mode); 478 *requested_mode & AGP2_RESERVED_MASK, *requested_mode);
479 *requested_mode &= ~AGP2_RESERVED_MASK; 479 *requested_mode &= ~AGP2_RESERVED_MASK;
480 } 480 }
481 481
482 /* 482 /*
483 * Some dumb bridges are programmed to disobey the AGP2 spec. 483 * Some dumb bridges are programmed to disobey the AGP2 spec.
484 * This is likely a BIOS misprogramming rather than poweron default, or 484 * This is likely a BIOS misprogramming rather than poweron default, or
485 * it would be a lot more common. 485 * it would be a lot more common.
486 * https://bugs.freedesktop.org/show_bug.cgi?id=8816 486 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
487 * AGPv2 spec 6.1.9 states: 487 * AGPv2 spec 6.1.9 states:
488 * The RATE field indicates the data transfer rates supported by this 488 * The RATE field indicates the data transfer rates supported by this
489 * device. A.G.P. devices must report all that apply. 489 * device. A.G.P. devices must report all that apply.
490 * Fix them up as best we can. 490 * Fix them up as best we can.
491 */ 491 */
492 switch (*bridge_agpstat & 7) { 492 switch (*bridge_agpstat & 7) {
493 case 4: 493 case 4:
494 *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X); 494 *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
495 printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate" 495 printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
496 "Fixing up support for x2 & x1\n"); 496 "Fixing up support for x2 & x1\n");
497 break; 497 break;
498 case 2: 498 case 2:
499 *bridge_agpstat |= AGPSTAT2_1X; 499 *bridge_agpstat |= AGPSTAT2_1X;
500 printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate" 500 printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
501 "Fixing up support for x1\n"); 501 "Fixing up support for x1\n");
502 break; 502 break;
503 default: 503 default:
504 break; 504 break;
505 } 505 }
506 506
507 /* Check the speed bits make sense. Only one should be set. */ 507 /* Check the speed bits make sense. Only one should be set. */
508 tmp = *requested_mode & 7; 508 tmp = *requested_mode & 7;
509 switch (tmp) { 509 switch (tmp) {
510 case 0: 510 case 0:
511 printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm); 511 printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
512 *requested_mode |= AGPSTAT2_1X; 512 *requested_mode |= AGPSTAT2_1X;
513 break; 513 break;
514 case 1: 514 case 1:
515 case 2: 515 case 2:
516 break; 516 break;
517 case 3: 517 case 3:
518 *requested_mode &= ~(AGPSTAT2_1X); /* rate=2 */ 518 *requested_mode &= ~(AGPSTAT2_1X); /* rate=2 */
519 break; 519 break;
520 case 4: 520 case 4:
521 break; 521 break;
522 case 5: 522 case 5:
523 case 6: 523 case 6:
524 case 7: 524 case 7:
525 *requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/ 525 *requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
526 break; 526 break;
527 } 527 }
528 528
529 /* disable SBA if it's not supported */ 529 /* disable SBA if it's not supported */
530 if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA))) 530 if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
531 *bridge_agpstat &= ~AGPSTAT_SBA; 531 *bridge_agpstat &= ~AGPSTAT_SBA;
532 532
533 /* Set rate */ 533 /* Set rate */
534 if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X))) 534 if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
535 *bridge_agpstat &= ~AGPSTAT2_4X; 535 *bridge_agpstat &= ~AGPSTAT2_4X;
536 536
537 if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X))) 537 if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
538 *bridge_agpstat &= ~AGPSTAT2_2X; 538 *bridge_agpstat &= ~AGPSTAT2_2X;
539 539
540 if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X))) 540 if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
541 *bridge_agpstat &= ~AGPSTAT2_1X; 541 *bridge_agpstat &= ~AGPSTAT2_1X;
542 542
543 /* Now we know what mode it should be, clear out the unwanted bits. */ 543 /* Now we know what mode it should be, clear out the unwanted bits. */
544 if (*bridge_agpstat & AGPSTAT2_4X) 544 if (*bridge_agpstat & AGPSTAT2_4X)
545 *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X); /* 4X */ 545 *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X); /* 4X */
546 546
547 if (*bridge_agpstat & AGPSTAT2_2X) 547 if (*bridge_agpstat & AGPSTAT2_2X)
548 *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X); /* 2X */ 548 *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X); /* 2X */
549 549
550 if (*bridge_agpstat & AGPSTAT2_1X) 550 if (*bridge_agpstat & AGPSTAT2_1X)
551 *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); /* 1X */ 551 *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); /* 1X */
552 552
553 /* Apply any errata. */ 553 /* Apply any errata. */
554 if (agp_bridge->flags & AGP_ERRATA_FASTWRITES) 554 if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
555 *bridge_agpstat &= ~AGPSTAT_FW; 555 *bridge_agpstat &= ~AGPSTAT_FW;
556 556
557 if (agp_bridge->flags & AGP_ERRATA_SBA) 557 if (agp_bridge->flags & AGP_ERRATA_SBA)
558 *bridge_agpstat &= ~AGPSTAT_SBA; 558 *bridge_agpstat &= ~AGPSTAT_SBA;
559 559
560 if (agp_bridge->flags & AGP_ERRATA_1X) { 560 if (agp_bridge->flags & AGP_ERRATA_1X) {
561 *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); 561 *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
562 *bridge_agpstat |= AGPSTAT2_1X; 562 *bridge_agpstat |= AGPSTAT2_1X;
563 } 563 }
564 564
565 /* If we've dropped down to 1X, disable fast writes. */ 565 /* If we've dropped down to 1X, disable fast writes. */
566 if (*bridge_agpstat & AGPSTAT2_1X) 566 if (*bridge_agpstat & AGPSTAT2_1X)
567 *bridge_agpstat &= ~AGPSTAT_FW; 567 *bridge_agpstat &= ~AGPSTAT_FW;
568 } 568 }
569 569
/*
 * agp_v3_parse_one - negotiate an AGP 3.0 mode.
 *
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 *
 * All three words are modified in place: the requested mode is sanitized
 * (reserved bits cleared, AGP 2.x rate bits translated), *bridge_agpstat
 * is reduced to the highest rate bridge, card and caller all support,
 * and bridge errata are applied last.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	/* Keep originals for the diagnostic messages in the fallback path. */
	u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode,
		 * have been passed a 3.0 mode, but with 2.x speed bits set.
		 * AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
						current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
					current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		/* x8 requested: downgrade to x4 if either side can't do it. */
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8*/
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {

		/*
		 * If we didn't specify an AGP mode, we see if both
		 * the graphics card, and the bridge can do x8, and use if so.
		 * If not, we fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* NOTE(review): these errata bits are AGP 2.x rate bits (AGPSTAT2_*);
	 * presumably intentional for 1X-errata bridges — confirm against the
	 * v2 path. */
	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}
703 703
704 704
/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (Typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	/* Walk all VGA-class devices until one with an AGP capability turns up. */
	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * Ok, here we have a AGP device. Disable impossible
	 * settings, and adjust the readqueue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth: the minimum of what was requested and what both
	 * bridge and card can support */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
	     min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
		 min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported by bridge, card and request alike */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
		 (vga_agpstat & AGPSTAT_FW) &&
		 (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	/* Drop the reference taken by pci_get_class() above. */
	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);
758 758
759 759
/*
 * Write the negotiated AGP command value into every AGP-capable PCI
 * device, switching them all into the chosen transfer mode.
 * @bridge_agpstat: final AGP command word to program.
 * @agp_v3: true when operating in AGP 3.0 signalling (rates are 4x the
 *          encoded value).
 */
void agp_device_command(u32 bridge_agpstat, bool agp_v3)
{
	struct pci_dev *device = NULL;
	int mode;

	/* Low three bits encode the data rate; AGP3 rates are scaled by 4. */
	mode = bridge_agpstat & 0x7;
	if (agp_v3)
		mode *= 4;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
			 agp_v3 ? 3 : 2, mode);
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
	}
}
EXPORT_SYMBOL(agp_device_command);
780 780
781 781
782 void get_agp_version(struct agp_bridge_data *bridge) 782 void get_agp_version(struct agp_bridge_data *bridge)
783 { 783 {
784 u32 ncapid; 784 u32 ncapid;
785 785
786 /* Exit early if already set by errata workarounds. */ 786 /* Exit early if already set by errata workarounds. */
787 if (bridge->major_version != 0) 787 if (bridge->major_version != 0)
788 return; 788 return;
789 789
790 pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid); 790 pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
791 bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf; 791 bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
792 bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf; 792 bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
793 } 793 }
794 EXPORT_SYMBOL(get_agp_version); 794 EXPORT_SYMBOL(get_agp_version);
795 795
796 796
/*
 * Generic AGP enable: negotiate a mode from @requested_mode and the
 * bridge/card status registers, then program all devices with it.
 * Falls back from 3.x to 2.x signalling when the bridge is in legacy mode.
 */
void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
		 agp_bridge->major_version, agp_bridge->minor_version);

	pci_read_config_dword(agp_bridge->dev,
		      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, true);
			return;
		} else {
			/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
			bridge_agpstat &= ~(7<<10) ;
			pci_read_config_dword(bridge->dev,
					bridge->capndx+AGPCTRL, &temp);
			temp |= (1<<9);
			pci_write_config_dword(bridge->dev,
					bridge->capndx+AGPCTRL, temp);

			dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);
844 841
845 842
/*
 * Allocate and initialise a single-level GATT (graphics address
 * translation table) for @bridge.  For variable-size apertures it retries
 * with successively smaller aperture sizes until an allocation succeeds.
 * Every entry is initialised to the bridge's scratch page.
 * Returns 0 on success, -EINVAL for 2-level GATTs, -ENOMEM on failure.
 */
int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		/* Try each aperture size in turn until alloc_gatt_pages()
		 * succeeds or we run out of sizes. */
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order =
				    A_SIZE_8(temp)->page_order;
				num_entries =
				    A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
			/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				/* Allocation failed: advance to the next
				 * (smaller) aperture size and retry. */
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	/* Reserve the backing pages so they are never swapped/reclaimed. */
	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
#ifdef CONFIG_X86
	/* On x86, mark the table uncached in place instead of remapping. */
	set_memory_uc((unsigned long)table, 1 << page_order);
	bridge->gatt_table = (void *)table;
#else
	bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
					(PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();
#endif

	if (bridge->gatt_table == NULL) {
		/* Undo the reservation and free the pages on remap failure. */
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);
966 963
/*
 * Tear down a GATT created by agp_generic_create_gatt_table(): restore
 * cacheability (or unmap), un-reserve the backing pages, free them, and
 * clear the bridge's table bookkeeping.  Returns 0, or -EINVAL for
 * 2-level GATTs which the generic code cannot handle.
 */
int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

#ifdef CONFIG_X86
	/* Reverse of set_memory_uc() at creation time. */
	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
	iounmap(bridge->gatt_table);
#endif
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);
1023 1020
1024 1021
/*
 * Bind @mem into the GATT starting at entry @pg_start.  Validates the
 * memory type, bounds-checks against the aperture, refuses to overwrite
 * occupied entries, then writes the masked physical addresses.
 * Returns 0, -EINVAL on type/range errors, or -EBUSY if entries are in use.
 */
int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	temp = bridge->current_size;

	/* Determine the number of GATT entries for the current aperture. */
	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		num_entries = 0;
		break;
	}

	/* Exclude the reserved region from the usable entry count. */
	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0) num_entries = 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: could wrap */
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	/* Refuse to bind over entries that are already populated. */
	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		bridge->driver->cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge, mem->memory[i], mask_type),
		       bridge->gatt_table+j);
	}
	readl(bridge->gatt_table+j-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);
1104 1101
1105 1102
/*
 * Unbind @mem from the GATT: rewrite its entries with the scratch page
 * and flush the TLB.  Returns 0 or -EINVAL on type mismatch.
 */
int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: bogus, should encode addresses > 4GB */
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
	}
	readl(bridge->gatt_table+i-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);
1138 1135
/* Generic code has no typed-memory allocator; chipset drivers that
 * support special memory types override this hook. */
struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);
1144 1141
/* Release an agp_memory object: its page array, its key, then the
 * structure itself. */
void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);
1152 1149
1153 struct agp_memory *agp_generic_alloc_user(size_t page_count, int type) 1150 struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
1154 { 1151 {
1155 struct agp_memory *new; 1152 struct agp_memory *new;
1156 int i; 1153 int i;
1157 int pages; 1154 int pages;
1158 1155
1159 pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; 1156 pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
1160 new = agp_create_user_memory(page_count); 1157 new = agp_create_user_memory(page_count);
1161 if (new == NULL) 1158 if (new == NULL)
1162 return NULL; 1159 return NULL;
1163 1160
1164 for (i = 0; i < page_count; i++) 1161 for (i = 0; i < page_count; i++)
1165 new->memory[i] = 0; 1162 new->memory[i] = 0;
1166 new->page_count = 0; 1163 new->page_count = 0;
1167 new->type = type; 1164 new->type = type;
1168 new->num_scratch_pages = pages; 1165 new->num_scratch_pages = pages;
1169 1166
1170 return new; 1167 return new;
1171 } 1168 }
1172 EXPORT_SYMBOL(agp_generic_alloc_user); 1169 EXPORT_SYMBOL(agp_generic_alloc_user);
1173 1170
1174 /* 1171 /*
1175 * Basic Page Allocation Routines - 1172 * Basic Page Allocation Routines -
1176 * These routines handle page allocation and by default they reserve the allocated 1173 * These routines handle page allocation and by default they reserve the allocated
1177 * memory. They also handle incrementing the current_memory_agp value, Which is checked 1174 * memory. They also handle incrementing the current_memory_agp value, Which is checked
1178 * against a maximum value. 1175 * against a maximum value.
1179 */ 1176 */
1180 1177
/*
 * Allocate one page suitable for AGP use: DMA32-reachable, mapped into
 * the AGP aperture's cache regime, with an extra reference held.
 * Increments the global allocated-page counter.  Returns the page's
 * kernel virtual address, or NULL on failure.
 */
void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
	struct page * page;

	page = alloc_page(GFP_KERNEL | GFP_DMA32);
	if (page == NULL)
		return NULL;

	map_page_into_agp(page);

	/* Extra reference; dropped again in agp_generic_destroy_page(). */
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page_address(page);
}
EXPORT_SYMBOL(agp_generic_alloc_page);
1196 1193
1197 1194
1198 void agp_generic_destroy_page(void *addr, int flags) 1195 void agp_generic_destroy_page(void *addr, int flags)
1199 { 1196 {
1200 struct page *page; 1197 struct page *page;
1201 1198
1202 if (addr == NULL) 1199 if (addr == NULL)
1203 return; 1200 return;
1204 1201
1205 page = virt_to_page(addr); 1202 page = virt_to_page(addr);
1206 if (flags & AGP_PAGE_DESTROY_UNMAP) 1203 if (flags & AGP_PAGE_DESTROY_UNMAP)
1207 unmap_page_from_agp(page); 1204 unmap_page_from_agp(page);
1208 1205
1209 if (flags & AGP_PAGE_DESTROY_FREE) { 1206 if (flags & AGP_PAGE_DESTROY_FREE) {
1210 put_page(page); 1207 put_page(page);
1211 free_page((unsigned long)addr); 1208 free_page((unsigned long)addr);
1212 atomic_dec(&agp_bridge->current_memory_agp); 1209 atomic_dec(&agp_bridge->current_memory_agp);
1213 } 1210 }
1214 } 1211 }
1215 EXPORT_SYMBOL(agp_generic_destroy_page); 1212 EXPORT_SYMBOL(agp_generic_destroy_page);
1216 1213
1217 /* End Basic Page Allocation Routines */ 1214 /* End Basic Page Allocation Routines */
1218 1215
1219 1216
1220 /** 1217 /**
1221 * agp_enable - initialise the agp point-to-point connection. 1218 * agp_enable - initialise the agp point-to-point connection.
1222 * 1219 *
1223 * @mode: agp mode register value to configure with. 1220 * @mode: agp mode register value to configure with.
1224 */ 1221 */
1225 void agp_enable(struct agp_bridge_data *bridge, u32 mode) 1222 void agp_enable(struct agp_bridge_data *bridge, u32 mode)
1226 { 1223 {
1227 if (!bridge) 1224 if (!bridge)
1228 return; 1225 return;
1229 bridge->driver->agp_enable(bridge, mode); 1226 bridge->driver->agp_enable(bridge, mode);
1230 } 1227 }
1231 EXPORT_SYMBOL(agp_enable); 1228 EXPORT_SYMBOL(agp_enable);
1232 1229
1233 /* When we remove the global variable agp_bridge from all drivers 1230 /* When we remove the global variable agp_bridge from all drivers
1234 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated 1231 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
1235 */ 1232 */
1236 1233
1237 struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev) 1234 struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
1238 { 1235 {
1239 if (list_empty(&agp_bridges)) 1236 if (list_empty(&agp_bridges))
1240 return NULL; 1237 return NULL;
1241 1238
1242 return agp_bridge; 1239 return agp_bridge;
1243 } 1240 }
1244 1241
/* IPI callback: flush the AGP-relevant CPU cache on the local processor. */
static void ipi_handler(void *null)
{
	flush_agp_cache();
}
1249 1246
1250 void global_cache_flush(void) 1247 void global_cache_flush(void)
1251 { 1248 {
1252 if (on_each_cpu(ipi_handler, NULL, 1) != 0) 1249 if (on_each_cpu(ipi_handler, NULL, 1) != 0)
1253 panic(PFX "timed out waiting for the other CPUs!\n"); 1250 panic(PFX "timed out waiting for the other CPUs!\n");
1254 } 1251 }
1255 EXPORT_SYMBOL(global_cache_flush); 1252 EXPORT_SYMBOL(global_cache_flush);
1256 1253
1257 unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge, 1254 unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
1258 unsigned long addr, int type) 1255 unsigned long addr, int type)
1259 { 1256 {
1260 /* memory type is ignored in the generic routine */ 1257 /* memory type is ignored in the generic routine */
1261 if (bridge->driver->masks) 1258 if (bridge->driver->masks)
1262 return addr | bridge->driver->masks[0].mask; 1259 return addr | bridge->driver->masks[0].mask;
1263 else 1260 else
1264 return addr; 1261 return addr;
1265 } 1262 }
1266 EXPORT_SYMBOL(agp_generic_mask_memory); 1263 EXPORT_SYMBOL(agp_generic_mask_memory);
1267 1264
1268 int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge, 1265 int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
1269 int type) 1266 int type)
1270 { 1267 {
1271 if (type >= AGP_USER_TYPES) 1268 if (type >= AGP_USER_TYPES)
1272 return 0; 1269 return 0;
1273 return type; 1270 return type;
1274 } 1271 }
1275 EXPORT_SYMBOL(agp_generic_type_to_mask_type); 1272 EXPORT_SYMBOL(agp_generic_type_to_mask_type);
1276 1273
1277 /* 1274 /*
1278 * These functions are implemented according to the AGPv3 spec, 1275 * These functions are implemented according to the AGPv3 spec,
1279 * which covers implementation details that had previously been 1276 * which covers implementation details that had previously been
1280 * left open. 1277 * left open.
1281 */ 1278 */
1282 1279
1283 int agp3_generic_fetch_size(void) 1280 int agp3_generic_fetch_size(void)
1284 { 1281 {
1285 u16 temp_size; 1282 u16 temp_size;
1286 int i; 1283 int i;
1287 struct aper_size_info_16 *values; 1284 struct aper_size_info_16 *values;
1288 1285
1289 pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size); 1286 pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
1290 values = A_SIZE_16(agp_bridge->driver->aperture_sizes); 1287 values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
1291 1288
1292 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { 1289 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
1293 if (temp_size == values[i].size_value) { 1290 if (temp_size == values[i].size_value) {
1294 agp_bridge->previous_size = 1291 agp_bridge->previous_size =
1295 agp_bridge->current_size = (void *) (values + i); 1292 agp_bridge->current_size = (void *) (values + i);
1296 1293
1297 agp_bridge->aperture_size_idx = i; 1294 agp_bridge->aperture_size_idx = i;
1298 return values[i].size; 1295 return values[i].size;
1299 } 1296 }
1300 } 1297 }
1301 return 0; 1298 return 0;
1302 } 1299 }
1303 EXPORT_SYMBOL(agp3_generic_fetch_size); 1300 EXPORT_SYMBOL(agp3_generic_fetch_size);
1304 1301
1305 void agp3_generic_tlbflush(struct agp_memory *mem) 1302 void agp3_generic_tlbflush(struct agp_memory *mem)
1306 { 1303 {
1307 u32 ctrl; 1304 u32 ctrl;
1308 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl); 1305 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
1309 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN); 1306 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
1310 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl); 1307 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
1311 } 1308 }
1312 EXPORT_SYMBOL(agp3_generic_tlbflush); 1309 EXPORT_SYMBOL(agp3_generic_tlbflush);
1313 1310
1314 int agp3_generic_configure(void) 1311 int agp3_generic_configure(void)
1315 { 1312 {
1316 u32 temp; 1313 u32 temp;
1317 struct aper_size_info_16 *current_size; 1314 struct aper_size_info_16 *current_size;
1318 1315
1319 current_size = A_SIZE_16(agp_bridge->current_size); 1316 current_size = A_SIZE_16(agp_bridge->current_size);
1320 1317
1321 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); 1318 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
1322 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); 1319 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1323 1320
1324 /* set aperture size */ 1321 /* set aperture size */
1325 pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value); 1322 pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
1326 /* set gart pointer */ 1323 /* set gart pointer */
1327 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr); 1324 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
1328 /* enable aperture and GTLB */ 1325 /* enable aperture and GTLB */
1329 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp); 1326 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
1330 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN); 1327 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
1331 return 0; 1328 return 0;
1332 } 1329 }
1333 EXPORT_SYMBOL(agp3_generic_configure); 1330 EXPORT_SYMBOL(agp3_generic_configure);
1334 1331
1335 void agp3_generic_cleanup(void) 1332 void agp3_generic_cleanup(void)
1336 { 1333 {
1337 u32 ctrl; 1334 u32 ctrl;
1338 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl); 1335 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
1339 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB); 1336 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
1340 } 1337 }
1341 EXPORT_SYMBOL(agp3_generic_cleanup); 1338 EXPORT_SYMBOL(agp3_generic_cleanup);
1342 1339
1343 const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] = 1340 const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
1344 { 1341 {
1345 {4096, 1048576, 10,0x000}, 1342 {4096, 1048576, 10,0x000},
1346 {2048, 524288, 9, 0x800}, 1343 {2048, 524288, 9, 0x800},
1347 {1024, 262144, 8, 0xc00}, 1344 {1024, 262144, 8, 0xc00},
1348 { 512, 131072, 7, 0xe00}, 1345 { 512, 131072, 7, 0xe00},
1349 { 256, 65536, 6, 0xf00}, 1346 { 256, 65536, 6, 0xf00},
1350 { 128, 32768, 5, 0xf20}, 1347 { 128, 32768, 5, 0xf20},
1351 { 64, 16384, 4, 0xf30}, 1348 { 64, 16384, 4, 0xf30},
1352 { 32, 8192, 3, 0xf38}, 1349 { 32, 8192, 3, 0xf38},
1353 { 16, 4096, 2, 0xf3c}, 1350 { 16, 4096, 2, 0xf3c},
1354 { 8, 2048, 1, 0xf3e}, 1351 { 8, 2048, 1, 0xf3e},
1355 { 4, 1024, 0, 0xf3f} 1352 { 4, 1024, 0, 0xf3f}
1356 }; 1353 };
1357 EXPORT_SYMBOL(agp3_generic_sizes); 1354 EXPORT_SYMBOL(agp3_generic_sizes);
1358 1355
1359 1356
drivers/char/agp/intel-agp.c
1 /* 1 /*
2 * Intel AGPGART routines. 2 * Intel AGPGART routines.
3 */ 3 */
4 4
5 #include <linux/module.h> 5 #include <linux/module.h>
6 #include <linux/pci.h> 6 #include <linux/pci.h>
7 #include <linux/init.h> 7 #include <linux/init.h>
8 #include <linux/kernel.h> 8 #include <linux/kernel.h>
9 #include <linux/pagemap.h> 9 #include <linux/pagemap.h>
10 #include <linux/agp_backend.h> 10 #include <linux/agp_backend.h>
11 #include "agp.h" 11 #include "agp.h"
12 12
13 #define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 13 #define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
14 #define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a 14 #define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
15 #define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 15 #define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
16 #define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972 16 #define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
17 #define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980 17 #define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
18 #define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982 18 #define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
19 #define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 19 #define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
20 #define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992 20 #define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
21 #define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 21 #define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
22 #define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2 22 #define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
23 #define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 23 #define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
24 #define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02 24 #define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
25 #define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 25 #define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
26 #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 26 #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
27 #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC 27 #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
28 #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE 28 #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
29 #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 29 #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
30 #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 30 #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
31 #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 31 #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
32 #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 32 #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
33 #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 33 #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
34 #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 34 #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
35 #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 35 #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
36 #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 36 #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
37 #define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00 37 #define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00
38 #define PCI_DEVICE_ID_INTEL_IGD_E_IG 0x2E02 38 #define PCI_DEVICE_ID_INTEL_IGD_E_IG 0x2E02
39 #define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 39 #define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
40 #define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 40 #define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
41 #define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 41 #define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
42 #define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 42 #define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
43 43
44 /* cover 915 and 945 variants */ 44 /* cover 915 and 945 variants */
45 #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ 45 #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
46 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \ 46 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
47 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \ 47 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
48 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \ 48 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
49 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \ 49 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
50 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB) 50 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
51 51
52 #define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \ 52 #define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
53 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \ 53 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
54 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ 54 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
55 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ 55 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
56 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ 56 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
57 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB || \ 57 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB || \
58 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB) 58 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB)
59 59
60 #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ 60 #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
61 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ 61 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
62 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB) 62 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB)
63 63
64 #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \ 64 #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
65 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ 65 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
66 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB) 66 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB)
67 67
68 extern int agp_memory_reserved; 68 extern int agp_memory_reserved;
69 69
70 70
71 /* Intel 815 register */ 71 /* Intel 815 register */
72 #define INTEL_815_APCONT 0x51 72 #define INTEL_815_APCONT 0x51
73 #define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF 73 #define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
74 74
75 /* Intel i820 registers */ 75 /* Intel i820 registers */
76 #define INTEL_I820_RDCR 0x51 76 #define INTEL_I820_RDCR 0x51
77 #define INTEL_I820_ERRSTS 0xc8 77 #define INTEL_I820_ERRSTS 0xc8
78 78
79 /* Intel i840 registers */ 79 /* Intel i840 registers */
80 #define INTEL_I840_MCHCFG 0x50 80 #define INTEL_I840_MCHCFG 0x50
81 #define INTEL_I840_ERRSTS 0xc8 81 #define INTEL_I840_ERRSTS 0xc8
82 82
83 /* Intel i850 registers */ 83 /* Intel i850 registers */
84 #define INTEL_I850_MCHCFG 0x50 84 #define INTEL_I850_MCHCFG 0x50
85 #define INTEL_I850_ERRSTS 0xc8 85 #define INTEL_I850_ERRSTS 0xc8
86 86
87 /* intel 915G registers */ 87 /* intel 915G registers */
88 #define I915_GMADDR 0x18 88 #define I915_GMADDR 0x18
89 #define I915_MMADDR 0x10 89 #define I915_MMADDR 0x10
90 #define I915_PTEADDR 0x1C 90 #define I915_PTEADDR 0x1C
91 #define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) 91 #define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
92 #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) 92 #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
93 #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) 93 #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
94 #define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) 94 #define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
95 #define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) 95 #define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
96 #define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) 96 #define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
97 #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) 97 #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
98 #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) 98 #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
99 99
100 #define I915_IFPADDR 0x60 100 #define I915_IFPADDR 0x60
101 101
102 /* Intel 965G registers */ 102 /* Intel 965G registers */
103 #define I965_MSAC 0x62 103 #define I965_MSAC 0x62
104 #define I965_IFPADDR 0x70 104 #define I965_IFPADDR 0x70
105 105
106 /* Intel 7505 registers */ 106 /* Intel 7505 registers */
107 #define INTEL_I7505_APSIZE 0x74 107 #define INTEL_I7505_APSIZE 0x74
108 #define INTEL_I7505_NCAPID 0x60 108 #define INTEL_I7505_NCAPID 0x60
109 #define INTEL_I7505_NISTAT 0x6c 109 #define INTEL_I7505_NISTAT 0x6c
110 #define INTEL_I7505_ATTBASE 0x78 110 #define INTEL_I7505_ATTBASE 0x78
111 #define INTEL_I7505_ERRSTS 0x42 111 #define INTEL_I7505_ERRSTS 0x42
112 #define INTEL_I7505_AGPCTRL 0x70 112 #define INTEL_I7505_AGPCTRL 0x70
113 #define INTEL_I7505_MCHCFG 0x50 113 #define INTEL_I7505_MCHCFG 0x50
114 114
115 static const struct aper_size_info_fixed intel_i810_sizes[] = 115 static const struct aper_size_info_fixed intel_i810_sizes[] =
116 { 116 {
117 {64, 16384, 4}, 117 {64, 16384, 4},
118 /* The 32M mode still requires a 64k gatt */ 118 /* The 32M mode still requires a 64k gatt */
119 {32, 8192, 4} 119 {32, 8192, 4}
120 }; 120 };
121 121
122 #define AGP_DCACHE_MEMORY 1 122 #define AGP_DCACHE_MEMORY 1
123 #define AGP_PHYS_MEMORY 2 123 #define AGP_PHYS_MEMORY 2
124 #define INTEL_AGP_CACHED_MEMORY 3 124 #define INTEL_AGP_CACHED_MEMORY 3
125 125
126 static struct gatt_mask intel_i810_masks[] = 126 static struct gatt_mask intel_i810_masks[] =
127 { 127 {
128 {.mask = I810_PTE_VALID, .type = 0}, 128 {.mask = I810_PTE_VALID, .type = 0},
129 {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY}, 129 {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
130 {.mask = I810_PTE_VALID, .type = 0}, 130 {.mask = I810_PTE_VALID, .type = 0},
131 {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED, 131 {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
132 .type = INTEL_AGP_CACHED_MEMORY} 132 .type = INTEL_AGP_CACHED_MEMORY}
133 }; 133 };
134 134
135 static struct _intel_private { 135 static struct _intel_private {
136 struct pci_dev *pcidev; /* device one */ 136 struct pci_dev *pcidev; /* device one */
137 u8 __iomem *registers; 137 u8 __iomem *registers;
138 u32 __iomem *gtt; /* I915G */ 138 u32 __iomem *gtt; /* I915G */
139 int num_dcache_entries; 139 int num_dcache_entries;
140 /* gtt_entries is the number of gtt entries that are already mapped 140 /* gtt_entries is the number of gtt entries that are already mapped
141 * to stolen memory. Stolen memory is larger than the memory mapped 141 * to stolen memory. Stolen memory is larger than the memory mapped
142 * through gtt_entries, as it includes some reserved space for the BIOS 142 * through gtt_entries, as it includes some reserved space for the BIOS
143 * popup and for the GTT. 143 * popup and for the GTT.
144 */ 144 */
145 int gtt_entries; /* i830+ */ 145 int gtt_entries; /* i830+ */
146 union { 146 union {
147 void __iomem *i9xx_flush_page; 147 void __iomem *i9xx_flush_page;
148 void *i8xx_flush_page; 148 void *i8xx_flush_page;
149 }; 149 };
150 struct page *i8xx_page; 150 struct page *i8xx_page;
151 struct resource ifp_resource; 151 struct resource ifp_resource;
152 int resource_valid; 152 int resource_valid;
153 } intel_private; 153 } intel_private;
154 154
155 static int intel_i810_fetch_size(void) 155 static int intel_i810_fetch_size(void)
156 { 156 {
157 u32 smram_miscc; 157 u32 smram_miscc;
158 struct aper_size_info_fixed *values; 158 struct aper_size_info_fixed *values;
159 159
160 pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc); 160 pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
161 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); 161 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
162 162
163 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { 163 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
164 printk(KERN_WARNING PFX "i810 is disabled\n"); 164 dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
165 return 0; 165 return 0;
166 } 166 }
167 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { 167 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
168 agp_bridge->previous_size = 168 agp_bridge->previous_size =
169 agp_bridge->current_size = (void *) (values + 1); 169 agp_bridge->current_size = (void *) (values + 1);
170 agp_bridge->aperture_size_idx = 1; 170 agp_bridge->aperture_size_idx = 1;
171 return values[1].size; 171 return values[1].size;
172 } else { 172 } else {
173 agp_bridge->previous_size = 173 agp_bridge->previous_size =
174 agp_bridge->current_size = (void *) (values); 174 agp_bridge->current_size = (void *) (values);
175 agp_bridge->aperture_size_idx = 0; 175 agp_bridge->aperture_size_idx = 0;
176 return values[0].size; 176 return values[0].size;
177 } 177 }
178 178
179 return 0; 179 return 0;
180 } 180 }
181 181
182 static int intel_i810_configure(void) 182 static int intel_i810_configure(void)
183 { 183 {
184 struct aper_size_info_fixed *current_size; 184 struct aper_size_info_fixed *current_size;
185 u32 temp; 185 u32 temp;
186 int i; 186 int i;
187 187
188 current_size = A_SIZE_FIX(agp_bridge->current_size); 188 current_size = A_SIZE_FIX(agp_bridge->current_size);
189 189
190 if (!intel_private.registers) { 190 if (!intel_private.registers) {
191 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); 191 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
192 temp &= 0xfff80000; 192 temp &= 0xfff80000;
193 193
194 intel_private.registers = ioremap(temp, 128 * 4096); 194 intel_private.registers = ioremap(temp, 128 * 4096);
195 if (!intel_private.registers) { 195 if (!intel_private.registers) {
196 printk(KERN_ERR PFX "Unable to remap memory.\n"); 196 dev_err(&intel_private.pcidev->dev,
197 "can't remap memory\n");
197 return -ENOMEM; 198 return -ENOMEM;
198 } 199 }
199 } 200 }
200 201
201 if ((readl(intel_private.registers+I810_DRAM_CTL) 202 if ((readl(intel_private.registers+I810_DRAM_CTL)
202 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { 203 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
203 /* This will need to be dynamically assigned */ 204 /* This will need to be dynamically assigned */
204 printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n"); 205 dev_info(&intel_private.pcidev->dev,
206 "detected 4MB dedicated video ram\n");
205 intel_private.num_dcache_entries = 1024; 207 intel_private.num_dcache_entries = 1024;
206 } 208 }
207 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); 209 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
208 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); 210 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
209 writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); 211 writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
210 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ 212 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
211 213
212 if (agp_bridge->driver->needs_scratch_page) { 214 if (agp_bridge->driver->needs_scratch_page) {
213 for (i = 0; i < current_size->num_entries; i++) { 215 for (i = 0; i < current_size->num_entries; i++) {
214 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); 216 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
215 readl(intel_private.registers+I810_PTE_BASE+(i*4)); /* PCI posting. */ 217 readl(intel_private.registers+I810_PTE_BASE+(i*4)); /* PCI posting. */
216 } 218 }
217 } 219 }
218 global_cache_flush(); 220 global_cache_flush();
219 return 0; 221 return 0;
220 } 222 }
221 223
222 static void intel_i810_cleanup(void) 224 static void intel_i810_cleanup(void)
223 { 225 {
224 writel(0, intel_private.registers+I810_PGETBL_CTL); 226 writel(0, intel_private.registers+I810_PGETBL_CTL);
225 readl(intel_private.registers); /* PCI Posting. */ 227 readl(intel_private.registers); /* PCI Posting. */
226 iounmap(intel_private.registers); 228 iounmap(intel_private.registers);
227 } 229 }
228 230
/* No-op: the i810 needs no explicit GART TLB flush. */
static void intel_i810_tlbflush(struct agp_memory *mem)
{
}
233 235
234 static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode) 236 static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
235 { 237 {
236 return; 238 return;
237 } 239 }
238 240
239 /* Exists to support ARGB cursors */ 241 /* Exists to support ARGB cursors */
240 static void *i8xx_alloc_pages(void) 242 static void *i8xx_alloc_pages(void)
241 { 243 {
242 struct page *page; 244 struct page *page;
243 245
244 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2); 246 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
245 if (page == NULL) 247 if (page == NULL)
246 return NULL; 248 return NULL;
247 249
248 if (set_pages_uc(page, 4) < 0) { 250 if (set_pages_uc(page, 4) < 0) {
249 set_pages_wb(page, 4); 251 set_pages_wb(page, 4);
250 __free_pages(page, 2); 252 __free_pages(page, 2);
251 return NULL; 253 return NULL;
252 } 254 }
253 get_page(page); 255 get_page(page);
254 atomic_inc(&agp_bridge->current_memory_agp); 256 atomic_inc(&agp_bridge->current_memory_agp);
255 return page_address(page); 257 return page_address(page);
256 } 258 }
257 259
258 static void i8xx_destroy_pages(void *addr) 260 static void i8xx_destroy_pages(void *addr)
259 { 261 {
260 struct page *page; 262 struct page *page;
261 263
262 if (addr == NULL) 264 if (addr == NULL)
263 return; 265 return;
264 266
265 page = virt_to_page(addr); 267 page = virt_to_page(addr);
266 set_pages_wb(page, 4); 268 set_pages_wb(page, 4);
267 put_page(page); 269 put_page(page);
268 __free_pages(page, 2); 270 __free_pages(page, 2);
269 atomic_dec(&agp_bridge->current_memory_agp); 271 atomic_dec(&agp_bridge->current_memory_agp);
270 } 272 }
271 273
272 static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, 274 static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
273 int type) 275 int type)
274 { 276 {
275 if (type < AGP_USER_TYPES) 277 if (type < AGP_USER_TYPES)
276 return type; 278 return type;
277 else if (type == AGP_USER_CACHED_MEMORY) 279 else if (type == AGP_USER_CACHED_MEMORY)
278 return INTEL_AGP_CACHED_MEMORY; 280 return INTEL_AGP_CACHED_MEMORY;
279 else 281 else
280 return 0; 282 return 0;
281 } 283 }
282 284
283 static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, 285 static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
284 int type) 286 int type)
285 { 287 {
286 int i, j, num_entries; 288 int i, j, num_entries;
287 void *temp; 289 void *temp;
288 int ret = -EINVAL; 290 int ret = -EINVAL;
289 int mask_type; 291 int mask_type;
290 292
291 if (mem->page_count == 0) 293 if (mem->page_count == 0)
292 goto out; 294 goto out;
293 295
294 temp = agp_bridge->current_size; 296 temp = agp_bridge->current_size;
295 num_entries = A_SIZE_FIX(temp)->num_entries; 297 num_entries = A_SIZE_FIX(temp)->num_entries;
296 298
297 if ((pg_start + mem->page_count) > num_entries) 299 if ((pg_start + mem->page_count) > num_entries)
298 goto out_err; 300 goto out_err;
299 301
300 302
301 for (j = pg_start; j < (pg_start + mem->page_count); j++) { 303 for (j = pg_start; j < (pg_start + mem->page_count); j++) {
302 if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) { 304 if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
303 ret = -EBUSY; 305 ret = -EBUSY;
304 goto out_err; 306 goto out_err;
305 } 307 }
306 } 308 }
307 309
308 if (type != mem->type) 310 if (type != mem->type)
309 goto out_err; 311 goto out_err;
310 312
311 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); 313 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
312 314
313 switch (mask_type) { 315 switch (mask_type) {
314 case AGP_DCACHE_MEMORY: 316 case AGP_DCACHE_MEMORY:
315 if (!mem->is_flushed) 317 if (!mem->is_flushed)
316 global_cache_flush(); 318 global_cache_flush();
317 for (i = pg_start; i < (pg_start + mem->page_count); i++) { 319 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
318 writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, 320 writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
319 intel_private.registers+I810_PTE_BASE+(i*4)); 321 intel_private.registers+I810_PTE_BASE+(i*4));
320 } 322 }
321 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); 323 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
322 break; 324 break;
323 case AGP_PHYS_MEMORY: 325 case AGP_PHYS_MEMORY:
324 case AGP_NORMAL_MEMORY: 326 case AGP_NORMAL_MEMORY:
325 if (!mem->is_flushed) 327 if (!mem->is_flushed)
326 global_cache_flush(); 328 global_cache_flush();
327 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 329 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
328 writel(agp_bridge->driver->mask_memory(agp_bridge, 330 writel(agp_bridge->driver->mask_memory(agp_bridge,
329 mem->memory[i], 331 mem->memory[i],
330 mask_type), 332 mask_type),
331 intel_private.registers+I810_PTE_BASE+(j*4)); 333 intel_private.registers+I810_PTE_BASE+(j*4));
332 } 334 }
333 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); 335 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
334 break; 336 break;
335 default: 337 default:
336 goto out_err; 338 goto out_err;
337 } 339 }
338 340
339 agp_bridge->driver->tlb_flush(mem); 341 agp_bridge->driver->tlb_flush(mem);
340 out: 342 out:
341 ret = 0; 343 ret = 0;
342 out_err: 344 out_err:
343 mem->is_flushed = true; 345 mem->is_flushed = true;
344 return ret; 346 return ret;
345 } 347 }
346 348
347 static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, 349 static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
348 int type) 350 int type)
349 { 351 {
350 int i; 352 int i;
351 353
352 if (mem->page_count == 0) 354 if (mem->page_count == 0)
353 return 0; 355 return 0;
354 356
355 for (i = pg_start; i < (mem->page_count + pg_start); i++) { 357 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
356 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); 358 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
357 } 359 }
358 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); 360 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
359 361
360 agp_bridge->driver->tlb_flush(mem); 362 agp_bridge->driver->tlb_flush(mem);
361 return 0; 363 return 0;
362 } 364 }
363 365
364 /* 366 /*
365 * The i810/i830 requires a physical address to program its mouse 367 * The i810/i830 requires a physical address to program its mouse
366 * pointer into hardware. 368 * pointer into hardware.
367 * However the Xserver still writes to it through the agp aperture. 369 * However the Xserver still writes to it through the agp aperture.
368 */ 370 */
369 static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) 371 static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
370 { 372 {
371 struct agp_memory *new; 373 struct agp_memory *new;
372 void *addr; 374 void *addr;
373 375
374 switch (pg_count) { 376 switch (pg_count) {
375 case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge); 377 case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
376 break; 378 break;
377 case 4: 379 case 4:
378 /* kludge to get 4 physical pages for ARGB cursor */ 380 /* kludge to get 4 physical pages for ARGB cursor */
379 addr = i8xx_alloc_pages(); 381 addr = i8xx_alloc_pages();
380 break; 382 break;
381 default: 383 default:
382 return NULL; 384 return NULL;
383 } 385 }
384 386
385 if (addr == NULL) 387 if (addr == NULL)
386 return NULL; 388 return NULL;
387 389
388 new = agp_create_memory(pg_count); 390 new = agp_create_memory(pg_count);
389 if (new == NULL) 391 if (new == NULL)
390 return NULL; 392 return NULL;
391 393
392 new->memory[0] = virt_to_gart(addr); 394 new->memory[0] = virt_to_gart(addr);
393 if (pg_count == 4) { 395 if (pg_count == 4) {
394 /* kludge to get 4 physical pages for ARGB cursor */ 396 /* kludge to get 4 physical pages for ARGB cursor */
395 new->memory[1] = new->memory[0] + PAGE_SIZE; 397 new->memory[1] = new->memory[0] + PAGE_SIZE;
396 new->memory[2] = new->memory[1] + PAGE_SIZE; 398 new->memory[2] = new->memory[1] + PAGE_SIZE;
397 new->memory[3] = new->memory[2] + PAGE_SIZE; 399 new->memory[3] = new->memory[2] + PAGE_SIZE;
398 } 400 }
399 new->page_count = pg_count; 401 new->page_count = pg_count;
400 new->num_scratch_pages = pg_count; 402 new->num_scratch_pages = pg_count;
401 new->type = AGP_PHYS_MEMORY; 403 new->type = AGP_PHYS_MEMORY;
402 new->physical = new->memory[0]; 404 new->physical = new->memory[0];
403 return new; 405 return new;
404 } 406 }
405 407
406 static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) 408 static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
407 { 409 {
408 struct agp_memory *new; 410 struct agp_memory *new;
409 411
410 if (type == AGP_DCACHE_MEMORY) { 412 if (type == AGP_DCACHE_MEMORY) {
411 if (pg_count != intel_private.num_dcache_entries) 413 if (pg_count != intel_private.num_dcache_entries)
412 return NULL; 414 return NULL;
413 415
414 new = agp_create_memory(1); 416 new = agp_create_memory(1);
415 if (new == NULL) 417 if (new == NULL)
416 return NULL; 418 return NULL;
417 419
418 new->type = AGP_DCACHE_MEMORY; 420 new->type = AGP_DCACHE_MEMORY;
419 new->page_count = pg_count; 421 new->page_count = pg_count;
420 new->num_scratch_pages = 0; 422 new->num_scratch_pages = 0;
421 agp_free_page_array(new); 423 agp_free_page_array(new);
422 return new; 424 return new;
423 } 425 }
424 if (type == AGP_PHYS_MEMORY) 426 if (type == AGP_PHYS_MEMORY)
425 return alloc_agpphysmem_i8xx(pg_count, type); 427 return alloc_agpphysmem_i8xx(pg_count, type);
426 return NULL; 428 return NULL;
427 } 429 }
428 430
429 static void intel_i810_free_by_type(struct agp_memory *curr) 431 static void intel_i810_free_by_type(struct agp_memory *curr)
430 { 432 {
431 agp_free_key(curr->key); 433 agp_free_key(curr->key);
432 if (curr->type == AGP_PHYS_MEMORY) { 434 if (curr->type == AGP_PHYS_MEMORY) {
433 if (curr->page_count == 4) 435 if (curr->page_count == 4)
434 i8xx_destroy_pages(gart_to_virt(curr->memory[0])); 436 i8xx_destroy_pages(gart_to_virt(curr->memory[0]));
435 else { 437 else {
436 void *va = gart_to_virt(curr->memory[0]); 438 void *va = gart_to_virt(curr->memory[0]);
437 439
438 agp_bridge->driver->agp_destroy_page(va, 440 agp_bridge->driver->agp_destroy_page(va,
439 AGP_PAGE_DESTROY_UNMAP); 441 AGP_PAGE_DESTROY_UNMAP);
440 agp_bridge->driver->agp_destroy_page(va, 442 agp_bridge->driver->agp_destroy_page(va,
441 AGP_PAGE_DESTROY_FREE); 443 AGP_PAGE_DESTROY_FREE);
442 } 444 }
443 agp_free_page_array(curr); 445 agp_free_page_array(curr);
444 } 446 }
445 kfree(curr); 447 kfree(curr);
446 } 448 }
447 449
448 static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, 450 static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
449 unsigned long addr, int type) 451 unsigned long addr, int type)
450 { 452 {
451 /* Type checking must be done elsewhere */ 453 /* Type checking must be done elsewhere */
452 return addr | bridge->driver->masks[type].mask; 454 return addr | bridge->driver->masks[type].mask;
453 } 455 }
454 456
455 static struct aper_size_info_fixed intel_i830_sizes[] = 457 static struct aper_size_info_fixed intel_i830_sizes[] =
456 { 458 {
457 {128, 32768, 5}, 459 {128, 32768, 5},
458 /* The 64M mode still requires a 128k gatt */ 460 /* The 64M mode still requires a 128k gatt */
459 {64, 16384, 5}, 461 {64, 16384, 5},
460 {256, 65536, 6}, 462 {256, 65536, 6},
461 {512, 131072, 7}, 463 {512, 131072, 7},
462 }; 464 };
463 465
464 static void intel_i830_init_gtt_entries(void) 466 static void intel_i830_init_gtt_entries(void)
465 { 467 {
466 u16 gmch_ctrl; 468 u16 gmch_ctrl;
467 int gtt_entries; 469 int gtt_entries;
468 u8 rdct; 470 u8 rdct;
469 int local = 0; 471 int local = 0;
470 static const int ddt[4] = { 0, 16, 32, 64 }; 472 static const int ddt[4] = { 0, 16, 32, 64 };
471 int size; /* reserved space (in kb) at the top of stolen memory */ 473 int size; /* reserved space (in kb) at the top of stolen memory */
472 474
473 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); 475 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
474 476
475 if (IS_I965) { 477 if (IS_I965) {
476 u32 pgetbl_ctl; 478 u32 pgetbl_ctl;
477 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); 479 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
478 480
479 /* The 965 has a field telling us the size of the GTT, 481 /* The 965 has a field telling us the size of the GTT,
480 * which may be larger than what is necessary to map the 482 * which may be larger than what is necessary to map the
481 * aperture. 483 * aperture.
482 */ 484 */
483 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { 485 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
484 case I965_PGETBL_SIZE_128KB: 486 case I965_PGETBL_SIZE_128KB:
485 size = 128; 487 size = 128;
486 break; 488 break;
487 case I965_PGETBL_SIZE_256KB: 489 case I965_PGETBL_SIZE_256KB:
488 size = 256; 490 size = 256;
489 break; 491 break;
490 case I965_PGETBL_SIZE_512KB: 492 case I965_PGETBL_SIZE_512KB:
491 size = 512; 493 size = 512;
492 break; 494 break;
493 case I965_PGETBL_SIZE_1MB: 495 case I965_PGETBL_SIZE_1MB:
494 size = 1024; 496 size = 1024;
495 break; 497 break;
496 case I965_PGETBL_SIZE_2MB: 498 case I965_PGETBL_SIZE_2MB:
497 size = 2048; 499 size = 2048;
498 break; 500 break;
499 case I965_PGETBL_SIZE_1_5MB: 501 case I965_PGETBL_SIZE_1_5MB:
500 size = 1024 + 512; 502 size = 1024 + 512;
501 break; 503 break;
502 default: 504 default:
503 printk(KERN_INFO PFX "Unknown page table size, " 505 dev_info(&intel_private.pcidev->dev,
504 "assuming 512KB\n"); 506 "unknown page table size, assuming 512KB\n");
505 size = 512; 507 size = 512;
506 } 508 }
507 size += 4; /* add in BIOS popup space */ 509 size += 4; /* add in BIOS popup space */
508 } else if (IS_G33) { 510 } else if (IS_G33) {
509 /* G33's GTT size defined in gmch_ctrl */ 511 /* G33's GTT size defined in gmch_ctrl */
510 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { 512 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
511 case G33_PGETBL_SIZE_1M: 513 case G33_PGETBL_SIZE_1M:
512 size = 1024; 514 size = 1024;
513 break; 515 break;
514 case G33_PGETBL_SIZE_2M: 516 case G33_PGETBL_SIZE_2M:
515 size = 2048; 517 size = 2048;
516 break; 518 break;
517 default: 519 default:
518 printk(KERN_INFO PFX "Unknown page table size 0x%x, " 520 dev_info(&agp_bridge->dev->dev,
519 "assuming 512KB\n", 521 "unknown page table size 0x%x, assuming 512KB\n",
520 (gmch_ctrl & G33_PGETBL_SIZE_MASK)); 522 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
521 size = 512; 523 size = 512;
522 } 524 }
523 size += 4; 525 size += 4;
524 } else if (IS_G4X) { 526 } else if (IS_G4X) {
525 /* On 4 series hardware, GTT stolen is separate from graphics 527 /* On 4 series hardware, GTT stolen is separate from graphics
526 * stolen, ignore it in stolen gtt entries counting */ 528 * stolen, ignore it in stolen gtt entries counting */
527 size = 0; 529 size = 0;
528 } else { 530 } else {
529 /* On previous hardware, the GTT size was just what was 531 /* On previous hardware, the GTT size was just what was
530 * required to map the aperture. 532 * required to map the aperture.
531 */ 533 */
532 size = agp_bridge->driver->fetch_size() + 4; 534 size = agp_bridge->driver->fetch_size() + 4;
533 } 535 }
534 536
535 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB || 537 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
536 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { 538 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
537 switch (gmch_ctrl & I830_GMCH_GMS_MASK) { 539 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
538 case I830_GMCH_GMS_STOLEN_512: 540 case I830_GMCH_GMS_STOLEN_512:
539 gtt_entries = KB(512) - KB(size); 541 gtt_entries = KB(512) - KB(size);
540 break; 542 break;
541 case I830_GMCH_GMS_STOLEN_1024: 543 case I830_GMCH_GMS_STOLEN_1024:
542 gtt_entries = MB(1) - KB(size); 544 gtt_entries = MB(1) - KB(size);
543 break; 545 break;
544 case I830_GMCH_GMS_STOLEN_8192: 546 case I830_GMCH_GMS_STOLEN_8192:
545 gtt_entries = MB(8) - KB(size); 547 gtt_entries = MB(8) - KB(size);
546 break; 548 break;
547 case I830_GMCH_GMS_LOCAL: 549 case I830_GMCH_GMS_LOCAL:
548 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); 550 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
549 gtt_entries = (I830_RDRAM_ND(rdct) + 1) * 551 gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
550 MB(ddt[I830_RDRAM_DDT(rdct)]); 552 MB(ddt[I830_RDRAM_DDT(rdct)]);
551 local = 1; 553 local = 1;
552 break; 554 break;
553 default: 555 default:
554 gtt_entries = 0; 556 gtt_entries = 0;
555 break; 557 break;
556 } 558 }
557 } else { 559 } else {
558 switch (gmch_ctrl & I855_GMCH_GMS_MASK) { 560 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
559 case I855_GMCH_GMS_STOLEN_1M: 561 case I855_GMCH_GMS_STOLEN_1M:
560 gtt_entries = MB(1) - KB(size); 562 gtt_entries = MB(1) - KB(size);
561 break; 563 break;
562 case I855_GMCH_GMS_STOLEN_4M: 564 case I855_GMCH_GMS_STOLEN_4M:
563 gtt_entries = MB(4) - KB(size); 565 gtt_entries = MB(4) - KB(size);
564 break; 566 break;
565 case I855_GMCH_GMS_STOLEN_8M: 567 case I855_GMCH_GMS_STOLEN_8M:
566 gtt_entries = MB(8) - KB(size); 568 gtt_entries = MB(8) - KB(size);
567 break; 569 break;
568 case I855_GMCH_GMS_STOLEN_16M: 570 case I855_GMCH_GMS_STOLEN_16M:
569 gtt_entries = MB(16) - KB(size); 571 gtt_entries = MB(16) - KB(size);
570 break; 572 break;
571 case I855_GMCH_GMS_STOLEN_32M: 573 case I855_GMCH_GMS_STOLEN_32M:
572 gtt_entries = MB(32) - KB(size); 574 gtt_entries = MB(32) - KB(size);
573 break; 575 break;
574 case I915_GMCH_GMS_STOLEN_48M: 576 case I915_GMCH_GMS_STOLEN_48M:
575 /* Check it's really I915G */ 577 /* Check it's really I915G */
576 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) 578 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
577 gtt_entries = MB(48) - KB(size); 579 gtt_entries = MB(48) - KB(size);
578 else 580 else
579 gtt_entries = 0; 581 gtt_entries = 0;
580 break; 582 break;
581 case I915_GMCH_GMS_STOLEN_64M: 583 case I915_GMCH_GMS_STOLEN_64M:
582 /* Check it's really I915G */ 584 /* Check it's really I915G */
583 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) 585 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
584 gtt_entries = MB(64) - KB(size); 586 gtt_entries = MB(64) - KB(size);
585 else 587 else
586 gtt_entries = 0; 588 gtt_entries = 0;
587 break; 589 break;
588 case G33_GMCH_GMS_STOLEN_128M: 590 case G33_GMCH_GMS_STOLEN_128M:
589 if (IS_G33 || IS_I965 || IS_G4X) 591 if (IS_G33 || IS_I965 || IS_G4X)
590 gtt_entries = MB(128) - KB(size); 592 gtt_entries = MB(128) - KB(size);
591 else 593 else
592 gtt_entries = 0; 594 gtt_entries = 0;
593 break; 595 break;
594 case G33_GMCH_GMS_STOLEN_256M: 596 case G33_GMCH_GMS_STOLEN_256M:
595 if (IS_G33 || IS_I965 || IS_G4X) 597 if (IS_G33 || IS_I965 || IS_G4X)
596 gtt_entries = MB(256) - KB(size); 598 gtt_entries = MB(256) - KB(size);
597 else 599 else
598 gtt_entries = 0; 600 gtt_entries = 0;
599 break; 601 break;
600 case INTEL_GMCH_GMS_STOLEN_96M: 602 case INTEL_GMCH_GMS_STOLEN_96M:
601 if (IS_I965 || IS_G4X) 603 if (IS_I965 || IS_G4X)
602 gtt_entries = MB(96) - KB(size); 604 gtt_entries = MB(96) - KB(size);
603 else 605 else
604 gtt_entries = 0; 606 gtt_entries = 0;
605 break; 607 break;
606 case INTEL_GMCH_GMS_STOLEN_160M: 608 case INTEL_GMCH_GMS_STOLEN_160M:
607 if (IS_I965 || IS_G4X) 609 if (IS_I965 || IS_G4X)
608 gtt_entries = MB(160) - KB(size); 610 gtt_entries = MB(160) - KB(size);
609 else 611 else
610 gtt_entries = 0; 612 gtt_entries = 0;
611 break; 613 break;
612 case INTEL_GMCH_GMS_STOLEN_224M: 614 case INTEL_GMCH_GMS_STOLEN_224M:
613 if (IS_I965 || IS_G4X) 615 if (IS_I965 || IS_G4X)
614 gtt_entries = MB(224) - KB(size); 616 gtt_entries = MB(224) - KB(size);
615 else 617 else
616 gtt_entries = 0; 618 gtt_entries = 0;
617 break; 619 break;
618 case INTEL_GMCH_GMS_STOLEN_352M: 620 case INTEL_GMCH_GMS_STOLEN_352M:
619 if (IS_I965 || IS_G4X) 621 if (IS_I965 || IS_G4X)
620 gtt_entries = MB(352) - KB(size); 622 gtt_entries = MB(352) - KB(size);
621 else 623 else
622 gtt_entries = 0; 624 gtt_entries = 0;
623 break; 625 break;
624 default: 626 default:
625 gtt_entries = 0; 627 gtt_entries = 0;
626 break; 628 break;
627 } 629 }
628 } 630 }
629 if (gtt_entries > 0) 631 if (gtt_entries > 0)
630 printk(KERN_INFO PFX "Detected %dK %s memory.\n", 632 dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
631 gtt_entries / KB(1), local ? "local" : "stolen"); 633 gtt_entries / KB(1), local ? "local" : "stolen");
632 else 634 else
633 printk(KERN_INFO PFX 635 dev_info(&agp_bridge->dev->dev,
634 "No pre-allocated video memory detected.\n"); 636 "no pre-allocated video memory detected\n");
635 gtt_entries /= KB(4); 637 gtt_entries /= KB(4);
636 638
637 intel_private.gtt_entries = gtt_entries; 639 intel_private.gtt_entries = gtt_entries;
638 } 640 }
639 641
640 static void intel_i830_fini_flush(void) 642 static void intel_i830_fini_flush(void)
641 { 643 {
642 kunmap(intel_private.i8xx_page); 644 kunmap(intel_private.i8xx_page);
643 intel_private.i8xx_flush_page = NULL; 645 intel_private.i8xx_flush_page = NULL;
644 unmap_page_from_agp(intel_private.i8xx_page); 646 unmap_page_from_agp(intel_private.i8xx_page);
645 647
646 __free_page(intel_private.i8xx_page); 648 __free_page(intel_private.i8xx_page);
647 intel_private.i8xx_page = NULL; 649 intel_private.i8xx_page = NULL;
648 } 650 }
649 651
650 static void intel_i830_setup_flush(void) 652 static void intel_i830_setup_flush(void)
651 { 653 {
652 /* return if we've already set the flush mechanism up */ 654 /* return if we've already set the flush mechanism up */
653 if (intel_private.i8xx_page) 655 if (intel_private.i8xx_page)
654 return; 656 return;
655 657
656 intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); 658 intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
657 if (!intel_private.i8xx_page) 659 if (!intel_private.i8xx_page)
658 return; 660 return;
659 661
660 /* make page uncached */ 662 /* make page uncached */
661 map_page_into_agp(intel_private.i8xx_page); 663 map_page_into_agp(intel_private.i8xx_page);
662 664
663 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); 665 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
664 if (!intel_private.i8xx_flush_page) 666 if (!intel_private.i8xx_flush_page)
665 intel_i830_fini_flush(); 667 intel_i830_fini_flush();
666 } 668 }
667 669
668 static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) 670 static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
669 { 671 {
670 unsigned int *pg = intel_private.i8xx_flush_page; 672 unsigned int *pg = intel_private.i8xx_flush_page;
671 int i; 673 int i;
672 674
673 for (i = 0; i < 256; i += 2) 675 for (i = 0; i < 256; i += 2)
674 *(pg + i) = i; 676 *(pg + i) = i;
675 677
676 wmb(); 678 wmb();
677 } 679 }
678 680
679 /* The intel i830 automatically initializes the agp aperture during POST. 681 /* The intel i830 automatically initializes the agp aperture during POST.
680 * Use the memory already set aside for in the GTT. 682 * Use the memory already set aside for in the GTT.
681 */ 683 */
682 static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) 684 static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
683 { 685 {
684 int page_order; 686 int page_order;
685 struct aper_size_info_fixed *size; 687 struct aper_size_info_fixed *size;
686 int num_entries; 688 int num_entries;
687 u32 temp; 689 u32 temp;
688 690
689 size = agp_bridge->current_size; 691 size = agp_bridge->current_size;
690 page_order = size->page_order; 692 page_order = size->page_order;
691 num_entries = size->num_entries; 693 num_entries = size->num_entries;
692 agp_bridge->gatt_table_real = NULL; 694 agp_bridge->gatt_table_real = NULL;
693 695
694 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); 696 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
695 temp &= 0xfff80000; 697 temp &= 0xfff80000;
696 698
697 intel_private.registers = ioremap(temp, 128 * 4096); 699 intel_private.registers = ioremap(temp, 128 * 4096);
698 if (!intel_private.registers) 700 if (!intel_private.registers)
699 return -ENOMEM; 701 return -ENOMEM;
700 702
701 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; 703 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
702 global_cache_flush(); /* FIXME: ?? */ 704 global_cache_flush(); /* FIXME: ?? */
703 705
704 /* we have to call this as early as possible after the MMIO base address is known */ 706 /* we have to call this as early as possible after the MMIO base address is known */
705 intel_i830_init_gtt_entries(); 707 intel_i830_init_gtt_entries();
706 708
707 agp_bridge->gatt_table = NULL; 709 agp_bridge->gatt_table = NULL;
708 710
709 agp_bridge->gatt_bus_addr = temp; 711 agp_bridge->gatt_bus_addr = temp;
710 712
711 return 0; 713 return 0;
712 } 714 }
713 715
714 /* Return the gatt table to a sane state. Use the top of stolen 716 /* Return the gatt table to a sane state. Use the top of stolen
715 * memory for the GTT. 717 * memory for the GTT.
716 */ 718 */
/* Nothing to free: the GATT lives in stolen memory owned by the BIOS. */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}
721 723
722 static int intel_i830_fetch_size(void) 724 static int intel_i830_fetch_size(void)
723 { 725 {
724 u16 gmch_ctrl; 726 u16 gmch_ctrl;
725 struct aper_size_info_fixed *values; 727 struct aper_size_info_fixed *values;
726 728
727 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); 729 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
728 730
729 if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB && 731 if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
730 agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) { 732 agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
731 /* 855GM/852GM/865G has 128MB aperture size */ 733 /* 855GM/852GM/865G has 128MB aperture size */
732 agp_bridge->previous_size = agp_bridge->current_size = (void *) values; 734 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
733 agp_bridge->aperture_size_idx = 0; 735 agp_bridge->aperture_size_idx = 0;
734 return values[0].size; 736 return values[0].size;
735 } 737 }
736 738
737 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); 739 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
738 740
739 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { 741 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
740 agp_bridge->previous_size = agp_bridge->current_size = (void *) values; 742 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
741 agp_bridge->aperture_size_idx = 0; 743 agp_bridge->aperture_size_idx = 0;
742 return values[0].size; 744 return values[0].size;
743 } else { 745 } else {
744 agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1); 746 agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
745 agp_bridge->aperture_size_idx = 1; 747 agp_bridge->aperture_size_idx = 1;
746 return values[1].size; 748 return values[1].size;
747 } 749 }
748 750
749 return 0; 751 return 0;
750 } 752 }
751 753
752 static int intel_i830_configure(void) 754 static int intel_i830_configure(void)
753 { 755 {
754 struct aper_size_info_fixed *current_size; 756 struct aper_size_info_fixed *current_size;
755 u32 temp; 757 u32 temp;
756 u16 gmch_ctrl; 758 u16 gmch_ctrl;
757 int i; 759 int i;
758 760
759 current_size = A_SIZE_FIX(agp_bridge->current_size); 761 current_size = A_SIZE_FIX(agp_bridge->current_size);
760 762
761 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); 763 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
762 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); 764 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
763 765
764 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); 766 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
765 gmch_ctrl |= I830_GMCH_ENABLED; 767 gmch_ctrl |= I830_GMCH_ENABLED;
766 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); 768 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
767 769
768 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); 770 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
769 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ 771 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
770 772
771 if (agp_bridge->driver->needs_scratch_page) { 773 if (agp_bridge->driver->needs_scratch_page) {
772 for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) { 774 for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
773 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); 775 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
774 readl(intel_private.registers+I810_PTE_BASE+(i*4)); /* PCI Posting. */ 776 readl(intel_private.registers+I810_PTE_BASE+(i*4)); /* PCI Posting. */
775 } 777 }
776 } 778 }
777 779
778 global_cache_flush(); 780 global_cache_flush();
779 781
780 intel_i830_setup_flush(); 782 intel_i830_setup_flush();
781 return 0; 783 return 0;
782 } 784 }
783 785
784 static void intel_i830_cleanup(void) 786 static void intel_i830_cleanup(void)
785 { 787 {
786 iounmap(intel_private.registers); 788 iounmap(intel_private.registers);
787 } 789 }
788 790
789 static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, 791 static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
790 int type) 792 int type)
791 { 793 {
792 int i, j, num_entries; 794 int i, j, num_entries;
793 void *temp; 795 void *temp;
794 int ret = -EINVAL; 796 int ret = -EINVAL;
795 int mask_type; 797 int mask_type;
796 798
797 if (mem->page_count == 0) 799 if (mem->page_count == 0)
798 goto out; 800 goto out;
799 801
800 temp = agp_bridge->current_size; 802 temp = agp_bridge->current_size;
801 num_entries = A_SIZE_FIX(temp)->num_entries; 803 num_entries = A_SIZE_FIX(temp)->num_entries;
802 804
803 if (pg_start < intel_private.gtt_entries) { 805 if (pg_start < intel_private.gtt_entries) {
804 printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n", 806 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
805 pg_start, intel_private.gtt_entries); 807 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
808 pg_start, intel_private.gtt_entries);
806 809
807 printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n"); 810 dev_info(&intel_private.pcidev->dev,
811 "trying to insert into local/stolen memory\n");
808 goto out_err; 812 goto out_err;
809 } 813 }
810 814
811 if ((pg_start + mem->page_count) > num_entries) 815 if ((pg_start + mem->page_count) > num_entries)
812 goto out_err; 816 goto out_err;
813 817
814 /* The i830 can't check the GTT for entries since its read only, 818 /* The i830 can't check the GTT for entries since its read only,
815 * depend on the caller to make the correct offset decisions. 819 * depend on the caller to make the correct offset decisions.
816 */ 820 */
817 821
818 if (type != mem->type) 822 if (type != mem->type)
819 goto out_err; 823 goto out_err;
820 824
821 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); 825 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
822 826
823 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && 827 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
824 mask_type != INTEL_AGP_CACHED_MEMORY) 828 mask_type != INTEL_AGP_CACHED_MEMORY)
825 goto out_err; 829 goto out_err;
826 830
827 if (!mem->is_flushed) 831 if (!mem->is_flushed)
828 global_cache_flush(); 832 global_cache_flush();
829 833
830 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 834 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
831 writel(agp_bridge->driver->mask_memory(agp_bridge, 835 writel(agp_bridge->driver->mask_memory(agp_bridge,
832 mem->memory[i], mask_type), 836 mem->memory[i], mask_type),
833 intel_private.registers+I810_PTE_BASE+(j*4)); 837 intel_private.registers+I810_PTE_BASE+(j*4));
834 } 838 }
835 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); 839 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
836 agp_bridge->driver->tlb_flush(mem); 840 agp_bridge->driver->tlb_flush(mem);
837 841
838 out: 842 out:
839 ret = 0; 843 ret = 0;
840 out_err: 844 out_err:
841 mem->is_flushed = true; 845 mem->is_flushed = true;
842 return ret; 846 return ret;
843 } 847 }
844 848
845 static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, 849 static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
846 int type) 850 int type)
847 { 851 {
848 int i; 852 int i;
849 853
850 if (mem->page_count == 0) 854 if (mem->page_count == 0)
851 return 0; 855 return 0;
852 856
853 if (pg_start < intel_private.gtt_entries) { 857 if (pg_start < intel_private.gtt_entries) {
854 printk(KERN_INFO PFX "Trying to disable local/stolen memory\n"); 858 dev_info(&intel_private.pcidev->dev,
859 "trying to disable local/stolen memory\n");
855 return -EINVAL; 860 return -EINVAL;
856 } 861 }
857 862
858 for (i = pg_start; i < (mem->page_count + pg_start); i++) { 863 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
859 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); 864 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
860 } 865 }
861 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); 866 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
862 867
863 agp_bridge->driver->tlb_flush(mem); 868 agp_bridge->driver->tlb_flush(mem);
864 return 0; 869 return 0;
865 } 870 }
866 871
867 static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) 872 static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
868 { 873 {
869 if (type == AGP_PHYS_MEMORY) 874 if (type == AGP_PHYS_MEMORY)
870 return alloc_agpphysmem_i8xx(pg_count, type); 875 return alloc_agpphysmem_i8xx(pg_count, type);
871 /* always return NULL for other allocation types for now */ 876 /* always return NULL for other allocation types for now */
872 return NULL; 877 return NULL;
873 } 878 }
874 879
875 static int intel_alloc_chipset_flush_resource(void) 880 static int intel_alloc_chipset_flush_resource(void)
876 { 881 {
877 int ret; 882 int ret;
878 ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE, 883 ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
879 PAGE_SIZE, PCIBIOS_MIN_MEM, 0, 884 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
880 pcibios_align_resource, agp_bridge->dev); 885 pcibios_align_resource, agp_bridge->dev);
881 886
882 return ret; 887 return ret;
883 } 888 }
884 889
885 static void intel_i915_setup_chipset_flush(void) 890 static void intel_i915_setup_chipset_flush(void)
886 { 891 {
887 int ret; 892 int ret;
888 u32 temp; 893 u32 temp;
889 894
890 pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp); 895 pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
891 if (!(temp & 0x1)) { 896 if (!(temp & 0x1)) {
892 intel_alloc_chipset_flush_resource(); 897 intel_alloc_chipset_flush_resource();
893 intel_private.resource_valid = 1; 898 intel_private.resource_valid = 1;
894 pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); 899 pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
895 } else { 900 } else {
896 temp &= ~1; 901 temp &= ~1;
897 902
898 intel_private.resource_valid = 1; 903 intel_private.resource_valid = 1;
899 intel_private.ifp_resource.start = temp; 904 intel_private.ifp_resource.start = temp;
900 intel_private.ifp_resource.end = temp + PAGE_SIZE; 905 intel_private.ifp_resource.end = temp + PAGE_SIZE;
901 ret = request_resource(&iomem_resource, &intel_private.ifp_resource); 906 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
902 /* some BIOSes reserve this area in a pnp some don't */ 907 /* some BIOSes reserve this area in a pnp some don't */
903 if (ret) 908 if (ret)
904 intel_private.resource_valid = 0; 909 intel_private.resource_valid = 0;
905 } 910 }
906 } 911 }
907 912
908 static void intel_i965_g33_setup_chipset_flush(void) 913 static void intel_i965_g33_setup_chipset_flush(void)
909 { 914 {
910 u32 temp_hi, temp_lo; 915 u32 temp_hi, temp_lo;
911 int ret; 916 int ret;
912 917
913 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi); 918 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
914 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo); 919 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
915 920
916 if (!(temp_lo & 0x1)) { 921 if (!(temp_lo & 0x1)) {
917 922
918 intel_alloc_chipset_flush_resource(); 923 intel_alloc_chipset_flush_resource();
919 924
920 intel_private.resource_valid = 1; 925 intel_private.resource_valid = 1;
921 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4, 926 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
922 upper_32_bits(intel_private.ifp_resource.start)); 927 upper_32_bits(intel_private.ifp_resource.start));
923 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); 928 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
924 } else { 929 } else {
925 u64 l64; 930 u64 l64;
926 931
927 temp_lo &= ~0x1; 932 temp_lo &= ~0x1;
928 l64 = ((u64)temp_hi << 32) | temp_lo; 933 l64 = ((u64)temp_hi << 32) | temp_lo;
929 934
930 intel_private.resource_valid = 1; 935 intel_private.resource_valid = 1;
931 intel_private.ifp_resource.start = l64; 936 intel_private.ifp_resource.start = l64;
932 intel_private.ifp_resource.end = l64 + PAGE_SIZE; 937 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
933 ret = request_resource(&iomem_resource, &intel_private.ifp_resource); 938 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
934 /* some BIOSes reserve this area in a pnp some don't */ 939 /* some BIOSes reserve this area in a pnp some don't */
935 if (ret) 940 if (ret)
936 intel_private.resource_valid = 0; 941 intel_private.resource_valid = 0;
937 } 942 }
938 } 943 }
939 944
940 static void intel_i9xx_setup_flush(void) 945 static void intel_i9xx_setup_flush(void)
941 { 946 {
942 /* return if already configured */ 947 /* return if already configured */
943 if (intel_private.ifp_resource.start) 948 if (intel_private.ifp_resource.start)
944 return; 949 return;
945 950
946 /* setup a resource for this object */ 951 /* setup a resource for this object */
947 intel_private.ifp_resource.name = "Intel Flush Page"; 952 intel_private.ifp_resource.name = "Intel Flush Page";
948 intel_private.ifp_resource.flags = IORESOURCE_MEM; 953 intel_private.ifp_resource.flags = IORESOURCE_MEM;
949 954
950 /* Setup chipset flush for 915 */ 955 /* Setup chipset flush for 915 */
951 if (IS_I965 || IS_G33 || IS_G4X) { 956 if (IS_I965 || IS_G33 || IS_G4X) {
952 intel_i965_g33_setup_chipset_flush(); 957 intel_i965_g33_setup_chipset_flush();
953 } else { 958 } else {
954 intel_i915_setup_chipset_flush(); 959 intel_i915_setup_chipset_flush();
955 } 960 }
956 961
957 if (intel_private.ifp_resource.start) { 962 if (intel_private.ifp_resource.start) {
958 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); 963 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
959 if (!intel_private.i9xx_flush_page) 964 if (!intel_private.i9xx_flush_page)
960 printk(KERN_INFO "unable to ioremap flush page - no chipset flushing"); 965 dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
961 } 966 }
962 } 967 }
963 968
/*
 * Program the i915 GMCH for AGP/GTT use: latch the aperture base, enable
 * the GMCH, enable the page table, and scrub any non-stolen GTT entries.
 * Returns 0 (the hardware writes themselves are not checked).
 */
static int intel_i915_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	/* Aperture base lives in the GMADDR BAR of the IGD function. */
	pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);

	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	/* Point the chip at the GATT and turn the page table on. */
	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* Only entries past the BIOS-stolen region are ours to scrub. */
		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.gtt+i);
			readl(intel_private.gtt+i);	/* PCI Posting. */
		}
	}

	global_cache_flush();

	intel_i9xx_setup_flush();

	return 0;
}
997 1002
/*
 * Undo intel_i915 setup: release the chipset-flush page/resource and the
 * GTT and register mappings.  Safe to call when the flush page or flush
 * resource was never set up (both are checked before release).
 */
static void intel_i915_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	/* Mark the flush resource as gone so a re-probe starts clean. */
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);
}
1009 1014
/*
 * Trigger a chipset-level flush by writing the magic flush page, if the
 * earlier ioremap of that page succeeded (otherwise silently a no-op).
 */
static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}
1015 1020
/*
 * Bind @mem's pages into the i915 GTT starting at @pg_start.
 * Rejects attempts to overwrite the BIOS-stolen region, out-of-range
 * requests, type mismatches, and unsupported mask types.
 * Returns 0 on success (or for an empty request), -EINVAL on error.
 */
static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	/* Nothing to do for an empty request; report success. */
	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	/* The first gtt_entries slots cover BIOS-stolen memory; refuse. */
	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i915 can't check the GTT for entries since its read only,
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
			mem->memory[i], mask_type), intel_private.gtt+j);
	}

	readl(intel_private.gtt+j-1);	/* flush posted GTT writes */
	agp_bridge->driver->tlb_flush(mem);

 out:
	ret = 0;
 out_err:
	/* NOTE(review): is_flushed is set on error paths too — presumably
	 * deliberate (the cache may have been flushed above); confirm. */
	mem->is_flushed = true;
	return ret;
}
1071 1078
/*
 * Unbind @mem's pages from the GTT by rewriting each slot with the
 * scratch page.  Refuses to touch the BIOS-stolen region.
 * Returns 0 on success, -EINVAL when pg_start overlaps stolen memory.
 */
static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.gtt_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++)
		writel(agp_bridge->scratch_page, intel_private.gtt+i);

	readl(intel_private.gtt+i-1);	/* flush posted GTT writes */

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
1093 1101
1094 /* Return the aperture size by just checking the resource length. The effect 1102 /* Return the aperture size by just checking the resource length. The effect
1095 * described in the spec of the MSAC registers is just changing of the 1103 * described in the spec of the MSAC registers is just changing of the
1096 * resource size. 1104 * resource size.
1097 */ 1105 */
1098 static int intel_i9xx_fetch_size(void) 1106 static int intel_i9xx_fetch_size(void)
1099 { 1107 {
1100 int num_sizes = ARRAY_SIZE(intel_i830_sizes); 1108 int num_sizes = ARRAY_SIZE(intel_i830_sizes);
1101 int aper_size; /* size in megabytes */ 1109 int aper_size; /* size in megabytes */
1102 int i; 1110 int i;
1103 1111
1104 aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1); 1112 aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
1105 1113
1106 for (i = 0; i < num_sizes; i++) { 1114 for (i = 0; i < num_sizes; i++) {
1107 if (aper_size == intel_i830_sizes[i].size) { 1115 if (aper_size == intel_i830_sizes[i].size) {
1108 agp_bridge->current_size = intel_i830_sizes + i; 1116 agp_bridge->current_size = intel_i830_sizes + i;
1109 agp_bridge->previous_size = agp_bridge->current_size; 1117 agp_bridge->previous_size = agp_bridge->current_size;
1110 return aper_size; 1118 return aper_size;
1111 } 1119 }
1112 } 1120 }
1113 1121
1114 return 0; 1122 return 0;
1115 } 1123 }
1116 1124
/* The intel i915 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for in the GTT.
 */
static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp, temp2;
	int gtt_map_size = 256 * 1024;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	/* No software GATT page is allocated; the hardware table is reused. */
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);

	if (IS_G33)
		gtt_map_size = 1024 * 1024;	/* 1M on G33 */
	intel_private.gtt = ioremap(temp2, gtt_map_size);
	if (!intel_private.gtt)
		return -ENOMEM;

	/* Mask MMADDR down to the MMIO base (low bits are flags/reserved). */
	temp &= 0xfff80000;

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers) {
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();

	agp_bridge->gatt_table = NULL;

	/* Reuse the address POST already programmed into PGETBL_CTL. */
	agp_bridge->gatt_bus_addr = temp;

	return 0;
}
1162 1170
1163 /* 1171 /*
1164 * The i965 supports 36-bit physical addresses, but to keep 1172 * The i965 supports 36-bit physical addresses, but to keep
1165 * the format of the GTT the same, the bits that don't fit 1173 * the format of the GTT the same, the bits that don't fit
1166 * in a 32-bit word are shifted down to bits 4..7. 1174 * in a 32-bit word are shifted down to bits 4..7.
1167 * 1175 *
1168 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0" 1176 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1169 * is always zero on 32-bit architectures, so no need to make 1177 * is always zero on 32-bit architectures, so no need to make
1170 * this conditional. 1178 * this conditional.
1171 */ 1179 */
1172 static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, 1180 static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1173 unsigned long addr, int type) 1181 unsigned long addr, int type)
1174 { 1182 {
1175 /* Shift high bits down */ 1183 /* Shift high bits down */
1176 addr |= (addr >> 28) & 0xf0; 1184 addr |= (addr >> 28) & 0xf0;
1177 1185
1178 /* Type checking must be done elsewhere */ 1186 /* Type checking must be done elsewhere */
1179 return addr | bridge->driver->masks[type].mask; 1187 return addr | bridge->driver->masks[type].mask;
1180 } 1188 }
1181 1189
1182 static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) 1190 static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1183 { 1191 {
1184 switch (agp_bridge->dev->device) { 1192 switch (agp_bridge->dev->device) {
1185 case PCI_DEVICE_ID_INTEL_GM45_HB: 1193 case PCI_DEVICE_ID_INTEL_GM45_HB:
1186 case PCI_DEVICE_ID_INTEL_IGD_E_HB: 1194 case PCI_DEVICE_ID_INTEL_IGD_E_HB:
1187 case PCI_DEVICE_ID_INTEL_Q45_HB: 1195 case PCI_DEVICE_ID_INTEL_Q45_HB:
1188 case PCI_DEVICE_ID_INTEL_G45_HB: 1196 case PCI_DEVICE_ID_INTEL_G45_HB:
1189 *gtt_offset = *gtt_size = MB(2); 1197 *gtt_offset = *gtt_size = MB(2);
1190 break; 1198 break;
1191 default: 1199 default:
1192 *gtt_offset = *gtt_size = KB(512); 1200 *gtt_offset = *gtt_size = KB(512);
1193 } 1201 }
1194 } 1202 }
1195 1203
/* The intel i965 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for in the GTT.
 */
static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;
	int gtt_offset, gtt_size;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	/* No software GATT page is allocated; the hardware table is reused. */
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);

	/* Mask MMADDR down to the MMIO base (low bits are flags/reserved). */
	temp &= 0xfff00000;

	/* The GTT lives at a device-dependent offset past the MMIO base. */
	intel_i965_get_gtt_range(&gtt_offset, &gtt_size);

	intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);

	if (!intel_private.gtt)
		return -ENOMEM;

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers) {
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();

	agp_bridge->gatt_table = NULL;

	/* Reuse the address POST already programmed into PGETBL_CTL. */
	agp_bridge->gatt_bus_addr = temp;

	return 0;
}
1241 1249
1242 1250
1243 static int intel_fetch_size(void) 1251 static int intel_fetch_size(void)
1244 { 1252 {
1245 int i; 1253 int i;
1246 u16 temp; 1254 u16 temp;
1247 struct aper_size_info_16 *values; 1255 struct aper_size_info_16 *values;
1248 1256
1249 pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp); 1257 pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
1250 values = A_SIZE_16(agp_bridge->driver->aperture_sizes); 1258 values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
1251 1259
1252 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { 1260 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
1253 if (temp == values[i].size_value) { 1261 if (temp == values[i].size_value) {
1254 agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); 1262 agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
1255 agp_bridge->aperture_size_idx = i; 1263 agp_bridge->aperture_size_idx = i;
1256 return values[i].size; 1264 return values[i].size;
1257 } 1265 }
1258 } 1266 }
1259 1267
1260 return 0; 1268 return 0;
1261 } 1269 }
1262 1270
1263 static int __intel_8xx_fetch_size(u8 temp) 1271 static int __intel_8xx_fetch_size(u8 temp)
1264 { 1272 {
1265 int i; 1273 int i;
1266 struct aper_size_info_8 *values; 1274 struct aper_size_info_8 *values;
1267 1275
1268 values = A_SIZE_8(agp_bridge->driver->aperture_sizes); 1276 values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
1269 1277
1270 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { 1278 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
1271 if (temp == values[i].size_value) { 1279 if (temp == values[i].size_value) {
1272 agp_bridge->previous_size = 1280 agp_bridge->previous_size =
1273 agp_bridge->current_size = (void *) (values + i); 1281 agp_bridge->current_size = (void *) (values + i);
1274 agp_bridge->aperture_size_idx = i; 1282 agp_bridge->aperture_size_idx = i;
1275 return values[i].size; 1283 return values[i].size;
1276 } 1284 }
1277 } 1285 }
1278 return 0; 1286 return 0;
1279 } 1287 }
1280 1288
1281 static int intel_8xx_fetch_size(void) 1289 static int intel_8xx_fetch_size(void)
1282 { 1290 {
1283 u8 temp; 1291 u8 temp;
1284 1292
1285 pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp); 1293 pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
1286 return __intel_8xx_fetch_size(temp); 1294 return __intel_8xx_fetch_size(temp);
1287 } 1295 }
1288 1296
1289 static int intel_815_fetch_size(void) 1297 static int intel_815_fetch_size(void)
1290 { 1298 {
1291 u8 temp; 1299 u8 temp;
1292 1300
1293 /* Intel 815 chipsets have a _weird_ APSIZE register with only 1301 /* Intel 815 chipsets have a _weird_ APSIZE register with only
1294 * one non-reserved bit, so mask the others out ... */ 1302 * one non-reserved bit, so mask the others out ... */
1295 pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp); 1303 pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
1296 temp &= (1 << 3); 1304 temp &= (1 << 3);
1297 1305
1298 return __intel_8xx_fetch_size(temp); 1306 return __intel_8xx_fetch_size(temp);
1299 } 1307 }
1300 1308
/*
 * Flush the AGP TLB on classic Intel bridges by pulsing AGPCTRL.
 * NOTE(review): the 0x2200/0x2280 values come from the chipset datasheet;
 * presumably this clears and re-sets the GTLB-enable bit — confirm.
 */
static void intel_tlbflush(struct agp_memory *mem)
{
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
}
1306 1314
1307 1315
/*
 * Flush the AGP TLB on 8xx bridges by toggling bit 7 of AGPCTRL
 * (clear then set), re-reading the register between the writes.
 */
static void intel_8xx_tlbflush(struct agp_memory *mem)
{
	u32 temp;
	pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp & ~(1 << 7));
	pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp | (1 << 7));
}
1316 1324
1317 1325
/*
 * Disable AGP on a classic Intel bridge: clear the enable bit in NBXCFG
 * and restore the aperture size programmed before we configured it.
 */
static void intel_cleanup(void)
{
	u16 temp;
	struct aper_size_info_16 *previous_size;

	previous_size = A_SIZE_16(agp_bridge->previous_size);
	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
	pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
}
1328 1336
1329 1337
/*
 * Disable AGP on an 8xx bridge: clear the enable bit in NBXCFG and
 * restore the previous aperture size (APSIZE is one byte here).
 */
static void intel_8xx_cleanup(void)
{
	u16 temp;
	struct aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge->previous_size);
	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
}
1340 1348
1341 1349
/*
 * Configure a classic Intel AGP bridge: program aperture size, latch the
 * aperture bus address, point ATTBASE at the GATT, enable AGP in NBXCFG,
 * and clear stale error status.  Always returns 0.
 */
static int intel_configure(void)
{
	u32 temp;
	u16 temp2;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);

	/* paccfg/nbxcfg */
	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
			(temp2 & ~(1 << 10)) | (1 << 9));
	/* clear any possible error conditions */
	pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
	return 0;
}
1371 1379
1372 static int intel_815_configure(void) 1380 static int intel_815_configure(void)
1373 { 1381 {
1374 u32 temp, addr; 1382 u32 temp, addr;
1375 u8 temp2; 1383 u8 temp2;
1376 struct aper_size_info_8 *current_size; 1384 struct aper_size_info_8 *current_size;
1377 1385
1378 /* attbase - aperture base */ 1386 /* attbase - aperture base */
1379 /* the Intel 815 chipset spec. says that bits 29-31 in the 1387 /* the Intel 815 chipset spec. says that bits 29-31 in the
1380 * ATTBASE register are reserved -> try not to write them */ 1388 * ATTBASE register are reserved -> try not to write them */
1381 if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) { 1389 if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) {
1382 printk(KERN_EMERG PFX "gatt bus addr too high"); 1390 dev_emerg(&agp_bridge->dev->dev, "gatt bus addr too high");
1383 return -EINVAL; 1391 return -EINVAL;
1384 } 1392 }
1385 1393
1386 current_size = A_SIZE_8(agp_bridge->current_size); 1394 current_size = A_SIZE_8(agp_bridge->current_size);
1387 1395
1388 /* aperture size */ 1396 /* aperture size */
1389 pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, 1397 pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
1390 current_size->size_value); 1398 current_size->size_value);
1391 1399
1392 /* address to map to */ 1400 /* address to map to */
1393 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); 1401 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
1394 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); 1402 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1395 1403
1396 pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr); 1404 pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr);
1397 addr &= INTEL_815_ATTBASE_MASK; 1405 addr &= INTEL_815_ATTBASE_MASK;
1398 addr |= agp_bridge->gatt_bus_addr; 1406 addr |= agp_bridge->gatt_bus_addr;
1399 pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, addr); 1407 pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, addr);
1400 1408
1401 /* agpctrl */ 1409 /* agpctrl */
1402 pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); 1410 pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
1403 1411
1404 /* apcont */ 1412 /* apcont */
1405 pci_read_config_byte(agp_bridge->dev, INTEL_815_APCONT, &temp2); 1413 pci_read_config_byte(agp_bridge->dev, INTEL_815_APCONT, &temp2);
1406 pci_write_config_byte(agp_bridge->dev, INTEL_815_APCONT, temp2 | (1 << 1)); 1414 pci_write_config_byte(agp_bridge->dev, INTEL_815_APCONT, temp2 | (1 << 1));
1407 1415
1408 /* clear any possible error conditions */ 1416 /* clear any possible error conditions */
1409 /* Oddness : this chipset seems to have no ERRSTS register ! */ 1417 /* Oddness : this chipset seems to have no ERRSTS register ! */
1410 return 0; 1418 return 0;
1411 } 1419 }
1412 1420
/* The i820 needs no explicit TLB flush; this hook is intentionally empty. */
static void intel_820_tlbflush(struct agp_memory *mem)
{
}
1417 1425
/*
 * Disable AGP on the i820: clear the aperture-enable bit in RDCR (the
 * i820 uses RDCR where later chips use MCHCFG) and restore the previous
 * aperture size.
 */
static void intel_820_cleanup(void)
{
	u8 temp;
	struct aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge->previous_size);
	pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp);
	pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR,
			temp & ~(1 << 1));
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
			previous_size->size_value);
}
1430 1438
1431 1439
/*
 * Configure the i820 bridge: aperture size, aperture base, GATT base,
 * AGPCTRL, then the aperture-enable bit in RDCR and error-status clear.
 * Always returns 0.
 */
static int intel_820_configure(void)
{
	u32 temp;
	u8 temp2;
	struct aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);

	/* global enable aperture access */
	/* This flag is not accessed through MCHCFG register as in */
	/* i850 chipset. */
	pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp2);
	pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR, temp2 | (1 << 1));
	/* clear any possible AGP-related error conditions */
	pci_write_config_word(agp_bridge->dev, INTEL_I820_ERRSTS, 0x001c);
	return 0;
}
1462 1470
/*
 * Configure the i840 bridge: aperture size, aperture base, GATT base,
 * AGPCTRL, then the enable bit in MCHCFG and error-status clear.
 * Always returns 0.
 */
static int intel_840_configure(void)
{
	u32 temp;
	u16 temp2;
	struct aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);

	/* mcgcfg */
	pci_read_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, &temp2);
	pci_write_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, temp2 | (1 << 9));
	/* clear any possible error conditions */
	pci_write_config_word(agp_bridge->dev, INTEL_I840_ERRSTS, 0xc000);
	return 0;
}
1491 1499
/*
 * Configure the i845 bridge.  Caches/restores the aperture base across
 * reconfiguration via apbase_config (some BIOSes clear APBASE on
 * suspend), programs the GATT base and AGPM enable bit, clears error
 * status, and sets up the i830 chipset-flush mechanism.  Returns 0.
 */
static int intel_845_configure(void)
{
	u32 temp;
	u8 temp2;
	struct aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);

	if (agp_bridge->apbase_config != 0) {
		/* restore the aperture base saved on a previous configure */
		pci_write_config_dword(agp_bridge->dev, AGP_APBASE,
				       agp_bridge->apbase_config);
	} else {
		/* address to map to */
		pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
		agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
		agp_bridge->apbase_config = temp;
	}

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);

	/* agpm */
	pci_read_config_byte(agp_bridge->dev, INTEL_I845_AGPM, &temp2);
	pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
	/* clear any possible error conditions */
	pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);

	intel_i830_setup_flush();
	return 0;
}
1528 1536
1529 static int intel_850_configure(void) 1537 static int intel_850_configure(void)
1530 { 1538 {
1531 u32 temp; 1539 u32 temp;
1532 u16 temp2; 1540 u16 temp2;
1533 struct aper_size_info_8 *current_size; 1541 struct aper_size_info_8 *current_size;
1534 1542
1535 current_size = A_SIZE_8(agp_bridge->current_size); 1543 current_size = A_SIZE_8(agp_bridge->current_size);
1536 1544
1537 /* aperture size */ 1545 /* aperture size */
1538 pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); 1546 pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
1539 1547
1540 /* address to map to */ 1548 /* address to map to */
1541 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); 1549 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
1542 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); 1550 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1543 1551
1544 /* attbase - aperture base */ 1552 /* attbase - aperture base */
1545 pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); 1553 pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
1546 1554
1547 /* agpctrl */ 1555 /* agpctrl */
1548 pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); 1556 pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
1549 1557
1550 /* mcgcfg */ 1558 /* mcgcfg */
1551 pci_read_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, &temp2); 1559 pci_read_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, &temp2);
1552 pci_write_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, temp2 | (1 << 9)); 1560 pci_write_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, temp2 | (1 << 9));
1553 /* clear any possible AGP-related error conditions */ 1561 /* clear any possible AGP-related error conditions */
1554 pci_write_config_word(agp_bridge->dev, INTEL_I850_ERRSTS, 0x001c); 1562 pci_write_config_word(agp_bridge->dev, INTEL_I850_ERRSTS, 0x001c);
1555 return 0; 1563 return 0;
1556 } 1564 }
1557 1565
1558 static int intel_860_configure(void) 1566 static int intel_860_configure(void)
1559 { 1567 {
1560 u32 temp; 1568 u32 temp;
1561 u16 temp2; 1569 u16 temp2;
1562 struct aper_size_info_8 *current_size; 1570 struct aper_size_info_8 *current_size;
1563 1571
1564 current_size = A_SIZE_8(agp_bridge->current_size); 1572 current_size = A_SIZE_8(agp_bridge->current_size);
1565 1573
1566 /* aperture size */ 1574 /* aperture size */
1567 pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); 1575 pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
1568 1576
1569 /* address to map to */ 1577 /* address to map to */
1570 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); 1578 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
1571 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); 1579 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1572 1580
1573 /* attbase - aperture base */ 1581 /* attbase - aperture base */
1574 pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); 1582 pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
1575 1583
1576 /* agpctrl */ 1584 /* agpctrl */
1577 pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); 1585 pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
1578 1586
1579 /* mcgcfg */ 1587 /* mcgcfg */
1580 pci_read_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, &temp2); 1588 pci_read_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, &temp2);
1581 pci_write_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, temp2 | (1 << 9)); 1589 pci_write_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, temp2 | (1 << 9));
1582 /* clear any possible AGP-related error conditions */ 1590 /* clear any possible AGP-related error conditions */
1583 pci_write_config_word(agp_bridge->dev, INTEL_I860_ERRSTS, 0xf700); 1591 pci_write_config_word(agp_bridge->dev, INTEL_I860_ERRSTS, 0xf700);
1584 return 0; 1592 return 0;
1585 } 1593 }
1586 1594
1587 static int intel_830mp_configure(void) 1595 static int intel_830mp_configure(void)
1588 { 1596 {
1589 u32 temp; 1597 u32 temp;
1590 u16 temp2; 1598 u16 temp2;
1591 struct aper_size_info_8 *current_size; 1599 struct aper_size_info_8 *current_size;
1592 1600
1593 current_size = A_SIZE_8(agp_bridge->current_size); 1601 current_size = A_SIZE_8(agp_bridge->current_size);
1594 1602
1595 /* aperture size */ 1603 /* aperture size */
1596 pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); 1604 pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
1597 1605
1598 /* address to map to */ 1606 /* address to map to */
1599 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); 1607 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
1600 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); 1608 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1601 1609
1602 /* attbase - aperture base */ 1610 /* attbase - aperture base */
1603 pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); 1611 pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
1604 1612
1605 /* agpctrl */ 1613 /* agpctrl */
1606 pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); 1614 pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
1607 1615
1608 /* gmch */ 1616 /* gmch */
1609 pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2); 1617 pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
1610 pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp2 | (1 << 9)); 1618 pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp2 | (1 << 9));
1611 /* clear any possible AGP-related error conditions */ 1619 /* clear any possible AGP-related error conditions */
1612 pci_write_config_word(agp_bridge->dev, INTEL_I830_ERRSTS, 0x1c); 1620 pci_write_config_word(agp_bridge->dev, INTEL_I830_ERRSTS, 0x1c);
1613 return 0; 1621 return 0;
1614 } 1622 }
1615 1623
1616 static int intel_7505_configure(void) 1624 static int intel_7505_configure(void)
1617 { 1625 {
1618 u32 temp; 1626 u32 temp;
1619 u16 temp2; 1627 u16 temp2;
1620 struct aper_size_info_8 *current_size; 1628 struct aper_size_info_8 *current_size;
1621 1629
1622 current_size = A_SIZE_8(agp_bridge->current_size); 1630 current_size = A_SIZE_8(agp_bridge->current_size);
1623 1631
1624 /* aperture size */ 1632 /* aperture size */
1625 pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); 1633 pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
1626 1634
1627 /* address to map to */ 1635 /* address to map to */
1628 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); 1636 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
1629 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); 1637 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1630 1638
1631 /* attbase - aperture base */ 1639 /* attbase - aperture base */
1632 pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); 1640 pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
1633 1641
1634 /* agpctrl */ 1642 /* agpctrl */
1635 pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); 1643 pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
1636 1644
1637 /* mchcfg */ 1645 /* mchcfg */
1638 pci_read_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, &temp2); 1646 pci_read_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, &temp2);
1639 pci_write_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, temp2 | (1 << 9)); 1647 pci_write_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, temp2 | (1 << 9));
1640 1648
1641 return 0; 1649 return 0;
1642 } 1650 }
1643 1651
1644 /* Setup function */ 1652 /* Setup function */
1645 static const struct gatt_mask intel_generic_masks[] = 1653 static const struct gatt_mask intel_generic_masks[] =
1646 { 1654 {
1647 {.mask = 0x00000017, .type = 0} 1655 {.mask = 0x00000017, .type = 0}
1648 }; 1656 };
1649 1657
1650 static const struct aper_size_info_8 intel_815_sizes[2] = 1658 static const struct aper_size_info_8 intel_815_sizes[2] =
1651 { 1659 {
1652 {64, 16384, 4, 0}, 1660 {64, 16384, 4, 0},
1653 {32, 8192, 3, 8}, 1661 {32, 8192, 3, 8},
1654 }; 1662 };
1655 1663
1656 static const struct aper_size_info_8 intel_8xx_sizes[7] = 1664 static const struct aper_size_info_8 intel_8xx_sizes[7] =
1657 { 1665 {
1658 {256, 65536, 6, 0}, 1666 {256, 65536, 6, 0},
1659 {128, 32768, 5, 32}, 1667 {128, 32768, 5, 32},
1660 {64, 16384, 4, 48}, 1668 {64, 16384, 4, 48},
1661 {32, 8192, 3, 56}, 1669 {32, 8192, 3, 56},
1662 {16, 4096, 2, 60}, 1670 {16, 4096, 2, 60},
1663 {8, 2048, 1, 62}, 1671 {8, 2048, 1, 62},
1664 {4, 1024, 0, 63} 1672 {4, 1024, 0, 63}
1665 }; 1673 };
1666 1674
1667 static const struct aper_size_info_16 intel_generic_sizes[7] = 1675 static const struct aper_size_info_16 intel_generic_sizes[7] =
1668 { 1676 {
1669 {256, 65536, 6, 0}, 1677 {256, 65536, 6, 0},
1670 {128, 32768, 5, 32}, 1678 {128, 32768, 5, 32},
1671 {64, 16384, 4, 48}, 1679 {64, 16384, 4, 48},
1672 {32, 8192, 3, 56}, 1680 {32, 8192, 3, 56},
1673 {16, 4096, 2, 60}, 1681 {16, 4096, 2, 60},
1674 {8, 2048, 1, 62}, 1682 {8, 2048, 1, 62},
1675 {4, 1024, 0, 63} 1683 {4, 1024, 0, 63}
1676 }; 1684 };
1677 1685
1678 static const struct aper_size_info_8 intel_830mp_sizes[4] = 1686 static const struct aper_size_info_8 intel_830mp_sizes[4] =
1679 { 1687 {
1680 {256, 65536, 6, 0}, 1688 {256, 65536, 6, 0},
1681 {128, 32768, 5, 32}, 1689 {128, 32768, 5, 32},
1682 {64, 16384, 4, 48}, 1690 {64, 16384, 4, 48},
1683 {32, 8192, 3, 56} 1691 {32, 8192, 3, 56}
1684 }; 1692 };
1685 1693
1686 static const struct agp_bridge_driver intel_generic_driver = { 1694 static const struct agp_bridge_driver intel_generic_driver = {
1687 .owner = THIS_MODULE, 1695 .owner = THIS_MODULE,
1688 .aperture_sizes = intel_generic_sizes, 1696 .aperture_sizes = intel_generic_sizes,
1689 .size_type = U16_APER_SIZE, 1697 .size_type = U16_APER_SIZE,
1690 .num_aperture_sizes = 7, 1698 .num_aperture_sizes = 7,
1691 .configure = intel_configure, 1699 .configure = intel_configure,
1692 .fetch_size = intel_fetch_size, 1700 .fetch_size = intel_fetch_size,
1693 .cleanup = intel_cleanup, 1701 .cleanup = intel_cleanup,
1694 .tlb_flush = intel_tlbflush, 1702 .tlb_flush = intel_tlbflush,
1695 .mask_memory = agp_generic_mask_memory, 1703 .mask_memory = agp_generic_mask_memory,
1696 .masks = intel_generic_masks, 1704 .masks = intel_generic_masks,
1697 .agp_enable = agp_generic_enable, 1705 .agp_enable = agp_generic_enable,
1698 .cache_flush = global_cache_flush, 1706 .cache_flush = global_cache_flush,
1699 .create_gatt_table = agp_generic_create_gatt_table, 1707 .create_gatt_table = agp_generic_create_gatt_table,
1700 .free_gatt_table = agp_generic_free_gatt_table, 1708 .free_gatt_table = agp_generic_free_gatt_table,
1701 .insert_memory = agp_generic_insert_memory, 1709 .insert_memory = agp_generic_insert_memory,
1702 .remove_memory = agp_generic_remove_memory, 1710 .remove_memory = agp_generic_remove_memory,
1703 .alloc_by_type = agp_generic_alloc_by_type, 1711 .alloc_by_type = agp_generic_alloc_by_type,
1704 .free_by_type = agp_generic_free_by_type, 1712 .free_by_type = agp_generic_free_by_type,
1705 .agp_alloc_page = agp_generic_alloc_page, 1713 .agp_alloc_page = agp_generic_alloc_page,
1706 .agp_destroy_page = agp_generic_destroy_page, 1714 .agp_destroy_page = agp_generic_destroy_page,
1707 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 1715 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1708 }; 1716 };
1709 1717
1710 static const struct agp_bridge_driver intel_810_driver = { 1718 static const struct agp_bridge_driver intel_810_driver = {
1711 .owner = THIS_MODULE, 1719 .owner = THIS_MODULE,
1712 .aperture_sizes = intel_i810_sizes, 1720 .aperture_sizes = intel_i810_sizes,
1713 .size_type = FIXED_APER_SIZE, 1721 .size_type = FIXED_APER_SIZE,
1714 .num_aperture_sizes = 2, 1722 .num_aperture_sizes = 2,
1715 .needs_scratch_page = true, 1723 .needs_scratch_page = true,
1716 .configure = intel_i810_configure, 1724 .configure = intel_i810_configure,
1717 .fetch_size = intel_i810_fetch_size, 1725 .fetch_size = intel_i810_fetch_size,
1718 .cleanup = intel_i810_cleanup, 1726 .cleanup = intel_i810_cleanup,
1719 .tlb_flush = intel_i810_tlbflush, 1727 .tlb_flush = intel_i810_tlbflush,
1720 .mask_memory = intel_i810_mask_memory, 1728 .mask_memory = intel_i810_mask_memory,
1721 .masks = intel_i810_masks, 1729 .masks = intel_i810_masks,
1722 .agp_enable = intel_i810_agp_enable, 1730 .agp_enable = intel_i810_agp_enable,
1723 .cache_flush = global_cache_flush, 1731 .cache_flush = global_cache_flush,
1724 .create_gatt_table = agp_generic_create_gatt_table, 1732 .create_gatt_table = agp_generic_create_gatt_table,
1725 .free_gatt_table = agp_generic_free_gatt_table, 1733 .free_gatt_table = agp_generic_free_gatt_table,
1726 .insert_memory = intel_i810_insert_entries, 1734 .insert_memory = intel_i810_insert_entries,
1727 .remove_memory = intel_i810_remove_entries, 1735 .remove_memory = intel_i810_remove_entries,
1728 .alloc_by_type = intel_i810_alloc_by_type, 1736 .alloc_by_type = intel_i810_alloc_by_type,
1729 .free_by_type = intel_i810_free_by_type, 1737 .free_by_type = intel_i810_free_by_type,
1730 .agp_alloc_page = agp_generic_alloc_page, 1738 .agp_alloc_page = agp_generic_alloc_page,
1731 .agp_destroy_page = agp_generic_destroy_page, 1739 .agp_destroy_page = agp_generic_destroy_page,
1732 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 1740 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1733 }; 1741 };
1734 1742
1735 static const struct agp_bridge_driver intel_815_driver = { 1743 static const struct agp_bridge_driver intel_815_driver = {
1736 .owner = THIS_MODULE, 1744 .owner = THIS_MODULE,
1737 .aperture_sizes = intel_815_sizes, 1745 .aperture_sizes = intel_815_sizes,
1738 .size_type = U8_APER_SIZE, 1746 .size_type = U8_APER_SIZE,
1739 .num_aperture_sizes = 2, 1747 .num_aperture_sizes = 2,
1740 .configure = intel_815_configure, 1748 .configure = intel_815_configure,
1741 .fetch_size = intel_815_fetch_size, 1749 .fetch_size = intel_815_fetch_size,
1742 .cleanup = intel_8xx_cleanup, 1750 .cleanup = intel_8xx_cleanup,
1743 .tlb_flush = intel_8xx_tlbflush, 1751 .tlb_flush = intel_8xx_tlbflush,
1744 .mask_memory = agp_generic_mask_memory, 1752 .mask_memory = agp_generic_mask_memory,
1745 .masks = intel_generic_masks, 1753 .masks = intel_generic_masks,
1746 .agp_enable = agp_generic_enable, 1754 .agp_enable = agp_generic_enable,
1747 .cache_flush = global_cache_flush, 1755 .cache_flush = global_cache_flush,
1748 .create_gatt_table = agp_generic_create_gatt_table, 1756 .create_gatt_table = agp_generic_create_gatt_table,
1749 .free_gatt_table = agp_generic_free_gatt_table, 1757 .free_gatt_table = agp_generic_free_gatt_table,
1750 .insert_memory = agp_generic_insert_memory, 1758 .insert_memory = agp_generic_insert_memory,
1751 .remove_memory = agp_generic_remove_memory, 1759 .remove_memory = agp_generic_remove_memory,
1752 .alloc_by_type = agp_generic_alloc_by_type, 1760 .alloc_by_type = agp_generic_alloc_by_type,
1753 .free_by_type = agp_generic_free_by_type, 1761 .free_by_type = agp_generic_free_by_type,
1754 .agp_alloc_page = agp_generic_alloc_page, 1762 .agp_alloc_page = agp_generic_alloc_page,
1755 .agp_destroy_page = agp_generic_destroy_page, 1763 .agp_destroy_page = agp_generic_destroy_page,
1756 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 1764 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1757 }; 1765 };
1758 1766
1759 static const struct agp_bridge_driver intel_830_driver = { 1767 static const struct agp_bridge_driver intel_830_driver = {
1760 .owner = THIS_MODULE, 1768 .owner = THIS_MODULE,
1761 .aperture_sizes = intel_i830_sizes, 1769 .aperture_sizes = intel_i830_sizes,
1762 .size_type = FIXED_APER_SIZE, 1770 .size_type = FIXED_APER_SIZE,
1763 .num_aperture_sizes = 4, 1771 .num_aperture_sizes = 4,
1764 .needs_scratch_page = true, 1772 .needs_scratch_page = true,
1765 .configure = intel_i830_configure, 1773 .configure = intel_i830_configure,
1766 .fetch_size = intel_i830_fetch_size, 1774 .fetch_size = intel_i830_fetch_size,
1767 .cleanup = intel_i830_cleanup, 1775 .cleanup = intel_i830_cleanup,
1768 .tlb_flush = intel_i810_tlbflush, 1776 .tlb_flush = intel_i810_tlbflush,
1769 .mask_memory = intel_i810_mask_memory, 1777 .mask_memory = intel_i810_mask_memory,
1770 .masks = intel_i810_masks, 1778 .masks = intel_i810_masks,
1771 .agp_enable = intel_i810_agp_enable, 1779 .agp_enable = intel_i810_agp_enable,
1772 .cache_flush = global_cache_flush, 1780 .cache_flush = global_cache_flush,
1773 .create_gatt_table = intel_i830_create_gatt_table, 1781 .create_gatt_table = intel_i830_create_gatt_table,
1774 .free_gatt_table = intel_i830_free_gatt_table, 1782 .free_gatt_table = intel_i830_free_gatt_table,
1775 .insert_memory = intel_i830_insert_entries, 1783 .insert_memory = intel_i830_insert_entries,
1776 .remove_memory = intel_i830_remove_entries, 1784 .remove_memory = intel_i830_remove_entries,
1777 .alloc_by_type = intel_i830_alloc_by_type, 1785 .alloc_by_type = intel_i830_alloc_by_type,
1778 .free_by_type = intel_i810_free_by_type, 1786 .free_by_type = intel_i810_free_by_type,
1779 .agp_alloc_page = agp_generic_alloc_page, 1787 .agp_alloc_page = agp_generic_alloc_page,
1780 .agp_destroy_page = agp_generic_destroy_page, 1788 .agp_destroy_page = agp_generic_destroy_page,
1781 .agp_type_to_mask_type = intel_i830_type_to_mask_type, 1789 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1782 .chipset_flush = intel_i830_chipset_flush, 1790 .chipset_flush = intel_i830_chipset_flush,
1783 }; 1791 };
1784 1792
1785 static const struct agp_bridge_driver intel_820_driver = { 1793 static const struct agp_bridge_driver intel_820_driver = {
1786 .owner = THIS_MODULE, 1794 .owner = THIS_MODULE,
1787 .aperture_sizes = intel_8xx_sizes, 1795 .aperture_sizes = intel_8xx_sizes,
1788 .size_type = U8_APER_SIZE, 1796 .size_type = U8_APER_SIZE,
1789 .num_aperture_sizes = 7, 1797 .num_aperture_sizes = 7,
1790 .configure = intel_820_configure, 1798 .configure = intel_820_configure,
1791 .fetch_size = intel_8xx_fetch_size, 1799 .fetch_size = intel_8xx_fetch_size,
1792 .cleanup = intel_820_cleanup, 1800 .cleanup = intel_820_cleanup,
1793 .tlb_flush = intel_820_tlbflush, 1801 .tlb_flush = intel_820_tlbflush,
1794 .mask_memory = agp_generic_mask_memory, 1802 .mask_memory = agp_generic_mask_memory,
1795 .masks = intel_generic_masks, 1803 .masks = intel_generic_masks,
1796 .agp_enable = agp_generic_enable, 1804 .agp_enable = agp_generic_enable,
1797 .cache_flush = global_cache_flush, 1805 .cache_flush = global_cache_flush,
1798 .create_gatt_table = agp_generic_create_gatt_table, 1806 .create_gatt_table = agp_generic_create_gatt_table,
1799 .free_gatt_table = agp_generic_free_gatt_table, 1807 .free_gatt_table = agp_generic_free_gatt_table,
1800 .insert_memory = agp_generic_insert_memory, 1808 .insert_memory = agp_generic_insert_memory,
1801 .remove_memory = agp_generic_remove_memory, 1809 .remove_memory = agp_generic_remove_memory,
1802 .alloc_by_type = agp_generic_alloc_by_type, 1810 .alloc_by_type = agp_generic_alloc_by_type,
1803 .free_by_type = agp_generic_free_by_type, 1811 .free_by_type = agp_generic_free_by_type,
1804 .agp_alloc_page = agp_generic_alloc_page, 1812 .agp_alloc_page = agp_generic_alloc_page,
1805 .agp_destroy_page = agp_generic_destroy_page, 1813 .agp_destroy_page = agp_generic_destroy_page,
1806 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 1814 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1807 }; 1815 };
1808 1816
1809 static const struct agp_bridge_driver intel_830mp_driver = { 1817 static const struct agp_bridge_driver intel_830mp_driver = {
1810 .owner = THIS_MODULE, 1818 .owner = THIS_MODULE,
1811 .aperture_sizes = intel_830mp_sizes, 1819 .aperture_sizes = intel_830mp_sizes,
1812 .size_type = U8_APER_SIZE, 1820 .size_type = U8_APER_SIZE,
1813 .num_aperture_sizes = 4, 1821 .num_aperture_sizes = 4,
1814 .configure = intel_830mp_configure, 1822 .configure = intel_830mp_configure,
1815 .fetch_size = intel_8xx_fetch_size, 1823 .fetch_size = intel_8xx_fetch_size,
1816 .cleanup = intel_8xx_cleanup, 1824 .cleanup = intel_8xx_cleanup,
1817 .tlb_flush = intel_8xx_tlbflush, 1825 .tlb_flush = intel_8xx_tlbflush,
1818 .mask_memory = agp_generic_mask_memory, 1826 .mask_memory = agp_generic_mask_memory,
1819 .masks = intel_generic_masks, 1827 .masks = intel_generic_masks,
1820 .agp_enable = agp_generic_enable, 1828 .agp_enable = agp_generic_enable,
1821 .cache_flush = global_cache_flush, 1829 .cache_flush = global_cache_flush,
1822 .create_gatt_table = agp_generic_create_gatt_table, 1830 .create_gatt_table = agp_generic_create_gatt_table,
1823 .free_gatt_table = agp_generic_free_gatt_table, 1831 .free_gatt_table = agp_generic_free_gatt_table,
1824 .insert_memory = agp_generic_insert_memory, 1832 .insert_memory = agp_generic_insert_memory,
1825 .remove_memory = agp_generic_remove_memory, 1833 .remove_memory = agp_generic_remove_memory,
1826 .alloc_by_type = agp_generic_alloc_by_type, 1834 .alloc_by_type = agp_generic_alloc_by_type,
1827 .free_by_type = agp_generic_free_by_type, 1835 .free_by_type = agp_generic_free_by_type,
1828 .agp_alloc_page = agp_generic_alloc_page, 1836 .agp_alloc_page = agp_generic_alloc_page,
1829 .agp_destroy_page = agp_generic_destroy_page, 1837 .agp_destroy_page = agp_generic_destroy_page,
1830 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 1838 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1831 }; 1839 };
1832 1840
1833 static const struct agp_bridge_driver intel_840_driver = { 1841 static const struct agp_bridge_driver intel_840_driver = {
1834 .owner = THIS_MODULE, 1842 .owner = THIS_MODULE,
1835 .aperture_sizes = intel_8xx_sizes, 1843 .aperture_sizes = intel_8xx_sizes,
1836 .size_type = U8_APER_SIZE, 1844 .size_type = U8_APER_SIZE,
1837 .num_aperture_sizes = 7, 1845 .num_aperture_sizes = 7,
1838 .configure = intel_840_configure, 1846 .configure = intel_840_configure,
1839 .fetch_size = intel_8xx_fetch_size, 1847 .fetch_size = intel_8xx_fetch_size,
1840 .cleanup = intel_8xx_cleanup, 1848 .cleanup = intel_8xx_cleanup,
1841 .tlb_flush = intel_8xx_tlbflush, 1849 .tlb_flush = intel_8xx_tlbflush,
1842 .mask_memory = agp_generic_mask_memory, 1850 .mask_memory = agp_generic_mask_memory,
1843 .masks = intel_generic_masks, 1851 .masks = intel_generic_masks,
1844 .agp_enable = agp_generic_enable, 1852 .agp_enable = agp_generic_enable,
1845 .cache_flush = global_cache_flush, 1853 .cache_flush = global_cache_flush,
1846 .create_gatt_table = agp_generic_create_gatt_table, 1854 .create_gatt_table = agp_generic_create_gatt_table,
1847 .free_gatt_table = agp_generic_free_gatt_table, 1855 .free_gatt_table = agp_generic_free_gatt_table,
1848 .insert_memory = agp_generic_insert_memory, 1856 .insert_memory = agp_generic_insert_memory,
1849 .remove_memory = agp_generic_remove_memory, 1857 .remove_memory = agp_generic_remove_memory,
1850 .alloc_by_type = agp_generic_alloc_by_type, 1858 .alloc_by_type = agp_generic_alloc_by_type,
1851 .free_by_type = agp_generic_free_by_type, 1859 .free_by_type = agp_generic_free_by_type,
1852 .agp_alloc_page = agp_generic_alloc_page, 1860 .agp_alloc_page = agp_generic_alloc_page,
1853 .agp_destroy_page = agp_generic_destroy_page, 1861 .agp_destroy_page = agp_generic_destroy_page,
1854 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 1862 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1855 }; 1863 };
1856 1864
1857 static const struct agp_bridge_driver intel_845_driver = { 1865 static const struct agp_bridge_driver intel_845_driver = {
1858 .owner = THIS_MODULE, 1866 .owner = THIS_MODULE,
1859 .aperture_sizes = intel_8xx_sizes, 1867 .aperture_sizes = intel_8xx_sizes,
1860 .size_type = U8_APER_SIZE, 1868 .size_type = U8_APER_SIZE,
1861 .num_aperture_sizes = 7, 1869 .num_aperture_sizes = 7,
1862 .configure = intel_845_configure, 1870 .configure = intel_845_configure,
1863 .fetch_size = intel_8xx_fetch_size, 1871 .fetch_size = intel_8xx_fetch_size,
1864 .cleanup = intel_8xx_cleanup, 1872 .cleanup = intel_8xx_cleanup,
1865 .tlb_flush = intel_8xx_tlbflush, 1873 .tlb_flush = intel_8xx_tlbflush,
1866 .mask_memory = agp_generic_mask_memory, 1874 .mask_memory = agp_generic_mask_memory,
1867 .masks = intel_generic_masks, 1875 .masks = intel_generic_masks,
1868 .agp_enable = agp_generic_enable, 1876 .agp_enable = agp_generic_enable,
1869 .cache_flush = global_cache_flush, 1877 .cache_flush = global_cache_flush,
1870 .create_gatt_table = agp_generic_create_gatt_table, 1878 .create_gatt_table = agp_generic_create_gatt_table,
1871 .free_gatt_table = agp_generic_free_gatt_table, 1879 .free_gatt_table = agp_generic_free_gatt_table,
1872 .insert_memory = agp_generic_insert_memory, 1880 .insert_memory = agp_generic_insert_memory,
1873 .remove_memory = agp_generic_remove_memory, 1881 .remove_memory = agp_generic_remove_memory,
1874 .alloc_by_type = agp_generic_alloc_by_type, 1882 .alloc_by_type = agp_generic_alloc_by_type,
1875 .free_by_type = agp_generic_free_by_type, 1883 .free_by_type = agp_generic_free_by_type,
1876 .agp_alloc_page = agp_generic_alloc_page, 1884 .agp_alloc_page = agp_generic_alloc_page,
1877 .agp_destroy_page = agp_generic_destroy_page, 1885 .agp_destroy_page = agp_generic_destroy_page,
1878 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 1886 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1879 .chipset_flush = intel_i830_chipset_flush, 1887 .chipset_flush = intel_i830_chipset_flush,
1880 }; 1888 };
1881 1889
1882 static const struct agp_bridge_driver intel_850_driver = { 1890 static const struct agp_bridge_driver intel_850_driver = {
1883 .owner = THIS_MODULE, 1891 .owner = THIS_MODULE,
1884 .aperture_sizes = intel_8xx_sizes, 1892 .aperture_sizes = intel_8xx_sizes,
1885 .size_type = U8_APER_SIZE, 1893 .size_type = U8_APER_SIZE,
1886 .num_aperture_sizes = 7, 1894 .num_aperture_sizes = 7,
1887 .configure = intel_850_configure, 1895 .configure = intel_850_configure,
1888 .fetch_size = intel_8xx_fetch_size, 1896 .fetch_size = intel_8xx_fetch_size,
1889 .cleanup = intel_8xx_cleanup, 1897 .cleanup = intel_8xx_cleanup,
1890 .tlb_flush = intel_8xx_tlbflush, 1898 .tlb_flush = intel_8xx_tlbflush,
1891 .mask_memory = agp_generic_mask_memory, 1899 .mask_memory = agp_generic_mask_memory,
1892 .masks = intel_generic_masks, 1900 .masks = intel_generic_masks,
1893 .agp_enable = agp_generic_enable, 1901 .agp_enable = agp_generic_enable,
1894 .cache_flush = global_cache_flush, 1902 .cache_flush = global_cache_flush,
1895 .create_gatt_table = agp_generic_create_gatt_table, 1903 .create_gatt_table = agp_generic_create_gatt_table,
1896 .free_gatt_table = agp_generic_free_gatt_table, 1904 .free_gatt_table = agp_generic_free_gatt_table,
1897 .insert_memory = agp_generic_insert_memory, 1905 .insert_memory = agp_generic_insert_memory,
1898 .remove_memory = agp_generic_remove_memory, 1906 .remove_memory = agp_generic_remove_memory,
1899 .alloc_by_type = agp_generic_alloc_by_type, 1907 .alloc_by_type = agp_generic_alloc_by_type,
1900 .free_by_type = agp_generic_free_by_type, 1908 .free_by_type = agp_generic_free_by_type,
1901 .agp_alloc_page = agp_generic_alloc_page, 1909 .agp_alloc_page = agp_generic_alloc_page,
1902 .agp_destroy_page = agp_generic_destroy_page, 1910 .agp_destroy_page = agp_generic_destroy_page,
1903 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 1911 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1904 }; 1912 };
1905 1913
1906 static const struct agp_bridge_driver intel_860_driver = { 1914 static const struct agp_bridge_driver intel_860_driver = {
1907 .owner = THIS_MODULE, 1915 .owner = THIS_MODULE,
1908 .aperture_sizes = intel_8xx_sizes, 1916 .aperture_sizes = intel_8xx_sizes,
1909 .size_type = U8_APER_SIZE, 1917 .size_type = U8_APER_SIZE,
1910 .num_aperture_sizes = 7, 1918 .num_aperture_sizes = 7,
1911 .configure = intel_860_configure, 1919 .configure = intel_860_configure,
1912 .fetch_size = intel_8xx_fetch_size, 1920 .fetch_size = intel_8xx_fetch_size,
1913 .cleanup = intel_8xx_cleanup, 1921 .cleanup = intel_8xx_cleanup,
1914 .tlb_flush = intel_8xx_tlbflush, 1922 .tlb_flush = intel_8xx_tlbflush,
1915 .mask_memory = agp_generic_mask_memory, 1923 .mask_memory = agp_generic_mask_memory,
1916 .masks = intel_generic_masks, 1924 .masks = intel_generic_masks,
1917 .agp_enable = agp_generic_enable, 1925 .agp_enable = agp_generic_enable,
1918 .cache_flush = global_cache_flush, 1926 .cache_flush = global_cache_flush,
1919 .create_gatt_table = agp_generic_create_gatt_table, 1927 .create_gatt_table = agp_generic_create_gatt_table,
1920 .free_gatt_table = agp_generic_free_gatt_table, 1928 .free_gatt_table = agp_generic_free_gatt_table,
1921 .insert_memory = agp_generic_insert_memory, 1929 .insert_memory = agp_generic_insert_memory,
1922 .remove_memory = agp_generic_remove_memory, 1930 .remove_memory = agp_generic_remove_memory,
1923 .alloc_by_type = agp_generic_alloc_by_type, 1931 .alloc_by_type = agp_generic_alloc_by_type,
1924 .free_by_type = agp_generic_free_by_type, 1932 .free_by_type = agp_generic_free_by_type,
1925 .agp_alloc_page = agp_generic_alloc_page, 1933 .agp_alloc_page = agp_generic_alloc_page,
1926 .agp_destroy_page = agp_generic_destroy_page, 1934 .agp_destroy_page = agp_generic_destroy_page,
1927 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 1935 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1928 }; 1936 };
1929 1937
1930 static const struct agp_bridge_driver intel_915_driver = { 1938 static const struct agp_bridge_driver intel_915_driver = {
1931 .owner = THIS_MODULE, 1939 .owner = THIS_MODULE,
1932 .aperture_sizes = intel_i830_sizes, 1940 .aperture_sizes = intel_i830_sizes,
1933 .size_type = FIXED_APER_SIZE, 1941 .size_type = FIXED_APER_SIZE,
1934 .num_aperture_sizes = 4, 1942 .num_aperture_sizes = 4,
1935 .needs_scratch_page = true, 1943 .needs_scratch_page = true,
1936 .configure = intel_i915_configure, 1944 .configure = intel_i915_configure,
1937 .fetch_size = intel_i9xx_fetch_size, 1945 .fetch_size = intel_i9xx_fetch_size,
1938 .cleanup = intel_i915_cleanup, 1946 .cleanup = intel_i915_cleanup,
1939 .tlb_flush = intel_i810_tlbflush, 1947 .tlb_flush = intel_i810_tlbflush,
1940 .mask_memory = intel_i810_mask_memory, 1948 .mask_memory = intel_i810_mask_memory,
1941 .masks = intel_i810_masks, 1949 .masks = intel_i810_masks,
1942 .agp_enable = intel_i810_agp_enable, 1950 .agp_enable = intel_i810_agp_enable,
1943 .cache_flush = global_cache_flush, 1951 .cache_flush = global_cache_flush,
1944 .create_gatt_table = intel_i915_create_gatt_table, 1952 .create_gatt_table = intel_i915_create_gatt_table,
1945 .free_gatt_table = intel_i830_free_gatt_table, 1953 .free_gatt_table = intel_i830_free_gatt_table,
1946 .insert_memory = intel_i915_insert_entries, 1954 .insert_memory = intel_i915_insert_entries,
1947 .remove_memory = intel_i915_remove_entries, 1955 .remove_memory = intel_i915_remove_entries,
1948 .alloc_by_type = intel_i830_alloc_by_type, 1956 .alloc_by_type = intel_i830_alloc_by_type,
1949 .free_by_type = intel_i810_free_by_type, 1957 .free_by_type = intel_i810_free_by_type,
1950 .agp_alloc_page = agp_generic_alloc_page, 1958 .agp_alloc_page = agp_generic_alloc_page,
1951 .agp_destroy_page = agp_generic_destroy_page, 1959 .agp_destroy_page = agp_generic_destroy_page,
1952 .agp_type_to_mask_type = intel_i830_type_to_mask_type, 1960 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1953 .chipset_flush = intel_i915_chipset_flush, 1961 .chipset_flush = intel_i915_chipset_flush,
1954 }; 1962 };
1955 1963
1956 static const struct agp_bridge_driver intel_i965_driver = { 1964 static const struct agp_bridge_driver intel_i965_driver = {
1957 .owner = THIS_MODULE, 1965 .owner = THIS_MODULE,
1958 .aperture_sizes = intel_i830_sizes, 1966 .aperture_sizes = intel_i830_sizes,
1959 .size_type = FIXED_APER_SIZE, 1967 .size_type = FIXED_APER_SIZE,
1960 .num_aperture_sizes = 4, 1968 .num_aperture_sizes = 4,
1961 .needs_scratch_page = true, 1969 .needs_scratch_page = true,
1962 .configure = intel_i915_configure, 1970 .configure = intel_i915_configure,
1963 .fetch_size = intel_i9xx_fetch_size, 1971 .fetch_size = intel_i9xx_fetch_size,
1964 .cleanup = intel_i915_cleanup, 1972 .cleanup = intel_i915_cleanup,
1965 .tlb_flush = intel_i810_tlbflush, 1973 .tlb_flush = intel_i810_tlbflush,
1966 .mask_memory = intel_i965_mask_memory, 1974 .mask_memory = intel_i965_mask_memory,
1967 .masks = intel_i810_masks, 1975 .masks = intel_i810_masks,
1968 .agp_enable = intel_i810_agp_enable, 1976 .agp_enable = intel_i810_agp_enable,
1969 .cache_flush = global_cache_flush, 1977 .cache_flush = global_cache_flush,
1970 .create_gatt_table = intel_i965_create_gatt_table, 1978 .create_gatt_table = intel_i965_create_gatt_table,
1971 .free_gatt_table = intel_i830_free_gatt_table, 1979 .free_gatt_table = intel_i830_free_gatt_table,
1972 .insert_memory = intel_i915_insert_entries, 1980 .insert_memory = intel_i915_insert_entries,
1973 .remove_memory = intel_i915_remove_entries, 1981 .remove_memory = intel_i915_remove_entries,
1974 .alloc_by_type = intel_i830_alloc_by_type, 1982 .alloc_by_type = intel_i830_alloc_by_type,
1975 .free_by_type = intel_i810_free_by_type, 1983 .free_by_type = intel_i810_free_by_type,
1976 .agp_alloc_page = agp_generic_alloc_page, 1984 .agp_alloc_page = agp_generic_alloc_page,
1977 .agp_destroy_page = agp_generic_destroy_page, 1985 .agp_destroy_page = agp_generic_destroy_page,
1978 .agp_type_to_mask_type = intel_i830_type_to_mask_type, 1986 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1979 .chipset_flush = intel_i915_chipset_flush, 1987 .chipset_flush = intel_i915_chipset_flush,
1980 }; 1988 };
1981 1989
1982 static const struct agp_bridge_driver intel_7505_driver = { 1990 static const struct agp_bridge_driver intel_7505_driver = {
1983 .owner = THIS_MODULE, 1991 .owner = THIS_MODULE,
1984 .aperture_sizes = intel_8xx_sizes, 1992 .aperture_sizes = intel_8xx_sizes,
1985 .size_type = U8_APER_SIZE, 1993 .size_type = U8_APER_SIZE,
1986 .num_aperture_sizes = 7, 1994 .num_aperture_sizes = 7,
1987 .configure = intel_7505_configure, 1995 .configure = intel_7505_configure,
1988 .fetch_size = intel_8xx_fetch_size, 1996 .fetch_size = intel_8xx_fetch_size,
1989 .cleanup = intel_8xx_cleanup, 1997 .cleanup = intel_8xx_cleanup,
1990 .tlb_flush = intel_8xx_tlbflush, 1998 .tlb_flush = intel_8xx_tlbflush,
1991 .mask_memory = agp_generic_mask_memory, 1999 .mask_memory = agp_generic_mask_memory,
1992 .masks = intel_generic_masks, 2000 .masks = intel_generic_masks,
1993 .agp_enable = agp_generic_enable, 2001 .agp_enable = agp_generic_enable,
1994 .cache_flush = global_cache_flush, 2002 .cache_flush = global_cache_flush,
1995 .create_gatt_table = agp_generic_create_gatt_table, 2003 .create_gatt_table = agp_generic_create_gatt_table,
1996 .free_gatt_table = agp_generic_free_gatt_table, 2004 .free_gatt_table = agp_generic_free_gatt_table,
1997 .insert_memory = agp_generic_insert_memory, 2005 .insert_memory = agp_generic_insert_memory,
1998 .remove_memory = agp_generic_remove_memory, 2006 .remove_memory = agp_generic_remove_memory,
1999 .alloc_by_type = agp_generic_alloc_by_type, 2007 .alloc_by_type = agp_generic_alloc_by_type,
2000 .free_by_type = agp_generic_free_by_type, 2008 .free_by_type = agp_generic_free_by_type,
2001 .agp_alloc_page = agp_generic_alloc_page, 2009 .agp_alloc_page = agp_generic_alloc_page,
2002 .agp_destroy_page = agp_generic_destroy_page, 2010 .agp_destroy_page = agp_generic_destroy_page,
2003 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 2011 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
2004 }; 2012 };
2005 2013
2006 static const struct agp_bridge_driver intel_g33_driver = { 2014 static const struct agp_bridge_driver intel_g33_driver = {
2007 .owner = THIS_MODULE, 2015 .owner = THIS_MODULE,
2008 .aperture_sizes = intel_i830_sizes, 2016 .aperture_sizes = intel_i830_sizes,
2009 .size_type = FIXED_APER_SIZE, 2017 .size_type = FIXED_APER_SIZE,
2010 .num_aperture_sizes = 4, 2018 .num_aperture_sizes = 4,
2011 .needs_scratch_page = true, 2019 .needs_scratch_page = true,
2012 .configure = intel_i915_configure, 2020 .configure = intel_i915_configure,
2013 .fetch_size = intel_i9xx_fetch_size, 2021 .fetch_size = intel_i9xx_fetch_size,
2014 .cleanup = intel_i915_cleanup, 2022 .cleanup = intel_i915_cleanup,
2015 .tlb_flush = intel_i810_tlbflush, 2023 .tlb_flush = intel_i810_tlbflush,
2016 .mask_memory = intel_i965_mask_memory, 2024 .mask_memory = intel_i965_mask_memory,
2017 .masks = intel_i810_masks, 2025 .masks = intel_i810_masks,
2018 .agp_enable = intel_i810_agp_enable, 2026 .agp_enable = intel_i810_agp_enable,
2019 .cache_flush = global_cache_flush, 2027 .cache_flush = global_cache_flush,
2020 .create_gatt_table = intel_i915_create_gatt_table, 2028 .create_gatt_table = intel_i915_create_gatt_table,
2021 .free_gatt_table = intel_i830_free_gatt_table, 2029 .free_gatt_table = intel_i830_free_gatt_table,
2022 .insert_memory = intel_i915_insert_entries, 2030 .insert_memory = intel_i915_insert_entries,
2023 .remove_memory = intel_i915_remove_entries, 2031 .remove_memory = intel_i915_remove_entries,
2024 .alloc_by_type = intel_i830_alloc_by_type, 2032 .alloc_by_type = intel_i830_alloc_by_type,
2025 .free_by_type = intel_i810_free_by_type, 2033 .free_by_type = intel_i810_free_by_type,
2026 .agp_alloc_page = agp_generic_alloc_page, 2034 .agp_alloc_page = agp_generic_alloc_page,
2027 .agp_destroy_page = agp_generic_destroy_page, 2035 .agp_destroy_page = agp_generic_destroy_page,
2028 .agp_type_to_mask_type = intel_i830_type_to_mask_type, 2036 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
2029 .chipset_flush = intel_i915_chipset_flush, 2037 .chipset_flush = intel_i915_chipset_flush,
2030 }; 2038 };
2031 2039
2032 static int find_gmch(u16 device) 2040 static int find_gmch(u16 device)
2033 { 2041 {
2034 struct pci_dev *gmch_device; 2042 struct pci_dev *gmch_device;
2035 2043
2036 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); 2044 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2037 if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) { 2045 if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
2038 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, 2046 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
2039 device, gmch_device); 2047 device, gmch_device);
2040 } 2048 }
2041 2049
2042 if (!gmch_device) 2050 if (!gmch_device)
2043 return 0; 2051 return 0;
2044 2052
2045 intel_private.pcidev = gmch_device; 2053 intel_private.pcidev = gmch_device;
2046 return 1; 2054 return 1;
2047 } 2055 }
2048 2056
2049 /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of 2057 /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
2050 * driver and gmch_driver must be non-null, and find_gmch will determine 2058 * driver and gmch_driver must be non-null, and find_gmch will determine
2051 * which one should be used if a gmch_chip_id is present. 2059 * which one should be used if a gmch_chip_id is present.
2052 */ 2060 */
2053 static const struct intel_driver_description { 2061 static const struct intel_driver_description {
2054 unsigned int chip_id; 2062 unsigned int chip_id;
2055 unsigned int gmch_chip_id; 2063 unsigned int gmch_chip_id;
2056 unsigned int multi_gmch_chip; /* if we have more gfx chip type on this HB. */ 2064 unsigned int multi_gmch_chip; /* if we have more gfx chip type on this HB. */
2057 char *name; 2065 char *name;
2058 const struct agp_bridge_driver *driver; 2066 const struct agp_bridge_driver *driver;
2059 const struct agp_bridge_driver *gmch_driver; 2067 const struct agp_bridge_driver *gmch_driver;
2060 } intel_agp_chipsets[] = { 2068 } intel_agp_chipsets[] = {
2061 { PCI_DEVICE_ID_INTEL_82443LX_0, 0, 0, "440LX", &intel_generic_driver, NULL }, 2069 { PCI_DEVICE_ID_INTEL_82443LX_0, 0, 0, "440LX", &intel_generic_driver, NULL },
2062 { PCI_DEVICE_ID_INTEL_82443BX_0, 0, 0, "440BX", &intel_generic_driver, NULL }, 2070 { PCI_DEVICE_ID_INTEL_82443BX_0, 0, 0, "440BX", &intel_generic_driver, NULL },
2063 { PCI_DEVICE_ID_INTEL_82443GX_0, 0, 0, "440GX", &intel_generic_driver, NULL }, 2071 { PCI_DEVICE_ID_INTEL_82443GX_0, 0, 0, "440GX", &intel_generic_driver, NULL },
2064 { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, 0, "i810", 2072 { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, 0, "i810",
2065 NULL, &intel_810_driver }, 2073 NULL, &intel_810_driver },
2066 { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, 0, "i810", 2074 { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, 0, "i810",
2067 NULL, &intel_810_driver }, 2075 NULL, &intel_810_driver },
2068 { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, 0, "i810", 2076 { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, 0, "i810",
2069 NULL, &intel_810_driver }, 2077 NULL, &intel_810_driver },
2070 { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, 0, "i815", 2078 { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, 0, "i815",
2071 &intel_815_driver, &intel_810_driver }, 2079 &intel_815_driver, &intel_810_driver },
2072 { PCI_DEVICE_ID_INTEL_82820_HB, 0, 0, "i820", &intel_820_driver, NULL }, 2080 { PCI_DEVICE_ID_INTEL_82820_HB, 0, 0, "i820", &intel_820_driver, NULL },
2073 { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, 0, "i820", &intel_820_driver, NULL }, 2081 { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, 0, "i820", &intel_820_driver, NULL },
2074 { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, 0, "830M", 2082 { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, 0, "830M",
2075 &intel_830mp_driver, &intel_830_driver }, 2083 &intel_830mp_driver, &intel_830_driver },
2076 { PCI_DEVICE_ID_INTEL_82840_HB, 0, 0, "i840", &intel_840_driver, NULL }, 2084 { PCI_DEVICE_ID_INTEL_82840_HB, 0, 0, "i840", &intel_840_driver, NULL },
2077 { PCI_DEVICE_ID_INTEL_82845_HB, 0, 0, "845G", &intel_845_driver, NULL }, 2085 { PCI_DEVICE_ID_INTEL_82845_HB, 0, 0, "845G", &intel_845_driver, NULL },
2078 { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M", 2086 { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M",
2079 &intel_845_driver, &intel_830_driver }, 2087 &intel_845_driver, &intel_830_driver },
2080 { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL }, 2088 { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL },
2081 { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL }, 2089 { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL },
2082 { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM", 2090 { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM",
2083 &intel_845_driver, &intel_830_driver }, 2091 &intel_845_driver, &intel_830_driver },
2084 { PCI_DEVICE_ID_INTEL_82860_HB, 0, 0, "i860", &intel_860_driver, NULL }, 2092 { PCI_DEVICE_ID_INTEL_82860_HB, 0, 0, "i860", &intel_860_driver, NULL },
2085 { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865", 2093 { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865",
2086 &intel_845_driver, &intel_830_driver }, 2094 &intel_845_driver, &intel_830_driver },
2087 { PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL }, 2095 { PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL },
2088 { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)", 2096 { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)",
2089 NULL, &intel_915_driver }, 2097 NULL, &intel_915_driver },
2090 { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G", 2098 { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G",
2091 NULL, &intel_915_driver }, 2099 NULL, &intel_915_driver },
2092 { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM", 2100 { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM",
2093 NULL, &intel_915_driver }, 2101 NULL, &intel_915_driver },
2094 { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G", 2102 { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G",
2095 NULL, &intel_915_driver }, 2103 NULL, &intel_915_driver },
2096 { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM", 2104 { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM",
2097 NULL, &intel_915_driver }, 2105 NULL, &intel_915_driver },
2098 { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME", 2106 { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME",
2099 NULL, &intel_915_driver }, 2107 NULL, &intel_915_driver },
2100 { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ", 2108 { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ",
2101 NULL, &intel_i965_driver }, 2109 NULL, &intel_i965_driver },
2102 { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35", 2110 { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35",
2103 NULL, &intel_i965_driver }, 2111 NULL, &intel_i965_driver },
2104 { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q", 2112 { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q",
2105 NULL, &intel_i965_driver }, 2113 NULL, &intel_i965_driver },
2106 { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G", 2114 { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G",
2107 NULL, &intel_i965_driver }, 2115 NULL, &intel_i965_driver },
2108 { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM", 2116 { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM",
2109 NULL, &intel_i965_driver }, 2117 NULL, &intel_i965_driver },
2110 { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE", 2118 { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE",
2111 NULL, &intel_i965_driver }, 2119 NULL, &intel_i965_driver },
2112 { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL }, 2120 { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL },
2113 { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL }, 2121 { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL },
2114 { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33", 2122 { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33",
2115 NULL, &intel_g33_driver }, 2123 NULL, &intel_g33_driver },
2116 { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35", 2124 { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35",
2117 NULL, &intel_g33_driver }, 2125 NULL, &intel_g33_driver },
2118 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", 2126 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
2119 NULL, &intel_g33_driver }, 2127 NULL, &intel_g33_driver },
2120 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, 2128 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
2121 "Mobile Intel? GM45 Express", NULL, &intel_i965_driver }, 2129 "Mobile Intel? GM45 Express", NULL, &intel_i965_driver },
2122 { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0, 2130 { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
2123 "Intel Integrated Graphics Device", NULL, &intel_i965_driver }, 2131 "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
2124 { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0, 2132 { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
2125 "Q45/Q43", NULL, &intel_i965_driver }, 2133 "Q45/Q43", NULL, &intel_i965_driver },
2126 { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, 2134 { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
2127 "G45/G43", NULL, &intel_i965_driver }, 2135 "G45/G43", NULL, &intel_i965_driver },
2128 { 0, 0, 0, NULL, NULL, NULL } 2136 { 0, 0, 0, NULL, NULL, NULL }
2129 }; 2137 };
2130 2138
2131 static int __devinit agp_intel_probe(struct pci_dev *pdev, 2139 static int __devinit agp_intel_probe(struct pci_dev *pdev,
2132 const struct pci_device_id *ent) 2140 const struct pci_device_id *ent)
2133 { 2141 {
2134 struct agp_bridge_data *bridge; 2142 struct agp_bridge_data *bridge;
2135 u8 cap_ptr = 0; 2143 u8 cap_ptr = 0;
2136 struct resource *r; 2144 struct resource *r;
2137 int i; 2145 int i;
2138 2146
2139 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); 2147 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
2140 2148
2141 bridge = agp_alloc_bridge(); 2149 bridge = agp_alloc_bridge();
2142 if (!bridge) 2150 if (!bridge)
2143 return -ENOMEM; 2151 return -ENOMEM;
2144 2152
2145 for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { 2153 for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
2146 /* In case that multiple models of gfx chip may 2154 /* In case that multiple models of gfx chip may
2147 stand on same host bridge type, this can be 2155 stand on same host bridge type, this can be
2148 sure we detect the right IGD. */ 2156 sure we detect the right IGD. */
2149 if (pdev->device == intel_agp_chipsets[i].chip_id) { 2157 if (pdev->device == intel_agp_chipsets[i].chip_id) {
2150 if ((intel_agp_chipsets[i].gmch_chip_id != 0) && 2158 if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
2151 find_gmch(intel_agp_chipsets[i].gmch_chip_id)) { 2159 find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
2152 bridge->driver = 2160 bridge->driver =
2153 intel_agp_chipsets[i].gmch_driver; 2161 intel_agp_chipsets[i].gmch_driver;
2154 break; 2162 break;
2155 } else if (intel_agp_chipsets[i].multi_gmch_chip) { 2163 } else if (intel_agp_chipsets[i].multi_gmch_chip) {
2156 continue; 2164 continue;
2157 } else { 2165 } else {
2158 bridge->driver = intel_agp_chipsets[i].driver; 2166 bridge->driver = intel_agp_chipsets[i].driver;
2159 break; 2167 break;
2160 } 2168 }
2161 } 2169 }
2162 } 2170 }
2163 2171
2164 if (intel_agp_chipsets[i].name == NULL) { 2172 if (intel_agp_chipsets[i].name == NULL) {
2165 if (cap_ptr) 2173 if (cap_ptr)
2166 printk(KERN_WARNING PFX "Unsupported Intel chipset" 2174 dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
2167 "(device id: %04x)\n", pdev->device); 2175 pdev->vendor, pdev->device);
2168 agp_put_bridge(bridge); 2176 agp_put_bridge(bridge);
2169 return -ENODEV; 2177 return -ENODEV;
2170 } 2178 }
2171 2179
2172 if (bridge->driver == NULL) { 2180 if (bridge->driver == NULL) {
2173 /* bridge has no AGP and no IGD detected */ 2181 /* bridge has no AGP and no IGD detected */
2174 if (cap_ptr) 2182 if (cap_ptr)
2175 printk(KERN_WARNING PFX "Failed to find bridge device " 2183 dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
2176 "(chip_id: %04x)\n", 2184 intel_agp_chipsets[i].gmch_chip_id);
2177 intel_agp_chipsets[i].gmch_chip_id);
2178 agp_put_bridge(bridge); 2185 agp_put_bridge(bridge);
2179 return -ENODEV; 2186 return -ENODEV;
2180 } 2187 }
2181 2188
2182 bridge->dev = pdev; 2189 bridge->dev = pdev;
2183 bridge->capndx = cap_ptr; 2190 bridge->capndx = cap_ptr;
2184 bridge->dev_private_data = &intel_private; 2191 bridge->dev_private_data = &intel_private;
2185 2192
2186 printk(KERN_INFO PFX "Detected an Intel %s Chipset.\n", 2193 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
2187 intel_agp_chipsets[i].name);
2188 2194
2189 /* 2195 /*
2190 * The following fixes the case where the BIOS has "forgotten" to 2196 * The following fixes the case where the BIOS has "forgotten" to
2191 * provide an address range for the GART. 2197 * provide an address range for the GART.
2192 * 20030610 - hamish@zot.org 2198 * 20030610 - hamish@zot.org
2193 */ 2199 */
2194 r = &pdev->resource[0]; 2200 r = &pdev->resource[0];
2195 if (!r->start && r->end) { 2201 if (!r->start && r->end) {
2196 if (pci_assign_resource(pdev, 0)) { 2202 if (pci_assign_resource(pdev, 0)) {
2197 printk(KERN_ERR PFX "could not assign resource 0\n"); 2203 dev_err(&pdev->dev, "can't assign resource 0\n");
2198 agp_put_bridge(bridge); 2204 agp_put_bridge(bridge);
2199 return -ENODEV; 2205 return -ENODEV;
2200 } 2206 }
2201 } 2207 }
2202 2208
2203 /* 2209 /*
2204 * If the device has not been properly setup, the following will catch 2210 * If the device has not been properly setup, the following will catch
2205 * the problem and should stop the system from crashing. 2211 * the problem and should stop the system from crashing.
2206 * 20030610 - hamish@zot.org 2212 * 20030610 - hamish@zot.org
2207 */ 2213 */
2208 if (pci_enable_device(pdev)) { 2214 if (pci_enable_device(pdev)) {
2209 printk(KERN_ERR PFX "Unable to Enable PCI device\n"); 2215 dev_err(&pdev->dev, "can't enable PCI device\n");
2210 agp_put_bridge(bridge); 2216 agp_put_bridge(bridge);
2211 return -ENODEV; 2217 return -ENODEV;
2212 } 2218 }
2213 2219
2214 /* Fill in the mode register */ 2220 /* Fill in the mode register */
2215 if (cap_ptr) { 2221 if (cap_ptr) {
2216 pci_read_config_dword(pdev, 2222 pci_read_config_dword(pdev,
2217 bridge->capndx+PCI_AGP_STATUS, 2223 bridge->capndx+PCI_AGP_STATUS,
2218 &bridge->mode); 2224 &bridge->mode);
2219 } 2225 }
2220 2226
2221 pci_set_drvdata(pdev, bridge); 2227 pci_set_drvdata(pdev, bridge);
2222 return agp_add_bridge(bridge); 2228 return agp_add_bridge(bridge);
2223 } 2229 }
2224 2230
2225 static void __devexit agp_intel_remove(struct pci_dev *pdev) 2231 static void __devexit agp_intel_remove(struct pci_dev *pdev)
2226 { 2232 {
2227 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 2233 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
2228 2234
2229 agp_remove_bridge(bridge); 2235 agp_remove_bridge(bridge);
2230 2236
2231 if (intel_private.pcidev) 2237 if (intel_private.pcidev)
2232 pci_dev_put(intel_private.pcidev); 2238 pci_dev_put(intel_private.pcidev);
2233 2239
2234 agp_put_bridge(bridge); 2240 agp_put_bridge(bridge);
2235 } 2241 }
2236 2242
2237 #ifdef CONFIG_PM 2243 #ifdef CONFIG_PM
2238 static int agp_intel_resume(struct pci_dev *pdev) 2244 static int agp_intel_resume(struct pci_dev *pdev)
2239 { 2245 {
2240 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 2246 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
2241 2247
2242 pci_restore_state(pdev); 2248 pci_restore_state(pdev);
2243 2249
2244 /* We should restore our graphics device's config space, 2250 /* We should restore our graphics device's config space,
2245 * as host bridge (00:00) resumes before graphics device (02:00), 2251 * as host bridge (00:00) resumes before graphics device (02:00),
2246 * then our access to its pci space can work right. 2252 * then our access to its pci space can work right.
2247 */ 2253 */
2248 if (intel_private.pcidev) 2254 if (intel_private.pcidev)
2249 pci_restore_state(intel_private.pcidev); 2255 pci_restore_state(intel_private.pcidev);
2250 2256
2251 if (bridge->driver == &intel_generic_driver) 2257 if (bridge->driver == &intel_generic_driver)
2252 intel_configure(); 2258 intel_configure();
2253 else if (bridge->driver == &intel_850_driver) 2259 else if (bridge->driver == &intel_850_driver)
2254 intel_850_configure(); 2260 intel_850_configure();
2255 else if (bridge->driver == &intel_845_driver) 2261 else if (bridge->driver == &intel_845_driver)
2256 intel_845_configure(); 2262 intel_845_configure();
2257 else if (bridge->driver == &intel_830mp_driver) 2263 else if (bridge->driver == &intel_830mp_driver)
2258 intel_830mp_configure(); 2264 intel_830mp_configure();
2259 else if (bridge->driver == &intel_915_driver) 2265 else if (bridge->driver == &intel_915_driver)
2260 intel_i915_configure(); 2266 intel_i915_configure();
2261 else if (bridge->driver == &intel_830_driver) 2267 else if (bridge->driver == &intel_830_driver)
2262 intel_i830_configure(); 2268 intel_i830_configure();
2263 else if (bridge->driver == &intel_810_driver) 2269 else if (bridge->driver == &intel_810_driver)
2264 intel_i810_configure(); 2270 intel_i810_configure();
2265 else if (bridge->driver == &intel_i965_driver) 2271 else if (bridge->driver == &intel_i965_driver)
2266 intel_i915_configure(); 2272 intel_i915_configure();
2267 2273
2268 return 0; 2274 return 0;
2269 } 2275 }
2270 #endif 2276 #endif
2271 2277
2272 static struct pci_device_id agp_intel_pci_table[] = { 2278 static struct pci_device_id agp_intel_pci_table[] = {
2273 #define ID(x) \ 2279 #define ID(x) \
2274 { \ 2280 { \
2275 .class = (PCI_CLASS_BRIDGE_HOST << 8), \ 2281 .class = (PCI_CLASS_BRIDGE_HOST << 8), \
2276 .class_mask = ~0, \ 2282 .class_mask = ~0, \
2277 .vendor = PCI_VENDOR_ID_INTEL, \ 2283 .vendor = PCI_VENDOR_ID_INTEL, \
2278 .device = x, \ 2284 .device = x, \
2279 .subvendor = PCI_ANY_ID, \ 2285 .subvendor = PCI_ANY_ID, \
2280 .subdevice = PCI_ANY_ID, \ 2286 .subdevice = PCI_ANY_ID, \
2281 } 2287 }
2282 ID(PCI_DEVICE_ID_INTEL_82443LX_0), 2288 ID(PCI_DEVICE_ID_INTEL_82443LX_0),
2283 ID(PCI_DEVICE_ID_INTEL_82443BX_0), 2289 ID(PCI_DEVICE_ID_INTEL_82443BX_0),
2284 ID(PCI_DEVICE_ID_INTEL_82443GX_0), 2290 ID(PCI_DEVICE_ID_INTEL_82443GX_0),
2285 ID(PCI_DEVICE_ID_INTEL_82810_MC1), 2291 ID(PCI_DEVICE_ID_INTEL_82810_MC1),
2286 ID(PCI_DEVICE_ID_INTEL_82810_MC3), 2292 ID(PCI_DEVICE_ID_INTEL_82810_MC3),
2287 ID(PCI_DEVICE_ID_INTEL_82810E_MC), 2293 ID(PCI_DEVICE_ID_INTEL_82810E_MC),
2288 ID(PCI_DEVICE_ID_INTEL_82815_MC), 2294 ID(PCI_DEVICE_ID_INTEL_82815_MC),
2289 ID(PCI_DEVICE_ID_INTEL_82820_HB), 2295 ID(PCI_DEVICE_ID_INTEL_82820_HB),
2290 ID(PCI_DEVICE_ID_INTEL_82820_UP_HB), 2296 ID(PCI_DEVICE_ID_INTEL_82820_UP_HB),
2291 ID(PCI_DEVICE_ID_INTEL_82830_HB), 2297 ID(PCI_DEVICE_ID_INTEL_82830_HB),
2292 ID(PCI_DEVICE_ID_INTEL_82840_HB), 2298 ID(PCI_DEVICE_ID_INTEL_82840_HB),
2293 ID(PCI_DEVICE_ID_INTEL_82845_HB), 2299 ID(PCI_DEVICE_ID_INTEL_82845_HB),
2294 ID(PCI_DEVICE_ID_INTEL_82845G_HB), 2300 ID(PCI_DEVICE_ID_INTEL_82845G_HB),
2295 ID(PCI_DEVICE_ID_INTEL_82850_HB), 2301 ID(PCI_DEVICE_ID_INTEL_82850_HB),
2296 ID(PCI_DEVICE_ID_INTEL_82855PM_HB), 2302 ID(PCI_DEVICE_ID_INTEL_82855PM_HB),
2297 ID(PCI_DEVICE_ID_INTEL_82855GM_HB), 2303 ID(PCI_DEVICE_ID_INTEL_82855GM_HB),
2298 ID(PCI_DEVICE_ID_INTEL_82860_HB), 2304 ID(PCI_DEVICE_ID_INTEL_82860_HB),
2299 ID(PCI_DEVICE_ID_INTEL_82865_HB), 2305 ID(PCI_DEVICE_ID_INTEL_82865_HB),
2300 ID(PCI_DEVICE_ID_INTEL_82875_HB), 2306 ID(PCI_DEVICE_ID_INTEL_82875_HB),
2301 ID(PCI_DEVICE_ID_INTEL_7505_0), 2307 ID(PCI_DEVICE_ID_INTEL_7505_0),
2302 ID(PCI_DEVICE_ID_INTEL_7205_0), 2308 ID(PCI_DEVICE_ID_INTEL_7205_0),
2303 ID(PCI_DEVICE_ID_INTEL_E7221_HB), 2309 ID(PCI_DEVICE_ID_INTEL_E7221_HB),
2304 ID(PCI_DEVICE_ID_INTEL_82915G_HB), 2310 ID(PCI_DEVICE_ID_INTEL_82915G_HB),
2305 ID(PCI_DEVICE_ID_INTEL_82915GM_HB), 2311 ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
2306 ID(PCI_DEVICE_ID_INTEL_82945G_HB), 2312 ID(PCI_DEVICE_ID_INTEL_82945G_HB),
2307 ID(PCI_DEVICE_ID_INTEL_82945GM_HB), 2313 ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
2308 ID(PCI_DEVICE_ID_INTEL_82945GME_HB), 2314 ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
2309 ID(PCI_DEVICE_ID_INTEL_82946GZ_HB), 2315 ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
2310 ID(PCI_DEVICE_ID_INTEL_82G35_HB), 2316 ID(PCI_DEVICE_ID_INTEL_82G35_HB),
2311 ID(PCI_DEVICE_ID_INTEL_82965Q_HB), 2317 ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
2312 ID(PCI_DEVICE_ID_INTEL_82965G_HB), 2318 ID(PCI_DEVICE_ID_INTEL_82965G_HB),
2313 ID(PCI_DEVICE_ID_INTEL_82965GM_HB), 2319 ID(PCI_DEVICE_ID_INTEL_82965GM_HB),
2314 ID(PCI_DEVICE_ID_INTEL_82965GME_HB), 2320 ID(PCI_DEVICE_ID_INTEL_82965GME_HB),
2315 ID(PCI_DEVICE_ID_INTEL_G33_HB), 2321 ID(PCI_DEVICE_ID_INTEL_G33_HB),
2316 ID(PCI_DEVICE_ID_INTEL_Q35_HB), 2322 ID(PCI_DEVICE_ID_INTEL_Q35_HB),
2317 ID(PCI_DEVICE_ID_INTEL_Q33_HB), 2323 ID(PCI_DEVICE_ID_INTEL_Q33_HB),
2318 ID(PCI_DEVICE_ID_INTEL_GM45_HB), 2324 ID(PCI_DEVICE_ID_INTEL_GM45_HB),
2319 ID(PCI_DEVICE_ID_INTEL_IGD_E_HB), 2325 ID(PCI_DEVICE_ID_INTEL_IGD_E_HB),
2320 ID(PCI_DEVICE_ID_INTEL_Q45_HB), 2326 ID(PCI_DEVICE_ID_INTEL_Q45_HB),
2321 ID(PCI_DEVICE_ID_INTEL_G45_HB), 2327 ID(PCI_DEVICE_ID_INTEL_G45_HB),
2322 { } 2328 { }
2323 }; 2329 };
2324 2330
2325 MODULE_DEVICE_TABLE(pci, agp_intel_pci_table); 2331 MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
2326 2332
2327 static struct pci_driver agp_intel_pci_driver = { 2333 static struct pci_driver agp_intel_pci_driver = {
2328 .name = "agpgart-intel", 2334 .name = "agpgart-intel",
2329 .id_table = agp_intel_pci_table, 2335 .id_table = agp_intel_pci_table,
2330 .probe = agp_intel_probe, 2336 .probe = agp_intel_probe,
2331 .remove = __devexit_p(agp_intel_remove), 2337 .remove = __devexit_p(agp_intel_remove),
2332 #ifdef CONFIG_PM 2338 #ifdef CONFIG_PM
2333 .resume = agp_intel_resume, 2339 .resume = agp_intel_resume,
2334 #endif 2340 #endif
2335 }; 2341 };
2336 2342
2337 static int __init agp_intel_init(void) 2343 static int __init agp_intel_init(void)
2338 { 2344 {
2339 if (agp_off) 2345 if (agp_off)
2340 return -EINVAL; 2346 return -EINVAL;
2341 return pci_register_driver(&agp_intel_pci_driver); 2347 return pci_register_driver(&agp_intel_pci_driver);
2342 } 2348 }
2343 2349
2344 static void __exit agp_intel_cleanup(void) 2350 static void __exit agp_intel_cleanup(void)
2345 { 2351 {
2346 pci_unregister_driver(&agp_intel_pci_driver); 2352 pci_unregister_driver(&agp_intel_pci_driver);
2347 } 2353 }
2348 2354
2349 module_init(agp_intel_init); 2355 module_init(agp_intel_init);
2350 module_exit(agp_intel_cleanup); 2356 module_exit(agp_intel_cleanup);
2351 2357
2352 MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>"); 2358 MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
drivers/char/agp/isoch.c
1 /* 1 /*
2 * Setup routines for AGP 3.5 compliant bridges. 2 * Setup routines for AGP 3.5 compliant bridges.
3 */ 3 */
4 4
5 #include <linux/list.h> 5 #include <linux/list.h>
6 #include <linux/pci.h> 6 #include <linux/pci.h>
7 #include <linux/agp_backend.h> 7 #include <linux/agp_backend.h>
8 #include <linux/module.h> 8 #include <linux/module.h>
9 #include <linux/slab.h> 9 #include <linux/slab.h>
10 10
11 #include "agp.h" 11 #include "agp.h"
12 12
13 /* Generic AGP 3.5 enabling routines */ 13 /* Generic AGP 3.5 enabling routines */
14 14
15 struct agp_3_5_dev { 15 struct agp_3_5_dev {
16 struct list_head list; 16 struct list_head list;
17 u8 capndx; 17 u8 capndx;
18 u32 maxbw; 18 u32 maxbw;
19 struct pci_dev *dev; 19 struct pci_dev *dev;
20 }; 20 };
21 21
22 static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new) 22 static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new)
23 { 23 {
24 struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list); 24 struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list);
25 struct list_head *pos; 25 struct list_head *pos;
26 26
27 list_for_each(pos, head) { 27 list_for_each(pos, head) {
28 cur = list_entry(pos, struct agp_3_5_dev, list); 28 cur = list_entry(pos, struct agp_3_5_dev, list);
29 if (cur->maxbw > n->maxbw) 29 if (cur->maxbw > n->maxbw)
30 break; 30 break;
31 } 31 }
32 list_add_tail(new, pos); 32 list_add_tail(new, pos);
33 } 33 }
34 34
35 static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs) 35 static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs)
36 { 36 {
37 struct agp_3_5_dev *cur; 37 struct agp_3_5_dev *cur;
38 struct pci_dev *dev; 38 struct pci_dev *dev;
39 struct list_head *pos, *tmp, *head = &list->list, *start = head->next; 39 struct list_head *pos, *tmp, *head = &list->list, *start = head->next;
40 u32 nistat; 40 u32 nistat;
41 41
42 INIT_LIST_HEAD(head); 42 INIT_LIST_HEAD(head);
43 43
44 for (pos=start; pos!=head; ) { 44 for (pos=start; pos!=head; ) {
45 cur = list_entry(pos, struct agp_3_5_dev, list); 45 cur = list_entry(pos, struct agp_3_5_dev, list);
46 dev = cur->dev; 46 dev = cur->dev;
47 47
48 pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat); 48 pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat);
49 cur->maxbw = (nistat >> 16) & 0xff; 49 cur->maxbw = (nistat >> 16) & 0xff;
50 50
51 tmp = pos; 51 tmp = pos;
52 pos = pos->next; 52 pos = pos->next;
53 agp_3_5_dev_list_insert(head, tmp); 53 agp_3_5_dev_list_insert(head, tmp);
54 } 54 }
55 } 55 }
56 56
57 /* 57 /*
58 * Initialize all isochronous transfer parameters for an AGP 3.0 58 * Initialize all isochronous transfer parameters for an AGP 3.0
59 * node (i.e. a host bridge in combination with the adapters 59 * node (i.e. a host bridge in combination with the adapters
60 * lying behind it...) 60 * lying behind it...)
61 */ 61 */
62 62
63 static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, 63 static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
64 struct agp_3_5_dev *dev_list, unsigned int ndevs) 64 struct agp_3_5_dev *dev_list, unsigned int ndevs)
65 { 65 {
66 /* 66 /*
67 * Convenience structure to make the calculations clearer 67 * Convenience structure to make the calculations clearer
68 * here. The field names come straight from the AGP 3.0 spec. 68 * here. The field names come straight from the AGP 3.0 spec.
69 */ 69 */
70 struct isoch_data { 70 struct isoch_data {
71 u32 maxbw; 71 u32 maxbw;
72 u32 n; 72 u32 n;
73 u32 y; 73 u32 y;
74 u32 l; 74 u32 l;
75 u32 rq; 75 u32 rq;
76 struct agp_3_5_dev *dev; 76 struct agp_3_5_dev *dev;
77 }; 77 };
78 78
79 struct pci_dev *td = bridge->dev, *dev; 79 struct pci_dev *td = bridge->dev, *dev;
80 struct list_head *head = &dev_list->list, *pos; 80 struct list_head *head = &dev_list->list, *pos;
81 struct agp_3_5_dev *cur; 81 struct agp_3_5_dev *cur;
82 struct isoch_data *master, target; 82 struct isoch_data *master, target;
83 unsigned int cdev = 0; 83 unsigned int cdev = 0;
84 u32 mnistat, tnistat, tstatus, mcmd; 84 u32 mnistat, tnistat, tstatus, mcmd;
85 u16 tnicmd, mnicmd; 85 u16 tnicmd, mnicmd;
86 u8 mcapndx; 86 u8 mcapndx;
87 u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async; 87 u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
88 u32 step, rem, rem_isoch, rem_async; 88 u32 step, rem, rem_isoch, rem_async;
89 int ret = 0; 89 int ret = 0;
90 90
91 /* 91 /*
92 * We'll work with an array of isoch_data's (one for each 92 * We'll work with an array of isoch_data's (one for each
93 * device in dev_list) throughout this function. 93 * device in dev_list) throughout this function.
94 */ 94 */
95 if ((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) { 95 if ((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) {
96 ret = -ENOMEM; 96 ret = -ENOMEM;
97 goto get_out; 97 goto get_out;
98 } 98 }
99 99
100 /* 100 /*
101 * Sort the device list by maxbw. We need to do this because the 101 * Sort the device list by maxbw. We need to do this because the
102 * spec suggests that the devices with the smallest requirements 102 * spec suggests that the devices with the smallest requirements
103 * have their resources allocated first, with all remaining resources 103 * have their resources allocated first, with all remaining resources
104 * falling to the device with the largest requirement. 104 * falling to the device with the largest requirement.
105 * 105 *
106 * We don't exactly do this, we divide target resources by ndevs 106 * We don't exactly do this, we divide target resources by ndevs
107 * and split them amongst the AGP 3.0 devices. The remainder of such 107 * and split them amongst the AGP 3.0 devices. The remainder of such
108 * division operations are dropped on the last device, sort of like 108 * division operations are dropped on the last device, sort of like
109 * the spec mentions it should be done. 109 * the spec mentions it should be done.
110 * 110 *
111 * We can't do this sort when we initially construct the dev_list 111 * We can't do this sort when we initially construct the dev_list
112 * because we don't know until this function whether isochronous 112 * because we don't know until this function whether isochronous
113 * transfers are enabled and consequently whether maxbw will mean 113 * transfers are enabled and consequently whether maxbw will mean
114 * anything. 114 * anything.
115 */ 115 */
116 agp_3_5_dev_list_sort(dev_list, ndevs); 116 agp_3_5_dev_list_sort(dev_list, ndevs);
117 117
118 pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat); 118 pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
119 pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus); 119 pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
120 120
121 /* Extract power-on defaults from the target */ 121 /* Extract power-on defaults from the target */
122 target.maxbw = (tnistat >> 16) & 0xff; 122 target.maxbw = (tnistat >> 16) & 0xff;
123 target.n = (tnistat >> 8) & 0xff; 123 target.n = (tnistat >> 8) & 0xff;
124 target.y = (tnistat >> 6) & 0x3; 124 target.y = (tnistat >> 6) & 0x3;
125 target.l = (tnistat >> 3) & 0x7; 125 target.l = (tnistat >> 3) & 0x7;
126 target.rq = (tstatus >> 24) & 0xff; 126 target.rq = (tstatus >> 24) & 0xff;
127 127
128 y_max = target.y; 128 y_max = target.y;
129 129
130 /* 130 /*
131 * Extract power-on defaults for each device in dev_list. Along 131 * Extract power-on defaults for each device in dev_list. Along
132 * the way, calculate the total isochronous bandwidth required 132 * the way, calculate the total isochronous bandwidth required
133 * by these devices and the largest requested payload size. 133 * by these devices and the largest requested payload size.
134 */ 134 */
135 list_for_each(pos, head) { 135 list_for_each(pos, head) {
136 cur = list_entry(pos, struct agp_3_5_dev, list); 136 cur = list_entry(pos, struct agp_3_5_dev, list);
137 dev = cur->dev; 137 dev = cur->dev;
138 138
139 mcapndx = cur->capndx; 139 mcapndx = cur->capndx;
140 140
141 pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat); 141 pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);
142 142
143 master[cdev].maxbw = (mnistat >> 16) & 0xff; 143 master[cdev].maxbw = (mnistat >> 16) & 0xff;
144 master[cdev].n = (mnistat >> 8) & 0xff; 144 master[cdev].n = (mnistat >> 8) & 0xff;
145 master[cdev].y = (mnistat >> 6) & 0x3; 145 master[cdev].y = (mnistat >> 6) & 0x3;
146 master[cdev].dev = cur; 146 master[cdev].dev = cur;
147 147
148 tot_bw += master[cdev].maxbw; 148 tot_bw += master[cdev].maxbw;
149 y_max = max(y_max, master[cdev].y); 149 y_max = max(y_max, master[cdev].y);
150 150
151 cdev++; 151 cdev++;
152 } 152 }
153 153
154 /* Check if this configuration has any chance of working */ 154 /* Check if this configuration has any chance of working */
155 if (tot_bw > target.maxbw) { 155 if (tot_bw > target.maxbw) {
156 printk(KERN_ERR PFX "isochronous bandwidth required " 156 dev_err(&td->dev, "isochronous bandwidth required "
157 "by AGP 3.0 devices exceeds that which is supported by " 157 "by AGP 3.0 devices exceeds that which is supported by "
158 "the AGP 3.0 bridge!\n"); 158 "the AGP 3.0 bridge!\n");
159 ret = -ENODEV; 159 ret = -ENODEV;
160 goto free_and_exit; 160 goto free_and_exit;
161 } 161 }
162 162
163 target.y = y_max; 163 target.y = y_max;
164 164
165 /* 165 /*
166 * Write the calculated payload size into the target's NICMD 166 * Write the calculated payload size into the target's NICMD
167 * register. Doing this directly effects the ISOCH_N value 167 * register. Doing this directly effects the ISOCH_N value
168 * in the target's NISTAT register, so we need to do this now 168 * in the target's NISTAT register, so we need to do this now
169 * to get an accurate value for ISOCH_N later. 169 * to get an accurate value for ISOCH_N later.
170 */ 170 */
171 pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd); 171 pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd);
172 tnicmd &= ~(0x3 << 6); 172 tnicmd &= ~(0x3 << 6);
173 tnicmd |= target.y << 6; 173 tnicmd |= target.y << 6;
174 pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd); 174 pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd);
175 175
176 /* Reread the target's ISOCH_N */ 176 /* Reread the target's ISOCH_N */
177 pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat); 177 pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
178 target.n = (tnistat >> 8) & 0xff; 178 target.n = (tnistat >> 8) & 0xff;
179 179
180 /* Calculate the minimum ISOCH_N needed by each master */ 180 /* Calculate the minimum ISOCH_N needed by each master */
181 for (cdev=0; cdev<ndevs; cdev++) { 181 for (cdev=0; cdev<ndevs; cdev++) {
182 master[cdev].y = target.y; 182 master[cdev].y = target.y;
183 master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1); 183 master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1);
184 184
185 tot_n += master[cdev].n; 185 tot_n += master[cdev].n;
186 } 186 }
187 187
188 /* Exit if the minimal ISOCH_N allocation among the masters is more 188 /* Exit if the minimal ISOCH_N allocation among the masters is more
189 * than the target can handle. */ 189 * than the target can handle. */
190 if (tot_n > target.n) { 190 if (tot_n > target.n) {
191 printk(KERN_ERR PFX "number of isochronous " 191 dev_err(&td->dev, "number of isochronous "
192 "transactions per period required by AGP 3.0 devices " 192 "transactions per period required by AGP 3.0 devices "
193 "exceeds that which is supported by the AGP 3.0 " 193 "exceeds that which is supported by the AGP 3.0 "
194 "bridge!\n"); 194 "bridge!\n");
195 ret = -ENODEV; 195 ret = -ENODEV;
196 goto free_and_exit; 196 goto free_and_exit;
197 } 197 }
198 198
199 /* Calculate left over ISOCH_N capability in the target. We'll give 199 /* Calculate left over ISOCH_N capability in the target. We'll give
200 * this to the hungriest device (as per the spec) */ 200 * this to the hungriest device (as per the spec) */
201 rem = target.n - tot_n; 201 rem = target.n - tot_n;
202 202
203 /* 203 /*
204 * Calculate the minimum isochronous RQ depth needed by each master. 204 * Calculate the minimum isochronous RQ depth needed by each master.
205 * Along the way, distribute the extra ISOCH_N capability calculated 205 * Along the way, distribute the extra ISOCH_N capability calculated
206 * above. 206 * above.
207 */ 207 */
208 for (cdev=0; cdev<ndevs; cdev++) { 208 for (cdev=0; cdev<ndevs; cdev++) {
209 /* 209 /*
210 * This is a little subtle. If ISOCH_Y > 64B, then ISOCH_Y 210 * This is a little subtle. If ISOCH_Y > 64B, then ISOCH_Y
211 * byte isochronous writes will be broken into 64B pieces. 211 * byte isochronous writes will be broken into 64B pieces.
212 * This means we need to budget more RQ depth to account for 212 * This means we need to budget more RQ depth to account for
213 * these kind of writes (each isochronous write is actually 213 * these kind of writes (each isochronous write is actually
214 * many writes on the AGP bus). 214 * many writes on the AGP bus).
215 */ 215 */
216 master[cdev].rq = master[cdev].n; 216 master[cdev].rq = master[cdev].n;
217 if (master[cdev].y > 0x1) 217 if (master[cdev].y > 0x1)
218 master[cdev].rq *= (1 << (master[cdev].y - 1)); 218 master[cdev].rq *= (1 << (master[cdev].y - 1));
219 219
220 tot_rq += master[cdev].rq; 220 tot_rq += master[cdev].rq;
221 } 221 }
222 master[ndevs-1].n += rem; 222 master[ndevs-1].n += rem;
223 223
224 /* Figure the number of isochronous and asynchronous RQ slots the 224 /* Figure the number of isochronous and asynchronous RQ slots the
225 * target is providing. */ 225 * target is providing. */
226 rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n; 226 rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n;
227 rq_async = target.rq - rq_isoch; 227 rq_async = target.rq - rq_isoch;
228 228
229 /* Exit if the minimal RQ needs of the masters exceeds what the target 229 /* Exit if the minimal RQ needs of the masters exceeds what the target
230 * can provide. */ 230 * can provide. */
231 if (tot_rq > rq_isoch) { 231 if (tot_rq > rq_isoch) {
232 printk(KERN_ERR PFX "number of request queue slots " 232 dev_err(&td->dev, "number of request queue slots "
233 "required by the isochronous bandwidth requested by " 233 "required by the isochronous bandwidth requested by "
234 "AGP 3.0 devices exceeds the number provided by the " 234 "AGP 3.0 devices exceeds the number provided by the "
235 "AGP 3.0 bridge!\n"); 235 "AGP 3.0 bridge!\n");
236 ret = -ENODEV; 236 ret = -ENODEV;
237 goto free_and_exit; 237 goto free_and_exit;
238 } 238 }
239 239
240 /* Calculate asynchronous RQ capability in the target (per master) as 240 /* Calculate asynchronous RQ capability in the target (per master) as
241 * well as the total number of leftover isochronous RQ slots. */ 241 * well as the total number of leftover isochronous RQ slots. */
242 step = rq_async / ndevs; 242 step = rq_async / ndevs;
243 rem_async = step + (rq_async % ndevs); 243 rem_async = step + (rq_async % ndevs);
244 rem_isoch = rq_isoch - tot_rq; 244 rem_isoch = rq_isoch - tot_rq;
245 245
246 /* Distribute the extra RQ slots calculated above and write our 246 /* Distribute the extra RQ slots calculated above and write our
247 * isochronous settings out to the actual devices. */ 247 * isochronous settings out to the actual devices. */
248 for (cdev=0; cdev<ndevs; cdev++) { 248 for (cdev=0; cdev<ndevs; cdev++) {
249 cur = master[cdev].dev; 249 cur = master[cdev].dev;
250 dev = cur->dev; 250 dev = cur->dev;
251 251
252 mcapndx = cur->capndx; 252 mcapndx = cur->capndx;
253 253
254 master[cdev].rq += (cdev == ndevs - 1) 254 master[cdev].rq += (cdev == ndevs - 1)
255 ? (rem_async + rem_isoch) : step; 255 ? (rem_async + rem_isoch) : step;
256 256
257 pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd); 257 pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd);
258 pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd); 258 pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd);
259 259
260 mnicmd &= ~(0xff << 8); 260 mnicmd &= ~(0xff << 8);
261 mnicmd &= ~(0x3 << 6); 261 mnicmd &= ~(0x3 << 6);
262 mcmd &= ~(0xff << 24); 262 mcmd &= ~(0xff << 24);
263 263
264 mnicmd |= master[cdev].n << 8; 264 mnicmd |= master[cdev].n << 8;
265 mnicmd |= master[cdev].y << 6; 265 mnicmd |= master[cdev].y << 6;
266 mcmd |= master[cdev].rq << 24; 266 mcmd |= master[cdev].rq << 24;
267 267
268 pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd); 268 pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd);
269 pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd); 269 pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd);
270 } 270 }
271 271
272 free_and_exit: 272 free_and_exit:
273 kfree(master); 273 kfree(master);
274 274
275 get_out: 275 get_out:
276 return ret; 276 return ret;
277 } 277 }
278 278
279 /* 279 /*
280 * This function basically allocates request queue slots among the 280 * This function basically allocates request queue slots among the
281 * AGP 3.0 systems in nonisochronous nodes. The algorithm is 281 * AGP 3.0 systems in nonisochronous nodes. The algorithm is
282 * pretty stupid, divide the total number of RQ slots provided by the 282 * pretty stupid, divide the total number of RQ slots provided by the
283 * target by ndevs. Distribute this many slots to each AGP 3.0 device, 283 * target by ndevs. Distribute this many slots to each AGP 3.0 device,
284 * giving any left over slots to the last device in dev_list. 284 * giving any left over slots to the last device in dev_list.
285 */ 285 */
286 static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge, 286 static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge,
287 struct agp_3_5_dev *dev_list, unsigned int ndevs) 287 struct agp_3_5_dev *dev_list, unsigned int ndevs)
288 { 288 {
289 struct agp_3_5_dev *cur; 289 struct agp_3_5_dev *cur;
290 struct list_head *head = &dev_list->list, *pos; 290 struct list_head *head = &dev_list->list, *pos;
291 u32 tstatus, mcmd; 291 u32 tstatus, mcmd;
292 u32 trq, mrq, rem; 292 u32 trq, mrq, rem;
293 unsigned int cdev = 0; 293 unsigned int cdev = 0;
294 294
295 pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus); 295 pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus);
296 296
297 trq = (tstatus >> 24) & 0xff; 297 trq = (tstatus >> 24) & 0xff;
298 mrq = trq / ndevs; 298 mrq = trq / ndevs;
299 299
300 rem = mrq + (trq % ndevs); 300 rem = mrq + (trq % ndevs);
301 301
302 for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) { 302 for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) {
303 cur = list_entry(pos, struct agp_3_5_dev, list); 303 cur = list_entry(pos, struct agp_3_5_dev, list);
304 304
305 pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd); 305 pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd);
306 mcmd &= ~(0xff << 24); 306 mcmd &= ~(0xff << 24);
307 mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24; 307 mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24;
308 pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd); 308 pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd);
309 } 309 }
310 } 310 }
311 311
312 /* 312 /*
313 * Fully configure and enable an AGP 3.0 host bridge and all the devices 313 * Fully configure and enable an AGP 3.0 host bridge and all the devices
314 * lying behind it. 314 * lying behind it.
315 */ 315 */
316 int agp_3_5_enable(struct agp_bridge_data *bridge) 316 int agp_3_5_enable(struct agp_bridge_data *bridge)
317 { 317 {
318 struct pci_dev *td = bridge->dev, *dev = NULL; 318 struct pci_dev *td = bridge->dev, *dev = NULL;
319 u8 mcapndx; 319 u8 mcapndx;
320 u32 isoch, arqsz; 320 u32 isoch, arqsz;
321 u32 tstatus, mstatus, ncapid; 321 u32 tstatus, mstatus, ncapid;
322 u32 mmajor; 322 u32 mmajor;
323 u16 mpstat; 323 u16 mpstat;
324 struct agp_3_5_dev *dev_list, *cur; 324 struct agp_3_5_dev *dev_list, *cur;
325 struct list_head *head, *pos; 325 struct list_head *head, *pos;
326 unsigned int ndevs = 0; 326 unsigned int ndevs = 0;
327 int ret = 0; 327 int ret = 0;
328 328
329 /* Extract some power-on defaults from the target */ 329 /* Extract some power-on defaults from the target */
330 pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus); 330 pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
331 isoch = (tstatus >> 17) & 0x1; 331 isoch = (tstatus >> 17) & 0x1;
332 if (isoch == 0) /* isoch xfers not available, bail out. */ 332 if (isoch == 0) /* isoch xfers not available, bail out. */
333 return -ENODEV; 333 return -ENODEV;
334 334
335 arqsz = (tstatus >> 13) & 0x7; 335 arqsz = (tstatus >> 13) & 0x7;
336 336
337 /* 337 /*
338 * Allocate a head for our AGP 3.5 device list 338 * Allocate a head for our AGP 3.5 device list
339 * (multiple AGP v3 devices are allowed behind a single bridge). 339 * (multiple AGP v3 devices are allowed behind a single bridge).
340 */ 340 */
341 if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) { 341 if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
342 ret = -ENOMEM; 342 ret = -ENOMEM;
343 goto get_out; 343 goto get_out;
344 } 344 }
345 head = &dev_list->list; 345 head = &dev_list->list;
346 INIT_LIST_HEAD(head); 346 INIT_LIST_HEAD(head);
347 347
348 /* Find all AGP devices, and add them to dev_list. */ 348 /* Find all AGP devices, and add them to dev_list. */
349 for_each_pci_dev(dev) { 349 for_each_pci_dev(dev) {
350 mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP); 350 mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP);
351 if (mcapndx == 0) 351 if (mcapndx == 0)
352 continue; 352 continue;
353 353
354 switch ((dev->class >>8) & 0xff00) { 354 switch ((dev->class >>8) & 0xff00) {
355 case 0x0600: /* Bridge */ 355 case 0x0600: /* Bridge */
356 /* Skip bridges. We should call this function for each one. */ 356 /* Skip bridges. We should call this function for each one. */
357 continue; 357 continue;
358 358
359 case 0x0001: /* Unclassified device */ 359 case 0x0001: /* Unclassified device */
360 /* Don't know what this is, but log it for investigation. */ 360 /* Don't know what this is, but log it for investigation. */
361 if (mcapndx != 0) { 361 if (mcapndx != 0) {
362 printk (KERN_INFO PFX "Wacky, found unclassified AGP device. %x:%x\n", 362 dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n",
363 dev->vendor, dev->device); 363 pci_name(dev),
364 dev->vendor, dev->device);
364 } 365 }
365 continue; 366 continue;
366 367
367 case 0x0300: /* Display controller */ 368 case 0x0300: /* Display controller */
368 case 0x0400: /* Multimedia controller */ 369 case 0x0400: /* Multimedia controller */
369 if ((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) { 370 if ((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
370 ret = -ENOMEM; 371 ret = -ENOMEM;
371 goto free_and_exit; 372 goto free_and_exit;
372 } 373 }
373 cur->dev = dev; 374 cur->dev = dev;
374 375
375 pos = &cur->list; 376 pos = &cur->list;
376 list_add(pos, head); 377 list_add(pos, head);
377 ndevs++; 378 ndevs++;
378 continue; 379 continue;
379 380
380 default: 381 default:
381 continue; 382 continue;
382 } 383 }
383 } 384 }
384 385
385 /* 386 /*
386 * Take an initial pass through the devices lying behind our host 387 * Take an initial pass through the devices lying behind our host
387 * bridge. Make sure each one is actually an AGP 3.0 device, otherwise 388 * bridge. Make sure each one is actually an AGP 3.0 device, otherwise
388 * exit with an error message. Along the way store the AGP 3.0 389 * exit with an error message. Along the way store the AGP 3.0
389 * cap_ptr for each device 390 * cap_ptr for each device
390 */ 391 */
391 list_for_each(pos, head) { 392 list_for_each(pos, head) {
392 cur = list_entry(pos, struct agp_3_5_dev, list); 393 cur = list_entry(pos, struct agp_3_5_dev, list);
393 dev = cur->dev; 394 dev = cur->dev;
394 395
395 pci_read_config_word(dev, PCI_STATUS, &mpstat); 396 pci_read_config_word(dev, PCI_STATUS, &mpstat);
396 if ((mpstat & PCI_STATUS_CAP_LIST) == 0) 397 if ((mpstat & PCI_STATUS_CAP_LIST) == 0)
397 continue; 398 continue;
398 399
399 pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx); 400 pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx);
400 if (mcapndx != 0) { 401 if (mcapndx != 0) {
401 do { 402 do {
402 pci_read_config_dword(dev, mcapndx, &ncapid); 403 pci_read_config_dword(dev, mcapndx, &ncapid);
403 if ((ncapid & 0xff) != 2) 404 if ((ncapid & 0xff) != 2)
404 mcapndx = (ncapid >> 8) & 0xff; 405 mcapndx = (ncapid >> 8) & 0xff;
405 } 406 }
406 while (((ncapid & 0xff) != 2) && (mcapndx != 0)); 407 while (((ncapid & 0xff) != 2) && (mcapndx != 0));
407 } 408 }
408 409
409 if (mcapndx == 0) { 410 if (mcapndx == 0) {
410 printk(KERN_ERR PFX "woah! Non-AGP device " 411 dev_err(&td->dev, "woah! Non-AGP device %s on "
411 "found on the secondary bus of an AGP 3.5 bridge!\n"); 412 "secondary bus of AGP 3.5 bridge!\n",
413 pci_name(dev));
412 ret = -ENODEV; 414 ret = -ENODEV;
413 goto free_and_exit; 415 goto free_and_exit;
414 } 416 }
415 417
416 mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf; 418 mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
417 if (mmajor < 3) { 419 if (mmajor < 3) {
418 printk(KERN_ERR PFX "woah! AGP 2.0 device " 420 dev_err(&td->dev, "woah! AGP 2.0 device %s on "
419 "found on the secondary bus of an AGP 3.5 " 421 "secondary bus of AGP 3.5 bridge operating "
420 "bridge operating with AGP 3.0 electricals!\n"); 422 "with AGP 3.0 electricals!\n", pci_name(dev));
421 ret = -ENODEV; 423 ret = -ENODEV;
422 goto free_and_exit; 424 goto free_and_exit;
423 } 425 }
424 426
425 cur->capndx = mcapndx; 427 cur->capndx = mcapndx;
426 428
427 pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus); 429 pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);
428 430
429 if (((mstatus >> 3) & 0x1) == 0) { 431 if (((mstatus >> 3) & 0x1) == 0) {
430 printk(KERN_ERR PFX "woah! AGP 3.x device " 432 dev_err(&td->dev, "woah! AGP 3.x device %s not "
431 "not operating in AGP 3.x mode found on the " 433 "operating in AGP 3.x mode on secondary bus "
432 "secondary bus of an AGP 3.5 bridge operating " 434 "of AGP 3.5 bridge operating with AGP 3.0 "
433 "with AGP 3.0 electricals!\n"); 435 "electricals!\n", pci_name(dev));
434 ret = -ENODEV; 436 ret = -ENODEV;
435 goto free_and_exit; 437 goto free_and_exit;
436 } 438 }
437 } 439 }
438 440
439 /* 441 /*
440 * Call functions to divide target resources amongst the AGP 3.0 442 * Call functions to divide target resources amongst the AGP 3.0
441 * masters. This process is dramatically different depending on 443 * masters. This process is dramatically different depending on
442 * whether isochronous transfers are supported. 444 * whether isochronous transfers are supported.
443 */ 445 */
444 if (isoch) { 446 if (isoch) {
445 ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs); 447 ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
446 if (ret) { 448 if (ret) {
447 printk(KERN_INFO PFX "Something bad happened setting " 449 dev_info(&td->dev, "something bad happened setting "
448 "up isochronous xfers. Falling back to " 450 "up isochronous xfers; falling back to "
449 "non-isochronous xfer mode.\n"); 451 "non-isochronous xfer mode\n");
450 } else { 452 } else {
451 goto free_and_exit; 453 goto free_and_exit;
452 } 454 }
453 } 455 }
454 agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs); 456 agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs);
455 457
456 free_and_exit: 458 free_and_exit:
457 /* Be sure to free the dev_list */ 459 /* Be sure to free the dev_list */
458 for (pos=head->next; pos!=head; ) { 460 for (pos=head->next; pos!=head; ) {
459 cur = list_entry(pos, struct agp_3_5_dev, list); 461 cur = list_entry(pos, struct agp_3_5_dev, list);
460 462
461 pos = pos->next; 463 pos = pos->next;
462 kfree(cur); 464 kfree(cur);
463 } 465 }
464 kfree(dev_list); 466 kfree(dev_list);
465 467
466 get_out: 468 get_out:
467 return ret; 469 return ret;
468 } 470 }
469 471
drivers/char/agp/sis-agp.c
1 /* 1 /*
2 * SiS AGPGART routines. 2 * SiS AGPGART routines.
3 */ 3 */
4 4
5 #include <linux/module.h> 5 #include <linux/module.h>
6 #include <linux/pci.h> 6 #include <linux/pci.h>
7 #include <linux/init.h> 7 #include <linux/init.h>
8 #include <linux/agp_backend.h> 8 #include <linux/agp_backend.h>
9 #include <linux/delay.h> 9 #include <linux/delay.h>
10 #include "agp.h" 10 #include "agp.h"
11 11
12 #define SIS_ATTBASE 0x90 12 #define SIS_ATTBASE 0x90
13 #define SIS_APSIZE 0x94 13 #define SIS_APSIZE 0x94
14 #define SIS_TLBCNTRL 0x97 14 #define SIS_TLBCNTRL 0x97
15 #define SIS_TLBFLUSH 0x98 15 #define SIS_TLBFLUSH 0x98
16 16
17 #define PCI_DEVICE_ID_SI_662 0x0662 17 #define PCI_DEVICE_ID_SI_662 0x0662
18 #define PCI_DEVICE_ID_SI_671 0x0671 18 #define PCI_DEVICE_ID_SI_671 0x0671
19 19
20 static int __devinitdata agp_sis_force_delay = 0; 20 static int __devinitdata agp_sis_force_delay = 0;
21 static int __devinitdata agp_sis_agp_spec = -1; 21 static int __devinitdata agp_sis_agp_spec = -1;
22 22
23 static int sis_fetch_size(void) 23 static int sis_fetch_size(void)
24 { 24 {
25 u8 temp_size; 25 u8 temp_size;
26 int i; 26 int i;
27 struct aper_size_info_8 *values; 27 struct aper_size_info_8 *values;
28 28
29 pci_read_config_byte(agp_bridge->dev, SIS_APSIZE, &temp_size); 29 pci_read_config_byte(agp_bridge->dev, SIS_APSIZE, &temp_size);
30 values = A_SIZE_8(agp_bridge->driver->aperture_sizes); 30 values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
31 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { 31 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
32 if ((temp_size == values[i].size_value) || 32 if ((temp_size == values[i].size_value) ||
33 ((temp_size & ~(0x07)) == 33 ((temp_size & ~(0x07)) ==
34 (values[i].size_value & ~(0x07)))) { 34 (values[i].size_value & ~(0x07)))) {
35 agp_bridge->previous_size = 35 agp_bridge->previous_size =
36 agp_bridge->current_size = (void *) (values + i); 36 agp_bridge->current_size = (void *) (values + i);
37 37
38 agp_bridge->aperture_size_idx = i; 38 agp_bridge->aperture_size_idx = i;
39 return values[i].size; 39 return values[i].size;
40 } 40 }
41 } 41 }
42 42
43 return 0; 43 return 0;
44 } 44 }
45 45
46 static void sis_tlbflush(struct agp_memory *mem) 46 static void sis_tlbflush(struct agp_memory *mem)
47 { 47 {
48 pci_write_config_byte(agp_bridge->dev, SIS_TLBFLUSH, 0x02); 48 pci_write_config_byte(agp_bridge->dev, SIS_TLBFLUSH, 0x02);
49 } 49 }
50 50
51 static int sis_configure(void) 51 static int sis_configure(void)
52 { 52 {
53 u32 temp; 53 u32 temp;
54 struct aper_size_info_8 *current_size; 54 struct aper_size_info_8 *current_size;
55 55
56 current_size = A_SIZE_8(agp_bridge->current_size); 56 current_size = A_SIZE_8(agp_bridge->current_size);
57 pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05); 57 pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05);
58 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); 58 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
59 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); 59 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
60 pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE, 60 pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE,
61 agp_bridge->gatt_bus_addr); 61 agp_bridge->gatt_bus_addr);
62 pci_write_config_byte(agp_bridge->dev, SIS_APSIZE, 62 pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
63 current_size->size_value); 63 current_size->size_value);
64 return 0; 64 return 0;
65 } 65 }
66 66
67 static void sis_cleanup(void) 67 static void sis_cleanup(void)
68 { 68 {
69 struct aper_size_info_8 *previous_size; 69 struct aper_size_info_8 *previous_size;
70 70
71 previous_size = A_SIZE_8(agp_bridge->previous_size); 71 previous_size = A_SIZE_8(agp_bridge->previous_size);
72 pci_write_config_byte(agp_bridge->dev, SIS_APSIZE, 72 pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
73 (previous_size->size_value & ~(0x03))); 73 (previous_size->size_value & ~(0x03)));
74 } 74 }
75 75
76 static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode) 76 static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
77 { 77 {
78 struct pci_dev *device = NULL; 78 struct pci_dev *device = NULL;
79 u32 command; 79 u32 command;
80 int rate; 80 int rate;
81 81
82 printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n", 82 dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
83 agp_bridge->major_version, 83 agp_bridge->major_version, agp_bridge->minor_version);
84 agp_bridge->minor_version,
85 pci_name(agp_bridge->dev));
86 84
87 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command); 85 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
88 command = agp_collect_device_status(bridge, mode, command); 86 command = agp_collect_device_status(bridge, mode, command);
89 command |= AGPSTAT_AGP_ENABLE; 87 command |= AGPSTAT_AGP_ENABLE;
90 rate = (command & 0x7) << 2; 88 rate = (command & 0x7) << 2;
91 89
92 for_each_pci_dev(device) { 90 for_each_pci_dev(device) {
93 u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP); 91 u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
94 if (!agp) 92 if (!agp)
95 continue; 93 continue;
96 94
97 printk(KERN_INFO PFX "Putting AGP V3 device at %s into %dx mode\n", 95 dev_info(&agp_bridge->dev->dev, "putting AGP V3 device at %s into %dx mode\n",
98 pci_name(device), rate); 96 pci_name(device), rate);
99 97
100 pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command); 98 pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);
101 99
102 /* 100 /*
103 * Weird: on some sis chipsets any rate change in the target 101 * Weird: on some sis chipsets any rate change in the target
104 * command register triggers a 5ms screwup during which the master 102 * command register triggers a 5ms screwup during which the master
105 * cannot be configured 103 * cannot be configured
106 */ 104 */
107 if (device->device == bridge->dev->device) { 105 if (device->device == bridge->dev->device) {
108 printk(KERN_INFO PFX "SiS delay workaround: giving bridge time to recover.\n"); 106 dev_info(&agp_bridge->dev->dev, "SiS delay workaround: giving bridge time to recover\n");
109 msleep(10); 107 msleep(10);
110 } 108 }
111 } 109 }
112 } 110 }
113 111
114 static const struct aper_size_info_8 sis_generic_sizes[7] = 112 static const struct aper_size_info_8 sis_generic_sizes[7] =
115 { 113 {
116 {256, 65536, 6, 99}, 114 {256, 65536, 6, 99},
117 {128, 32768, 5, 83}, 115 {128, 32768, 5, 83},
118 {64, 16384, 4, 67}, 116 {64, 16384, 4, 67},
119 {32, 8192, 3, 51}, 117 {32, 8192, 3, 51},
120 {16, 4096, 2, 35}, 118 {16, 4096, 2, 35},
121 {8, 2048, 1, 19}, 119 {8, 2048, 1, 19},
122 {4, 1024, 0, 3} 120 {4, 1024, 0, 3}
123 }; 121 };
124 122
125 static struct agp_bridge_driver sis_driver = { 123 static struct agp_bridge_driver sis_driver = {
126 .owner = THIS_MODULE, 124 .owner = THIS_MODULE,
127 .aperture_sizes = sis_generic_sizes, 125 .aperture_sizes = sis_generic_sizes,
128 .size_type = U8_APER_SIZE, 126 .size_type = U8_APER_SIZE,
129 .num_aperture_sizes = 7, 127 .num_aperture_sizes = 7,
130 .configure = sis_configure, 128 .configure = sis_configure,
131 .fetch_size = sis_fetch_size, 129 .fetch_size = sis_fetch_size,
132 .cleanup = sis_cleanup, 130 .cleanup = sis_cleanup,
133 .tlb_flush = sis_tlbflush, 131 .tlb_flush = sis_tlbflush,
134 .mask_memory = agp_generic_mask_memory, 132 .mask_memory = agp_generic_mask_memory,
135 .masks = NULL, 133 .masks = NULL,
136 .agp_enable = agp_generic_enable, 134 .agp_enable = agp_generic_enable,
137 .cache_flush = global_cache_flush, 135 .cache_flush = global_cache_flush,
138 .create_gatt_table = agp_generic_create_gatt_table, 136 .create_gatt_table = agp_generic_create_gatt_table,
139 .free_gatt_table = agp_generic_free_gatt_table, 137 .free_gatt_table = agp_generic_free_gatt_table,
140 .insert_memory = agp_generic_insert_memory, 138 .insert_memory = agp_generic_insert_memory,
141 .remove_memory = agp_generic_remove_memory, 139 .remove_memory = agp_generic_remove_memory,
142 .alloc_by_type = agp_generic_alloc_by_type, 140 .alloc_by_type = agp_generic_alloc_by_type,
143 .free_by_type = agp_generic_free_by_type, 141 .free_by_type = agp_generic_free_by_type,
144 .agp_alloc_page = agp_generic_alloc_page, 142 .agp_alloc_page = agp_generic_alloc_page,
145 .agp_destroy_page = agp_generic_destroy_page, 143 .agp_destroy_page = agp_generic_destroy_page,
146 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 144 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
147 }; 145 };
148 146
149 // chipsets that require the 'delay hack' 147 // chipsets that require the 'delay hack'
150 static int sis_broken_chipsets[] __devinitdata = { 148 static int sis_broken_chipsets[] __devinitdata = {
151 PCI_DEVICE_ID_SI_648, 149 PCI_DEVICE_ID_SI_648,
152 PCI_DEVICE_ID_SI_746, 150 PCI_DEVICE_ID_SI_746,
153 0 // terminator 151 0 // terminator
154 }; 152 };
155 153
156 static void __devinit sis_get_driver(struct agp_bridge_data *bridge) 154 static void __devinit sis_get_driver(struct agp_bridge_data *bridge)
157 { 155 {
158 int i; 156 int i;
159 157
160 for (i=0; sis_broken_chipsets[i]!=0; ++i) 158 for (i=0; sis_broken_chipsets[i]!=0; ++i)
161 if (bridge->dev->device==sis_broken_chipsets[i]) 159 if (bridge->dev->device==sis_broken_chipsets[i])
162 break; 160 break;
163 161
164 if (sis_broken_chipsets[i] || agp_sis_force_delay) 162 if (sis_broken_chipsets[i] || agp_sis_force_delay)
165 sis_driver.agp_enable=sis_delayed_enable; 163 sis_driver.agp_enable=sis_delayed_enable;
166 164
167 // sis chipsets that indicate less than agp3.5 165 // sis chipsets that indicate less than agp3.5
168 // are not actually fully agp3 compliant 166 // are not actually fully agp3 compliant
169 if ((agp_bridge->major_version == 3 && agp_bridge->minor_version >= 5 167 if ((agp_bridge->major_version == 3 && agp_bridge->minor_version >= 5
170 && agp_sis_agp_spec!=0) || agp_sis_agp_spec==1) { 168 && agp_sis_agp_spec!=0) || agp_sis_agp_spec==1) {
171 sis_driver.aperture_sizes = agp3_generic_sizes; 169 sis_driver.aperture_sizes = agp3_generic_sizes;
172 sis_driver.size_type = U16_APER_SIZE; 170 sis_driver.size_type = U16_APER_SIZE;
173 sis_driver.num_aperture_sizes = AGP_GENERIC_SIZES_ENTRIES; 171 sis_driver.num_aperture_sizes = AGP_GENERIC_SIZES_ENTRIES;
174 sis_driver.configure = agp3_generic_configure; 172 sis_driver.configure = agp3_generic_configure;
175 sis_driver.fetch_size = agp3_generic_fetch_size; 173 sis_driver.fetch_size = agp3_generic_fetch_size;
176 sis_driver.cleanup = agp3_generic_cleanup; 174 sis_driver.cleanup = agp3_generic_cleanup;
177 sis_driver.tlb_flush = agp3_generic_tlbflush; 175 sis_driver.tlb_flush = agp3_generic_tlbflush;
178 } 176 }
179 } 177 }
180 178
181 179
182 static int __devinit agp_sis_probe(struct pci_dev *pdev, 180 static int __devinit agp_sis_probe(struct pci_dev *pdev,
183 const struct pci_device_id *ent) 181 const struct pci_device_id *ent)
184 { 182 {
185 struct agp_bridge_data *bridge; 183 struct agp_bridge_data *bridge;
186 u8 cap_ptr; 184 u8 cap_ptr;
187 185
188 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); 186 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
189 if (!cap_ptr) 187 if (!cap_ptr)
190 return -ENODEV; 188 return -ENODEV;
191 189
192 190
193 printk(KERN_INFO PFX "Detected SiS chipset - id:%i\n", pdev->device); 191 dev_info(&pdev->dev, "SiS chipset [%04x/%04x]\n",
192 pdev->vendor, pdev->device);
194 bridge = agp_alloc_bridge(); 193 bridge = agp_alloc_bridge();
195 if (!bridge) 194 if (!bridge)
196 return -ENOMEM; 195 return -ENOMEM;
197 196
198 bridge->driver = &sis_driver; 197 bridge->driver = &sis_driver;
199 bridge->dev = pdev; 198 bridge->dev = pdev;
200 bridge->capndx = cap_ptr; 199 bridge->capndx = cap_ptr;
201 200
202 get_agp_version(bridge); 201 get_agp_version(bridge);
203 202
204 /* Fill in the mode register */ 203 /* Fill in the mode register */
205 pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); 204 pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
206 sis_get_driver(bridge); 205 sis_get_driver(bridge);
207 206
208 pci_set_drvdata(pdev, bridge); 207 pci_set_drvdata(pdev, bridge);
209 return agp_add_bridge(bridge); 208 return agp_add_bridge(bridge);
210 } 209 }
211 210
212 static void __devexit agp_sis_remove(struct pci_dev *pdev) 211 static void __devexit agp_sis_remove(struct pci_dev *pdev)
213 { 212 {
214 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 213 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
215 214
216 agp_remove_bridge(bridge); 215 agp_remove_bridge(bridge);
217 agp_put_bridge(bridge); 216 agp_put_bridge(bridge);
218 } 217 }
219 218
220 #ifdef CONFIG_PM 219 #ifdef CONFIG_PM
221 220
222 static int agp_sis_suspend(struct pci_dev *pdev, pm_message_t state) 221 static int agp_sis_suspend(struct pci_dev *pdev, pm_message_t state)
223 { 222 {
224 pci_save_state(pdev); 223 pci_save_state(pdev);
225 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 224 pci_set_power_state(pdev, pci_choose_state(pdev, state));
226 225
227 return 0; 226 return 0;
228 } 227 }
229 228
230 static int agp_sis_resume(struct pci_dev *pdev) 229 static int agp_sis_resume(struct pci_dev *pdev)
231 { 230 {
232 pci_set_power_state(pdev, PCI_D0); 231 pci_set_power_state(pdev, PCI_D0);
233 pci_restore_state(pdev); 232 pci_restore_state(pdev);
234 233
235 return sis_driver.configure(); 234 return sis_driver.configure();
236 } 235 }
237 236
238 #endif /* CONFIG_PM */ 237 #endif /* CONFIG_PM */
239 238
240 static struct pci_device_id agp_sis_pci_table[] = { 239 static struct pci_device_id agp_sis_pci_table[] = {
241 { 240 {
242 .class = (PCI_CLASS_BRIDGE_HOST << 8), 241 .class = (PCI_CLASS_BRIDGE_HOST << 8),
243 .class_mask = ~0, 242 .class_mask = ~0,
244 .vendor = PCI_VENDOR_ID_SI, 243 .vendor = PCI_VENDOR_ID_SI,
245 .device = PCI_DEVICE_ID_SI_5591_AGP, 244 .device = PCI_DEVICE_ID_SI_5591_AGP,
246 .subvendor = PCI_ANY_ID, 245 .subvendor = PCI_ANY_ID,
247 .subdevice = PCI_ANY_ID, 246 .subdevice = PCI_ANY_ID,
248 }, 247 },
249 { 248 {
250 .class = (PCI_CLASS_BRIDGE_HOST << 8), 249 .class = (PCI_CLASS_BRIDGE_HOST << 8),
251 .class_mask = ~0, 250 .class_mask = ~0,
252 .vendor = PCI_VENDOR_ID_SI, 251 .vendor = PCI_VENDOR_ID_SI,
253 .device = PCI_DEVICE_ID_SI_530, 252 .device = PCI_DEVICE_ID_SI_530,
254 .subvendor = PCI_ANY_ID, 253 .subvendor = PCI_ANY_ID,
255 .subdevice = PCI_ANY_ID, 254 .subdevice = PCI_ANY_ID,
256 }, 255 },
257 { 256 {
258 .class = (PCI_CLASS_BRIDGE_HOST << 8), 257 .class = (PCI_CLASS_BRIDGE_HOST << 8),
259 .class_mask = ~0, 258 .class_mask = ~0,
260 .vendor = PCI_VENDOR_ID_SI, 259 .vendor = PCI_VENDOR_ID_SI,
261 .device = PCI_DEVICE_ID_SI_540, 260 .device = PCI_DEVICE_ID_SI_540,
262 .subvendor = PCI_ANY_ID, 261 .subvendor = PCI_ANY_ID,
263 .subdevice = PCI_ANY_ID, 262 .subdevice = PCI_ANY_ID,
264 }, 263 },
265 { 264 {
266 .class = (PCI_CLASS_BRIDGE_HOST << 8), 265 .class = (PCI_CLASS_BRIDGE_HOST << 8),
267 .class_mask = ~0, 266 .class_mask = ~0,
268 .vendor = PCI_VENDOR_ID_SI, 267 .vendor = PCI_VENDOR_ID_SI,
269 .device = PCI_DEVICE_ID_SI_550, 268 .device = PCI_DEVICE_ID_SI_550,
270 .subvendor = PCI_ANY_ID, 269 .subvendor = PCI_ANY_ID,
271 .subdevice = PCI_ANY_ID, 270 .subdevice = PCI_ANY_ID,
272 }, 271 },
273 { 272 {
274 .class = (PCI_CLASS_BRIDGE_HOST << 8), 273 .class = (PCI_CLASS_BRIDGE_HOST << 8),
275 .class_mask = ~0, 274 .class_mask = ~0,
276 .vendor = PCI_VENDOR_ID_SI, 275 .vendor = PCI_VENDOR_ID_SI,
277 .device = PCI_DEVICE_ID_SI_620, 276 .device = PCI_DEVICE_ID_SI_620,
278 .subvendor = PCI_ANY_ID, 277 .subvendor = PCI_ANY_ID,
279 .subdevice = PCI_ANY_ID, 278 .subdevice = PCI_ANY_ID,
280 }, 279 },
281 { 280 {
282 .class = (PCI_CLASS_BRIDGE_HOST << 8), 281 .class = (PCI_CLASS_BRIDGE_HOST << 8),
283 .class_mask = ~0, 282 .class_mask = ~0,
284 .vendor = PCI_VENDOR_ID_SI, 283 .vendor = PCI_VENDOR_ID_SI,
285 .device = PCI_DEVICE_ID_SI_630, 284 .device = PCI_DEVICE_ID_SI_630,
286 .subvendor = PCI_ANY_ID, 285 .subvendor = PCI_ANY_ID,
287 .subdevice = PCI_ANY_ID, 286 .subdevice = PCI_ANY_ID,
288 }, 287 },
289 { 288 {
290 .class = (PCI_CLASS_BRIDGE_HOST << 8), 289 .class = (PCI_CLASS_BRIDGE_HOST << 8),
291 .class_mask = ~0, 290 .class_mask = ~0,
292 .vendor = PCI_VENDOR_ID_SI, 291 .vendor = PCI_VENDOR_ID_SI,
293 .device = PCI_DEVICE_ID_SI_635, 292 .device = PCI_DEVICE_ID_SI_635,
294 .subvendor = PCI_ANY_ID, 293 .subvendor = PCI_ANY_ID,
295 .subdevice = PCI_ANY_ID, 294 .subdevice = PCI_ANY_ID,
296 }, 295 },
297 { 296 {
298 .class = (PCI_CLASS_BRIDGE_HOST << 8), 297 .class = (PCI_CLASS_BRIDGE_HOST << 8),
299 .class_mask = ~0, 298 .class_mask = ~0,
300 .vendor = PCI_VENDOR_ID_SI, 299 .vendor = PCI_VENDOR_ID_SI,
301 .device = PCI_DEVICE_ID_SI_645, 300 .device = PCI_DEVICE_ID_SI_645,
302 .subvendor = PCI_ANY_ID, 301 .subvendor = PCI_ANY_ID,
303 .subdevice = PCI_ANY_ID, 302 .subdevice = PCI_ANY_ID,
304 }, 303 },
305 { 304 {
306 .class = (PCI_CLASS_BRIDGE_HOST << 8), 305 .class = (PCI_CLASS_BRIDGE_HOST << 8),
307 .class_mask = ~0, 306 .class_mask = ~0,
308 .vendor = PCI_VENDOR_ID_SI, 307 .vendor = PCI_VENDOR_ID_SI,
309 .device = PCI_DEVICE_ID_SI_646, 308 .device = PCI_DEVICE_ID_SI_646,
310 .subvendor = PCI_ANY_ID, 309 .subvendor = PCI_ANY_ID,
311 .subdevice = PCI_ANY_ID, 310 .subdevice = PCI_ANY_ID,
312 }, 311 },
313 { 312 {
314 .class = (PCI_CLASS_BRIDGE_HOST << 8), 313 .class = (PCI_CLASS_BRIDGE_HOST << 8),
315 .class_mask = ~0, 314 .class_mask = ~0,
316 .vendor = PCI_VENDOR_ID_SI, 315 .vendor = PCI_VENDOR_ID_SI,
317 .device = PCI_DEVICE_ID_SI_648, 316 .device = PCI_DEVICE_ID_SI_648,
318 .subvendor = PCI_ANY_ID, 317 .subvendor = PCI_ANY_ID,
319 .subdevice = PCI_ANY_ID, 318 .subdevice = PCI_ANY_ID,
320 }, 319 },
321 { 320 {
322 .class = (PCI_CLASS_BRIDGE_HOST << 8), 321 .class = (PCI_CLASS_BRIDGE_HOST << 8),
323 .class_mask = ~0, 322 .class_mask = ~0,
324 .vendor = PCI_VENDOR_ID_SI, 323 .vendor = PCI_VENDOR_ID_SI,
325 .device = PCI_DEVICE_ID_SI_650, 324 .device = PCI_DEVICE_ID_SI_650,
326 .subvendor = PCI_ANY_ID, 325 .subvendor = PCI_ANY_ID,
327 .subdevice = PCI_ANY_ID, 326 .subdevice = PCI_ANY_ID,
328 }, 327 },
329 { 328 {
330 .class = (PCI_CLASS_BRIDGE_HOST << 8), 329 .class = (PCI_CLASS_BRIDGE_HOST << 8),
331 .class_mask = ~0, 330 .class_mask = ~0,
332 .vendor = PCI_VENDOR_ID_SI, 331 .vendor = PCI_VENDOR_ID_SI,
333 .device = PCI_DEVICE_ID_SI_651, 332 .device = PCI_DEVICE_ID_SI_651,
334 .subvendor = PCI_ANY_ID, 333 .subvendor = PCI_ANY_ID,
335 .subdevice = PCI_ANY_ID, 334 .subdevice = PCI_ANY_ID,
336 }, 335 },
337 { 336 {
338 .class = (PCI_CLASS_BRIDGE_HOST << 8), 337 .class = (PCI_CLASS_BRIDGE_HOST << 8),
339 .class_mask = ~0, 338 .class_mask = ~0,
340 .vendor = PCI_VENDOR_ID_SI, 339 .vendor = PCI_VENDOR_ID_SI,
341 .device = PCI_DEVICE_ID_SI_655, 340 .device = PCI_DEVICE_ID_SI_655,
342 .subvendor = PCI_ANY_ID, 341 .subvendor = PCI_ANY_ID,
343 .subdevice = PCI_ANY_ID, 342 .subdevice = PCI_ANY_ID,
344 }, 343 },
345 { 344 {
346 .class = (PCI_CLASS_BRIDGE_HOST << 8), 345 .class = (PCI_CLASS_BRIDGE_HOST << 8),
347 .class_mask = ~0, 346 .class_mask = ~0,
348 .vendor = PCI_VENDOR_ID_SI, 347 .vendor = PCI_VENDOR_ID_SI,
349 .device = PCI_DEVICE_ID_SI_661, 348 .device = PCI_DEVICE_ID_SI_661,
350 .subvendor = PCI_ANY_ID, 349 .subvendor = PCI_ANY_ID,
351 .subdevice = PCI_ANY_ID, 350 .subdevice = PCI_ANY_ID,
352 }, 351 },
353 { 352 {
354 .class = (PCI_CLASS_BRIDGE_HOST << 8), 353 .class = (PCI_CLASS_BRIDGE_HOST << 8),
355 .class_mask = ~0, 354 .class_mask = ~0,
356 .vendor = PCI_VENDOR_ID_SI, 355 .vendor = PCI_VENDOR_ID_SI,
357 .device = PCI_DEVICE_ID_SI_662, 356 .device = PCI_DEVICE_ID_SI_662,
358 .subvendor = PCI_ANY_ID, 357 .subvendor = PCI_ANY_ID,
359 .subdevice = PCI_ANY_ID, 358 .subdevice = PCI_ANY_ID,
360 }, 359 },
361 { 360 {
362 .class = (PCI_CLASS_BRIDGE_HOST << 8), 361 .class = (PCI_CLASS_BRIDGE_HOST << 8),
363 .class_mask = ~0, 362 .class_mask = ~0,
364 .vendor = PCI_VENDOR_ID_SI, 363 .vendor = PCI_VENDOR_ID_SI,
365 .device = PCI_DEVICE_ID_SI_671, 364 .device = PCI_DEVICE_ID_SI_671,
366 .subvendor = PCI_ANY_ID, 365 .subvendor = PCI_ANY_ID,
367 .subdevice = PCI_ANY_ID, 366 .subdevice = PCI_ANY_ID,
368 }, 367 },
369 { 368 {
370 .class = (PCI_CLASS_BRIDGE_HOST << 8), 369 .class = (PCI_CLASS_BRIDGE_HOST << 8),
371 .class_mask = ~0, 370 .class_mask = ~0,
372 .vendor = PCI_VENDOR_ID_SI, 371 .vendor = PCI_VENDOR_ID_SI,
373 .device = PCI_DEVICE_ID_SI_730, 372 .device = PCI_DEVICE_ID_SI_730,
374 .subvendor = PCI_ANY_ID, 373 .subvendor = PCI_ANY_ID,
375 .subdevice = PCI_ANY_ID, 374 .subdevice = PCI_ANY_ID,
376 }, 375 },
377 { 376 {
378 .class = (PCI_CLASS_BRIDGE_HOST << 8), 377 .class = (PCI_CLASS_BRIDGE_HOST << 8),
379 .class_mask = ~0, 378 .class_mask = ~0,
380 .vendor = PCI_VENDOR_ID_SI, 379 .vendor = PCI_VENDOR_ID_SI,
381 .device = PCI_DEVICE_ID_SI_735, 380 .device = PCI_DEVICE_ID_SI_735,
382 .subvendor = PCI_ANY_ID, 381 .subvendor = PCI_ANY_ID,
383 .subdevice = PCI_ANY_ID, 382 .subdevice = PCI_ANY_ID,
384 }, 383 },
385 { 384 {
386 .class = (PCI_CLASS_BRIDGE_HOST << 8), 385 .class = (PCI_CLASS_BRIDGE_HOST << 8),
387 .class_mask = ~0, 386 .class_mask = ~0,
388 .vendor = PCI_VENDOR_ID_SI, 387 .vendor = PCI_VENDOR_ID_SI,
389 .device = PCI_DEVICE_ID_SI_740, 388 .device = PCI_DEVICE_ID_SI_740,
390 .subvendor = PCI_ANY_ID, 389 .subvendor = PCI_ANY_ID,
391 .subdevice = PCI_ANY_ID, 390 .subdevice = PCI_ANY_ID,
392 }, 391 },
393 { 392 {
394 .class = (PCI_CLASS_BRIDGE_HOST << 8), 393 .class = (PCI_CLASS_BRIDGE_HOST << 8),
395 .class_mask = ~0, 394 .class_mask = ~0,
396 .vendor = PCI_VENDOR_ID_SI, 395 .vendor = PCI_VENDOR_ID_SI,
397 .device = PCI_DEVICE_ID_SI_741, 396 .device = PCI_DEVICE_ID_SI_741,
398 .subvendor = PCI_ANY_ID, 397 .subvendor = PCI_ANY_ID,
399 .subdevice = PCI_ANY_ID, 398 .subdevice = PCI_ANY_ID,
400 }, 399 },
401 { 400 {
402 .class = (PCI_CLASS_BRIDGE_HOST << 8), 401 .class = (PCI_CLASS_BRIDGE_HOST << 8),
403 .class_mask = ~0, 402 .class_mask = ~0,
404 .vendor = PCI_VENDOR_ID_SI, 403 .vendor = PCI_VENDOR_ID_SI,
405 .device = PCI_DEVICE_ID_SI_745, 404 .device = PCI_DEVICE_ID_SI_745,
406 .subvendor = PCI_ANY_ID, 405 .subvendor = PCI_ANY_ID,
407 .subdevice = PCI_ANY_ID, 406 .subdevice = PCI_ANY_ID,
408 }, 407 },
409 { 408 {
410 .class = (PCI_CLASS_BRIDGE_HOST << 8), 409 .class = (PCI_CLASS_BRIDGE_HOST << 8),
411 .class_mask = ~0, 410 .class_mask = ~0,
412 .vendor = PCI_VENDOR_ID_SI, 411 .vendor = PCI_VENDOR_ID_SI,
413 .device = PCI_DEVICE_ID_SI_746, 412 .device = PCI_DEVICE_ID_SI_746,
414 .subvendor = PCI_ANY_ID, 413 .subvendor = PCI_ANY_ID,
415 .subdevice = PCI_ANY_ID, 414 .subdevice = PCI_ANY_ID,
416 }, 415 },
417 { 416 {
418 .class = (PCI_CLASS_BRIDGE_HOST << 8), 417 .class = (PCI_CLASS_BRIDGE_HOST << 8),
419 .class_mask = ~0, 418 .class_mask = ~0,
420 .vendor = PCI_VENDOR_ID_SI, 419 .vendor = PCI_VENDOR_ID_SI,
421 .device = PCI_DEVICE_ID_SI_760, 420 .device = PCI_DEVICE_ID_SI_760,
422 .subvendor = PCI_ANY_ID, 421 .subvendor = PCI_ANY_ID,
423 .subdevice = PCI_ANY_ID, 422 .subdevice = PCI_ANY_ID,
424 }, 423 },
425 { } 424 { }
426 }; 425 };
427 426
428 MODULE_DEVICE_TABLE(pci, agp_sis_pci_table); 427 MODULE_DEVICE_TABLE(pci, agp_sis_pci_table);
429 428
430 static struct pci_driver agp_sis_pci_driver = { 429 static struct pci_driver agp_sis_pci_driver = {
431 .name = "agpgart-sis", 430 .name = "agpgart-sis",
432 .id_table = agp_sis_pci_table, 431 .id_table = agp_sis_pci_table,
433 .probe = agp_sis_probe, 432 .probe = agp_sis_probe,
434 .remove = agp_sis_remove, 433 .remove = agp_sis_remove,
435 #ifdef CONFIG_PM 434 #ifdef CONFIG_PM
436 .suspend = agp_sis_suspend, 435 .suspend = agp_sis_suspend,
437 .resume = agp_sis_resume, 436 .resume = agp_sis_resume,
438 #endif 437 #endif
439 }; 438 };
440 439
441 static int __init agp_sis_init(void) 440 static int __init agp_sis_init(void)
442 { 441 {
443 if (agp_off) 442 if (agp_off)
444 return -EINVAL; 443 return -EINVAL;
445 return pci_register_driver(&agp_sis_pci_driver); 444 return pci_register_driver(&agp_sis_pci_driver);
446 } 445 }
447 446
448 static void __exit agp_sis_cleanup(void) 447 static void __exit agp_sis_cleanup(void)
449 { 448 {
450 pci_unregister_driver(&agp_sis_pci_driver); 449 pci_unregister_driver(&agp_sis_pci_driver);
451 } 450 }
452 451
453 module_init(agp_sis_init); 452 module_init(agp_sis_init);
454 module_exit(agp_sis_cleanup); 453 module_exit(agp_sis_cleanup);
455 454
456 module_param(agp_sis_force_delay, bool, 0); 455 module_param(agp_sis_force_delay, bool, 0);
457 MODULE_PARM_DESC(agp_sis_force_delay,"forces sis delay hack"); 456 MODULE_PARM_DESC(agp_sis_force_delay,"forces sis delay hack");
458 module_param(agp_sis_agp_spec, int, 0); 457 module_param(agp_sis_agp_spec, int, 0);
459 MODULE_PARM_DESC(agp_sis_agp_spec,"0=force sis init, 1=force generic agp3 init, default: autodetect"); 458 MODULE_PARM_DESC(agp_sis_agp_spec,"0=force sis init, 1=force generic agp3 init, default: autodetect");
460 MODULE_LICENSE("GPL and additional rights"); 459 MODULE_LICENSE("GPL and additional rights");
drivers/char/agp/sworks-agp.c
1 /* 1 /*
2 * Serverworks AGPGART routines. 2 * Serverworks AGPGART routines.
3 */ 3 */
4 4
5 #include <linux/module.h> 5 #include <linux/module.h>
6 #include <linux/pci.h> 6 #include <linux/pci.h>
7 #include <linux/init.h> 7 #include <linux/init.h>
8 #include <linux/string.h> 8 #include <linux/string.h>
9 #include <linux/slab.h> 9 #include <linux/slab.h>
10 #include <linux/jiffies.h> 10 #include <linux/jiffies.h>
11 #include <linux/agp_backend.h> 11 #include <linux/agp_backend.h>
12 #include "agp.h" 12 #include "agp.h"
13 13
14 #define SVWRKS_COMMAND 0x04 14 #define SVWRKS_COMMAND 0x04
15 #define SVWRKS_APSIZE 0x10 15 #define SVWRKS_APSIZE 0x10
16 #define SVWRKS_MMBASE 0x14 16 #define SVWRKS_MMBASE 0x14
17 #define SVWRKS_CACHING 0x4b 17 #define SVWRKS_CACHING 0x4b
18 #define SVWRKS_AGP_ENABLE 0x60 18 #define SVWRKS_AGP_ENABLE 0x60
19 #define SVWRKS_FEATURE 0x68 19 #define SVWRKS_FEATURE 0x68
20 20
21 #define SVWRKS_SIZE_MASK 0xfe000000 21 #define SVWRKS_SIZE_MASK 0xfe000000
22 22
23 /* Memory mapped registers */ 23 /* Memory mapped registers */
24 #define SVWRKS_GART_CACHE 0x02 24 #define SVWRKS_GART_CACHE 0x02
25 #define SVWRKS_GATTBASE 0x04 25 #define SVWRKS_GATTBASE 0x04
26 #define SVWRKS_TLBFLUSH 0x10 26 #define SVWRKS_TLBFLUSH 0x10
27 #define SVWRKS_POSTFLUSH 0x14 27 #define SVWRKS_POSTFLUSH 0x14
28 #define SVWRKS_DIRFLUSH 0x0c 28 #define SVWRKS_DIRFLUSH 0x0c
29 29
30 30
31 struct serverworks_page_map { 31 struct serverworks_page_map {
32 unsigned long *real; 32 unsigned long *real;
33 unsigned long __iomem *remapped; 33 unsigned long __iomem *remapped;
34 }; 34 };
35 35
36 static struct _serverworks_private { 36 static struct _serverworks_private {
37 struct pci_dev *svrwrks_dev; /* device one */ 37 struct pci_dev *svrwrks_dev; /* device one */
38 volatile u8 __iomem *registers; 38 volatile u8 __iomem *registers;
39 struct serverworks_page_map **gatt_pages; 39 struct serverworks_page_map **gatt_pages;
40 int num_tables; 40 int num_tables;
41 struct serverworks_page_map scratch_dir; 41 struct serverworks_page_map scratch_dir;
42 42
43 int gart_addr_ofs; 43 int gart_addr_ofs;
44 int mm_addr_ofs; 44 int mm_addr_ofs;
45 } serverworks_private; 45 } serverworks_private;
46 46
47 static int serverworks_create_page_map(struct serverworks_page_map *page_map) 47 static int serverworks_create_page_map(struct serverworks_page_map *page_map)
48 { 48 {
49 int i; 49 int i;
50 50
51 page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); 51 page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
52 if (page_map->real == NULL) { 52 if (page_map->real == NULL) {
53 return -ENOMEM; 53 return -ENOMEM;
54 } 54 }
55 55
56 set_memory_uc((unsigned long)page_map->real, 1); 56 set_memory_uc((unsigned long)page_map->real, 1);
57 page_map->remapped = page_map->real; 57 page_map->remapped = page_map->real;
58 58
59 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) 59 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
60 writel(agp_bridge->scratch_page, page_map->remapped+i); 60 writel(agp_bridge->scratch_page, page_map->remapped+i);
61 /* Red Pen: Everyone else does pci posting flush here */ 61 /* Red Pen: Everyone else does pci posting flush here */
62 62
63 return 0; 63 return 0;
64 } 64 }
65 65
66 static void serverworks_free_page_map(struct serverworks_page_map *page_map) 66 static void serverworks_free_page_map(struct serverworks_page_map *page_map)
67 { 67 {
68 set_memory_wb((unsigned long)page_map->real, 1); 68 set_memory_wb((unsigned long)page_map->real, 1);
69 free_page((unsigned long) page_map->real); 69 free_page((unsigned long) page_map->real);
70 } 70 }
71 71
72 static void serverworks_free_gatt_pages(void) 72 static void serverworks_free_gatt_pages(void)
73 { 73 {
74 int i; 74 int i;
75 struct serverworks_page_map **tables; 75 struct serverworks_page_map **tables;
76 struct serverworks_page_map *entry; 76 struct serverworks_page_map *entry;
77 77
78 tables = serverworks_private.gatt_pages; 78 tables = serverworks_private.gatt_pages;
79 for (i = 0; i < serverworks_private.num_tables; i++) { 79 for (i = 0; i < serverworks_private.num_tables; i++) {
80 entry = tables[i]; 80 entry = tables[i];
81 if (entry != NULL) { 81 if (entry != NULL) {
82 if (entry->real != NULL) { 82 if (entry->real != NULL) {
83 serverworks_free_page_map(entry); 83 serverworks_free_page_map(entry);
84 } 84 }
85 kfree(entry); 85 kfree(entry);
86 } 86 }
87 } 87 }
88 kfree(tables); 88 kfree(tables);
89 } 89 }
90 90
91 static int serverworks_create_gatt_pages(int nr_tables) 91 static int serverworks_create_gatt_pages(int nr_tables)
92 { 92 {
93 struct serverworks_page_map **tables; 93 struct serverworks_page_map **tables;
94 struct serverworks_page_map *entry; 94 struct serverworks_page_map *entry;
95 int retval = 0; 95 int retval = 0;
96 int i; 96 int i;
97 97
98 tables = kzalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *), 98 tables = kzalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *),
99 GFP_KERNEL); 99 GFP_KERNEL);
100 if (tables == NULL) 100 if (tables == NULL)
101 return -ENOMEM; 101 return -ENOMEM;
102 102
103 for (i = 0; i < nr_tables; i++) { 103 for (i = 0; i < nr_tables; i++) {
104 entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL); 104 entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
105 if (entry == NULL) { 105 if (entry == NULL) {
106 retval = -ENOMEM; 106 retval = -ENOMEM;
107 break; 107 break;
108 } 108 }
109 tables[i] = entry; 109 tables[i] = entry;
110 retval = serverworks_create_page_map(entry); 110 retval = serverworks_create_page_map(entry);
111 if (retval != 0) break; 111 if (retval != 0) break;
112 } 112 }
113 serverworks_private.num_tables = nr_tables; 113 serverworks_private.num_tables = nr_tables;
114 serverworks_private.gatt_pages = tables; 114 serverworks_private.gatt_pages = tables;
115 115
116 if (retval != 0) serverworks_free_gatt_pages(); 116 if (retval != 0) serverworks_free_gatt_pages();
117 117
118 return retval; 118 return retval;
119 } 119 }
120 120
121 #define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\ 121 #define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
122 GET_PAGE_DIR_IDX(addr)]->remapped) 122 GET_PAGE_DIR_IDX(addr)]->remapped)
123 123
124 #ifndef GET_PAGE_DIR_OFF 124 #ifndef GET_PAGE_DIR_OFF
125 #define GET_PAGE_DIR_OFF(addr) (addr >> 22) 125 #define GET_PAGE_DIR_OFF(addr) (addr >> 22)
126 #endif 126 #endif
127 127
128 #ifndef GET_PAGE_DIR_IDX 128 #ifndef GET_PAGE_DIR_IDX
129 #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ 129 #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
130 GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) 130 GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
131 #endif 131 #endif
132 132
133 #ifndef GET_GATT_OFF 133 #ifndef GET_GATT_OFF
134 #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) 134 #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
135 #endif 135 #endif
136 136
137 static int serverworks_create_gatt_table(struct agp_bridge_data *bridge) 137 static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
138 { 138 {
139 struct aper_size_info_lvl2 *value; 139 struct aper_size_info_lvl2 *value;
140 struct serverworks_page_map page_dir; 140 struct serverworks_page_map page_dir;
141 int retval; 141 int retval;
142 u32 temp; 142 u32 temp;
143 int i; 143 int i;
144 144
145 value = A_SIZE_LVL2(agp_bridge->current_size); 145 value = A_SIZE_LVL2(agp_bridge->current_size);
146 retval = serverworks_create_page_map(&page_dir); 146 retval = serverworks_create_page_map(&page_dir);
147 if (retval != 0) { 147 if (retval != 0) {
148 return retval; 148 return retval;
149 } 149 }
150 retval = serverworks_create_page_map(&serverworks_private.scratch_dir); 150 retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
151 if (retval != 0) { 151 if (retval != 0) {
152 serverworks_free_page_map(&page_dir); 152 serverworks_free_page_map(&page_dir);
153 return retval; 153 return retval;
154 } 154 }
155 /* Create a fake scratch directory */ 155 /* Create a fake scratch directory */
156 for (i = 0; i < 1024; i++) { 156 for (i = 0; i < 1024; i++) {
157 writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i); 157 writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
158 writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i); 158 writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
159 } 159 }
160 160
161 retval = serverworks_create_gatt_pages(value->num_entries / 1024); 161 retval = serverworks_create_gatt_pages(value->num_entries / 1024);
162 if (retval != 0) { 162 if (retval != 0) {
163 serverworks_free_page_map(&page_dir); 163 serverworks_free_page_map(&page_dir);
164 serverworks_free_page_map(&serverworks_private.scratch_dir); 164 serverworks_free_page_map(&serverworks_private.scratch_dir);
165 return retval; 165 return retval;
166 } 166 }
167 167
168 agp_bridge->gatt_table_real = (u32 *)page_dir.real; 168 agp_bridge->gatt_table_real = (u32 *)page_dir.real;
169 agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped; 169 agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
170 agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real); 170 agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
171 171
172 /* Get the address for the gart region. 172 /* Get the address for the gart region.
173 * This is a bus address even on the alpha, b/c its 173 * This is a bus address even on the alpha, b/c its
174 * used to program the agp master not the cpu 174 * used to program the agp master not the cpu
175 */ 175 */
176 176
177 pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp); 177 pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
178 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); 178 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
179 179
180 /* Calculate the agp offset */ 180 /* Calculate the agp offset */
181 for (i = 0; i < value->num_entries / 1024; i++) 181 for (i = 0; i < value->num_entries / 1024; i++)
182 writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i); 182 writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
183 183
184 return 0; 184 return 0;
185 } 185 }
186 186
187 static int serverworks_free_gatt_table(struct agp_bridge_data *bridge) 187 static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
188 { 188 {
189 struct serverworks_page_map page_dir; 189 struct serverworks_page_map page_dir;
190 190
191 page_dir.real = (unsigned long *)agp_bridge->gatt_table_real; 191 page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
192 page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table; 192 page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
193 193
194 serverworks_free_gatt_pages(); 194 serverworks_free_gatt_pages();
195 serverworks_free_page_map(&page_dir); 195 serverworks_free_page_map(&page_dir);
196 serverworks_free_page_map(&serverworks_private.scratch_dir); 196 serverworks_free_page_map(&serverworks_private.scratch_dir);
197 return 0; 197 return 0;
198 } 198 }
199 199
200 static int serverworks_fetch_size(void) 200 static int serverworks_fetch_size(void)
201 { 201 {
202 int i; 202 int i;
203 u32 temp; 203 u32 temp;
204 u32 temp2; 204 u32 temp2;
205 struct aper_size_info_lvl2 *values; 205 struct aper_size_info_lvl2 *values;
206 206
207 values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); 207 values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
208 pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp); 208 pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
209 pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs, 209 pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,
210 SVWRKS_SIZE_MASK); 210 SVWRKS_SIZE_MASK);
211 pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2); 211 pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2);
212 pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp); 212 pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp);
213 temp2 &= SVWRKS_SIZE_MASK; 213 temp2 &= SVWRKS_SIZE_MASK;
214 214
215 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { 215 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
216 if (temp2 == values[i].size_value) { 216 if (temp2 == values[i].size_value) {
217 agp_bridge->previous_size = 217 agp_bridge->previous_size =
218 agp_bridge->current_size = (void *) (values + i); 218 agp_bridge->current_size = (void *) (values + i);
219 219
220 agp_bridge->aperture_size_idx = i; 220 agp_bridge->aperture_size_idx = i;
221 return values[i].size; 221 return values[i].size;
222 } 222 }
223 } 223 }
224 224
225 return 0; 225 return 0;
226 } 226 }
227 227
/*
 * This routine could be implemented by taking the addresses
 * written to the GATT, and flushing them individually.  However
 * currently it just flushes the whole table.  Which is probably
 * more efficient, since agp_memory blocks can be a large number of
 * entries.
 *
 * The @temp argument is unused; this matches the driver's tlb_flush
 * hook signature.
 */
static void serverworks_tlbflush(struct agp_memory *temp)
{
	unsigned long timeout;

	/* Kick the posted-write flush and poll until the hardware clears
	 * the bit, giving up (with a diagnostic) after 3 seconds. */
	writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
	timeout = jiffies + 3*HZ;
	while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) {
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			dev_err(&serverworks_private.svrwrks_dev->dev,
				"TLB post flush took more than 3 seconds\n");
			break;
		}
	}

	/* Same pattern for the directory flush register. */
	writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
	timeout = jiffies + 3*HZ;
	while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) {
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			dev_err(&serverworks_private.svrwrks_dev->dev,
				"TLB Dir flush took more than 3 seconds\n");
			break;
		}
	}
}
259 261
/*
 * Program the bridge for AGP operation: map the memory-mapped register
 * window, point the hardware at the GATT, enable AGP on the secondary
 * (function 1) device and set up caching/feature bits.
 *
 * Returns 0 on success, -ENOMEM if the register window can't be mapped.
 */
static int serverworks_configure(void)
{
	struct aper_size_info_lvl2 *current_size;
	u32 temp;
	u8 enable_reg;
	u16 cap_reg;

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
	if (!serverworks_private.registers) {
		dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp);
		return -ENOMEM;
	}

	writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
	readb(serverworks_private.registers+SVWRKS_GART_CACHE);	/* PCI Posting. */

	writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
	readl(serverworks_private.registers+SVWRKS_GATTBASE);	/* PCI Posting. */

	/* Clear the low three command bits and set bit 2; the read-back
	 * forces PCI posting as above. */
	cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
	cap_reg &= ~0x0007;
	cap_reg |= 0x4;
	writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
	readw(serverworks_private.registers+SVWRKS_COMMAND);

	pci_read_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, &enable_reg);
	enable_reg |= 0x1; /* Agp Enable bit */
	pci_write_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, enable_reg);
	serverworks_tlbflush(NULL);

	agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);

	/* Fill in the mode register */
	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);

	/* Clear the two low caching bits. */
	pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
	enable_reg &= ~0x3;
	pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);

	/* Set feature bit 6.  NOTE(review): meaning of this bit is not
	 * visible here — taken from the chipset programming convention. */
	pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
	enable_reg |= (1<<6);
	pci_write_config_byte(agp_bridge->dev,SVWRKS_FEATURE, enable_reg);

	return 0;
}
311 313
/* Unmap the register window set up by serverworks_configure(). */
static void serverworks_cleanup(void)
{
	iounmap((void __iomem *) serverworks_private.registers);
}
316 318
317 static int serverworks_insert_memory(struct agp_memory *mem, 319 static int serverworks_insert_memory(struct agp_memory *mem,
318 off_t pg_start, int type) 320 off_t pg_start, int type)
319 { 321 {
320 int i, j, num_entries; 322 int i, j, num_entries;
321 unsigned long __iomem *cur_gatt; 323 unsigned long __iomem *cur_gatt;
322 unsigned long addr; 324 unsigned long addr;
323 325
324 num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; 326 num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
325 327
326 if (type != 0 || mem->type != 0) { 328 if (type != 0 || mem->type != 0) {
327 return -EINVAL; 329 return -EINVAL;
328 } 330 }
329 if ((pg_start + mem->page_count) > num_entries) { 331 if ((pg_start + mem->page_count) > num_entries) {
330 return -EINVAL; 332 return -EINVAL;
331 } 333 }
332 334
333 j = pg_start; 335 j = pg_start;
334 while (j < (pg_start + mem->page_count)) { 336 while (j < (pg_start + mem->page_count)) {
335 addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; 337 addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
336 cur_gatt = SVRWRKS_GET_GATT(addr); 338 cur_gatt = SVRWRKS_GET_GATT(addr);
337 if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr)))) 339 if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
338 return -EBUSY; 340 return -EBUSY;
339 j++; 341 j++;
340 } 342 }
341 343
342 if (!mem->is_flushed) { 344 if (!mem->is_flushed) {
343 global_cache_flush(); 345 global_cache_flush();
344 mem->is_flushed = true; 346 mem->is_flushed = true;
345 } 347 }
346 348
347 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 349 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
348 addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; 350 addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
349 cur_gatt = SVRWRKS_GET_GATT(addr); 351 cur_gatt = SVRWRKS_GET_GATT(addr);
350 writel(agp_bridge->driver->mask_memory(agp_bridge, mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr)); 352 writel(agp_bridge->driver->mask_memory(agp_bridge, mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
351 } 353 }
352 serverworks_tlbflush(mem); 354 serverworks_tlbflush(mem);
353 return 0; 355 return 0;
354 } 356 }
355 357
356 static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start, 358 static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
357 int type) 359 int type)
358 { 360 {
359 int i; 361 int i;
360 unsigned long __iomem *cur_gatt; 362 unsigned long __iomem *cur_gatt;
361 unsigned long addr; 363 unsigned long addr;
362 364
363 if (type != 0 || mem->type != 0) { 365 if (type != 0 || mem->type != 0) {
364 return -EINVAL; 366 return -EINVAL;
365 } 367 }
366 368
367 global_cache_flush(); 369 global_cache_flush();
368 serverworks_tlbflush(mem); 370 serverworks_tlbflush(mem);
369 371
370 for (i = pg_start; i < (mem->page_count + pg_start); i++) { 372 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
371 addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; 373 addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
372 cur_gatt = SVRWRKS_GET_GATT(addr); 374 cur_gatt = SVRWRKS_GET_GATT(addr);
373 writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); 375 writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
374 } 376 }
375 377
376 serverworks_tlbflush(mem); 378 serverworks_tlbflush(mem);
377 return 0; 379 return 0;
378 } 380 }
379 381
/* Single GATT entry mask: bit 0 is the valid bit. */
static const struct gatt_mask serverworks_masks[] =
{
	{.mask = 1, .type = 0}
};

/* Aperture size table: {size, num_entries, size_value}.  size_value is
 * matched against the probed register bits in serverworks_fetch_size(). */
static const struct aper_size_info_lvl2 serverworks_sizes[7] =
{
	{2048, 524288, 0x80000000},
	{1024, 262144, 0xc0000000},
	{512, 131072, 0xe0000000},
	{256, 65536, 0xf0000000},
	{128, 32768, 0xf8000000},
	{64, 16384, 0xfc000000},
	{32, 8192, 0xfe000000}
};
395 397
/*
 * Enable AGP: read the secondary device's AGP status register, merge in
 * the requested mode and the status of all AGP devices, adjust the
 * command bits, then write the command to the bridge and broadcast it
 * to the devices.
 */
static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	u32 command;

	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      bridge->capndx + PCI_AGP_STATUS,
			      &command);

	command = agp_collect_device_status(bridge, mode, command);

	command &= ~0x10;	/* disable FW */
	command &= ~0x08;	/* clear bit 3 as well */

	command |= 0x100;	/* set bit 8 */

	pci_write_config_dword(serverworks_private.svrwrks_dev,
			       bridge->capndx + PCI_AGP_COMMAND,
			       command);

	/* Propagate the same command to all AGP devices (no FW). */
	agp_device_command(command, false);
}
417 419
/* Driver operations for the ServerWorks bridge; generic agpgart helpers
 * are used wherever no chipset-specific handling is needed. */
static const struct agp_bridge_driver sworks_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= serverworks_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= serverworks_configure,
	.fetch_size		= serverworks_fetch_size,
	.cleanup		= serverworks_cleanup,
	.tlb_flush		= serverworks_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= serverworks_masks,
	.agp_enable		= serverworks_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= serverworks_create_gatt_table,
	.free_gatt_table	= serverworks_free_gatt_table,
	.insert_memory		= serverworks_insert_memory,
	.remove_memory		= serverworks_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
};
441 443
442 static int __devinit agp_serverworks_probe(struct pci_dev *pdev, 444 static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
443 const struct pci_device_id *ent) 445 const struct pci_device_id *ent)
444 { 446 {
445 struct agp_bridge_data *bridge; 447 struct agp_bridge_data *bridge;
446 struct pci_dev *bridge_dev; 448 struct pci_dev *bridge_dev;
447 u32 temp, temp2; 449 u32 temp, temp2;
448 u8 cap_ptr = 0; 450 u8 cap_ptr = 0;
449 451
450 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); 452 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
451 453
452 switch (pdev->device) { 454 switch (pdev->device) {
453 case 0x0006: 455 case 0x0006:
454 printk (KERN_ERR PFX "ServerWorks CNB20HE is unsupported due to lack of documentation.\n"); 456 dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n");
455 return -ENODEV; 457 return -ENODEV;
456 458
457 case PCI_DEVICE_ID_SERVERWORKS_HE: 459 case PCI_DEVICE_ID_SERVERWORKS_HE:
458 case PCI_DEVICE_ID_SERVERWORKS_LE: 460 case PCI_DEVICE_ID_SERVERWORKS_LE:
459 case 0x0007: 461 case 0x0007:
460 break; 462 break;
461 463
462 default: 464 default:
463 if (cap_ptr) 465 if (cap_ptr)
464 printk(KERN_ERR PFX "Unsupported Serverworks chipset " 466 dev_err(&pdev->dev, "unsupported Serverworks chipset "
465 "(device id: %04x)\n", pdev->device); 467 "[%04x/%04x]\n", pdev->vendor, pdev->device);
466 return -ENODEV; 468 return -ENODEV;
467 } 469 }
468 470
469 /* Everything is on func 1 here so we are hardcoding function one */ 471 /* Everything is on func 1 here so we are hardcoding function one */
470 bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number, 472 bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
471 PCI_DEVFN(0, 1)); 473 PCI_DEVFN(0, 1));
472 if (!bridge_dev) { 474 if (!bridge_dev) {
473 printk(KERN_INFO PFX "Detected a Serverworks chipset " 475 dev_info(&pdev->dev, "can't find secondary device\n");
474 "but could not find the secondary device.\n");
475 return -ENODEV; 476 return -ENODEV;
476 } 477 }
477 478
478 serverworks_private.svrwrks_dev = bridge_dev; 479 serverworks_private.svrwrks_dev = bridge_dev;
479 serverworks_private.gart_addr_ofs = 0x10; 480 serverworks_private.gart_addr_ofs = 0x10;
480 481
481 pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp); 482 pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
482 if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { 483 if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
483 pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2); 484 pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
484 if (temp2 != 0) { 485 if (temp2 != 0) {
485 printk(KERN_INFO PFX "Detected 64 bit aperture address, " 486 dev_info(&pdev->dev, "64 bit aperture address, "
486 "but top bits are not zero. Disabling agp\n"); 487 "but top bits are not zero; disabling AGP\n");
487 return -ENODEV; 488 return -ENODEV;
488 } 489 }
489 serverworks_private.mm_addr_ofs = 0x18; 490 serverworks_private.mm_addr_ofs = 0x18;
490 } else 491 } else
491 serverworks_private.mm_addr_ofs = 0x14; 492 serverworks_private.mm_addr_ofs = 0x14;
492 493
493 pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp); 494 pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
494 if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { 495 if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
495 pci_read_config_dword(pdev, 496 pci_read_config_dword(pdev,
496 serverworks_private.mm_addr_ofs + 4, &temp2); 497 serverworks_private.mm_addr_ofs + 4, &temp2);
497 if (temp2 != 0) { 498 if (temp2 != 0) {
498 printk(KERN_INFO PFX "Detected 64 bit MMIO address, " 499 dev_info(&pdev->dev, "64 bit MMIO address, but top "
499 "but top bits are not zero. Disabling agp\n"); 500 "bits are not zero; disabling AGP\n");
500 return -ENODEV; 501 return -ENODEV;
501 } 502 }
502 } 503 }
503 504
504 bridge = agp_alloc_bridge(); 505 bridge = agp_alloc_bridge();
505 if (!bridge) 506 if (!bridge)
506 return -ENOMEM; 507 return -ENOMEM;
507 508
508 bridge->driver = &sworks_driver; 509 bridge->driver = &sworks_driver;
509 bridge->dev_private_data = &serverworks_private, 510 bridge->dev_private_data = &serverworks_private,
510 bridge->dev = pci_dev_get(pdev); 511 bridge->dev = pci_dev_get(pdev);
511 512
512 pci_set_drvdata(pdev, bridge); 513 pci_set_drvdata(pdev, bridge);
513 return agp_add_bridge(bridge); 514 return agp_add_bridge(bridge);
514 } 515 }
515 516
/*
 * Undo agp_serverworks_probe(): unregister the bridge and drop the
 * references held on both the probed device and the secondary device.
 */
static void __devexit agp_serverworks_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	pci_dev_put(bridge->dev);
	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
	pci_dev_put(serverworks_private.svrwrks_dev);
	serverworks_private.svrwrks_dev = NULL;
}
526 527
/* Match any ServerWorks host bridge; agp_serverworks_probe() filters
 * out the unsupported device IDs. */
static struct pci_device_id agp_serverworks_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_SERVERWORKS,
	.device		= PCI_ANY_ID,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);
540 541
/* PCI driver glue for the ServerWorks AGP module. */
static struct pci_driver agp_serverworks_pci_driver = {
	.name		= "agpgart-serverworks",
	.id_table	= agp_serverworks_pci_table,
	.probe		= agp_serverworks_probe,
	.remove		= agp_serverworks_remove,
};
547 548
/* Module entry point: respect the global "agp=off" switch, otherwise
 * register the PCI driver. */
static int __init agp_serverworks_init(void)
{
	if (agp_off)
		return -EINVAL;
	return pci_register_driver(&agp_serverworks_pci_driver);
}
554 555
/* Module exit point: unregister the PCI driver. */
static void __exit agp_serverworks_cleanup(void)
{
	pci_unregister_driver(&agp_serverworks_pci_driver);
}

module_init(agp_serverworks_init);
module_exit(agp_serverworks_cleanup);

MODULE_LICENSE("GPL and additional rights");
drivers/char/agp/uninorth-agp.c
1 /* 1 /*
2 * UniNorth AGPGART routines. 2 * UniNorth AGPGART routines.
3 */ 3 */
4 #include <linux/module.h> 4 #include <linux/module.h>
5 #include <linux/pci.h> 5 #include <linux/pci.h>
6 #include <linux/init.h> 6 #include <linux/init.h>
7 #include <linux/pagemap.h> 7 #include <linux/pagemap.h>
8 #include <linux/agp_backend.h> 8 #include <linux/agp_backend.h>
9 #include <linux/delay.h> 9 #include <linux/delay.h>
10 #include <asm/uninorth.h> 10 #include <asm/uninorth.h>
11 #include <asm/pci-bridge.h> 11 #include <asm/pci-bridge.h>
12 #include <asm/prom.h> 12 #include <asm/prom.h>
13 #include <asm/pmac_feature.h> 13 #include <asm/pmac_feature.h>
14 #include "agp.h" 14 #include "agp.h"
15 15
16 /* 16 /*
17 * NOTES for uninorth3 (G5 AGP) supports : 17 * NOTES for uninorth3 (G5 AGP) supports :
18 * 18 *
19 * There maybe also possibility to have bigger cache line size for 19 * There maybe also possibility to have bigger cache line size for
20 * agp (see pmac_pci.c and look for cache line). Need to be investigated 20 * agp (see pmac_pci.c and look for cache line). Need to be investigated
21 * by someone. 21 * by someone.
22 * 22 *
23 * PAGE size are hardcoded but this may change, see asm/page.h. 23 * PAGE size are hardcoded but this may change, see asm/page.h.
24 * 24 *
25 * Jerome Glisse <j.glisse@gmail.com> 25 * Jerome Glisse <j.glisse@gmail.com>
26 */ 26 */
static int uninorth_rev;	/* bridge revision; <= 0x30 takes the 2xRESET path below */
static int is_u3;		/* nonzero when driving a U3 (G5) bridge — selects U3-only register bits */

static char __devinitdata *aperture = NULL;	/* user aperture-size string, parsed with memparse() */
31 31
/*
 * Determine the aperture size.  If the user supplied an aperture
 * string, parse it with memparse() and look it up in the driver's size
 * table, falling back to the default on an invalid value; otherwise use
 * the table's 32 entry.
 *
 * Returns the chosen aperture size and records it on agp_bridge.
 */
static int uninorth_fetch_size(void)
{
	int i, size = 0;
	struct aper_size_info_32 *values =
	    A_SIZE_32(agp_bridge->driver->aperture_sizes);

	if (aperture) {
		char *save = aperture;

		/* memparse() advances its argument; restore it so a later
		 * call sees the original string. */
		size = memparse(aperture, &aperture) >> 20;
		aperture = save;

		for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++)
			if (size == values[i].size)
				break;

		if (i == agp_bridge->driver->num_aperture_sizes) {
			dev_err(&agp_bridge->dev->dev, "invalid aperture size, "
				"using default\n");
			size = 0;
			aperture = NULL;
		}
	}

	if (!size) {
		/* Default to the 32 entry.  NOTE(review): assumes the size
		 * table contains a 32 entry; if it did not, i would index
		 * one past the end below — confirm against the table. */
		for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++)
			if (values[i].size == 32)
				break;
	}

	agp_bridge->previous_size =
	    agp_bridge->current_size = (void *)(values + i);
	agp_bridge->aperture_size_idx = i;
	return values[i].size;
}
67 67
/*
 * Invalidate the GART TLB by pulsing the INVAL bit while keeping the
 * GART enabled.  Bridge revisions <= 0x30 additionally need a 2xRESET
 * pulse; U3 bridges also keep the PERFRD bit set.
 */
static void uninorth_tlbflush(struct agp_memory *mem)
{
	u32 ctrl = UNI_N_CFG_GART_ENABLE;

	if (is_u3)
		ctrl |= U3_N_CFG_GART_PERFRD;
	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
			       ctrl | UNI_N_CFG_GART_INVAL);
	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl);

	if (uninorth_rev <= 0x30) {
		/* Old revisions need an extra 2xRESET pulse. */
		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
				       ctrl | UNI_N_CFG_GART_2xRESET);
		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
				       ctrl);
	}
}
85 85
/*
 * Disable the GART on teardown: invalidate the TLB, then clear the
 * control register.  A no-op if the GART was never enabled.  Old
 * (<= 0x30) revisions get the same 2xRESET pulse as in
 * uninorth_tlbflush().
 */
static void uninorth_cleanup(void)
{
	u32 tmp;

	pci_read_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, &tmp);
	if (!(tmp & UNI_N_CFG_GART_ENABLE))
		return;
	tmp |= UNI_N_CFG_GART_INVAL;
	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, tmp);
	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, 0);

	if (uninorth_rev <= 0x30) {
		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
				       UNI_N_CFG_GART_2xRESET);
		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
				       0);
	}
}
104 104
/*
 * Program the UniNorth GART: write the GATT base + size value, force
 * the aperture to bus address 0 (hardware quirk, see below), and on U3
 * set the dummy (scratch) page.  Always returns 0.
 */
static int uninorth_configure(void)
{
	struct aper_size_info_32 *current_size;

	current_size = A_SIZE_32(agp_bridge->current_size);

	dev_info(&agp_bridge->dev->dev, "configuring for size idx: %d\n",
		 current_size->size_value);

	/* aperture size and gatt addr */
	pci_write_config_dword(agp_bridge->dev,
		UNI_N_CFG_GART_BASE,
		(agp_bridge->gatt_bus_addr & 0xfffff000)
		| current_size->size_value);

	/* HACK ALERT
	 * UniNorth seem to be buggy enough not to handle properly when
	 * the AGP aperture isn't mapped at bus physical address 0
	 */
	agp_bridge->gart_bus_addr = 0;
#ifdef CONFIG_PPC64
	/* Assume U3 or later on PPC64 systems */
	/* high 4 bits of GART physical address go in UNI_N_CFG_AGP_BASE */
	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_AGP_BASE,
			       (agp_bridge->gatt_bus_addr >> 32) & 0xf);
#else
	pci_write_config_dword(agp_bridge->dev,
		UNI_N_CFG_AGP_BASE, agp_bridge->gart_bus_addr);
#endif

	if (is_u3) {
		/* Point unused GART entries at the scratch page. */
		pci_write_config_dword(agp_bridge->dev,
				       UNI_N_CFG_GART_DUMMY_PAGE,
				       agp_bridge->scratch_page_real >> 12);
	}

	return 0;
}
143 143
/*
 * Map mem->page_count pages into the GART starting at entry pg_start.
 * Entries are little-endian (physical page address | valid bit 0); each
 * backing page and the touched GATT range are flushed from the data
 * cache so the bridge observes current data.
 *
 * Returns 0 on success, -EINVAL for an unsupported type or out-of-range
 * request, -EBUSY if a target entry is already in use.
 */
static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start,
				  int type)
{
	int i, j, num_entries;
	void *temp;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_32(temp)->num_entries;

	if (type != 0 || mem->type != 0)
		/* We know nothing of memory types */
		return -EINVAL;
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	/* Every target entry must currently be free. */
	while (j < (pg_start + mem->page_count)) {
		if (agp_bridge->gatt_table[j])
			return -EBUSY;
		j++;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		agp_bridge->gatt_table[j] =
		    cpu_to_le32((mem->memory[i] & 0xFFFFF000UL) | 0x1UL);
		flush_dcache_range((unsigned long)__va(mem->memory[i]),
				   (unsigned long)__va(mem->memory[i])+0x1000);
	}
	/* Read back one entry, then barrier + flush the written GATT range
	 * so the updates reach memory before the TLB flush. */
	(void)in_le32((volatile u32*)&agp_bridge->gatt_table[pg_start]);
	mb();
	flush_dcache_range((unsigned long)&agp_bridge->gatt_table[pg_start],
		(unsigned long)&agp_bridge->gatt_table[pg_start + mem->page_count]);

	uninorth_tlbflush(mem);
	return 0;
}
181 181
/*
 * U3 (G5) variant of insert_memory: GATT entries hold the page frame number
 * (not the physical address) with the top bit as the valid flag, and no
 * endian swap is applied.
 *
 * Returns 0 on success, -EINVAL for unknown types / out-of-range requests,
 * -EBUSY if a target entry is already occupied.
 */
static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, num_entries;
	void *temp;
	u32 *gp;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_32(temp)->num_entries;

	if (type != 0 || mem->type != 0)
		/* We know nothing of memory types */
		return -EINVAL;
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	gp = (u32 *) &agp_bridge->gatt_table[pg_start];
	for (i = 0; i < mem->page_count; ++i) {
		if (gp[i]) {
			dev_info(&agp_bridge->dev->dev,
				 "u3_insert_memory: entry 0x%x occupied (%x)\n",
				 i, gp[i]);
			return -EBUSY;
		}
	}

	for (i = 0; i < mem->page_count; i++) {
		/* PFN | valid bit (0x80000000) */
		gp[i] = (mem->memory[i] >> PAGE_SHIFT) | 0x80000000UL;
		flush_dcache_range((unsigned long)__va(mem->memory[i]),
				   (unsigned long)__va(mem->memory[i])+0x1000);
	}
	/* Barrier + flush the updated table span before invalidating the TLB */
	mb();
	flush_dcache_range((unsigned long)gp, (unsigned long) &gp[i]);
	uninorth_tlbflush(mem);

	return 0;
}
217 218
/*
 * Unmap @mem->page_count pages from the U3 GATT starting at @pg_start by
 * zeroing their entries, then flush caches and the bridge TLB.
 *
 * NOTE(review): unlike its siblings this function is not 'static' — it
 * appears to be referenced only via u3_agp_driver; confirm no external
 * users before narrowing the linkage.
 */
int u3_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	u32 *gp;

	if (type != 0 || mem->type != 0)
		/* We know nothing of memory types */
		return -EINVAL;

	gp = (u32 *) &agp_bridge->gatt_table[pg_start];
	for (i = 0; i < mem->page_count; ++i)
		gp[i] = 0;
	mb();
	flush_dcache_range((unsigned long)gp, (unsigned long) &gp[i]);
	uninorth_tlbflush(mem);

	return 0;
}
236 237
/*
 * Negotiate and enable the AGP link: merge bridge/device capabilities into a
 * command word, apply per-revision quirks, write the command register on the
 * bridge (with a retry loop — some revisions don't latch the first write),
 * then program the device side.
 */
static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	u32 command, scratch, status;
	int timeout;

	pci_read_config_dword(bridge->dev,
			      bridge->capndx + PCI_AGP_STATUS,
			      &status);

	command = agp_collect_device_status(bridge, mode, status);
	command |= PCI_AGP_COMMAND_AGP;

	if (uninorth_rev == 0x21) {
		/*
		 * Darwin disable AGP 4x on this revision, thus we
		 * may assume it's broken. This is an AGP2 controller.
		 */
		command &= ~AGPSTAT2_4X;
	}

	if ((uninorth_rev >= 0x30) && (uninorth_rev <= 0x33)) {
		/*
		 * We need to set REQ_DEPTH to 7 for U3 versions 1.0, 2.1,
		 * 2.2 and 2.3, Darwin do so.
		 */
		if ((command >> AGPSTAT_RQ_DEPTH_SHIFT) > 7)
			command = (command & ~AGPSTAT_RQ_DEPTH)
				| (7 << AGPSTAT_RQ_DEPTH_SHIFT);
	}

	uninorth_tlbflush(NULL);

	/* Retry writing the command register until the AGP enable bit
	 * sticks, up to 1000 attempts */
	timeout = 0;
	do {
		pci_write_config_dword(bridge->dev,
				       bridge->capndx + PCI_AGP_COMMAND,
				       command);
		pci_read_config_dword(bridge->dev,
				      bridge->capndx + PCI_AGP_COMMAND,
				      &scratch);
	} while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000);
	if ((scratch & PCI_AGP_COMMAND_AGP) == 0)
		dev_err(&bridge->dev->dev, "can't write UniNorth AGP "
			"command register\n");

	if (uninorth_rev >= 0x30) {
		/* This is an AGP V3 */
		agp_device_command(command, (status & AGPSTAT_MODE_3_0) != 0);
	} else {
		/* AGP V2 */
		agp_device_command(command, false);
	}

	uninorth_tlbflush(NULL);
}
292 293
293 #ifdef CONFIG_PM 294 #ifdef CONFIG_PM
294 /* 295 /*
295 * These Power Management routines are _not_ called by the normal PCI PM layer, 296 * These Power Management routines are _not_ called by the normal PCI PM layer,
296 * but directly by the video driver through function pointers in the device 297 * but directly by the video driver through function pointers in the device
297 * tree. 298 * tree.
298 */ 299 */
/*
 * Suspend hook (called by the video driver via platform function pointers,
 * not by PCI PM — see the comment above this block).
 *
 * Disables AGP on same-bus devices first, then on the bridge itself (saving
 * the bridge command word in dev_private_data for resume), and finally tears
 * down the GART. A second call while already suspended is a no-op.
 */
static int agp_uninorth_suspend(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge;
	u32 cmd;
	u8 agp;
	struct pci_dev *device = NULL;

	bridge = agp_find_bridge(pdev);
	if (bridge == NULL)
		return -ENODEV;

	/* Only one suspend supported */
	if (bridge->dev_private_data)
		return 0;

	/* turn off AGP on the video chip, if it was enabled */
	for_each_pci_dev(device) {
		/* Don't touch the bridge yet, device first */
		if (device == pdev)
			continue;
		/* Only deal with devices on the same bus here, no Mac has a P2P
		 * bridge on the AGP port, and mucking around the entire PCI
		 * tree is source of problems on some machines because of a bug
		 * in some versions of pci_find_capability() when hitting a dead
		 * device
		 */
		if (device->bus != pdev->bus)
			continue;
		agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;
		pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd);
		if (!(cmd & PCI_AGP_COMMAND_AGP))
			continue;
		dev_info(&pdev->dev, "disabling AGP on device %s\n",
			 pci_name(device));
		cmd &= ~PCI_AGP_COMMAND_AGP;
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd);
	}

	/* turn off AGP on the bridge */
	agp = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd);
	/* Remember the pre-suspend command word for agp_uninorth_resume() */
	bridge->dev_private_data = (void *)(long)cmd;
	if (cmd & PCI_AGP_COMMAND_AGP) {
		dev_info(&pdev->dev, "disabling AGP on bridge\n");
		cmd &= ~PCI_AGP_COMMAND_AGP;
		pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd);
	}
	/* turn off the GART */
	uninorth_cleanup();

	return 0;
}
354 354
355 static int agp_uninorth_resume(struct pci_dev *pdev) 355 static int agp_uninorth_resume(struct pci_dev *pdev)
356 { 356 {
357 struct agp_bridge_data *bridge; 357 struct agp_bridge_data *bridge;
358 u32 command; 358 u32 command;
359 359
360 bridge = agp_find_bridge(pdev); 360 bridge = agp_find_bridge(pdev);
361 if (bridge == NULL) 361 if (bridge == NULL)
362 return -ENODEV; 362 return -ENODEV;
363 363
364 command = (long)bridge->dev_private_data; 364 command = (long)bridge->dev_private_data;
365 bridge->dev_private_data = NULL; 365 bridge->dev_private_data = NULL;
366 if (!(command & PCI_AGP_COMMAND_AGP)) 366 if (!(command & PCI_AGP_COMMAND_AGP))
367 return 0; 367 return 0;
368 368
369 uninorth_agp_enable(bridge, command); 369 uninorth_agp_enable(bridge, command);
370 370
371 return 0; 371 return 0;
372 } 372 }
373 #endif /* CONFIG_PM */ 373 #endif /* CONFIG_PM */
374 374
/*
 * Allocate and zero a single-level GATT for the bridge.
 *
 * Tries the current aperture size first and falls back to progressively
 * smaller sizes if the page allocation fails. The pages are marked reserved
 * and the table is flushed from the data cache so the bridge sees the
 * zeroed entries.
 *
 * Returns 0 on success, -EINVAL for two-level GATTs (unsupported),
 * -ENOMEM if no aperture size could be allocated.
 */
static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* We can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	/* Walk down the aperture size list until an allocation succeeds */
	do {
		size = A_SIZE_32(temp)->size;
		page_order = A_SIZE_32(temp)->page_order;
		num_entries = A_SIZE_32(temp)->num_entries;

		table = (char *) __get_free_pages(GFP_KERNEL, page_order);

		if (table == NULL) {
			i++;
			bridge->current_size = A_IDX32(bridge);
		} else {
			bridge->aperture_size_idx = i;
		}
	} while (!table && (i < bridge->driver->num_aperture_sizes));

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	/* Reserve the backing pages so they are never swapped/freed */
	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	bridge->gatt_table = (u32 *)table;
	bridge->gatt_bus_addr = virt_to_gart(table);

	for (i = 0; i < num_entries; i++)
		bridge->gatt_table[i] = 0;

	flush_dcache_range((unsigned long)table, (unsigned long)table_end);

	return 0;
}
429 429
/*
 * Release the GATT allocated by uninorth_create_gatt_table(): clear the
 * page reservations and free the page block. Always returns 0.
 */
static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;
	page_order = A_SIZE_32(temp)->page_order;

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table.
	 */

	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_pages((unsigned long) bridge->gatt_table_real, page_order);

	return 0;
}
455 455
/*
 * cache_flush hook: no cache maintenance is needed here, just a memory
 * barrier to order prior GATT writes.
 */
void null_cache_flush(void)
{
	mb();
}
460 460
461 /* Setup function */ 461 /* Setup function */
462 462
/* Aperture size table for UniNorth: {size in MB, entries, order, value} */
static const struct aper_size_info_32 uninorth_sizes[7] =
{
#if 0 /* Not sure uninorth supports that high aperture sizes */
	{256, 65536, 6, 64},
	{128, 32768, 5, 32},
	{64, 16384, 4, 16},
#endif
	{32, 8192, 3, 8},
	{16, 4096, 2, 4},
	{8, 2048, 1, 2},
	{4, 1024, 0, 1}
};
475 475
476 /* 476 /*
477 * Not sure that u3 supports that high aperture sizes but it 477 * Not sure that u3 supports that high aperture sizes but it
478 * would strange if it did not :) 478 * would strange if it did not :)
479 */ 479 */
/* Aperture size table for U3 (G5): {size in MB, entries, order, value} */
static const struct aper_size_info_32 u3_sizes[8] =
{
	{512, 131072, 7, 128},
	{256, 65536, 6, 64},
	{128, 32768, 5, 32},
	{64, 16384, 4, 16},
	{32, 8192, 3, 8},
	{16, 4096, 2, 4},
	{8, 2048, 1, 2},
	{4, 1024, 0, 1}
};
491 491
/* Bridge driver ops for Core99 UniNorth (AGP 1.x/2.x) chipsets */
const struct agp_bridge_driver uninorth_agp_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= (void *)uninorth_sizes,
	.size_type		= U32_APER_SIZE,
	.num_aperture_sizes	= 4,
	.configure		= uninorth_configure,
	.fetch_size		= uninorth_fetch_size,
	.cleanup		= uninorth_cleanup,
	.tlb_flush		= uninorth_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= NULL,
	.cache_flush		= null_cache_flush,
	.agp_enable		= uninorth_agp_enable,
	.create_gatt_table	= uninorth_create_gatt_table,
	.free_gatt_table	= uninorth_free_gatt_table,
	.insert_memory		= uninorth_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
	.cant_use_aperture	= true,
};
516 516
/* Bridge driver ops for U3 (G5) chipsets; uses the U3-specific insert/remove
 * paths and requires a scratch page for unmapped entries */
const struct agp_bridge_driver u3_agp_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= (void *)u3_sizes,
	.size_type		= U32_APER_SIZE,
	.num_aperture_sizes	= 8,
	.configure		= uninorth_configure,
	.fetch_size		= uninorth_fetch_size,
	.cleanup		= uninorth_cleanup,
	.tlb_flush		= uninorth_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= NULL,
	.cache_flush		= null_cache_flush,
	.agp_enable		= uninorth_agp_enable,
	.create_gatt_table	= uninorth_create_gatt_table,
	.free_gatt_table	= uninorth_free_gatt_table,
	.insert_memory		= u3_insert_memory,
	.remove_memory		= u3_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
	.cant_use_aperture	= true,
	.needs_scratch_page	= true,
};
542 542
/* Known Apple AGP bridge device IDs and their human-readable names */
static struct agp_device_ids uninorth_agp_device_ids[] __devinitdata = {
	{
		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP,
		.chipset_name	= "UniNorth",
	},
	{
		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP_P,
		.chipset_name	= "UniNorth/Pangea",
	},
	{
		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP15,
		.chipset_name	= "UniNorth 1.5",
	},
	{
		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP2,
		.chipset_name	= "UniNorth 2",
	},
	{
		.device_id	= PCI_DEVICE_ID_APPLE_U3_AGP,
		.chipset_name	= "U3",
	},
	{
		.device_id	= PCI_DEVICE_ID_APPLE_U3L_AGP,
		.chipset_name	= "U3L",
	},
	{
		.device_id	= PCI_DEVICE_ID_APPLE_U3H_AGP,
		.chipset_name	= "U3H",
	},
	{
		.device_id	= PCI_DEVICE_ID_APPLE_IPID2_AGP,
		.chipset_name	= "UniNorth/Intrepid2",
	},
};
577 577
/*
 * PCI probe: match the device against the known Apple chipset table, read
 * the UniNorth/U3 revision from the Open Firmware device tree, register
 * suspend/resume hooks with the PowerMac platform code, and register an
 * agp_bridge_data with the appropriate driver ops (U3 vs UniNorth).
 */
static int __devinit agp_uninorth_probe(struct pci_dev *pdev,
					const struct pci_device_id *ent)
{
	struct agp_device_ids *devs = uninorth_agp_device_ids;
	struct agp_bridge_data *bridge;
	struct device_node *uninorth_node;
	u8 cap_ptr;
	int j;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (cap_ptr == 0)
		return -ENODEV;

	/* probe for known chipsets */
	for (j = 0; devs[j].chipset_name != NULL; ++j) {
		if (pdev->device == devs[j].device_id) {
			dev_info(&pdev->dev, "Apple %s chipset\n",
				 devs[j].chipset_name);
			goto found;
		}
	}

	dev_err(&pdev->dev, "unsupported Apple chipset [%04x/%04x]\n",
		pdev->vendor, pdev->device);
	return -ENODEV;

 found:
	/* Set revision to 0 if we could not read it. */
	uninorth_rev = 0;
	is_u3 = 0;
	/* Locate core99 Uni-N */
	uninorth_node = of_find_node_by_name(NULL, "uni-n");
	/* Locate G5 u3 */
	if (uninorth_node == NULL) {
		is_u3 = 1;
		uninorth_node = of_find_node_by_name(NULL, "u3");
	}
	if (uninorth_node) {
		const int *revprop = of_get_property(uninorth_node,
				"device-rev", NULL);
		if (revprop != NULL)
			uninorth_rev = *revprop & 0x3f;
		of_node_put(uninorth_node);
	}

#ifdef CONFIG_PM
	/* Inform platform of our suspend/resume caps */
	pmac_register_agp_pm(pdev, agp_uninorth_suspend, agp_uninorth_resume);
#endif

	/* Allocate & setup our driver */
	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	if (is_u3)
		bridge->driver = &u3_agp_driver;
	else
		bridge->driver = &uninorth_agp_driver;

	bridge->dev = pdev;
	bridge->capndx = cap_ptr;
	/* Fast writes are broken on these bridges; flag the erratum */
	bridge->flags = AGP_ERRATA_FASTWRITES;

	/* Fill in the mode register */
	pci_read_config_dword(pdev, cap_ptr+PCI_AGP_STATUS, &bridge->mode);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
648 648
/*
 * PCI remove: deregister the platform PM hooks and tear down the bridge
 * registered by agp_uninorth_probe().
 */
static void __devexit agp_uninorth_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

#ifdef CONFIG_PM
	/* Inform platform of our suspend/resume caps */
	pmac_register_agp_pm(pdev, NULL, NULL);
#endif

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}
661 661
/* Match any Apple host bridge; the probe routine then filters against the
 * explicit device-ID table above */
static struct pci_device_id agp_uninorth_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_APPLE,
	.device		= PCI_ANY_ID,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_uninorth_pci_table);
675 675
/* PCI driver glue for the UniNorth/U3 AGP bridge */
static struct pci_driver agp_uninorth_pci_driver = {
	.name		= "agpgart-uninorth",
	.id_table	= agp_uninorth_pci_table,
	.probe		= agp_uninorth_probe,
	.remove		= agp_uninorth_remove,
};
682 682
683 static int __init agp_uninorth_init(void) 683 static int __init agp_uninorth_init(void)
684 { 684 {
685 if (agp_off) 685 if (agp_off)
686 return -EINVAL; 686 return -EINVAL;
687 return pci_register_driver(&agp_uninorth_pci_driver); 687 return pci_register_driver(&agp_uninorth_pci_driver);
688 } 688 }
689 689
/* Module exit point: unregister the PCI driver */
static void __exit agp_uninorth_cleanup(void)
{
	pci_unregister_driver(&agp_uninorth_pci_driver);
}
694 694
module_init(agp_uninorth_init);
module_exit(agp_uninorth_cleanup);

/* "aperture=" parameter: requested aperture size as a string (e.g. "32M") */
module_param(aperture, charp, 0);
MODULE_PARM_DESC(aperture,
		 "Aperture size, must be power of two between 4MB and an\n"
		 "\t\tupper limit specific to the UniNorth revision.\n"
		 "\t\tDefault: 32M");

MODULE_AUTHOR("Ben Herrenschmidt & Paul Mackerras");
MODULE_LICENSE("GPL");