Commit 4280370efe0e9c527ccd6188d6211a10bcb22b1e
Committed by
Dave Jones
1 parent
4092e256ca
Exists in
master
and in
7 other branches
[AGPGART] remove unused variable
This patch removes an unused variable.

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Dave Jones <davej@redhat.com>
Showing 1 changed file with 0 additions and 2 deletions Inline Diff
drivers/char/agp/nvidia-agp.c
1 | /* | 1 | /* |
2 | * Nvidia AGPGART routines. | 2 | * Nvidia AGPGART routines. |
3 | * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up | 3 | * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up |
4 | * to work in 2.5 by Dave Jones <davej@codemonkey.org.uk> | 4 | * to work in 2.5 by Dave Jones <davej@codemonkey.org.uk> |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/pci.h> | 8 | #include <linux/pci.h> |
9 | #include <linux/init.h> | 9 | #include <linux/init.h> |
10 | #include <linux/agp_backend.h> | 10 | #include <linux/agp_backend.h> |
11 | #include <linux/gfp.h> | 11 | #include <linux/gfp.h> |
12 | #include <linux/page-flags.h> | 12 | #include <linux/page-flags.h> |
13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
14 | #include <linux/jiffies.h> | 14 | #include <linux/jiffies.h> |
15 | #include "agp.h" | 15 | #include "agp.h" |
16 | 16 | ||
17 | /* NVIDIA registers */ | 17 | /* NVIDIA registers */ |
18 | #define NVIDIA_0_APSIZE 0x80 | 18 | #define NVIDIA_0_APSIZE 0x80 |
19 | #define NVIDIA_1_WBC 0xf0 | 19 | #define NVIDIA_1_WBC 0xf0 |
20 | #define NVIDIA_2_GARTCTRL 0xd0 | 20 | #define NVIDIA_2_GARTCTRL 0xd0 |
21 | #define NVIDIA_2_APBASE 0xd8 | 21 | #define NVIDIA_2_APBASE 0xd8 |
22 | #define NVIDIA_2_APLIMIT 0xdc | 22 | #define NVIDIA_2_APLIMIT 0xdc |
23 | #define NVIDIA_2_ATTBASE(i) (0xe0 + (i) * 4) | 23 | #define NVIDIA_2_ATTBASE(i) (0xe0 + (i) * 4) |
24 | #define NVIDIA_3_APBASE 0x50 | 24 | #define NVIDIA_3_APBASE 0x50 |
25 | #define NVIDIA_3_APLIMIT 0x54 | 25 | #define NVIDIA_3_APLIMIT 0x54 |
26 | 26 | ||
27 | 27 | ||
28 | static struct _nvidia_private { | 28 | static struct _nvidia_private { |
29 | struct pci_dev *dev_1; | 29 | struct pci_dev *dev_1; |
30 | struct pci_dev *dev_2; | 30 | struct pci_dev *dev_2; |
31 | struct pci_dev *dev_3; | 31 | struct pci_dev *dev_3; |
32 | volatile u32 __iomem *aperture; | 32 | volatile u32 __iomem *aperture; |
33 | int num_active_entries; | 33 | int num_active_entries; |
34 | off_t pg_offset; | 34 | off_t pg_offset; |
35 | u32 wbc_mask; | 35 | u32 wbc_mask; |
36 | } nvidia_private; | 36 | } nvidia_private; |
37 | 37 | ||
38 | 38 | ||
39 | static int nvidia_fetch_size(void) | 39 | static int nvidia_fetch_size(void) |
40 | { | 40 | { |
41 | int i; | 41 | int i; |
42 | u8 size_value; | 42 | u8 size_value; |
43 | struct aper_size_info_8 *values; | 43 | struct aper_size_info_8 *values; |
44 | 44 | ||
45 | pci_read_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, &size_value); | 45 | pci_read_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, &size_value); |
46 | size_value &= 0x0f; | 46 | size_value &= 0x0f; |
47 | values = A_SIZE_8(agp_bridge->driver->aperture_sizes); | 47 | values = A_SIZE_8(agp_bridge->driver->aperture_sizes); |
48 | 48 | ||
49 | for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { | 49 | for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { |
50 | if (size_value == values[i].size_value) { | 50 | if (size_value == values[i].size_value) { |
51 | agp_bridge->previous_size = | 51 | agp_bridge->previous_size = |
52 | agp_bridge->current_size = (void *) (values + i); | 52 | agp_bridge->current_size = (void *) (values + i); |
53 | agp_bridge->aperture_size_idx = i; | 53 | agp_bridge->aperture_size_idx = i; |
54 | return values[i].size; | 54 | return values[i].size; |
55 | } | 55 | } |
56 | } | 56 | } |
57 | 57 | ||
58 | return 0; | 58 | return 0; |
59 | } | 59 | } |
60 | 60 | ||
61 | #define SYSCFG 0xC0010010 | 61 | #define SYSCFG 0xC0010010 |
62 | #define IORR_BASE0 0xC0010016 | 62 | #define IORR_BASE0 0xC0010016 |
63 | #define IORR_MASK0 0xC0010017 | 63 | #define IORR_MASK0 0xC0010017 |
64 | #define AMD_K7_NUM_IORR 2 | 64 | #define AMD_K7_NUM_IORR 2 |
65 | 65 | ||
66 | static int nvidia_init_iorr(u32 base, u32 size) | 66 | static int nvidia_init_iorr(u32 base, u32 size) |
67 | { | 67 | { |
68 | u32 base_hi, base_lo; | 68 | u32 base_hi, base_lo; |
69 | u32 mask_hi, mask_lo; | 69 | u32 mask_hi, mask_lo; |
70 | u32 sys_hi, sys_lo; | 70 | u32 sys_hi, sys_lo; |
71 | u32 iorr_addr, free_iorr_addr; | 71 | u32 iorr_addr, free_iorr_addr; |
72 | 72 | ||
73 | /* Find the iorr that is already used for the base */ | 73 | /* Find the iorr that is already used for the base */ |
74 | /* If not found, determine the uppermost available iorr */ | 74 | /* If not found, determine the uppermost available iorr */ |
75 | free_iorr_addr = AMD_K7_NUM_IORR; | 75 | free_iorr_addr = AMD_K7_NUM_IORR; |
76 | for (iorr_addr = 0; iorr_addr < AMD_K7_NUM_IORR; iorr_addr++) { | 76 | for (iorr_addr = 0; iorr_addr < AMD_K7_NUM_IORR; iorr_addr++) { |
77 | rdmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi); | 77 | rdmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi); |
78 | rdmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi); | 78 | rdmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi); |
79 | 79 | ||
80 | if ((base_lo & 0xfffff000) == (base & 0xfffff000)) | 80 | if ((base_lo & 0xfffff000) == (base & 0xfffff000)) |
81 | break; | 81 | break; |
82 | 82 | ||
83 | if ((mask_lo & 0x00000800) == 0) | 83 | if ((mask_lo & 0x00000800) == 0) |
84 | free_iorr_addr = iorr_addr; | 84 | free_iorr_addr = iorr_addr; |
85 | } | 85 | } |
86 | 86 | ||
87 | if (iorr_addr >= AMD_K7_NUM_IORR) { | 87 | if (iorr_addr >= AMD_K7_NUM_IORR) { |
88 | iorr_addr = free_iorr_addr; | 88 | iorr_addr = free_iorr_addr; |
89 | if (iorr_addr >= AMD_K7_NUM_IORR) | 89 | if (iorr_addr >= AMD_K7_NUM_IORR) |
90 | return -EINVAL; | 90 | return -EINVAL; |
91 | } | 91 | } |
92 | base_hi = 0x0; | 92 | base_hi = 0x0; |
93 | base_lo = (base & ~0xfff) | 0x18; | 93 | base_lo = (base & ~0xfff) | 0x18; |
94 | mask_hi = 0xf; | 94 | mask_hi = 0xf; |
95 | mask_lo = ((~(size - 1)) & 0xfffff000) | 0x800; | 95 | mask_lo = ((~(size - 1)) & 0xfffff000) | 0x800; |
96 | wrmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi); | 96 | wrmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi); |
97 | wrmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi); | 97 | wrmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi); |
98 | 98 | ||
99 | rdmsr(SYSCFG, sys_lo, sys_hi); | 99 | rdmsr(SYSCFG, sys_lo, sys_hi); |
100 | sys_lo |= 0x00100000; | 100 | sys_lo |= 0x00100000; |
101 | wrmsr(SYSCFG, sys_lo, sys_hi); | 101 | wrmsr(SYSCFG, sys_lo, sys_hi); |
102 | 102 | ||
103 | return 0; | 103 | return 0; |
104 | } | 104 | } |
105 | 105 | ||
106 | static int nvidia_configure(void) | 106 | static int nvidia_configure(void) |
107 | { | 107 | { |
108 | int i, rc, num_dirs; | 108 | int i, rc, num_dirs; |
109 | u32 apbase, aplimit; | 109 | u32 apbase, aplimit; |
110 | struct aper_size_info_8 *current_size; | 110 | struct aper_size_info_8 *current_size; |
111 | u32 temp; | 111 | u32 temp; |
112 | 112 | ||
113 | current_size = A_SIZE_8(agp_bridge->current_size); | 113 | current_size = A_SIZE_8(agp_bridge->current_size); |
114 | 114 | ||
115 | /* aperture size */ | 115 | /* aperture size */ |
116 | pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, | 116 | pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, |
117 | current_size->size_value); | 117 | current_size->size_value); |
118 | 118 | ||
119 | /* address to map to */ | 119 | /* address to map to */ |
120 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &apbase); | 120 | pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &apbase); |
121 | apbase &= PCI_BASE_ADDRESS_MEM_MASK; | 121 | apbase &= PCI_BASE_ADDRESS_MEM_MASK; |
122 | agp_bridge->gart_bus_addr = apbase; | 122 | agp_bridge->gart_bus_addr = apbase; |
123 | aplimit = apbase + (current_size->size * 1024 * 1024) - 1; | 123 | aplimit = apbase + (current_size->size * 1024 * 1024) - 1; |
124 | pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase); | 124 | pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase); |
125 | pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APLIMIT, aplimit); | 125 | pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APLIMIT, aplimit); |
126 | pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APBASE, apbase); | 126 | pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APBASE, apbase); |
127 | pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APLIMIT, aplimit); | 127 | pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APLIMIT, aplimit); |
128 | if (0 != (rc = nvidia_init_iorr(apbase, current_size->size * 1024 * 1024))) | 128 | if (0 != (rc = nvidia_init_iorr(apbase, current_size->size * 1024 * 1024))) |
129 | return rc; | 129 | return rc; |
130 | 130 | ||
131 | /* directory size is 64k */ | 131 | /* directory size is 64k */ |
132 | num_dirs = current_size->size / 64; | 132 | num_dirs = current_size->size / 64; |
133 | nvidia_private.num_active_entries = current_size->num_entries; | 133 | nvidia_private.num_active_entries = current_size->num_entries; |
134 | nvidia_private.pg_offset = 0; | 134 | nvidia_private.pg_offset = 0; |
135 | if (num_dirs == 0) { | 135 | if (num_dirs == 0) { |
136 | num_dirs = 1; | 136 | num_dirs = 1; |
137 | nvidia_private.num_active_entries /= (64 / current_size->size); | 137 | nvidia_private.num_active_entries /= (64 / current_size->size); |
138 | nvidia_private.pg_offset = (apbase & (64 * 1024 * 1024 - 1) & | 138 | nvidia_private.pg_offset = (apbase & (64 * 1024 * 1024 - 1) & |
139 | ~(current_size->size * 1024 * 1024 - 1)) / PAGE_SIZE; | 139 | ~(current_size->size * 1024 * 1024 - 1)) / PAGE_SIZE; |
140 | } | 140 | } |
141 | 141 | ||
142 | /* attbase */ | 142 | /* attbase */ |
143 | for (i = 0; i < 8; i++) { | 143 | for (i = 0; i < 8; i++) { |
144 | pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_ATTBASE(i), | 144 | pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_ATTBASE(i), |
145 | (agp_bridge->gatt_bus_addr + (i % num_dirs) * 64 * 1024) | 1); | 145 | (agp_bridge->gatt_bus_addr + (i % num_dirs) * 64 * 1024) | 1); |
146 | } | 146 | } |
147 | 147 | ||
148 | /* gtlb control */ | 148 | /* gtlb control */ |
149 | pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp); | 149 | pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp); |
150 | pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp | 0x11); | 150 | pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp | 0x11); |
151 | 151 | ||
152 | /* gart control */ | 152 | /* gart control */ |
153 | pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp); | 153 | pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp); |
154 | pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp | 0x100); | 154 | pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp | 0x100); |
155 | 155 | ||
156 | /* map aperture */ | 156 | /* map aperture */ |
157 | nvidia_private.aperture = | 157 | nvidia_private.aperture = |
158 | (volatile u32 __iomem *) ioremap(apbase, 33 * PAGE_SIZE); | 158 | (volatile u32 __iomem *) ioremap(apbase, 33 * PAGE_SIZE); |
159 | 159 | ||
160 | return 0; | 160 | return 0; |
161 | } | 161 | } |
162 | 162 | ||
163 | static void nvidia_cleanup(void) | 163 | static void nvidia_cleanup(void) |
164 | { | 164 | { |
165 | struct aper_size_info_8 *previous_size; | 165 | struct aper_size_info_8 *previous_size; |
166 | u32 temp; | 166 | u32 temp; |
167 | 167 | ||
168 | /* gart control */ | 168 | /* gart control */ |
169 | pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp); | 169 | pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp); |
170 | pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp & ~(0x100)); | 170 | pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp & ~(0x100)); |
171 | 171 | ||
172 | /* gtlb control */ | 172 | /* gtlb control */ |
173 | pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp); | 173 | pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp); |
174 | pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp & ~(0x11)); | 174 | pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp & ~(0x11)); |
175 | 175 | ||
176 | /* unmap aperture */ | 176 | /* unmap aperture */ |
177 | iounmap((void __iomem *) nvidia_private.aperture); | 177 | iounmap((void __iomem *) nvidia_private.aperture); |
178 | 178 | ||
179 | /* restore previous aperture size */ | 179 | /* restore previous aperture size */ |
180 | previous_size = A_SIZE_8(agp_bridge->previous_size); | 180 | previous_size = A_SIZE_8(agp_bridge->previous_size); |
181 | pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, | 181 | pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, |
182 | previous_size->size_value); | 182 | previous_size->size_value); |
183 | 183 | ||
184 | /* restore iorr for previous aperture size */ | 184 | /* restore iorr for previous aperture size */ |
185 | nvidia_init_iorr(agp_bridge->gart_bus_addr, | 185 | nvidia_init_iorr(agp_bridge->gart_bus_addr, |
186 | previous_size->size * 1024 * 1024); | 186 | previous_size->size * 1024 * 1024); |
187 | } | 187 | } |
188 | 188 | ||
189 | 189 | ||
190 | /* | 190 | /* |
191 | * Note we can't use the generic routines, even though they are 99% the same. | 191 | * Note we can't use the generic routines, even though they are 99% the same. |
192 | * Aperture sizes <64M still requires a full 64k GART directory, but | 192 | * Aperture sizes <64M still requires a full 64k GART directory, but |
193 | * only use the portion of the TLB entries that correspond to the apertures | 193 | * only use the portion of the TLB entries that correspond to the apertures |
194 | * alignment inside the surrounding 64M block. | 194 | * alignment inside the surrounding 64M block. |
195 | */ | 195 | */ |
196 | extern int agp_memory_reserved; | 196 | extern int agp_memory_reserved; |
197 | 197 | ||
198 | static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type) | 198 | static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type) |
199 | { | 199 | { |
200 | int i, j; | 200 | int i, j; |
201 | 201 | ||
202 | if ((type != 0) || (mem->type != 0)) | 202 | if ((type != 0) || (mem->type != 0)) |
203 | return -EINVAL; | 203 | return -EINVAL; |
204 | 204 | ||
205 | if ((pg_start + mem->page_count) > | 205 | if ((pg_start + mem->page_count) > |
206 | (nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE)) | 206 | (nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE)) |
207 | return -EINVAL; | 207 | return -EINVAL; |
208 | 208 | ||
209 | for (j = pg_start; j < (pg_start + mem->page_count); j++) { | 209 | for (j = pg_start; j < (pg_start + mem->page_count); j++) { |
210 | if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j))) | 210 | if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j))) |
211 | return -EBUSY; | 211 | return -EBUSY; |
212 | } | 212 | } |
213 | 213 | ||
214 | if (mem->is_flushed == FALSE) { | 214 | if (mem->is_flushed == FALSE) { |
215 | global_cache_flush(); | 215 | global_cache_flush(); |
216 | mem->is_flushed = TRUE; | 216 | mem->is_flushed = TRUE; |
217 | } | 217 | } |
218 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | 218 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { |
219 | writel(agp_bridge->driver->mask_memory(agp_bridge, | 219 | writel(agp_bridge->driver->mask_memory(agp_bridge, |
220 | mem->memory[i], mem->type), | 220 | mem->memory[i], mem->type), |
221 | agp_bridge->gatt_table+nvidia_private.pg_offset+j); | 221 | agp_bridge->gatt_table+nvidia_private.pg_offset+j); |
222 | readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j); /* PCI Posting. */ | 222 | readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j); /* PCI Posting. */ |
223 | } | 223 | } |
224 | agp_bridge->driver->tlb_flush(mem); | 224 | agp_bridge->driver->tlb_flush(mem); |
225 | return 0; | 225 | return 0; |
226 | } | 226 | } |
227 | 227 | ||
228 | 228 | ||
229 | static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type) | 229 | static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type) |
230 | { | 230 | { |
231 | int i; | 231 | int i; |
232 | 232 | ||
233 | if ((type != 0) || (mem->type != 0)) | 233 | if ((type != 0) || (mem->type != 0)) |
234 | return -EINVAL; | 234 | return -EINVAL; |
235 | 235 | ||
236 | for (i = pg_start; i < (mem->page_count + pg_start); i++) | 236 | for (i = pg_start; i < (mem->page_count + pg_start); i++) |
237 | writel(agp_bridge->scratch_page, agp_bridge->gatt_table+nvidia_private.pg_offset+i); | 237 | writel(agp_bridge->scratch_page, agp_bridge->gatt_table+nvidia_private.pg_offset+i); |
238 | 238 | ||
239 | agp_bridge->driver->tlb_flush(mem); | 239 | agp_bridge->driver->tlb_flush(mem); |
240 | return 0; | 240 | return 0; |
241 | } | 241 | } |
242 | 242 | ||
243 | 243 | ||
244 | static void nvidia_tlbflush(struct agp_memory *mem) | 244 | static void nvidia_tlbflush(struct agp_memory *mem) |
245 | { | 245 | { |
246 | unsigned long end; | 246 | unsigned long end; |
247 | u32 wbc_reg, temp; | 247 | u32 wbc_reg, temp; |
248 | int i; | 248 | int i; |
249 | 249 | ||
250 | /* flush chipset */ | 250 | /* flush chipset */ |
251 | if (nvidia_private.wbc_mask) { | 251 | if (nvidia_private.wbc_mask) { |
252 | pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg); | 252 | pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg); |
253 | wbc_reg |= nvidia_private.wbc_mask; | 253 | wbc_reg |= nvidia_private.wbc_mask; |
254 | pci_write_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, wbc_reg); | 254 | pci_write_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, wbc_reg); |
255 | 255 | ||
256 | end = jiffies + 3*HZ; | 256 | end = jiffies + 3*HZ; |
257 | do { | 257 | do { |
258 | pci_read_config_dword(nvidia_private.dev_1, | 258 | pci_read_config_dword(nvidia_private.dev_1, |
259 | NVIDIA_1_WBC, &wbc_reg); | 259 | NVIDIA_1_WBC, &wbc_reg); |
260 | if (time_before_eq(end, jiffies)) { | 260 | if (time_before_eq(end, jiffies)) { |
261 | printk(KERN_ERR PFX | 261 | printk(KERN_ERR PFX |
262 | "TLB flush took more than 3 seconds.\n"); | 262 | "TLB flush took more than 3 seconds.\n"); |
263 | } | 263 | } |
264 | } while (wbc_reg & nvidia_private.wbc_mask); | 264 | } while (wbc_reg & nvidia_private.wbc_mask); |
265 | } | 265 | } |
266 | 266 | ||
267 | /* flush TLB entries */ | 267 | /* flush TLB entries */ |
268 | for (i = 0; i < 32 + 1; i++) | 268 | for (i = 0; i < 32 + 1; i++) |
269 | temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32))); | 269 | temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32))); |
270 | for (i = 0; i < 32 + 1; i++) | 270 | for (i = 0; i < 32 + 1; i++) |
271 | temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32))); | 271 | temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32))); |
272 | } | 272 | } |
273 | 273 | ||
274 | 274 | ||
275 | static struct aper_size_info_8 nvidia_generic_sizes[5] = | 275 | static struct aper_size_info_8 nvidia_generic_sizes[5] = |
276 | { | 276 | { |
277 | {512, 131072, 7, 0}, | 277 | {512, 131072, 7, 0}, |
278 | {256, 65536, 6, 8}, | 278 | {256, 65536, 6, 8}, |
279 | {128, 32768, 5, 12}, | 279 | {128, 32768, 5, 12}, |
280 | {64, 16384, 4, 14}, | 280 | {64, 16384, 4, 14}, |
281 | /* The 32M mode still requires a 64k gatt */ | 281 | /* The 32M mode still requires a 64k gatt */ |
282 | {32, 16384, 4, 15} | 282 | {32, 16384, 4, 15} |
283 | }; | 283 | }; |
284 | 284 | ||
285 | 285 | ||
286 | static struct gatt_mask nvidia_generic_masks[] = | 286 | static struct gatt_mask nvidia_generic_masks[] = |
287 | { | 287 | { |
288 | { .mask = 1, .type = 0} | 288 | { .mask = 1, .type = 0} |
289 | }; | 289 | }; |
290 | 290 | ||
291 | 291 | ||
292 | static struct agp_bridge_driver nvidia_driver = { | 292 | static struct agp_bridge_driver nvidia_driver = { |
293 | .owner = THIS_MODULE, | 293 | .owner = THIS_MODULE, |
294 | .aperture_sizes = nvidia_generic_sizes, | 294 | .aperture_sizes = nvidia_generic_sizes, |
295 | .size_type = U8_APER_SIZE, | 295 | .size_type = U8_APER_SIZE, |
296 | .num_aperture_sizes = 5, | 296 | .num_aperture_sizes = 5, |
297 | .configure = nvidia_configure, | 297 | .configure = nvidia_configure, |
298 | .fetch_size = nvidia_fetch_size, | 298 | .fetch_size = nvidia_fetch_size, |
299 | .cleanup = nvidia_cleanup, | 299 | .cleanup = nvidia_cleanup, |
300 | .tlb_flush = nvidia_tlbflush, | 300 | .tlb_flush = nvidia_tlbflush, |
301 | .mask_memory = agp_generic_mask_memory, | 301 | .mask_memory = agp_generic_mask_memory, |
302 | .masks = nvidia_generic_masks, | 302 | .masks = nvidia_generic_masks, |
303 | .agp_enable = agp_generic_enable, | 303 | .agp_enable = agp_generic_enable, |
304 | .cache_flush = global_cache_flush, | 304 | .cache_flush = global_cache_flush, |
305 | .create_gatt_table = agp_generic_create_gatt_table, | 305 | .create_gatt_table = agp_generic_create_gatt_table, |
306 | .free_gatt_table = agp_generic_free_gatt_table, | 306 | .free_gatt_table = agp_generic_free_gatt_table, |
307 | .insert_memory = nvidia_insert_memory, | 307 | .insert_memory = nvidia_insert_memory, |
308 | .remove_memory = nvidia_remove_memory, | 308 | .remove_memory = nvidia_remove_memory, |
309 | .alloc_by_type = agp_generic_alloc_by_type, | 309 | .alloc_by_type = agp_generic_alloc_by_type, |
310 | .free_by_type = agp_generic_free_by_type, | 310 | .free_by_type = agp_generic_free_by_type, |
311 | .agp_alloc_page = agp_generic_alloc_page, | 311 | .agp_alloc_page = agp_generic_alloc_page, |
312 | .agp_destroy_page = agp_generic_destroy_page, | 312 | .agp_destroy_page = agp_generic_destroy_page, |
313 | }; | 313 | }; |
314 | 314 | ||
315 | static int __devinit agp_nvidia_probe(struct pci_dev *pdev, | 315 | static int __devinit agp_nvidia_probe(struct pci_dev *pdev, |
316 | const struct pci_device_id *ent) | 316 | const struct pci_device_id *ent) |
317 | { | 317 | { |
318 | struct agp_bridge_data *bridge; | 318 | struct agp_bridge_data *bridge; |
319 | u8 cap_ptr; | 319 | u8 cap_ptr; |
320 | 320 | ||
321 | nvidia_private.dev_1 = | 321 | nvidia_private.dev_1 = |
322 | pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 1)); | 322 | pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 1)); |
323 | nvidia_private.dev_2 = | 323 | nvidia_private.dev_2 = |
324 | pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 2)); | 324 | pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 2)); |
325 | nvidia_private.dev_3 = | 325 | nvidia_private.dev_3 = |
326 | pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(30, 0)); | 326 | pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(30, 0)); |
327 | 327 | ||
328 | if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) { | 328 | if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) { |
329 | printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 " | 329 | printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 " |
330 | "chipset, but could not find the secondary devices.\n"); | 330 | "chipset, but could not find the secondary devices.\n"); |
331 | return -ENODEV; | 331 | return -ENODEV; |
332 | } | 332 | } |
333 | 333 | ||
334 | cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); | 334 | cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); |
335 | if (!cap_ptr) | 335 | if (!cap_ptr) |
336 | return -ENODEV; | 336 | return -ENODEV; |
337 | 337 | ||
338 | switch (pdev->device) { | 338 | switch (pdev->device) { |
339 | case PCI_DEVICE_ID_NVIDIA_NFORCE: | 339 | case PCI_DEVICE_ID_NVIDIA_NFORCE: |
340 | printk(KERN_INFO PFX "Detected NVIDIA nForce chipset\n"); | 340 | printk(KERN_INFO PFX "Detected NVIDIA nForce chipset\n"); |
341 | nvidia_private.wbc_mask = 0x00010000; | 341 | nvidia_private.wbc_mask = 0x00010000; |
342 | break; | 342 | break; |
343 | case PCI_DEVICE_ID_NVIDIA_NFORCE2: | 343 | case PCI_DEVICE_ID_NVIDIA_NFORCE2: |
344 | printk(KERN_INFO PFX "Detected NVIDIA nForce2 chipset\n"); | 344 | printk(KERN_INFO PFX "Detected NVIDIA nForce2 chipset\n"); |
345 | nvidia_private.wbc_mask = 0x80000000; | 345 | nvidia_private.wbc_mask = 0x80000000; |
346 | break; | 346 | break; |
347 | default: | 347 | default: |
348 | printk(KERN_ERR PFX "Unsupported NVIDIA chipset (device id: %04x)\n", | 348 | printk(KERN_ERR PFX "Unsupported NVIDIA chipset (device id: %04x)\n", |
349 | pdev->device); | 349 | pdev->device); |
350 | return -ENODEV; | 350 | return -ENODEV; |
351 | } | 351 | } |
352 | 352 | ||
353 | bridge = agp_alloc_bridge(); | 353 | bridge = agp_alloc_bridge(); |
354 | if (!bridge) | 354 | if (!bridge) |
355 | return -ENOMEM; | 355 | return -ENOMEM; |
356 | 356 | ||
357 | bridge->driver = &nvidia_driver; | 357 | bridge->driver = &nvidia_driver; |
358 | bridge->dev_private_data = &nvidia_private, | 358 | bridge->dev_private_data = &nvidia_private, |
359 | bridge->dev = pdev; | 359 | bridge->dev = pdev; |
360 | bridge->capndx = cap_ptr; | 360 | bridge->capndx = cap_ptr; |
361 | 361 | ||
362 | /* Fill in the mode register */ | 362 | /* Fill in the mode register */ |
363 | pci_read_config_dword(pdev, | 363 | pci_read_config_dword(pdev, |
364 | bridge->capndx+PCI_AGP_STATUS, | 364 | bridge->capndx+PCI_AGP_STATUS, |
365 | &bridge->mode); | 365 | &bridge->mode); |
366 | 366 | ||
367 | pci_set_drvdata(pdev, bridge); | 367 | pci_set_drvdata(pdev, bridge); |
368 | return agp_add_bridge(bridge); | 368 | return agp_add_bridge(bridge); |
369 | } | 369 | } |
370 | 370 | ||
371 | static void __devexit agp_nvidia_remove(struct pci_dev *pdev) | 371 | static void __devexit agp_nvidia_remove(struct pci_dev *pdev) |
372 | { | 372 | { |
373 | struct agp_bridge_data *bridge = pci_get_drvdata(pdev); | 373 | struct agp_bridge_data *bridge = pci_get_drvdata(pdev); |
374 | 374 | ||
375 | agp_remove_bridge(bridge); | 375 | agp_remove_bridge(bridge); |
376 | agp_put_bridge(bridge); | 376 | agp_put_bridge(bridge); |
377 | } | 377 | } |
378 | 378 | ||
379 | #ifdef CONFIG_PM | 379 | #ifdef CONFIG_PM |
380 | static int agp_nvidia_suspend(struct pci_dev *pdev, pm_message_t state) | 380 | static int agp_nvidia_suspend(struct pci_dev *pdev, pm_message_t state) |
381 | { | 381 | { |
382 | pci_save_state (pdev); | 382 | pci_save_state (pdev); |
383 | pci_set_power_state (pdev, 3); | 383 | pci_set_power_state (pdev, 3); |
384 | 384 | ||
385 | return 0; | 385 | return 0; |
386 | } | 386 | } |
387 | 387 | ||
388 | static int agp_nvidia_resume(struct pci_dev *pdev) | 388 | static int agp_nvidia_resume(struct pci_dev *pdev) |
389 | { | 389 | { |
390 | struct agp_bridge_data *bridge = pci_get_drvdata(pdev); | ||
391 | |||
392 | /* set power state 0 and restore PCI space */ | 390 | /* set power state 0 and restore PCI space */ |
393 | pci_set_power_state (pdev, 0); | 391 | pci_set_power_state (pdev, 0); |
394 | pci_restore_state(pdev); | 392 | pci_restore_state(pdev); |
395 | 393 | ||
396 | /* reconfigure AGP hardware again */ | 394 | /* reconfigure AGP hardware again */ |
397 | nvidia_configure(); | 395 | nvidia_configure(); |
398 | 396 | ||
399 | return 0; | 397 | return 0; |
400 | } | 398 | } |
401 | #endif | 399 | #endif |
402 | 400 | ||
403 | 401 | ||
404 | static struct pci_device_id agp_nvidia_pci_table[] = { | 402 | static struct pci_device_id agp_nvidia_pci_table[] = { |
405 | { | 403 | { |
406 | .class = (PCI_CLASS_BRIDGE_HOST << 8), | 404 | .class = (PCI_CLASS_BRIDGE_HOST << 8), |
407 | .class_mask = ~0, | 405 | .class_mask = ~0, |
408 | .vendor = PCI_VENDOR_ID_NVIDIA, | 406 | .vendor = PCI_VENDOR_ID_NVIDIA, |
409 | .device = PCI_DEVICE_ID_NVIDIA_NFORCE, | 407 | .device = PCI_DEVICE_ID_NVIDIA_NFORCE, |
410 | .subvendor = PCI_ANY_ID, | 408 | .subvendor = PCI_ANY_ID, |
411 | .subdevice = PCI_ANY_ID, | 409 | .subdevice = PCI_ANY_ID, |
412 | }, | 410 | }, |
413 | { | 411 | { |
414 | .class = (PCI_CLASS_BRIDGE_HOST << 8), | 412 | .class = (PCI_CLASS_BRIDGE_HOST << 8), |
415 | .class_mask = ~0, | 413 | .class_mask = ~0, |
416 | .vendor = PCI_VENDOR_ID_NVIDIA, | 414 | .vendor = PCI_VENDOR_ID_NVIDIA, |
417 | .device = PCI_DEVICE_ID_NVIDIA_NFORCE2, | 415 | .device = PCI_DEVICE_ID_NVIDIA_NFORCE2, |
418 | .subvendor = PCI_ANY_ID, | 416 | .subvendor = PCI_ANY_ID, |
419 | .subdevice = PCI_ANY_ID, | 417 | .subdevice = PCI_ANY_ID, |
420 | }, | 418 | }, |
421 | { } | 419 | { } |
422 | }; | 420 | }; |
423 | 421 | ||
424 | MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table); | 422 | MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table); |
425 | 423 | ||
426 | static struct pci_driver agp_nvidia_pci_driver = { | 424 | static struct pci_driver agp_nvidia_pci_driver = { |
427 | .name = "agpgart-nvidia", | 425 | .name = "agpgart-nvidia", |
428 | .id_table = agp_nvidia_pci_table, | 426 | .id_table = agp_nvidia_pci_table, |
429 | .probe = agp_nvidia_probe, | 427 | .probe = agp_nvidia_probe, |
430 | .remove = agp_nvidia_remove, | 428 | .remove = agp_nvidia_remove, |
431 | #ifdef CONFIG_PM | 429 | #ifdef CONFIG_PM |
432 | .suspend = agp_nvidia_suspend, | 430 | .suspend = agp_nvidia_suspend, |
433 | .resume = agp_nvidia_resume, | 431 | .resume = agp_nvidia_resume, |
434 | #endif | 432 | #endif |
435 | }; | 433 | }; |
436 | 434 | ||
437 | static int __init agp_nvidia_init(void) | 435 | static int __init agp_nvidia_init(void) |
438 | { | 436 | { |
439 | if (agp_off) | 437 | if (agp_off) |
440 | return -EINVAL; | 438 | return -EINVAL; |
441 | return pci_register_driver(&agp_nvidia_pci_driver); | 439 | return pci_register_driver(&agp_nvidia_pci_driver); |
442 | } | 440 | } |
443 | 441 | ||
444 | static void __exit agp_nvidia_cleanup(void) | 442 | static void __exit agp_nvidia_cleanup(void) |
445 | { | 443 | { |
446 | pci_unregister_driver(&agp_nvidia_pci_driver); | 444 | pci_unregister_driver(&agp_nvidia_pci_driver); |
447 | } | 445 | } |
448 | 446 | ||
449 | module_init(agp_nvidia_init); | 447 | module_init(agp_nvidia_init); |
450 | module_exit(agp_nvidia_cleanup); | 448 | module_exit(agp_nvidia_cleanup); |
451 | 449 | ||
452 | MODULE_LICENSE("GPL and additional rights"); | 450 | MODULE_LICENSE("GPL and additional rights"); |
453 | MODULE_AUTHOR("NVIDIA Corporation"); | 451 | MODULE_AUTHOR("NVIDIA Corporation"); |
454 | 452 | ||
455 | 453 |