Commit b1e3a6d1c4d0ac75ad8289bcfd69efcc9b1bc6e5
Committed by Dave Airlie
1 parent 696d4df1db
Exists in master and in 7 other branches
drm/radeon: Clear surface registers at initialization time.
Some PowerMac firmwares set up a tiling surface at the beginning of VRAM
which messes us up otherwise.

Signed-off-by: Michel Dänzer <daenzer@vmware.com>
Signed-off-by: Dave Airlie <airlied@linux.ie>
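In short, the commit adds a radeon_surface_init() helper, called from
radeon_device_init() right after the scratch registers are set up, that
zeroes the eight surface registers on pre-R600 parts so any tiling setup
left behind by the firmware is gone before the driver uses VRAM. The core
of it is the loop below (register names as in the hunk that follows; the
stride between surfaces is derived from the first two register offsets):

	for (i = 0; i < 8; i++) {
		/* zero SURFACEn_INFO so no stale firmware tiling survives */
		WREG32(RADEON_SURFACE0_INFO +
		       i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), 0);
	}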
Showing 1 changed file with 20 additions and 0 deletions
drivers/gpu/drm/radeon/radeon_device.c
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1,813 +1,833 @@
 /*
  * Copyright 2008 Advanced Micro Devices, Inc.
  * Copyright 2008 Red Hat Inc.
  * Copyright 2009 Jerome Glisse.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice shall be included in
  * all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  * Authors: Dave Airlie
  *          Alex Deucher
  *          Jerome Glisse
  */
 #include <linux/console.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 #include "radeon_asic.h"
 #include "atom.h"

 /*
+ * Clear GPU surface registers.
+ */
+static void radeon_surface_init(struct radeon_device *rdev)
+{
+	/* FIXME: check this out */
+	if (rdev->family < CHIP_R600) {
+		int i;
+
+		for (i = 0; i < 8; i++) {
+			WREG32(RADEON_SURFACE0_INFO +
+			       i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
+			       0);
+		}
+	}
+}
+
+/*
  * GPU scratch registers helpers function.
  */
 static void radeon_scratch_init(struct radeon_device *rdev)
 {
 	int i;

 	/* FIXME: check this out */
 	if (rdev->family < CHIP_R300) {
 		rdev->scratch.num_reg = 5;
 	} else {
 		rdev->scratch.num_reg = 7;
 	}
 	for (i = 0; i < rdev->scratch.num_reg; i++) {
 		rdev->scratch.free[i] = true;
 		rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
 	}
 }

 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
 {
 	int i;

 	for (i = 0; i < rdev->scratch.num_reg; i++) {
 		if (rdev->scratch.free[i]) {
 			rdev->scratch.free[i] = false;
 			*reg = rdev->scratch.reg[i];
 			return 0;
 		}
 	}
 	return -EINVAL;
 }

 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
 {
 	int i;

 	for (i = 0; i < rdev->scratch.num_reg; i++) {
 		if (rdev->scratch.reg[i] == reg) {
 			rdev->scratch.free[i] = true;
 			return;
 		}
 	}
 }

 /*
  * MC common functions
  */
 int radeon_mc_setup(struct radeon_device *rdev)
 {
 	uint32_t tmp;

 	/* Some chips have an "issue" with the memory controller, the
 	 * location must be aligned to the size. We just align it down,
 	 * too bad if we walk over the top of system memory, we don't
 	 * use DMA without a remapped anyway.
 	 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
 	 */
 	/* FGLRX seems to setup like this, VRAM a 0, then GART.
 	 */
 	/*
 	 * Note: from R6xx the address space is 40bits but here we only
 	 * use 32bits (still have to see a card which would exhaust 4G
 	 * address space).
 	 */
 	if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
 		/* vram location was already setup try to put gtt after
 		 * if it fits */
 		tmp = rdev->mc.vram_location + rdev->mc.vram_size;
 		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
 		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
 			rdev->mc.gtt_location = tmp;
 		} else {
 			if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
 				printk(KERN_ERR "[drm] GTT too big to fit "
 				       "before or after vram location.\n");
 				return -EINVAL;
 			}
 			rdev->mc.gtt_location = 0;
 		}
 	} else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
 		/* gtt location was already setup try to put vram before
 		 * if it fits */
 		if (rdev->mc.vram_size < rdev->mc.gtt_location) {
 			rdev->mc.vram_location = 0;
 		} else {
 			tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
 			tmp += (rdev->mc.vram_size - 1);
 			tmp &= ~(rdev->mc.vram_size - 1);
 			if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) {
 				rdev->mc.vram_location = tmp;
 			} else {
 				printk(KERN_ERR "[drm] vram too big to fit "
 				       "before or after GTT location.\n");
 				return -EINVAL;
 			}
 		}
 	} else {
 		rdev->mc.vram_location = 0;
 		rdev->mc.gtt_location = rdev->mc.vram_size;
 	}
 	DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20);
 	DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
 		 rdev->mc.vram_location,
 		 rdev->mc.vram_location + rdev->mc.vram_size - 1);
 	DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
 	DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
 		 rdev->mc.gtt_location,
 		 rdev->mc.gtt_location + rdev->mc.gtt_size - 1);
 	return 0;
 }


 /*
  * GPU helpers function.
  */
 static bool radeon_card_posted(struct radeon_device *rdev)
 {
 	uint32_t reg;

 	/* first check CRTCs */
 	if (ASIC_IS_AVIVO(rdev)) {
 		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
 		      RREG32(AVIVO_D2CRTC_CONTROL);
 		if (reg & AVIVO_CRTC_EN) {
 			return true;
 		}
 	} else {
 		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
 		      RREG32(RADEON_CRTC2_GEN_CNTL);
 		if (reg & RADEON_CRTC_EN) {
 			return true;
 		}
 	}

 	/* then check MEM_SIZE, in case the crtcs are off */
 	if (rdev->family >= CHIP_R600)
 		reg = RREG32(R600_CONFIG_MEMSIZE);
 	else
 		reg = RREG32(RADEON_CONFIG_MEMSIZE);

 	if (reg)
 		return true;

 	return false;

 }


 /*
  * Registers accessors functions.
  */
 uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
 {
 	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
 	BUG_ON(1);
 	return 0;
 }

 void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
 	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
 		  reg, v);
 	BUG_ON(1);
 }

 void radeon_register_accessor_init(struct radeon_device *rdev)
 {
 	rdev->mm_rreg = &r100_mm_rreg;
 	rdev->mm_wreg = &r100_mm_wreg;
 	rdev->mc_rreg = &radeon_invalid_rreg;
 	rdev->mc_wreg = &radeon_invalid_wreg;
 	rdev->pll_rreg = &radeon_invalid_rreg;
 	rdev->pll_wreg = &radeon_invalid_wreg;
 	rdev->pcie_rreg = &radeon_invalid_rreg;
 	rdev->pcie_wreg = &radeon_invalid_wreg;
 	rdev->pciep_rreg = &radeon_invalid_rreg;
 	rdev->pciep_wreg = &radeon_invalid_wreg;

 	/* Don't change order as we are overridding accessor. */
 	if (rdev->family < CHIP_RV515) {
 		rdev->pcie_rreg = &rv370_pcie_rreg;
 		rdev->pcie_wreg = &rv370_pcie_wreg;
 	}
 	if (rdev->family >= CHIP_RV515) {
 		rdev->pcie_rreg = &rv515_pcie_rreg;
 		rdev->pcie_wreg = &rv515_pcie_wreg;
 	}
 	/* FIXME: not sure here */
 	if (rdev->family <= CHIP_R580) {
 		rdev->pll_rreg = &r100_pll_rreg;
 		rdev->pll_wreg = &r100_pll_wreg;
 	}
 	if (rdev->family >= CHIP_RV515) {
 		rdev->mc_rreg = &rv515_mc_rreg;
 		rdev->mc_wreg = &rv515_mc_wreg;
 	}
 	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
 		rdev->mc_rreg = &rs400_mc_rreg;
 		rdev->mc_wreg = &rs400_mc_wreg;
 	}
 	if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
 		rdev->mc_rreg = &rs690_mc_rreg;
 		rdev->mc_wreg = &rs690_mc_wreg;
 	}
 	if (rdev->family == CHIP_RS600) {
 		rdev->mc_rreg = &rs600_mc_rreg;
 		rdev->mc_wreg = &rs600_mc_wreg;
 	}
 	if (rdev->family >= CHIP_R600) {
 		rdev->pciep_rreg = &r600_pciep_rreg;
 		rdev->pciep_wreg = &r600_pciep_wreg;
 	}
 }


 /*
  * ASIC
  */
 int radeon_asic_init(struct radeon_device *rdev)
 {
 	radeon_register_accessor_init(rdev);
 	switch (rdev->family) {
 	case CHIP_R100:
 	case CHIP_RV100:
 	case CHIP_RS100:
 	case CHIP_RV200:
 	case CHIP_RS200:
 	case CHIP_R200:
 	case CHIP_RV250:
 	case CHIP_RS300:
 	case CHIP_RV280:
 		rdev->asic = &r100_asic;
 		break;
 	case CHIP_R300:
 	case CHIP_R350:
 	case CHIP_RV350:
 	case CHIP_RV380:
 		rdev->asic = &r300_asic;
 		break;
 	case CHIP_R420:
 	case CHIP_R423:
 	case CHIP_RV410:
 		rdev->asic = &r420_asic;
 		break;
 	case CHIP_RS400:
 	case CHIP_RS480:
 		rdev->asic = &rs400_asic;
 		break;
 	case CHIP_RS600:
 		rdev->asic = &rs600_asic;
 		break;
 	case CHIP_RS690:
 	case CHIP_RS740:
 		rdev->asic = &rs690_asic;
 		break;
 	case CHIP_RV515:
 		rdev->asic = &rv515_asic;
 		break;
 	case CHIP_R520:
 	case CHIP_RV530:
 	case CHIP_RV560:
 	case CHIP_RV570:
 	case CHIP_R580:
 		rdev->asic = &r520_asic;
 		break;
 	case CHIP_R600:
 	case CHIP_RV610:
 	case CHIP_RV630:
 	case CHIP_RV620:
 	case CHIP_RV635:
 	case CHIP_RV670:
 	case CHIP_RS780:
 	case CHIP_RV770:
 	case CHIP_RV730:
 	case CHIP_RV710:
 	default:
 		/* FIXME: not supported yet */
 		return -EINVAL;
 	}
 	return 0;
 }


 /*
  * Wrapper around modesetting bits.
  */
 int radeon_clocks_init(struct radeon_device *rdev)
 {
 	int r;

 	radeon_get_clock_info(rdev->ddev);
 	r = radeon_static_clocks_init(rdev->ddev);
 	if (r) {
 		return r;
 	}
 	DRM_INFO("Clocks initialized !\n");
 	return 0;
 }

 void radeon_clocks_fini(struct radeon_device *rdev)
 {
 }

 /* ATOM accessor methods */
 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
 {
 	struct radeon_device *rdev = info->dev->dev_private;
 	uint32_t r;

 	r = rdev->pll_rreg(rdev, reg);
 	return r;
 }

 static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
 {
 	struct radeon_device *rdev = info->dev->dev_private;

 	rdev->pll_wreg(rdev, reg, val);
 }

 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
 {
 	struct radeon_device *rdev = info->dev->dev_private;
 	uint32_t r;

 	r = rdev->mc_rreg(rdev, reg);
 	return r;
 }

 static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
 {
 	struct radeon_device *rdev = info->dev->dev_private;

 	rdev->mc_wreg(rdev, reg, val);
 }

 static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
 {
 	struct radeon_device *rdev = info->dev->dev_private;

 	WREG32(reg*4, val);
 }

 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
 {
 	struct radeon_device *rdev = info->dev->dev_private;
 	uint32_t r;

 	r = RREG32(reg*4);
 	return r;
 }

 static struct card_info atom_card_info = {
 	.dev = NULL,
 	.reg_read = cail_reg_read,
 	.reg_write = cail_reg_write,
 	.mc_read = cail_mc_read,
 	.mc_write = cail_mc_write,
 	.pll_read = cail_pll_read,
 	.pll_write = cail_pll_write,
 };

 int radeon_atombios_init(struct radeon_device *rdev)
 {
 	atom_card_info.dev = rdev->ddev;
 	rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
 	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
 	return 0;
 }

 void radeon_atombios_fini(struct radeon_device *rdev)
 {
 	kfree(rdev->mode_info.atom_context);
 }

 int radeon_combios_init(struct radeon_device *rdev)
 {
 	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
 	return 0;
 }

 void radeon_combios_fini(struct radeon_device *rdev)
 {
 }

 int radeon_modeset_init(struct radeon_device *rdev);
 void radeon_modeset_fini(struct radeon_device *rdev);


 /*
  * Radeon device.
  */
 int radeon_device_init(struct radeon_device *rdev,
 		       struct drm_device *ddev,
 		       struct pci_dev *pdev,
 		       uint32_t flags)
 {
 	int r, ret;

 	DRM_INFO("radeon: Initializing kernel modesetting.\n");
 	rdev->shutdown = false;
 	rdev->ddev = ddev;
 	rdev->pdev = pdev;
 	rdev->flags = flags;
 	rdev->family = flags & RADEON_FAMILY_MASK;
 	rdev->is_atom_bios = false;
 	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
 	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
 	rdev->gpu_lockup = false;
 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
 	mutex_init(&rdev->cs_mutex);
 	mutex_init(&rdev->ib_pool.mutex);
 	mutex_init(&rdev->cp.mutex);
 	rwlock_init(&rdev->fence_drv.lock);

 	if (radeon_agpmode == -1) {
 		rdev->flags &= ~RADEON_IS_AGP;
 		if (rdev->family > CHIP_RV515 ||
 		    rdev->family == CHIP_RV380 ||
 		    rdev->family == CHIP_RV410 ||
 		    rdev->family == CHIP_R423) {
 			DRM_INFO("Forcing AGP to PCIE mode\n");
 			rdev->flags |= RADEON_IS_PCIE;
 		} else {
 			DRM_INFO("Forcing AGP to PCI mode\n");
 			rdev->flags |= RADEON_IS_PCI;
 		}
 	}

 	/* Set asic functions */
 	r = radeon_asic_init(rdev);
 	if (r) {
 		return r;
 	}
 	r = radeon_init(rdev);
 	if (r) {
 		return r;
 	}

 	/* Report DMA addressing limitation */
 	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
 	if (r) {
 		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
 	}

 	/* Registers mapping */
 	/* TODO: block userspace mapping of io register */
 	rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
 	rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
 	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
 	if (rdev->rmmio == NULL) {
 		return -ENOMEM;
 	}
 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
 	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

 	/* Setup errata flags */
 	radeon_errata(rdev);
 	/* Initialize scratch registers */
 	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
 	/* TODO: disable VGA need to use VGA request */
 	/* BIOS*/
 	if (!radeon_get_bios(rdev)) {
 		if (ASIC_IS_AVIVO(rdev))
 			return -EINVAL;
 	}
 	if (rdev->is_atom_bios) {
 		r = radeon_atombios_init(rdev);
 		if (r) {
 			return r;
 		}
 	} else {
 		r = radeon_combios_init(rdev);
 		if (r) {
 			return r;
 		}
 	}
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
 	if (radeon_gpu_reset(rdev)) {
 		/* FIXME: what do we want to do here ? */
 	}
 	/* check if cards are posted or not */
 	if (!radeon_card_posted(rdev) && rdev->bios) {
 		DRM_INFO("GPU not posted. posting now...\n");
 		if (rdev->is_atom_bios) {
 			atom_asic_init(rdev->mode_info.atom_context);
 		} else {
 			radeon_combios_asic_init(rdev->ddev);
 		}
 	}
 	/* Get vram informations */
 	radeon_vram_info(rdev);
 	/* Device is severly broken if aper size > vram size.
 	 * for RN50/M6/M7 - Novell bug 204882 ?
 	 */
 	if (rdev->mc.vram_size < rdev->mc.aper_size) {
 		rdev->mc.aper_size = rdev->mc.vram_size;
 	}
 	/* Add an MTRR for the VRAM */
 	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
 				      MTRR_TYPE_WRCOMB, 1);
 	DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
 		 rdev->mc.vram_size >> 20,
 		 (unsigned)rdev->mc.aper_size >> 20);
 	DRM_INFO("RAM width %dbits %cDR\n",
 		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
 	/* Initialize clocks */
 	r = radeon_clocks_init(rdev);
 	if (r) {
 		return r;
 	}
 	/* Initialize memory controller (also test AGP) */
 	r = radeon_mc_init(rdev);
 	if (r) {
 		return r;
 	}
 	/* Fence driver */
 	r = radeon_fence_driver_init(rdev);
 	if (r) {
 		return r;
 	}
 	r = radeon_irq_kms_init(rdev);
 	if (r) {
 		return r;
 	}
 	/* Memory manager */
 	r = radeon_object_init(rdev);
 	if (r) {
 		return r;
 	}
 	/* Initialize GART (initialize after TTM so we can allocate
 	 * memory through TTM but finalize after TTM) */
 	r = radeon_gart_enable(rdev);
 	if (!r) {
 		r = radeon_gem_init(rdev);
 	}

 	/* 1M ring buffer */
 	if (!r) {
 		r = radeon_cp_init(rdev, 1024 * 1024);
 	}
 	if (!r) {
 		r = radeon_wb_init(rdev);
 		if (r) {
 			DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
 			return r;
 		}
 	}
 	if (!r) {
 		r = radeon_ib_pool_init(rdev);
 		if (r) {
 			DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
 			return r;
 		}
 	}
 	if (!r) {
 		r = radeon_ib_test(rdev);
 		if (r) {
 			DRM_ERROR("radeon: failled testing IB (%d).\n", r);
 			return r;
 		}
 	}
 	ret = r;
 	r = radeon_modeset_init(rdev);
 	if (r) {
 		return r;
 	}
 	if (!ret) {
 		DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
 	}
 	if (radeon_benchmarking) {
 		radeon_benchmark(rdev);
 	}
 	return ret;
 }

 void radeon_device_fini(struct radeon_device *rdev)
 {
 	if (rdev == NULL || rdev->rmmio == NULL) {
 		return;
 	}
 	DRM_INFO("radeon: finishing device.\n");
 	rdev->shutdown = true;
 	/* Order matter so becarefull if you rearrange anythings */
 	radeon_modeset_fini(rdev);
 	radeon_ib_pool_fini(rdev);
 	radeon_cp_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_object_fini(rdev);
 	/* mc_fini must be after object_fini */
 	radeon_mc_fini(rdev);
 #if __OS_HAS_AGP
 	radeon_agp_fini(rdev);
 #endif
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_clocks_fini(rdev);
 	if (rdev->is_atom_bios) {
 		radeon_atombios_fini(rdev);
 	} else {
 		radeon_combios_fini(rdev);
 	}
 	kfree(rdev->bios);
 	rdev->bios = NULL;
 	iounmap(rdev->rmmio);
 	rdev->rmmio = NULL;
 }


 /*
  * Suspend & resume.
  */
 int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_crtc *crtc;

 	if (dev == NULL || rdev == NULL) {
 		return -ENODEV;
 	}
 	if (state.event == PM_EVENT_PRETHAW) {
 		return 0;
 	}
 	/* unpin the front buffers */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
 		struct radeon_object *robj;

 		if (rfb == NULL || rfb->obj == NULL) {
 			continue;
 		}
 		robj = rfb->obj->driver_private;
 		if (robj != rdev->fbdev_robj) {
 			radeon_object_unpin(robj);
 		}
 	}
 	/* evict vram memory */
 	radeon_object_evict_vram(rdev);
 	/* wait for gpu to finish processing current batch */
 	radeon_fence_wait_last(rdev);

 	radeon_cp_disable(rdev);
 	radeon_gart_disable(rdev);

 	/* evict remaining vram memory */
 	radeon_object_evict_vram(rdev);

 	rdev->irq.sw_int = false;
 	radeon_irq_set(rdev);

 	pci_save_state(dev->pdev);
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
 	acquire_console_sem();
 	fb_set_suspend(rdev->fbdev_info, 1);
 	release_console_sem();
 	return 0;
 }

 int radeon_resume_kms(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	int r;

 	acquire_console_sem();
 	pci_set_power_state(dev->pdev, PCI_D0);
 	pci_restore_state(dev->pdev);
 	if (pci_enable_device(dev->pdev)) {
 		release_console_sem();
 		return -1;
 	}
 	pci_set_master(dev->pdev);
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
 	if (radeon_gpu_reset(rdev)) {
 		/* FIXME: what do we want to do here ? */
 	}
 	/* post card */
 	if (rdev->is_atom_bios) {
 		atom_asic_init(rdev->mode_info.atom_context);
 	} else {
 		radeon_combios_asic_init(rdev->ddev);
 	}
 	/* Initialize clocks */
 	r = radeon_clocks_init(rdev);
 	if (r) {
 		release_console_sem();
 		return r;
 	}
 	/* Enable IRQ */
 	rdev->irq.sw_int = true;
 	radeon_irq_set(rdev);
 	/* Initialize GPU Memory Controller */
 	r = radeon_mc_init(rdev);
 	if (r) {
 		goto out;
 	}
 	r = radeon_gart_enable(rdev);
 	if (r) {
 		goto out;
 	}
 	r = radeon_cp_init(rdev, rdev->cp.ring_size);
 	if (r) {
 		goto out;
 	}
 out:
 	fb_set_suspend(rdev->fbdev_info, 0);
 	release_console_sem();

 	/* blat the mode back in */
 	drm_helper_resume_force_mode(dev);
 	return 0;
 }


 /*
  * Debugfs
  */
 struct radeon_debugfs {
 	struct drm_info_list *files;
 	unsigned num_files;
 };
 static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
 static unsigned _radeon_debugfs_count = 0;

 int radeon_debugfs_add_files(struct radeon_device *rdev,
 			     struct drm_info_list *files,
 			     unsigned nfiles)
 {
 	unsigned i;

 	for (i = 0; i < _radeon_debugfs_count; i++) {
 		if (_radeon_debugfs[i].files == files) {
 			/* Already registered */
 			return 0;
 		}
 	}
 	if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
 		DRM_ERROR("Reached maximum number of debugfs files.\n");
 		DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
 		return -EINVAL;
 	}
 	_radeon_debugfs[_radeon_debugfs_count].files = files;
 	_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
 	_radeon_debugfs_count++;
 #if defined(CONFIG_DEBUG_FS)
 	drm_debugfs_create_files(files, nfiles,
 				 rdev->ddev->control->debugfs_root,
 				 rdev->ddev->control);
 	drm_debugfs_create_files(files, nfiles,
 				 rdev->ddev->primary->debugfs_root,
 				 rdev->ddev->primary);
 #endif
 	return 0;
 }

 #if defined(CONFIG_DEBUG_FS)
 int radeon_debugfs_init(struct drm_minor *minor)
 {
 	return 0;
 }

 void radeon_debugfs_cleanup(struct drm_minor *minor)
 {
 	unsigned i;

 	for (i = 0; i < _radeon_debugfs_count; i++) {
 		drm_debugfs_remove_files(_radeon_debugfs[i].files,
 					 _radeon_debugfs[i].num_files, minor);
 	}
 }
 #endif
814 | 834 |