Commit a0fc608178a9b38a5f782331909fcc208b742a7b
Exists in
ti-lsk-linux-4.1.y
and in
10 other branches
Merge branch 'drm-fixes-3.18' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
Fix one regression and one endian issue. * 'drm-fixes-3.18' of git://people.freedesktop.org/~agd5f/linux: drm/radeon: fix endian swapping in vbios fetch for tdp table; drm/radeon: disable native backlight control on pre-r6xx asics (v2)
Showing 2 changed files Inline Diff
drivers/gpu/drm/radeon/r600_dpm.c
1 | /* | 1 | /* |
2 | * Copyright 2011 Advanced Micro Devices, Inc. | 2 | * Copyright 2011 Advanced Micro Devices, Inc. |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation | 6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the | 8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: | 9 | * Software is furnished to do so, subject to the following conditions: |
10 | * | 10 | * |
11 | * The above copyright notice and this permission notice shall be included in | 11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. | 12 | * all copies or substantial portions of the Software. |
13 | * | 13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. | 20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * | 21 | * |
22 | * Authors: Alex Deucher | 22 | * Authors: Alex Deucher |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include "drmP.h" | 25 | #include "drmP.h" |
26 | #include "radeon.h" | 26 | #include "radeon.h" |
27 | #include "radeon_asic.h" | 27 | #include "radeon_asic.h" |
28 | #include "r600d.h" | 28 | #include "r600d.h" |
29 | #include "r600_dpm.h" | 29 | #include "r600_dpm.h" |
30 | #include "atom.h" | 30 | #include "atom.h" |
31 | 31 | ||
/*
 * Default "UTC" threshold-counter table, one entry per trend-counter slot
 * (R600_PM_NUMBER_OF_TC entries).  These values are packed into the UTC_0
 * field of the CG_FFCT_n registers by r600_set_tc() below.
 */
const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
	R600_UTC_DFLT_00,
	R600_UTC_DFLT_01,
	R600_UTC_DFLT_02,
	R600_UTC_DFLT_03,
	R600_UTC_DFLT_04,
	R600_UTC_DFLT_05,
	R600_UTC_DFLT_06,
	R600_UTC_DFLT_07,
	R600_UTC_DFLT_08,
	R600_UTC_DFLT_09,
	R600_UTC_DFLT_10,
	R600_UTC_DFLT_11,
	R600_UTC_DFLT_12,
	R600_UTC_DFLT_13,
	R600_UTC_DFLT_14,
};
50 | 50 | ||
/*
 * Default "DTC" threshold-counter table, parallel to r600_utc above.
 * These values are packed into the DTC_0 field of the CG_FFCT_n registers
 * by r600_set_tc() below.
 */
const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
	R600_DTC_DFLT_00,
	R600_DTC_DFLT_01,
	R600_DTC_DFLT_02,
	R600_DTC_DFLT_03,
	R600_DTC_DFLT_04,
	R600_DTC_DFLT_05,
	R600_DTC_DFLT_06,
	R600_DTC_DFLT_07,
	R600_DTC_DFLT_08,
	R600_DTC_DFLT_09,
	R600_DTC_DFLT_10,
	R600_DTC_DFLT_11,
	R600_DTC_DFLT_12,
	R600_DTC_DFLT_13,
	R600_DTC_DFLT_14,
};
69 | 69 | ||
70 | void r600_dpm_print_class_info(u32 class, u32 class2) | 70 | void r600_dpm_print_class_info(u32 class, u32 class2) |
71 | { | 71 | { |
72 | printk("\tui class: "); | 72 | printk("\tui class: "); |
73 | switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { | 73 | switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { |
74 | case ATOM_PPLIB_CLASSIFICATION_UI_NONE: | 74 | case ATOM_PPLIB_CLASSIFICATION_UI_NONE: |
75 | default: | 75 | default: |
76 | printk("none\n"); | 76 | printk("none\n"); |
77 | break; | 77 | break; |
78 | case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: | 78 | case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: |
79 | printk("battery\n"); | 79 | printk("battery\n"); |
80 | break; | 80 | break; |
81 | case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: | 81 | case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: |
82 | printk("balanced\n"); | 82 | printk("balanced\n"); |
83 | break; | 83 | break; |
84 | case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: | 84 | case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: |
85 | printk("performance\n"); | 85 | printk("performance\n"); |
86 | break; | 86 | break; |
87 | } | 87 | } |
88 | printk("\tinternal class: "); | 88 | printk("\tinternal class: "); |
89 | if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && | 89 | if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && |
90 | (class2 == 0)) | 90 | (class2 == 0)) |
91 | printk("none"); | 91 | printk("none"); |
92 | else { | 92 | else { |
93 | if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) | 93 | if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) |
94 | printk("boot "); | 94 | printk("boot "); |
95 | if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) | 95 | if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) |
96 | printk("thermal "); | 96 | printk("thermal "); |
97 | if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) | 97 | if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) |
98 | printk("limited_pwr "); | 98 | printk("limited_pwr "); |
99 | if (class & ATOM_PPLIB_CLASSIFICATION_REST) | 99 | if (class & ATOM_PPLIB_CLASSIFICATION_REST) |
100 | printk("rest "); | 100 | printk("rest "); |
101 | if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) | 101 | if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) |
102 | printk("forced "); | 102 | printk("forced "); |
103 | if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) | 103 | if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) |
104 | printk("3d_perf "); | 104 | printk("3d_perf "); |
105 | if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) | 105 | if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) |
106 | printk("ovrdrv "); | 106 | printk("ovrdrv "); |
107 | if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) | 107 | if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) |
108 | printk("uvd "); | 108 | printk("uvd "); |
109 | if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) | 109 | if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) |
110 | printk("3d_low "); | 110 | printk("3d_low "); |
111 | if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) | 111 | if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) |
112 | printk("acpi "); | 112 | printk("acpi "); |
113 | if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) | 113 | if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) |
114 | printk("uvd_hd2 "); | 114 | printk("uvd_hd2 "); |
115 | if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) | 115 | if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) |
116 | printk("uvd_hd "); | 116 | printk("uvd_hd "); |
117 | if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) | 117 | if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) |
118 | printk("uvd_sd "); | 118 | printk("uvd_sd "); |
119 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) | 119 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) |
120 | printk("limited_pwr2 "); | 120 | printk("limited_pwr2 "); |
121 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) | 121 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) |
122 | printk("ulv "); | 122 | printk("ulv "); |
123 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) | 123 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) |
124 | printk("uvd_mvc "); | 124 | printk("uvd_mvc "); |
125 | } | 125 | } |
126 | printk("\n"); | 126 | printk("\n"); |
127 | } | 127 | } |
128 | 128 | ||
129 | void r600_dpm_print_cap_info(u32 caps) | 129 | void r600_dpm_print_cap_info(u32 caps) |
130 | { | 130 | { |
131 | printk("\tcaps: "); | 131 | printk("\tcaps: "); |
132 | if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) | 132 | if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) |
133 | printk("single_disp "); | 133 | printk("single_disp "); |
134 | if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) | 134 | if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) |
135 | printk("video "); | 135 | printk("video "); |
136 | if (caps & ATOM_PPLIB_DISALLOW_ON_DC) | 136 | if (caps & ATOM_PPLIB_DISALLOW_ON_DC) |
137 | printk("no_dc "); | 137 | printk("no_dc "); |
138 | printk("\n"); | 138 | printk("\n"); |
139 | } | 139 | } |
140 | 140 | ||
141 | void r600_dpm_print_ps_status(struct radeon_device *rdev, | 141 | void r600_dpm_print_ps_status(struct radeon_device *rdev, |
142 | struct radeon_ps *rps) | 142 | struct radeon_ps *rps) |
143 | { | 143 | { |
144 | printk("\tstatus: "); | 144 | printk("\tstatus: "); |
145 | if (rps == rdev->pm.dpm.current_ps) | 145 | if (rps == rdev->pm.dpm.current_ps) |
146 | printk("c "); | 146 | printk("c "); |
147 | if (rps == rdev->pm.dpm.requested_ps) | 147 | if (rps == rdev->pm.dpm.requested_ps) |
148 | printk("r "); | 148 | printk("r "); |
149 | if (rps == rdev->pm.dpm.boot_ps) | 149 | if (rps == rdev->pm.dpm.boot_ps) |
150 | printk("b "); | 150 | printk("b "); |
151 | printk("\n"); | 151 | printk("\n"); |
152 | } | 152 | } |
153 | 153 | ||
154 | u32 r600_dpm_get_vblank_time(struct radeon_device *rdev) | 154 | u32 r600_dpm_get_vblank_time(struct radeon_device *rdev) |
155 | { | 155 | { |
156 | struct drm_device *dev = rdev->ddev; | 156 | struct drm_device *dev = rdev->ddev; |
157 | struct drm_crtc *crtc; | 157 | struct drm_crtc *crtc; |
158 | struct radeon_crtc *radeon_crtc; | 158 | struct radeon_crtc *radeon_crtc; |
159 | u32 line_time_us, vblank_lines; | 159 | u32 line_time_us, vblank_lines; |
160 | u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ | 160 | u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ |
161 | 161 | ||
162 | if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { | 162 | if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { |
163 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 163 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
164 | radeon_crtc = to_radeon_crtc(crtc); | 164 | radeon_crtc = to_radeon_crtc(crtc); |
165 | if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { | 165 | if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { |
166 | line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) / | 166 | line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) / |
167 | radeon_crtc->hw_mode.clock; | 167 | radeon_crtc->hw_mode.clock; |
168 | vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end - | 168 | vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end - |
169 | radeon_crtc->hw_mode.crtc_vdisplay + | 169 | radeon_crtc->hw_mode.crtc_vdisplay + |
170 | (radeon_crtc->v_border * 2); | 170 | (radeon_crtc->v_border * 2); |
171 | vblank_time_us = vblank_lines * line_time_us; | 171 | vblank_time_us = vblank_lines * line_time_us; |
172 | break; | 172 | break; |
173 | } | 173 | } |
174 | } | 174 | } |
175 | } | 175 | } |
176 | 176 | ||
177 | return vblank_time_us; | 177 | return vblank_time_us; |
178 | } | 178 | } |
179 | 179 | ||
180 | u32 r600_dpm_get_vrefresh(struct radeon_device *rdev) | 180 | u32 r600_dpm_get_vrefresh(struct radeon_device *rdev) |
181 | { | 181 | { |
182 | struct drm_device *dev = rdev->ddev; | 182 | struct drm_device *dev = rdev->ddev; |
183 | struct drm_crtc *crtc; | 183 | struct drm_crtc *crtc; |
184 | struct radeon_crtc *radeon_crtc; | 184 | struct radeon_crtc *radeon_crtc; |
185 | u32 vrefresh = 0; | 185 | u32 vrefresh = 0; |
186 | 186 | ||
187 | if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { | 187 | if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { |
188 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 188 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
189 | radeon_crtc = to_radeon_crtc(crtc); | 189 | radeon_crtc = to_radeon_crtc(crtc); |
190 | if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { | 190 | if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { |
191 | vrefresh = radeon_crtc->hw_mode.vrefresh; | 191 | vrefresh = radeon_crtc->hw_mode.vrefresh; |
192 | break; | 192 | break; |
193 | } | 193 | } |
194 | } | 194 | } |
195 | } | 195 | } |
196 | return vrefresh; | 196 | return vrefresh; |
197 | } | 197 | } |
198 | 198 | ||
/*
 * Split a scaled interval value into the (p, u) register encoding used
 * by the DPM state machine:
 *   scaled = i * r_c / 100
 *   u      = half the bit-width of (scaled >> p_b), rounded up
 *   p      = scaled / 4^u
 * @i:   interval value
 * @r_c: ratio in percent applied to @i
 * @p_b: pre-shift applied before counting significant bits
 * @p:   out - mantissa-like field
 * @u:   out - exponent-like field
 */
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			    u32 *p, u32 *u)
{
	u32 scaled = (i * r_c) / 100;
	u32 bits = 0;
	u32 v;

	/* count significant bits of scaled above the p_b cutoff */
	for (v = scaled >> p_b; v != 0; v >>= 1)
		bits++;

	*u = (bits + 1) / 2;
	*p = scaled / (1 << (2 * (*u)));
}
217 | 217 | ||
/*
 * Derive the low/high activity thresholds (tl, th) around a target @t
 * from hysteresis @h and the frequency pair (@fh, @fl), using fixed-point
 * integer arithmetic throughout.
 *
 * Returns 0 on success, -EINVAL if either frequency is zero or fl > fh.
 */
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 ratio, spread, mid, above, below;

	if (fl == 0 || fh == 0 || fl > fh)
		return -EINVAL;

	ratio  = (100 * fh) / fl;          /* fh/fl in percent, >= 100 */
	spread = t * (ratio - 100);
	/* fixed-point average, then round to one decimal place */
	mid = (1000 * (100 * h + spread)) / (10000 + (spread / 100));
	mid = (mid + 5) / 10;
	above = ((mid * t) + 5000) / 10000; /* rounded share above target */
	below = mid - above;

	*th = t - above;
	*tl = t + below;

	return 0;
}
238 | 238 | ||
239 | void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable) | 239 | void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable) |
240 | { | 240 | { |
241 | int i; | 241 | int i; |
242 | 242 | ||
243 | if (enable) { | 243 | if (enable) { |
244 | WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN); | 244 | WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN); |
245 | } else { | 245 | } else { |
246 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN); | 246 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN); |
247 | 247 | ||
248 | WREG32(CG_RLC_REQ_AND_RSP, 0x2); | 248 | WREG32(CG_RLC_REQ_AND_RSP, 0x2); |
249 | 249 | ||
250 | for (i = 0; i < rdev->usec_timeout; i++) { | 250 | for (i = 0; i < rdev->usec_timeout; i++) { |
251 | if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1) | 251 | if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1) |
252 | break; | 252 | break; |
253 | udelay(1); | 253 | udelay(1); |
254 | } | 254 | } |
255 | 255 | ||
256 | WREG32(CG_RLC_REQ_AND_RSP, 0x0); | 256 | WREG32(CG_RLC_REQ_AND_RSP, 0x0); |
257 | 257 | ||
258 | WREG32(GRBM_PWR_CNTL, 0x1); | 258 | WREG32(GRBM_PWR_CNTL, 0x1); |
259 | RREG32(GRBM_PWR_CNTL); | 259 | RREG32(GRBM_PWR_CNTL); |
260 | } | 260 | } |
261 | } | 261 | } |
262 | 262 | ||
263 | void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable) | 263 | void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable) |
264 | { | 264 | { |
265 | if (enable) | 265 | if (enable) |
266 | WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN); | 266 | WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN); |
267 | else | 267 | else |
268 | WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); | 268 | WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); |
269 | } | 269 | } |
270 | 270 | ||
271 | void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable) | 271 | void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable) |
272 | { | 272 | { |
273 | if (enable) | 273 | if (enable) |
274 | WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); | 274 | WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); |
275 | else | 275 | else |
276 | WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); | 276 | WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); |
277 | } | 277 | } |
278 | 278 | ||
/*
 * Enable static (ACPI-driven) power management: set STATIC_PM_EN in
 * GENERAL_PWRMGT via a masked write (other bits preserved).
 */
void r600_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}
283 | 283 | ||
284 | void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable) | 284 | void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable) |
285 | { | 285 | { |
286 | if (enable) | 286 | if (enable) |
287 | WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE); | 287 | WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE); |
288 | else | 288 | else |
289 | WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE); | 289 | WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE); |
290 | } | 290 | } |
291 | 291 | ||
292 | bool r600_dynamicpm_enabled(struct radeon_device *rdev) | 292 | bool r600_dynamicpm_enabled(struct radeon_device *rdev) |
293 | { | 293 | { |
294 | if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN) | 294 | if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN) |
295 | return true; | 295 | return true; |
296 | else | 296 | else |
297 | return false; | 297 | return false; |
298 | } | 298 | } |
299 | 299 | ||
300 | void r600_enable_sclk_control(struct radeon_device *rdev, bool enable) | 300 | void r600_enable_sclk_control(struct radeon_device *rdev, bool enable) |
301 | { | 301 | { |
302 | if (enable) | 302 | if (enable) |
303 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); | 303 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); |
304 | else | 304 | else |
305 | WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); | 305 | WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); |
306 | } | 306 | } |
307 | 307 | ||
308 | void r600_enable_mclk_control(struct radeon_device *rdev, bool enable) | 308 | void r600_enable_mclk_control(struct radeon_device *rdev, bool enable) |
309 | { | 309 | { |
310 | if (enable) | 310 | if (enable) |
311 | WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF); | 311 | WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF); |
312 | else | 312 | else |
313 | WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF); | 313 | WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF); |
314 | } | 314 | } |
315 | 315 | ||
316 | void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable) | 316 | void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable) |
317 | { | 317 | { |
318 | if (enable) | 318 | if (enable) |
319 | WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN); | 319 | WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN); |
320 | else | 320 | else |
321 | WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN); | 321 | WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN); |
322 | } | 322 | } |
323 | 323 | ||
324 | void r600_wait_for_spll_change(struct radeon_device *rdev) | 324 | void r600_wait_for_spll_change(struct radeon_device *rdev) |
325 | { | 325 | { |
326 | int i; | 326 | int i; |
327 | 327 | ||
328 | for (i = 0; i < rdev->usec_timeout; i++) { | 328 | for (i = 0; i < rdev->usec_timeout; i++) { |
329 | if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS) | 329 | if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS) |
330 | break; | 330 | break; |
331 | udelay(1); | 331 | udelay(1); |
332 | } | 332 | } |
333 | } | 333 | } |
334 | 334 | ||
/*
 * Program CG_BSP with the packed BSP(p) | BSU(u) fields.
 * Note this is a full register write (WREG32), not a masked update.
 */
void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
	WREG32(CG_BSP, BSP(p) | BSU(u));
}
339 | 339 | ||
/*
 * Program the four state-transition activity thresholds:
 *   CG_RT = FLS(l_to_m) | FMS(m_to_h)   (rising:  low->mid, mid->high)
 *   CG_LT = FHS(h_to_m) | FMS(m_to_l)   (falling: high->mid, mid->low)
 * NOTE(review): m_to_l is packed with FMS() in CG_LT, same macro as
 * m_to_h above — mirrors the original code; confirm this is intentional.
 */
void r600_set_at(struct radeon_device *rdev,
		 u32 l_to_m, u32 m_to_h,
		 u32 h_to_m, u32 m_to_l)
{
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}
347 | 347 | ||
/*
 * Program threshold-counter entry @index: packs UTC_0(u_t) | DTC_0(d_t)
 * into CG_FFCT_0 + index*4 (the CG_FFCT registers are 4 bytes apart).
 * Default u_t/d_t values come from the r600_utc/r600_dtc tables above.
 */
void r600_set_tc(struct radeon_device *rdev,
		 u32 index, u32 u_t, u32 d_t)
{
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}
353 | 353 | ||
354 | void r600_select_td(struct radeon_device *rdev, | 354 | void r600_select_td(struct radeon_device *rdev, |
355 | enum r600_td td) | 355 | enum r600_td td) |
356 | { | 356 | { |
357 | if (td == R600_TD_AUTO) | 357 | if (td == R600_TD_AUTO) |
358 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL); | 358 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL); |
359 | else | 359 | else |
360 | WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL); | 360 | WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL); |
361 | if (td == R600_TD_UP) | 361 | if (td == R600_TD_UP) |
362 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE); | 362 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE); |
363 | if (td == R600_TD_DOWN) | 363 | if (td == R600_TD_DOWN) |
364 | WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE); | 364 | WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE); |
365 | } | 365 | } |
366 | 366 | ||
/*
 * Write @vrv to CG_FTV (full register write, no masking).
 */
void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
	WREG32(CG_FTV, vrv);
}
371 | 371 | ||
/*
 * Write TPU(u) into CG_TPC; the ~TPU_MASK argument limits the update
 * to the TPU field (other bits preserved).
 */
void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}
376 | 376 | ||
/*
 * Write TPCC(c) into CG_TPC; the ~TPCC_MASK argument limits the update
 * to the TPCC field (other bits preserved).
 */
void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}
381 | 381 | ||
/*
 * Write CG_SSTU(u) into CG_SSP; masked to the CG_SSTU field only.
 */
void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}
386 | 386 | ||
/*
 * Write CG_SST(t) into CG_SSP; masked to the CG_SST field only.
 */
void r600_set_sst(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}
391 | 391 | ||
/*
 * Write CG_GICST(t) into CG_GIT; masked to the CG_GICST field only.
 */
void r600_set_git(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}
396 | 396 | ||
/*
 * Write FC_TU(u) into CG_FC_T; masked to the FC_TU field only.
 */
void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}
401 | 401 | ||
/*
 * Write FC_T(t) into CG_FC_T; masked to the FC_T field only.
 */
void r600_set_fct(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}
406 | 406 | ||
/*
 * Write PHC(p) into CG_CTX_CGTT3D_R; masked to the PHC field only.
 */
void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}
411 | 411 | ||
/*
 * Write SDC(s) into CG_CTX_CGTT3D_R; masked to the SDC field only.
 */
void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}
416 | 416 | ||
/*
 * Write SU(u) into CG_VDDC3D_OOR; masked to the SU field only.
 */
void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}
421 | 421 | ||
/*
 * Write PHC(p) into CG_VDDC3D_OOR; masked to the PHC field only.
 */
void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}
426 | 426 | ||
/*
 * Write SDC(s) into CG_VDDC3D_OOR; masked to the SDC field only.
 */
void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}
431 | 431 | ||
/*
 * Write MPLL_LOCK_TIME(lock_time) into MPLL_TIME; masked to the
 * MPLL_LOCK_TIME field only.
 */
void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}
436 | 436 | ||
/*
 * Write MPLL_RESET_TIME(reset_time) into MPLL_TIME; masked to the
 * MPLL_RESET_TIME field only.
 */
void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}
441 | 441 | ||
442 | void r600_engine_clock_entry_enable(struct radeon_device *rdev, | 442 | void r600_engine_clock_entry_enable(struct radeon_device *rdev, |
443 | u32 index, bool enable) | 443 | u32 index, bool enable) |
444 | { | 444 | { |
445 | if (enable) | 445 | if (enable) |
446 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), | 446 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), |
447 | STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID); | 447 | STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID); |
448 | else | 448 | else |
449 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), | 449 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), |
450 | 0, ~STEP_0_SPLL_ENTRY_VALID); | 450 | 0, ~STEP_0_SPLL_ENTRY_VALID); |
451 | } | 451 | } |
452 | 452 | ||
453 | void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev, | 453 | void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev, |
454 | u32 index, bool enable) | 454 | u32 index, bool enable) |
455 | { | 455 | { |
456 | if (enable) | 456 | if (enable) |
457 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), | 457 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), |
458 | STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE); | 458 | STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE); |
459 | else | 459 | else |
460 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), | 460 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), |
461 | 0, ~STEP_0_SPLL_STEP_ENABLE); | 461 | 0, ~STEP_0_SPLL_STEP_ENABLE); |
462 | } | 462 | } |
463 | 463 | ||
464 | void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev, | 464 | void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev, |
465 | u32 index, bool enable) | 465 | u32 index, bool enable) |
466 | { | 466 | { |
467 | if (enable) | 467 | if (enable) |
468 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), | 468 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), |
469 | STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN); | 469 | STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN); |
470 | else | 470 | else |
471 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), | 471 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2), |
472 | 0, ~STEP_0_POST_DIV_EN); | 472 | 0, ~STEP_0_POST_DIV_EN); |
473 | } | 473 | } |
474 | 474 | ||
/*
 * Program the SPLL post divider of engine-clock step entry @index
 * (PART1 register; masked to STEP_0_SPLL_POST_DIV field only).
 */
void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
					      u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}
481 | 481 | ||
/*
 * Program the SPLL reference divider of engine-clock step entry @index
 * (PART1 register; masked to STEP_0_SPLL_REF_DIV field only).
 */
void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
						   u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}
488 | 488 | ||
/*
 * Program the SPLL feedback divider of engine-clock step entry @index
 * (PART1 register; masked to STEP_0_SPLL_FB_DIV field only).
 */
void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
						  u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}
495 | 495 | ||
496 | void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev, | 496 | void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev, |
497 | u32 index, u32 step_time) | 497 | u32 index, u32 step_time) |
498 | { | 498 | { |
499 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2), | 499 | WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2), |
500 | STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK); | 500 | STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK); |
501 | } | 501 | } |
502 | 502 | ||
/* Set the SSTU field of the VID_RT register; other bits are preserved. */
void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}
507 | 507 | ||
/* Set the VID_CRTU field of the VID_RT register; other bits are preserved. */
void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}
512 | 512 | ||
/* Set the VID_CRT field of the VID_RT register; other bits are preserved. */
void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}
517 | 517 | ||
518 | void r600_voltage_control_enable_pins(struct radeon_device *rdev, | 518 | void r600_voltage_control_enable_pins(struct radeon_device *rdev, |
519 | u64 mask) | 519 | u64 mask) |
520 | { | 520 | { |
521 | WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff); | 521 | WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff); |
522 | WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask)); | 522 | WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask)); |
523 | } | 523 | } |
524 | 524 | ||
525 | 525 | ||
/* Program the GPIO pin pattern that selects the voltage for power level
 * @index.
 */
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	/* Hardware profile slots are addressed in reverse order (3 - index). */
	u32 ix = 3 - (3 & index);

	/* Low 32 pin bits go to the per-level register. */
	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	/* Upper pin bits are packed 3 bits per level in a shared register. */
	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	/* NOTE(review): the shift amount (32 - 3*ix) looks asymmetric with the
	 * mask position (3*ix); this matches the long-standing upstream code,
	 * but verify against the register spec before changing.
	 */
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}
539 | 539 | ||
540 | void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev, | 540 | void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev, |
541 | u64 mask) | 541 | u64 mask) |
542 | { | 542 | { |
543 | u32 gpio; | 543 | u32 gpio; |
544 | 544 | ||
545 | gpio = RREG32(GPIOPAD_MASK); | 545 | gpio = RREG32(GPIOPAD_MASK); |
546 | gpio &= ~mask; | 546 | gpio &= ~mask; |
547 | WREG32(GPIOPAD_MASK, gpio); | 547 | WREG32(GPIOPAD_MASK, gpio); |
548 | 548 | ||
549 | gpio = RREG32(GPIOPAD_EN); | 549 | gpio = RREG32(GPIOPAD_EN); |
550 | gpio &= ~mask; | 550 | gpio &= ~mask; |
551 | WREG32(GPIOPAD_EN, gpio); | 551 | WREG32(GPIOPAD_EN, gpio); |
552 | 552 | ||
553 | gpio = RREG32(GPIOPAD_A); | 553 | gpio = RREG32(GPIOPAD_A); |
554 | gpio &= ~mask; | 554 | gpio &= ~mask; |
555 | WREG32(GPIOPAD_A, gpio); | 555 | WREG32(GPIOPAD_A, gpio); |
556 | } | 556 | } |
557 | 557 | ||
558 | void r600_power_level_enable(struct radeon_device *rdev, | 558 | void r600_power_level_enable(struct radeon_device *rdev, |
559 | enum r600_power_level index, bool enable) | 559 | enum r600_power_level index, bool enable) |
560 | { | 560 | { |
561 | u32 ix = 3 - (3 & index); | 561 | u32 ix = 3 - (3 & index); |
562 | 562 | ||
563 | if (enable) | 563 | if (enable) |
564 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE, | 564 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE, |
565 | ~CTXSW_FREQ_STATE_ENABLE); | 565 | ~CTXSW_FREQ_STATE_ENABLE); |
566 | else | 566 | else |
567 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0, | 567 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0, |
568 | ~CTXSW_FREQ_STATE_ENABLE); | 568 | ~CTXSW_FREQ_STATE_ENABLE); |
569 | } | 569 | } |
570 | 570 | ||
571 | void r600_power_level_set_voltage_index(struct radeon_device *rdev, | 571 | void r600_power_level_set_voltage_index(struct radeon_device *rdev, |
572 | enum r600_power_level index, u32 voltage_index) | 572 | enum r600_power_level index, u32 voltage_index) |
573 | { | 573 | { |
574 | u32 ix = 3 - (3 & index); | 574 | u32 ix = 3 - (3 & index); |
575 | 575 | ||
576 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), | 576 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), |
577 | CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK); | 577 | CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK); |
578 | } | 578 | } |
579 | 579 | ||
580 | void r600_power_level_set_mem_clock_index(struct radeon_device *rdev, | 580 | void r600_power_level_set_mem_clock_index(struct radeon_device *rdev, |
581 | enum r600_power_level index, u32 mem_clock_index) | 581 | enum r600_power_level index, u32 mem_clock_index) |
582 | { | 582 | { |
583 | u32 ix = 3 - (3 & index); | 583 | u32 ix = 3 - (3 & index); |
584 | 584 | ||
585 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), | 585 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), |
586 | CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK); | 586 | CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK); |
587 | } | 587 | } |
588 | 588 | ||
589 | void r600_power_level_set_eng_clock_index(struct radeon_device *rdev, | 589 | void r600_power_level_set_eng_clock_index(struct radeon_device *rdev, |
590 | enum r600_power_level index, u32 eng_clock_index) | 590 | enum r600_power_level index, u32 eng_clock_index) |
591 | { | 591 | { |
592 | u32 ix = 3 - (3 & index); | 592 | u32 ix = 3 - (3 & index); |
593 | 593 | ||
594 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), | 594 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), |
595 | CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK); | 595 | CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK); |
596 | } | 596 | } |
597 | 597 | ||
598 | void r600_power_level_set_watermark_id(struct radeon_device *rdev, | 598 | void r600_power_level_set_watermark_id(struct radeon_device *rdev, |
599 | enum r600_power_level index, | 599 | enum r600_power_level index, |
600 | enum r600_display_watermark watermark_id) | 600 | enum r600_display_watermark watermark_id) |
601 | { | 601 | { |
602 | u32 ix = 3 - (3 & index); | 602 | u32 ix = 3 - (3 & index); |
603 | u32 tmp = 0; | 603 | u32 tmp = 0; |
604 | 604 | ||
605 | if (watermark_id == R600_DISPLAY_WATERMARK_HIGH) | 605 | if (watermark_id == R600_DISPLAY_WATERMARK_HIGH) |
606 | tmp = CTXSW_FREQ_DISPLAY_WATERMARK; | 606 | tmp = CTXSW_FREQ_DISPLAY_WATERMARK; |
607 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK); | 607 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK); |
608 | } | 608 | } |
609 | 609 | ||
610 | void r600_power_level_set_pcie_gen2(struct radeon_device *rdev, | 610 | void r600_power_level_set_pcie_gen2(struct radeon_device *rdev, |
611 | enum r600_power_level index, bool compatible) | 611 | enum r600_power_level index, bool compatible) |
612 | { | 612 | { |
613 | u32 ix = 3 - (3 & index); | 613 | u32 ix = 3 - (3 & index); |
614 | u32 tmp = 0; | 614 | u32 tmp = 0; |
615 | 615 | ||
616 | if (compatible) | 616 | if (compatible) |
617 | tmp = CTXSW_FREQ_GEN2PCIE_VOLT; | 617 | tmp = CTXSW_FREQ_GEN2PCIE_VOLT; |
618 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT); | 618 | WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT); |
619 | } | 619 | } |
620 | 620 | ||
621 | enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev) | 621 | enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev) |
622 | { | 622 | { |
623 | u32 tmp; | 623 | u32 tmp; |
624 | 624 | ||
625 | tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK; | 625 | tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK; |
626 | tmp >>= CURRENT_PROFILE_INDEX_SHIFT; | 626 | tmp >>= CURRENT_PROFILE_INDEX_SHIFT; |
627 | return tmp; | 627 | return tmp; |
628 | } | 628 | } |
629 | 629 | ||
630 | enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev) | 630 | enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev) |
631 | { | 631 | { |
632 | u32 tmp; | 632 | u32 tmp; |
633 | 633 | ||
634 | tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK; | 634 | tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK; |
635 | tmp >>= TARGET_PROFILE_INDEX_SHIFT; | 635 | tmp >>= TARGET_PROFILE_INDEX_SHIFT; |
636 | return tmp; | 636 | return tmp; |
637 | } | 637 | } |
638 | 638 | ||
/* Ask the dynamic PM engine to transition into power level @index. */
void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
{
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);
}
645 | 645 | ||
646 | void r600_wait_for_power_level_unequal(struct radeon_device *rdev, | 646 | void r600_wait_for_power_level_unequal(struct radeon_device *rdev, |
647 | enum r600_power_level index) | 647 | enum r600_power_level index) |
648 | { | 648 | { |
649 | int i; | 649 | int i; |
650 | 650 | ||
651 | for (i = 0; i < rdev->usec_timeout; i++) { | 651 | for (i = 0; i < rdev->usec_timeout; i++) { |
652 | if (r600_power_level_get_target_index(rdev) != index) | 652 | if (r600_power_level_get_target_index(rdev) != index) |
653 | break; | 653 | break; |
654 | udelay(1); | 654 | udelay(1); |
655 | } | 655 | } |
656 | 656 | ||
657 | for (i = 0; i < rdev->usec_timeout; i++) { | 657 | for (i = 0; i < rdev->usec_timeout; i++) { |
658 | if (r600_power_level_get_current_index(rdev) != index) | 658 | if (r600_power_level_get_current_index(rdev) != index) |
659 | break; | 659 | break; |
660 | udelay(1); | 660 | udelay(1); |
661 | } | 661 | } |
662 | } | 662 | } |
663 | 663 | ||
664 | void r600_wait_for_power_level(struct radeon_device *rdev, | 664 | void r600_wait_for_power_level(struct radeon_device *rdev, |
665 | enum r600_power_level index) | 665 | enum r600_power_level index) |
666 | { | 666 | { |
667 | int i; | 667 | int i; |
668 | 668 | ||
669 | for (i = 0; i < rdev->usec_timeout; i++) { | 669 | for (i = 0; i < rdev->usec_timeout; i++) { |
670 | if (r600_power_level_get_target_index(rdev) == index) | 670 | if (r600_power_level_get_target_index(rdev) == index) |
671 | break; | 671 | break; |
672 | udelay(1); | 672 | udelay(1); |
673 | } | 673 | } |
674 | 674 | ||
675 | for (i = 0; i < rdev->usec_timeout; i++) { | 675 | for (i = 0; i < rdev->usec_timeout; i++) { |
676 | if (r600_power_level_get_current_index(rdev) == index) | 676 | if (r600_power_level_get_current_index(rdev) == index) |
677 | break; | 677 | break; |
678 | udelay(1); | 678 | udelay(1); |
679 | } | 679 | } |
680 | } | 680 | } |
681 | 681 | ||
/* Enable dynamic power management.
 * Clock control is gated off while the dynamic PM engine is switched on,
 * the SPLL is cycled through bypass twice, and clock control is then
 * handed back to the hardware.
 */
void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	/* NOTE(review): waits on CRTC 0 and 1 unconditionally - presumably
	 * to avoid clock changes mid-scanout; confirm
	 * radeon_wait_for_vblank() tolerates disabled/absent CRTCs.
	 */
	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	/* Two full bypass on/off cycles, letting each edge complete. */
	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}
705 | 705 | ||
/* Disable dynamic power management by shutting off the dynamic PM engine. */
void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}
710 | 710 | ||
/* No per-asic work is required before a power state change on r6xx;
 * kept as a stub so the common dpm code can call it unconditionally.
 */
int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}
715 | 715 | ||
/* No per-asic work is required after a power state change on r6xx;
 * kept as a stub so the common dpm code can call it unconditionally.
 */
void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{

}
720 | 720 | ||
721 | bool r600_is_uvd_state(u32 class, u32 class2) | 721 | bool r600_is_uvd_state(u32 class, u32 class2) |
722 | { | 722 | { |
723 | if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) | 723 | if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) |
724 | return true; | 724 | return true; |
725 | if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) | 725 | if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) |
726 | return true; | 726 | return true; |
727 | if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) | 727 | if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) |
728 | return true; | 728 | return true; |
729 | if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) | 729 | if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) |
730 | return true; | 730 | return true; |
731 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) | 731 | if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) |
732 | return true; | 732 | return true; |
733 | return false; | 733 | return false; |
734 | } | 734 | } |
735 | 735 | ||
736 | static int r600_set_thermal_temperature_range(struct radeon_device *rdev, | 736 | static int r600_set_thermal_temperature_range(struct radeon_device *rdev, |
737 | int min_temp, int max_temp) | 737 | int min_temp, int max_temp) |
738 | { | 738 | { |
739 | int low_temp = 0 * 1000; | 739 | int low_temp = 0 * 1000; |
740 | int high_temp = 255 * 1000; | 740 | int high_temp = 255 * 1000; |
741 | 741 | ||
742 | if (low_temp < min_temp) | 742 | if (low_temp < min_temp) |
743 | low_temp = min_temp; | 743 | low_temp = min_temp; |
744 | if (high_temp > max_temp) | 744 | if (high_temp > max_temp) |
745 | high_temp = max_temp; | 745 | high_temp = max_temp; |
746 | if (high_temp < low_temp) { | 746 | if (high_temp < low_temp) { |
747 | DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); | 747 | DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); |
748 | return -EINVAL; | 748 | return -EINVAL; |
749 | } | 749 | } |
750 | 750 | ||
751 | WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK); | 751 | WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK); |
752 | WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK); | 752 | WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK); |
753 | WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK); | 753 | WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK); |
754 | 754 | ||
755 | rdev->pm.dpm.thermal.min_temp = low_temp; | 755 | rdev->pm.dpm.thermal.min_temp = low_temp; |
756 | rdev->pm.dpm.thermal.max_temp = high_temp; | 756 | rdev->pm.dpm.thermal.max_temp = high_temp; |
757 | 757 | ||
758 | return 0; | 758 | return 0; |
759 | } | 759 | } |
760 | 760 | ||
761 | bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor) | 761 | bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor) |
762 | { | 762 | { |
763 | switch (sensor) { | 763 | switch (sensor) { |
764 | case THERMAL_TYPE_RV6XX: | 764 | case THERMAL_TYPE_RV6XX: |
765 | case THERMAL_TYPE_RV770: | 765 | case THERMAL_TYPE_RV770: |
766 | case THERMAL_TYPE_EVERGREEN: | 766 | case THERMAL_TYPE_EVERGREEN: |
767 | case THERMAL_TYPE_SUMO: | 767 | case THERMAL_TYPE_SUMO: |
768 | case THERMAL_TYPE_NI: | 768 | case THERMAL_TYPE_NI: |
769 | case THERMAL_TYPE_SI: | 769 | case THERMAL_TYPE_SI: |
770 | case THERMAL_TYPE_CI: | 770 | case THERMAL_TYPE_CI: |
771 | case THERMAL_TYPE_KV: | 771 | case THERMAL_TYPE_KV: |
772 | return true; | 772 | return true; |
773 | case THERMAL_TYPE_ADT7473_WITH_INTERNAL: | 773 | case THERMAL_TYPE_ADT7473_WITH_INTERNAL: |
774 | case THERMAL_TYPE_EMC2103_WITH_INTERNAL: | 774 | case THERMAL_TYPE_EMC2103_WITH_INTERNAL: |
775 | return false; /* need special handling */ | 775 | return false; /* need special handling */ |
776 | case THERMAL_TYPE_NONE: | 776 | case THERMAL_TYPE_NONE: |
777 | case THERMAL_TYPE_EXTERNAL: | 777 | case THERMAL_TYPE_EXTERNAL: |
778 | case THERMAL_TYPE_EXTERNAL_GPIO: | 778 | case THERMAL_TYPE_EXTERNAL_GPIO: |
779 | default: | 779 | default: |
780 | return false; | 780 | return false; |
781 | } | 781 | } |
782 | } | 782 | } |
783 | 783 | ||
784 | int r600_dpm_late_enable(struct radeon_device *rdev) | 784 | int r600_dpm_late_enable(struct radeon_device *rdev) |
785 | { | 785 | { |
786 | int ret; | 786 | int ret; |
787 | 787 | ||
788 | if (rdev->irq.installed && | 788 | if (rdev->irq.installed && |
789 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 789 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
790 | ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); | 790 | ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
791 | if (ret) | 791 | if (ret) |
792 | return ret; | 792 | return ret; |
793 | rdev->irq.dpm_thermal = true; | 793 | rdev->irq.dpm_thermal = true; |
794 | radeon_irq_set(rdev); | 794 | radeon_irq_set(rdev); |
795 | } | 795 | } |
796 | 796 | ||
797 | return 0; | 797 | return 0; |
798 | } | 798 | } |
799 | 799 | ||
/* Overlay of every known PowerPlayInfo table revision; the member that is
 * actually valid is selected from the table headers parsed out of the
 * vbios (frev/crev/usTableSize).
 */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
810 | 810 | ||
/* Overlay of the two fan table revisions; selected via ucFanTableFormat. */
union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
};
815 | 815 | ||
816 | static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table, | 816 | static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table, |
817 | ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) | 817 | ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) |
818 | { | 818 | { |
819 | u32 size = atom_table->ucNumEntries * | 819 | u32 size = atom_table->ucNumEntries * |
820 | sizeof(struct radeon_clock_voltage_dependency_entry); | 820 | sizeof(struct radeon_clock_voltage_dependency_entry); |
821 | int i; | 821 | int i; |
822 | ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; | 822 | ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; |
823 | 823 | ||
824 | radeon_table->entries = kzalloc(size, GFP_KERNEL); | 824 | radeon_table->entries = kzalloc(size, GFP_KERNEL); |
825 | if (!radeon_table->entries) | 825 | if (!radeon_table->entries) |
826 | return -ENOMEM; | 826 | return -ENOMEM; |
827 | 827 | ||
828 | entry = &atom_table->entries[0]; | 828 | entry = &atom_table->entries[0]; |
829 | for (i = 0; i < atom_table->ucNumEntries; i++) { | 829 | for (i = 0; i < atom_table->ucNumEntries; i++) { |
830 | radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | | 830 | radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | |
831 | (entry->ucClockHigh << 16); | 831 | (entry->ucClockHigh << 16); |
832 | radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage); | 832 | radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage); |
833 | entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *) | 833 | entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *) |
834 | ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record)); | 834 | ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record)); |
835 | } | 835 | } |
836 | radeon_table->count = atom_table->ucNumEntries; | 836 | radeon_table->count = atom_table->ucNumEntries; |
837 | 837 | ||
838 | return 0; | 838 | return 0; |
839 | } | 839 | } |
840 | 840 | ||
841 | int r600_get_platform_caps(struct radeon_device *rdev) | 841 | int r600_get_platform_caps(struct radeon_device *rdev) |
842 | { | 842 | { |
843 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 843 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
844 | union power_info *power_info; | 844 | union power_info *power_info; |
845 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | 845 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
846 | u16 data_offset; | 846 | u16 data_offset; |
847 | u8 frev, crev; | 847 | u8 frev, crev; |
848 | 848 | ||
849 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, | 849 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, |
850 | &frev, &crev, &data_offset)) | 850 | &frev, &crev, &data_offset)) |
851 | return -EINVAL; | 851 | return -EINVAL; |
852 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | 852 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); |
853 | 853 | ||
854 | rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); | 854 | rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); |
855 | rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); | 855 | rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); |
856 | rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); | 856 | rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); |
857 | 857 | ||
858 | return 0; | 858 | return 0; |
859 | } | 859 | } |
860 | 860 | ||
861 | /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ | 861 | /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ |
862 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 | 862 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 |
863 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 | 863 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 |
864 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 | 864 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 |
865 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 | 865 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 |
866 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 | 866 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 |
867 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 | 867 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 |
868 | 868 | ||
869 | int r600_parse_extended_power_table(struct radeon_device *rdev) | 869 | int r600_parse_extended_power_table(struct radeon_device *rdev) |
870 | { | 870 | { |
871 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 871 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
872 | union power_info *power_info; | 872 | union power_info *power_info; |
873 | union fan_info *fan_info; | 873 | union fan_info *fan_info; |
874 | ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table; | 874 | ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table; |
875 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | 875 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
876 | u16 data_offset; | 876 | u16 data_offset; |
877 | u8 frev, crev; | 877 | u8 frev, crev; |
878 | int ret, i; | 878 | int ret, i; |
879 | 879 | ||
880 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, | 880 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, |
881 | &frev, &crev, &data_offset)) | 881 | &frev, &crev, &data_offset)) |
882 | return -EINVAL; | 882 | return -EINVAL; |
883 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | 883 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); |
884 | 884 | ||
885 | /* fan table */ | 885 | /* fan table */ |
886 | if (le16_to_cpu(power_info->pplib.usTableSize) >= | 886 | if (le16_to_cpu(power_info->pplib.usTableSize) >= |
887 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { | 887 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { |
888 | if (power_info->pplib3.usFanTableOffset) { | 888 | if (power_info->pplib3.usFanTableOffset) { |
889 | fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset + | 889 | fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset + |
890 | le16_to_cpu(power_info->pplib3.usFanTableOffset)); | 890 | le16_to_cpu(power_info->pplib3.usFanTableOffset)); |
891 | rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst; | 891 | rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst; |
892 | rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin); | 892 | rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin); |
893 | rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed); | 893 | rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed); |
894 | rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh); | 894 | rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh); |
895 | rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin); | 895 | rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin); |
896 | rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed); | 896 | rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed); |
897 | rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh); | 897 | rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh); |
898 | if (fan_info->fan.ucFanTableFormat >= 2) | 898 | if (fan_info->fan.ucFanTableFormat >= 2) |
899 | rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax); | 899 | rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax); |
900 | else | 900 | else |
901 | rdev->pm.dpm.fan.t_max = 10900; | 901 | rdev->pm.dpm.fan.t_max = 10900; |
902 | rdev->pm.dpm.fan.cycle_delay = 100000; | 902 | rdev->pm.dpm.fan.cycle_delay = 100000; |
903 | rdev->pm.dpm.fan.ucode_fan_control = true; | 903 | rdev->pm.dpm.fan.ucode_fan_control = true; |
904 | } | 904 | } |
905 | } | 905 | } |
906 | 906 | ||
907 | /* clock dependancy tables, shedding tables */ | 907 | /* clock dependancy tables, shedding tables */ |
908 | if (le16_to_cpu(power_info->pplib.usTableSize) >= | 908 | if (le16_to_cpu(power_info->pplib.usTableSize) >= |
909 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) { | 909 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) { |
910 | if (power_info->pplib4.usVddcDependencyOnSCLKOffset) { | 910 | if (power_info->pplib4.usVddcDependencyOnSCLKOffset) { |
911 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) | 911 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) |
912 | (mode_info->atom_context->bios + data_offset + | 912 | (mode_info->atom_context->bios + data_offset + |
913 | le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset)); | 913 | le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset)); |
914 | ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, | 914 | ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, |
915 | dep_table); | 915 | dep_table); |
916 | if (ret) | 916 | if (ret) |
917 | return ret; | 917 | return ret; |
918 | } | 918 | } |
919 | if (power_info->pplib4.usVddciDependencyOnMCLKOffset) { | 919 | if (power_info->pplib4.usVddciDependencyOnMCLKOffset) { |
920 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) | 920 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) |
921 | (mode_info->atom_context->bios + data_offset + | 921 | (mode_info->atom_context->bios + data_offset + |
922 | le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset)); | 922 | le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset)); |
923 | ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, | 923 | ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, |
924 | dep_table); | 924 | dep_table); |
925 | if (ret) { | 925 | if (ret) { |
926 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); | 926 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); |
927 | return ret; | 927 | return ret; |
928 | } | 928 | } |
929 | } | 929 | } |
930 | if (power_info->pplib4.usVddcDependencyOnMCLKOffset) { | 930 | if (power_info->pplib4.usVddcDependencyOnMCLKOffset) { |
931 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) | 931 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) |
932 | (mode_info->atom_context->bios + data_offset + | 932 | (mode_info->atom_context->bios + data_offset + |
933 | le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset)); | 933 | le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset)); |
934 | ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, | 934 | ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, |
935 | dep_table); | 935 | dep_table); |
936 | if (ret) { | 936 | if (ret) { |
937 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); | 937 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); |
938 | kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); | 938 | kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); |
939 | return ret; | 939 | return ret; |
940 | } | 940 | } |
941 | } | 941 | } |
942 | if (power_info->pplib4.usMvddDependencyOnMCLKOffset) { | 942 | if (power_info->pplib4.usMvddDependencyOnMCLKOffset) { |
943 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) | 943 | dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) |
944 | (mode_info->atom_context->bios + data_offset + | 944 | (mode_info->atom_context->bios + data_offset + |
945 | le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset)); | 945 | le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset)); |
946 | ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, | 946 | ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, |
947 | dep_table); | 947 | dep_table); |
948 | if (ret) { | 948 | if (ret) { |
949 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); | 949 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); |
950 | kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); | 950 | kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); |
951 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); | 951 | kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); |
952 | return ret; | 952 | return ret; |
953 | } | 953 | } |
954 | } | 954 | } |
955 | if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { | 955 | if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { |
956 | ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = | 956 | ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = |
957 | (ATOM_PPLIB_Clock_Voltage_Limit_Table *) | 957 | (ATOM_PPLIB_Clock_Voltage_Limit_Table *) |
958 | (mode_info->atom_context->bios + data_offset + | 958 | (mode_info->atom_context->bios + data_offset + |
959 | le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset)); | 959 | le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset)); |
960 | if (clk_v->ucNumEntries) { | 960 | if (clk_v->ucNumEntries) { |
961 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk = | 961 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk = |
962 | le16_to_cpu(clk_v->entries[0].usSclkLow) | | 962 | le16_to_cpu(clk_v->entries[0].usSclkLow) | |
963 | (clk_v->entries[0].ucSclkHigh << 16); | 963 | (clk_v->entries[0].ucSclkHigh << 16); |
964 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk = | 964 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk = |
965 | le16_to_cpu(clk_v->entries[0].usMclkLow) | | 965 | le16_to_cpu(clk_v->entries[0].usMclkLow) | |
966 | (clk_v->entries[0].ucMclkHigh << 16); | 966 | (clk_v->entries[0].ucMclkHigh << 16); |
967 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc = | 967 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc = |
968 | le16_to_cpu(clk_v->entries[0].usVddc); | 968 | le16_to_cpu(clk_v->entries[0].usVddc); |
969 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci = | 969 | rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci = |
970 | le16_to_cpu(clk_v->entries[0].usVddci); | 970 | le16_to_cpu(clk_v->entries[0].usVddci); |
971 | } | 971 | } |
972 | } | 972 | } |
973 | if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) { | 973 | if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) { |
974 | ATOM_PPLIB_PhaseSheddingLimits_Table *psl = | 974 | ATOM_PPLIB_PhaseSheddingLimits_Table *psl = |
975 | (ATOM_PPLIB_PhaseSheddingLimits_Table *) | 975 | (ATOM_PPLIB_PhaseSheddingLimits_Table *) |
976 | (mode_info->atom_context->bios + data_offset + | 976 | (mode_info->atom_context->bios + data_offset + |
977 | le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); | 977 | le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); |
978 | ATOM_PPLIB_PhaseSheddingLimits_Record *entry; | 978 | ATOM_PPLIB_PhaseSheddingLimits_Record *entry; |
979 | 979 | ||
980 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = | 980 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = |
981 | kzalloc(psl->ucNumEntries * | 981 | kzalloc(psl->ucNumEntries * |
982 | sizeof(struct radeon_phase_shedding_limits_entry), | 982 | sizeof(struct radeon_phase_shedding_limits_entry), |
983 | GFP_KERNEL); | 983 | GFP_KERNEL); |
984 | if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { | 984 | if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { |
985 | r600_free_extended_power_table(rdev); | 985 | r600_free_extended_power_table(rdev); |
986 | return -ENOMEM; | 986 | return -ENOMEM; |
987 | } | 987 | } |
988 | 988 | ||
989 | entry = &psl->entries[0]; | 989 | entry = &psl->entries[0]; |
990 | for (i = 0; i < psl->ucNumEntries; i++) { | 990 | for (i = 0; i < psl->ucNumEntries; i++) { |
991 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = | 991 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = |
992 | le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16); | 992 | le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16); |
993 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = | 993 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = |
994 | le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16); | 994 | le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16); |
995 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = | 995 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = |
996 | le16_to_cpu(entry->usVoltage); | 996 | le16_to_cpu(entry->usVoltage); |
997 | entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *) | 997 | entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *) |
998 | ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record)); | 998 | ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record)); |
999 | } | 999 | } |
1000 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count = | 1000 | rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count = |
1001 | psl->ucNumEntries; | 1001 | psl->ucNumEntries; |
1002 | } | 1002 | } |
1003 | } | 1003 | } |
1004 | 1004 | ||
1005 | /* cac data */ | 1005 | /* cac data */ |
1006 | if (le16_to_cpu(power_info->pplib.usTableSize) >= | 1006 | if (le16_to_cpu(power_info->pplib.usTableSize) >= |
1007 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) { | 1007 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) { |
1008 | rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit); | 1008 | rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit); |
1009 | rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit); | 1009 | rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit); |
1010 | rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit; | 1010 | rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit; |
1011 | rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit); | 1011 | rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit); |
1012 | if (rdev->pm.dpm.tdp_od_limit) | 1012 | if (rdev->pm.dpm.tdp_od_limit) |
1013 | rdev->pm.dpm.power_control = true; | 1013 | rdev->pm.dpm.power_control = true; |
1014 | else | 1014 | else |
1015 | rdev->pm.dpm.power_control = false; | 1015 | rdev->pm.dpm.power_control = false; |
1016 | rdev->pm.dpm.tdp_adjustment = 0; | 1016 | rdev->pm.dpm.tdp_adjustment = 0; |
1017 | rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold); | 1017 | rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold); |
1018 | rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage); | 1018 | rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage); |
1019 | rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope); | 1019 | rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope); |
1020 | if (power_info->pplib5.usCACLeakageTableOffset) { | 1020 | if (power_info->pplib5.usCACLeakageTableOffset) { |
1021 | ATOM_PPLIB_CAC_Leakage_Table *cac_table = | 1021 | ATOM_PPLIB_CAC_Leakage_Table *cac_table = |
1022 | (ATOM_PPLIB_CAC_Leakage_Table *) | 1022 | (ATOM_PPLIB_CAC_Leakage_Table *) |
1023 | (mode_info->atom_context->bios + data_offset + | 1023 | (mode_info->atom_context->bios + data_offset + |
1024 | le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); | 1024 | le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); |
1025 | ATOM_PPLIB_CAC_Leakage_Record *entry; | 1025 | ATOM_PPLIB_CAC_Leakage_Record *entry; |
1026 | u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table); | 1026 | u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table); |
1027 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); | 1027 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); |
1028 | if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) { | 1028 | if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) { |
1029 | r600_free_extended_power_table(rdev); | 1029 | r600_free_extended_power_table(rdev); |
1030 | return -ENOMEM; | 1030 | return -ENOMEM; |
1031 | } | 1031 | } |
1032 | entry = &cac_table->entries[0]; | 1032 | entry = &cac_table->entries[0]; |
1033 | for (i = 0; i < cac_table->ucNumEntries; i++) { | 1033 | for (i = 0; i < cac_table->ucNumEntries; i++) { |
1034 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { | 1034 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { |
1035 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = | 1035 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = |
1036 | le16_to_cpu(entry->usVddc1); | 1036 | le16_to_cpu(entry->usVddc1); |
1037 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = | 1037 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = |
1038 | le16_to_cpu(entry->usVddc2); | 1038 | le16_to_cpu(entry->usVddc2); |
1039 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = | 1039 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = |
1040 | le16_to_cpu(entry->usVddc3); | 1040 | le16_to_cpu(entry->usVddc3); |
1041 | } else { | 1041 | } else { |
1042 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = | 1042 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = |
1043 | le16_to_cpu(entry->usVddc); | 1043 | le16_to_cpu(entry->usVddc); |
1044 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = | 1044 | rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = |
1045 | le32_to_cpu(entry->ulLeakageValue); | 1045 | le32_to_cpu(entry->ulLeakageValue); |
1046 | } | 1046 | } |
1047 | entry = (ATOM_PPLIB_CAC_Leakage_Record *) | 1047 | entry = (ATOM_PPLIB_CAC_Leakage_Record *) |
1048 | ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record)); | 1048 | ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record)); |
1049 | } | 1049 | } |
1050 | rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; | 1050 | rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; |
1051 | } | 1051 | } |
1052 | } | 1052 | } |
1053 | 1053 | ||
1054 | /* ext tables */ | 1054 | /* ext tables */ |
1055 | if (le16_to_cpu(power_info->pplib.usTableSize) >= | 1055 | if (le16_to_cpu(power_info->pplib.usTableSize) >= |
1056 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { | 1056 | sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { |
1057 | ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) | 1057 | ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) |
1058 | (mode_info->atom_context->bios + data_offset + | 1058 | (mode_info->atom_context->bios + data_offset + |
1059 | le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); | 1059 | le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); |
1060 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) && | 1060 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) && |
1061 | ext_hdr->usVCETableOffset) { | 1061 | ext_hdr->usVCETableOffset) { |
1062 | VCEClockInfoArray *array = (VCEClockInfoArray *) | 1062 | VCEClockInfoArray *array = (VCEClockInfoArray *) |
1063 | (mode_info->atom_context->bios + data_offset + | 1063 | (mode_info->atom_context->bios + data_offset + |
1064 | le16_to_cpu(ext_hdr->usVCETableOffset) + 1); | 1064 | le16_to_cpu(ext_hdr->usVCETableOffset) + 1); |
1065 | ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits = | 1065 | ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits = |
1066 | (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) | 1066 | (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) |
1067 | (mode_info->atom_context->bios + data_offset + | 1067 | (mode_info->atom_context->bios + data_offset + |
1068 | le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + | 1068 | le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + |
1069 | 1 + array->ucNumEntries * sizeof(VCEClockInfo)); | 1069 | 1 + array->ucNumEntries * sizeof(VCEClockInfo)); |
1070 | ATOM_PPLIB_VCE_State_Table *states = | 1070 | ATOM_PPLIB_VCE_State_Table *states = |
1071 | (ATOM_PPLIB_VCE_State_Table *) | 1071 | (ATOM_PPLIB_VCE_State_Table *) |
1072 | (mode_info->atom_context->bios + data_offset + | 1072 | (mode_info->atom_context->bios + data_offset + |
1073 | le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + | 1073 | le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + |
1074 | 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) + | 1074 | 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) + |
1075 | 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record))); | 1075 | 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record))); |
1076 | ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; | 1076 | ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; |
1077 | ATOM_PPLIB_VCE_State_Record *state_entry; | 1077 | ATOM_PPLIB_VCE_State_Record *state_entry; |
1078 | VCEClockInfo *vce_clk; | 1078 | VCEClockInfo *vce_clk; |
1079 | u32 size = limits->numEntries * | 1079 | u32 size = limits->numEntries * |
1080 | sizeof(struct radeon_vce_clock_voltage_dependency_entry); | 1080 | sizeof(struct radeon_vce_clock_voltage_dependency_entry); |
1081 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = | 1081 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = |
1082 | kzalloc(size, GFP_KERNEL); | 1082 | kzalloc(size, GFP_KERNEL); |
1083 | if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { | 1083 | if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { |
1084 | r600_free_extended_power_table(rdev); | 1084 | r600_free_extended_power_table(rdev); |
1085 | return -ENOMEM; | 1085 | return -ENOMEM; |
1086 | } | 1086 | } |
1087 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = | 1087 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = |
1088 | limits->numEntries; | 1088 | limits->numEntries; |
1089 | entry = &limits->entries[0]; | 1089 | entry = &limits->entries[0]; |
1090 | state_entry = &states->entries[0]; | 1090 | state_entry = &states->entries[0]; |
1091 | for (i = 0; i < limits->numEntries; i++) { | 1091 | for (i = 0; i < limits->numEntries; i++) { |
1092 | vce_clk = (VCEClockInfo *) | 1092 | vce_clk = (VCEClockInfo *) |
1093 | ((u8 *)&array->entries[0] + | 1093 | ((u8 *)&array->entries[0] + |
1094 | (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); | 1094 | (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); |
1095 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = | 1095 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = |
1096 | le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); | 1096 | le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); |
1097 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = | 1097 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = |
1098 | le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); | 1098 | le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); |
1099 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = | 1099 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = |
1100 | le16_to_cpu(entry->usVoltage); | 1100 | le16_to_cpu(entry->usVoltage); |
1101 | entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) | 1101 | entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) |
1102 | ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); | 1102 | ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); |
1103 | } | 1103 | } |
1104 | for (i = 0; i < states->numEntries; i++) { | 1104 | for (i = 0; i < states->numEntries; i++) { |
1105 | if (i >= RADEON_MAX_VCE_LEVELS) | 1105 | if (i >= RADEON_MAX_VCE_LEVELS) |
1106 | break; | 1106 | break; |
1107 | vce_clk = (VCEClockInfo *) | 1107 | vce_clk = (VCEClockInfo *) |
1108 | ((u8 *)&array->entries[0] + | 1108 | ((u8 *)&array->entries[0] + |
1109 | (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); | 1109 | (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); |
1110 | rdev->pm.dpm.vce_states[i].evclk = | 1110 | rdev->pm.dpm.vce_states[i].evclk = |
1111 | le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); | 1111 | le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); |
1112 | rdev->pm.dpm.vce_states[i].ecclk = | 1112 | rdev->pm.dpm.vce_states[i].ecclk = |
1113 | le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); | 1113 | le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); |
1114 | rdev->pm.dpm.vce_states[i].clk_idx = | 1114 | rdev->pm.dpm.vce_states[i].clk_idx = |
1115 | state_entry->ucClockInfoIndex & 0x3f; | 1115 | state_entry->ucClockInfoIndex & 0x3f; |
1116 | rdev->pm.dpm.vce_states[i].pstate = | 1116 | rdev->pm.dpm.vce_states[i].pstate = |
1117 | (state_entry->ucClockInfoIndex & 0xc0) >> 6; | 1117 | (state_entry->ucClockInfoIndex & 0xc0) >> 6; |
1118 | state_entry = (ATOM_PPLIB_VCE_State_Record *) | 1118 | state_entry = (ATOM_PPLIB_VCE_State_Record *) |
1119 | ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record)); | 1119 | ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record)); |
1120 | } | 1120 | } |
1121 | } | 1121 | } |
1122 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && | 1122 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && |
1123 | ext_hdr->usUVDTableOffset) { | 1123 | ext_hdr->usUVDTableOffset) { |
1124 | UVDClockInfoArray *array = (UVDClockInfoArray *) | 1124 | UVDClockInfoArray *array = (UVDClockInfoArray *) |
1125 | (mode_info->atom_context->bios + data_offset + | 1125 | (mode_info->atom_context->bios + data_offset + |
1126 | le16_to_cpu(ext_hdr->usUVDTableOffset) + 1); | 1126 | le16_to_cpu(ext_hdr->usUVDTableOffset) + 1); |
1127 | ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits = | 1127 | ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits = |
1128 | (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *) | 1128 | (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *) |
1129 | (mode_info->atom_context->bios + data_offset + | 1129 | (mode_info->atom_context->bios + data_offset + |
1130 | le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 + | 1130 | le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 + |
1131 | 1 + (array->ucNumEntries * sizeof (UVDClockInfo))); | 1131 | 1 + (array->ucNumEntries * sizeof (UVDClockInfo))); |
1132 | ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry; | 1132 | ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry; |
1133 | u32 size = limits->numEntries * | 1133 | u32 size = limits->numEntries * |
1134 | sizeof(struct radeon_uvd_clock_voltage_dependency_entry); | 1134 | sizeof(struct radeon_uvd_clock_voltage_dependency_entry); |
1135 | rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = | 1135 | rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = |
1136 | kzalloc(size, GFP_KERNEL); | 1136 | kzalloc(size, GFP_KERNEL); |
1137 | if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { | 1137 | if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { |
1138 | r600_free_extended_power_table(rdev); | 1138 | r600_free_extended_power_table(rdev); |
1139 | return -ENOMEM; | 1139 | return -ENOMEM; |
1140 | } | 1140 | } |
1141 | rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = | 1141 | rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = |
1142 | limits->numEntries; | 1142 | limits->numEntries; |
1143 | entry = &limits->entries[0]; | 1143 | entry = &limits->entries[0]; |
1144 | for (i = 0; i < limits->numEntries; i++) { | 1144 | for (i = 0; i < limits->numEntries; i++) { |
1145 | UVDClockInfo *uvd_clk = (UVDClockInfo *) | 1145 | UVDClockInfo *uvd_clk = (UVDClockInfo *) |
1146 | ((u8 *)&array->entries[0] + | 1146 | ((u8 *)&array->entries[0] + |
1147 | (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo))); | 1147 | (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo))); |
1148 | rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = | 1148 | rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = |
1149 | le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16); | 1149 | le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16); |
1150 | rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = | 1150 | rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = |
1151 | le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); | 1151 | le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); |
1152 | rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = | 1152 | rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = |
1153 | le16_to_cpu(entry->usVoltage); | 1153 | le16_to_cpu(entry->usVoltage); |
1154 | entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) | 1154 | entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) |
1155 | ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); | 1155 | ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); |
1156 | } | 1156 | } |
1157 | } | 1157 | } |
1158 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) && | 1158 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) && |
1159 | ext_hdr->usSAMUTableOffset) { | 1159 | ext_hdr->usSAMUTableOffset) { |
1160 | ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits = | 1160 | ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits = |
1161 | (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) | 1161 | (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) |
1162 | (mode_info->atom_context->bios + data_offset + | 1162 | (mode_info->atom_context->bios + data_offset + |
1163 | le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1); | 1163 | le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1); |
1164 | ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry; | 1164 | ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry; |
1165 | u32 size = limits->numEntries * | 1165 | u32 size = limits->numEntries * |
1166 | sizeof(struct radeon_clock_voltage_dependency_entry); | 1166 | sizeof(struct radeon_clock_voltage_dependency_entry); |
1167 | rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = | 1167 | rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = |
1168 | kzalloc(size, GFP_KERNEL); | 1168 | kzalloc(size, GFP_KERNEL); |
1169 | if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { | 1169 | if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { |
1170 | r600_free_extended_power_table(rdev); | 1170 | r600_free_extended_power_table(rdev); |
1171 | return -ENOMEM; | 1171 | return -ENOMEM; |
1172 | } | 1172 | } |
1173 | rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = | 1173 | rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = |
1174 | limits->numEntries; | 1174 | limits->numEntries; |
1175 | entry = &limits->entries[0]; | 1175 | entry = &limits->entries[0]; |
1176 | for (i = 0; i < limits->numEntries; i++) { | 1176 | for (i = 0; i < limits->numEntries; i++) { |
1177 | rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = | 1177 | rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = |
1178 | le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16); | 1178 | le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16); |
1179 | rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = | 1179 | rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = |
1180 | le16_to_cpu(entry->usVoltage); | 1180 | le16_to_cpu(entry->usVoltage); |
1181 | entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *) | 1181 | entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *) |
1182 | ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record)); | 1182 | ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record)); |
1183 | } | 1183 | } |
1184 | } | 1184 | } |
1185 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && | 1185 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && |
1186 | ext_hdr->usPPMTableOffset) { | 1186 | ext_hdr->usPPMTableOffset) { |
1187 | ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) | 1187 | ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) |
1188 | (mode_info->atom_context->bios + data_offset + | 1188 | (mode_info->atom_context->bios + data_offset + |
1189 | le16_to_cpu(ext_hdr->usPPMTableOffset)); | 1189 | le16_to_cpu(ext_hdr->usPPMTableOffset)); |
1190 | rdev->pm.dpm.dyn_state.ppm_table = | 1190 | rdev->pm.dpm.dyn_state.ppm_table = |
1191 | kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL); | 1191 | kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL); |
1192 | if (!rdev->pm.dpm.dyn_state.ppm_table) { | 1192 | if (!rdev->pm.dpm.dyn_state.ppm_table) { |
1193 | r600_free_extended_power_table(rdev); | 1193 | r600_free_extended_power_table(rdev); |
1194 | return -ENOMEM; | 1194 | return -ENOMEM; |
1195 | } | 1195 | } |
1196 | rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; | 1196 | rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; |
1197 | rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number = | 1197 | rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number = |
1198 | le16_to_cpu(ppm->usCpuCoreNumber); | 1198 | le16_to_cpu(ppm->usCpuCoreNumber); |
1199 | rdev->pm.dpm.dyn_state.ppm_table->platform_tdp = | 1199 | rdev->pm.dpm.dyn_state.ppm_table->platform_tdp = |
1200 | le32_to_cpu(ppm->ulPlatformTDP); | 1200 | le32_to_cpu(ppm->ulPlatformTDP); |
1201 | rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp = | 1201 | rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp = |
1202 | le32_to_cpu(ppm->ulSmallACPlatformTDP); | 1202 | le32_to_cpu(ppm->ulSmallACPlatformTDP); |
1203 | rdev->pm.dpm.dyn_state.ppm_table->platform_tdc = | 1203 | rdev->pm.dpm.dyn_state.ppm_table->platform_tdc = |
1204 | le32_to_cpu(ppm->ulPlatformTDC); | 1204 | le32_to_cpu(ppm->ulPlatformTDC); |
1205 | rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc = | 1205 | rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc = |
1206 | le32_to_cpu(ppm->ulSmallACPlatformTDC); | 1206 | le32_to_cpu(ppm->ulSmallACPlatformTDC); |
1207 | rdev->pm.dpm.dyn_state.ppm_table->apu_tdp = | 1207 | rdev->pm.dpm.dyn_state.ppm_table->apu_tdp = |
1208 | le32_to_cpu(ppm->ulApuTDP); | 1208 | le32_to_cpu(ppm->ulApuTDP); |
1209 | rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp = | 1209 | rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp = |
1210 | le32_to_cpu(ppm->ulDGpuTDP); | 1210 | le32_to_cpu(ppm->ulDGpuTDP); |
1211 | rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power = | 1211 | rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power = |
1212 | le32_to_cpu(ppm->ulDGpuUlvPower); | 1212 | le32_to_cpu(ppm->ulDGpuUlvPower); |
1213 | rdev->pm.dpm.dyn_state.ppm_table->tj_max = | 1213 | rdev->pm.dpm.dyn_state.ppm_table->tj_max = |
1214 | le32_to_cpu(ppm->ulTjmax); | 1214 | le32_to_cpu(ppm->ulTjmax); |
1215 | } | 1215 | } |
1216 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) && | 1216 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) && |
1217 | ext_hdr->usACPTableOffset) { | 1217 | ext_hdr->usACPTableOffset) { |
1218 | ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits = | 1218 | ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits = |
1219 | (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) | 1219 | (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) |
1220 | (mode_info->atom_context->bios + data_offset + | 1220 | (mode_info->atom_context->bios + data_offset + |
1221 | le16_to_cpu(ext_hdr->usACPTableOffset) + 1); | 1221 | le16_to_cpu(ext_hdr->usACPTableOffset) + 1); |
1222 | ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry; | 1222 | ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry; |
1223 | u32 size = limits->numEntries * | 1223 | u32 size = limits->numEntries * |
1224 | sizeof(struct radeon_clock_voltage_dependency_entry); | 1224 | sizeof(struct radeon_clock_voltage_dependency_entry); |
1225 | rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = | 1225 | rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = |
1226 | kzalloc(size, GFP_KERNEL); | 1226 | kzalloc(size, GFP_KERNEL); |
1227 | if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { | 1227 | if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { |
1228 | r600_free_extended_power_table(rdev); | 1228 | r600_free_extended_power_table(rdev); |
1229 | return -ENOMEM; | 1229 | return -ENOMEM; |
1230 | } | 1230 | } |
1231 | rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = | 1231 | rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = |
1232 | limits->numEntries; | 1232 | limits->numEntries; |
1233 | entry = &limits->entries[0]; | 1233 | entry = &limits->entries[0]; |
1234 | for (i = 0; i < limits->numEntries; i++) { | 1234 | for (i = 0; i < limits->numEntries; i++) { |
1235 | rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = | 1235 | rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = |
1236 | le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16); | 1236 | le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16); |
1237 | rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = | 1237 | rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = |
1238 | le16_to_cpu(entry->usVoltage); | 1238 | le16_to_cpu(entry->usVoltage); |
1239 | entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *) | 1239 | entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *) |
1240 | ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record)); | 1240 | ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record)); |
1241 | } | 1241 | } |
1242 | } | 1242 | } |
1243 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && | 1243 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && |
1244 | ext_hdr->usPowerTuneTableOffset) { | 1244 | ext_hdr->usPowerTuneTableOffset) { |
1245 | u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset + | 1245 | u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset + |
1246 | le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); | 1246 | le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); |
1247 | ATOM_PowerTune_Table *pt; | 1247 | ATOM_PowerTune_Table *pt; |
1248 | rdev->pm.dpm.dyn_state.cac_tdp_table = | 1248 | rdev->pm.dpm.dyn_state.cac_tdp_table = |
1249 | kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL); | 1249 | kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL); |
1250 | if (!rdev->pm.dpm.dyn_state.cac_tdp_table) { | 1250 | if (!rdev->pm.dpm.dyn_state.cac_tdp_table) { |
1251 | r600_free_extended_power_table(rdev); | 1251 | r600_free_extended_power_table(rdev); |
1252 | return -ENOMEM; | 1252 | return -ENOMEM; |
1253 | } | 1253 | } |
1254 | if (rev > 0) { | 1254 | if (rev > 0) { |
1255 | ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *) | 1255 | ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *) |
1256 | (mode_info->atom_context->bios + data_offset + | 1256 | (mode_info->atom_context->bios + data_offset + |
1257 | le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); | 1257 | le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); |
1258 | rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = | 1258 | rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = |
1259 | ppt->usMaximumPowerDeliveryLimit; | 1259 | le16_to_cpu(ppt->usMaximumPowerDeliveryLimit); |
1260 | pt = &ppt->power_tune_table; | 1260 | pt = &ppt->power_tune_table; |
1261 | } else { | 1261 | } else { |
1262 | ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) | 1262 | ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) |
1263 | (mode_info->atom_context->bios + data_offset + | 1263 | (mode_info->atom_context->bios + data_offset + |
1264 | le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); | 1264 | le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); |
1265 | rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255; | 1265 | rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255; |
1266 | pt = &ppt->power_tune_table; | 1266 | pt = &ppt->power_tune_table; |
1267 | } | 1267 | } |
1268 | rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP); | 1268 | rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP); |
1269 | rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp = | 1269 | rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp = |
1270 | le16_to_cpu(pt->usConfigurableTDP); | 1270 | le16_to_cpu(pt->usConfigurableTDP); |
1271 | rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC); | 1271 | rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC); |
1272 | rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit = | 1272 | rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit = |
1273 | le16_to_cpu(pt->usBatteryPowerLimit); | 1273 | le16_to_cpu(pt->usBatteryPowerLimit); |
1274 | rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit = | 1274 | rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit = |
1275 | le16_to_cpu(pt->usSmallPowerLimit); | 1275 | le16_to_cpu(pt->usSmallPowerLimit); |
1276 | rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage = | 1276 | rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage = |
1277 | le16_to_cpu(pt->usLowCACLeakage); | 1277 | le16_to_cpu(pt->usLowCACLeakage); |
1278 | rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage = | 1278 | rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage = |
1279 | le16_to_cpu(pt->usHighCACLeakage); | 1279 | le16_to_cpu(pt->usHighCACLeakage); |
1280 | } | 1280 | } |
1281 | } | 1281 | } |
1282 | 1282 | ||
1283 | return 0; | 1283 | return 0; |
1284 | } | 1284 | } |
1285 | 1285 | ||
1286 | void r600_free_extended_power_table(struct radeon_device *rdev) | 1286 | void r600_free_extended_power_table(struct radeon_device *rdev) |
1287 | { | 1287 | { |
1288 | struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state; | 1288 | struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state; |
1289 | 1289 | ||
1290 | kfree(dyn_state->vddc_dependency_on_sclk.entries); | 1290 | kfree(dyn_state->vddc_dependency_on_sclk.entries); |
1291 | kfree(dyn_state->vddci_dependency_on_mclk.entries); | 1291 | kfree(dyn_state->vddci_dependency_on_mclk.entries); |
1292 | kfree(dyn_state->vddc_dependency_on_mclk.entries); | 1292 | kfree(dyn_state->vddc_dependency_on_mclk.entries); |
1293 | kfree(dyn_state->mvdd_dependency_on_mclk.entries); | 1293 | kfree(dyn_state->mvdd_dependency_on_mclk.entries); |
1294 | kfree(dyn_state->cac_leakage_table.entries); | 1294 | kfree(dyn_state->cac_leakage_table.entries); |
1295 | kfree(dyn_state->phase_shedding_limits_table.entries); | 1295 | kfree(dyn_state->phase_shedding_limits_table.entries); |
1296 | kfree(dyn_state->ppm_table); | 1296 | kfree(dyn_state->ppm_table); |
1297 | kfree(dyn_state->cac_tdp_table); | 1297 | kfree(dyn_state->cac_tdp_table); |
1298 | kfree(dyn_state->vce_clock_voltage_dependency_table.entries); | 1298 | kfree(dyn_state->vce_clock_voltage_dependency_table.entries); |
1299 | kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); | 1299 | kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); |
1300 | kfree(dyn_state->samu_clock_voltage_dependency_table.entries); | 1300 | kfree(dyn_state->samu_clock_voltage_dependency_table.entries); |
1301 | kfree(dyn_state->acp_clock_voltage_dependency_table.entries); | 1301 | kfree(dyn_state->acp_clock_voltage_dependency_table.entries); |
1302 | } | 1302 | } |
1303 | 1303 | ||
1304 | enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, | 1304 | enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, |
1305 | u32 sys_mask, | 1305 | u32 sys_mask, |
1306 | enum radeon_pcie_gen asic_gen, | 1306 | enum radeon_pcie_gen asic_gen, |
1307 | enum radeon_pcie_gen default_gen) | 1307 | enum radeon_pcie_gen default_gen) |
1308 | { | 1308 | { |
1309 | switch (asic_gen) { | 1309 | switch (asic_gen) { |
1310 | case RADEON_PCIE_GEN1: | 1310 | case RADEON_PCIE_GEN1: |
1311 | return RADEON_PCIE_GEN1; | 1311 | return RADEON_PCIE_GEN1; |
1312 | case RADEON_PCIE_GEN2: | 1312 | case RADEON_PCIE_GEN2: |
1313 | return RADEON_PCIE_GEN2; | 1313 | return RADEON_PCIE_GEN2; |
1314 | case RADEON_PCIE_GEN3: | 1314 | case RADEON_PCIE_GEN3: |
1315 | return RADEON_PCIE_GEN3; | 1315 | return RADEON_PCIE_GEN3; |
1316 | default: | 1316 | default: |
1317 | if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3)) | 1317 | if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3)) |
1318 | return RADEON_PCIE_GEN3; | 1318 | return RADEON_PCIE_GEN3; |
1319 | else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2)) | 1319 | else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2)) |
1320 | return RADEON_PCIE_GEN2; | 1320 | return RADEON_PCIE_GEN2; |
1321 | else | 1321 | else |
1322 | return RADEON_PCIE_GEN1; | 1322 | return RADEON_PCIE_GEN1; |
1323 | } | 1323 | } |
1324 | return RADEON_PCIE_GEN1; | 1324 | return RADEON_PCIE_GEN1; |
1325 | } | 1325 | } |
1326 | 1326 | ||
1327 | u16 r600_get_pcie_lane_support(struct radeon_device *rdev, | 1327 | u16 r600_get_pcie_lane_support(struct radeon_device *rdev, |
1328 | u16 asic_lanes, | 1328 | u16 asic_lanes, |
1329 | u16 default_lanes) | 1329 | u16 default_lanes) |
1330 | { | 1330 | { |
1331 | switch (asic_lanes) { | 1331 | switch (asic_lanes) { |
1332 | case 0: | 1332 | case 0: |
1333 | default: | 1333 | default: |
1334 | return default_lanes; | 1334 | return default_lanes; |
1335 | case 1: | 1335 | case 1: |
1336 | return 1; | 1336 | return 1; |
1337 | case 2: | 1337 | case 2: |
1338 | return 2; | 1338 | return 2; |
1339 | case 4: | 1339 | case 4: |
1340 | return 4; | 1340 | return 4; |
1341 | case 8: | 1341 | case 8: |
1342 | return 8; | 1342 | return 8; |
1343 | case 12: | 1343 | case 12: |
1344 | return 12; | 1344 | return 12; |
1345 | case 16: | 1345 | case 16: |
1346 | return 16; | 1346 | return 16; |
1347 | } | 1347 | } |
1348 | } | 1348 | } |
1349 | 1349 | ||
1350 | u8 r600_encode_pci_lane_width(u32 lanes) | 1350 | u8 r600_encode_pci_lane_width(u32 lanes) |
1351 | { | 1351 | { |
1352 | u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 }; | 1352 | u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 }; |
1353 | 1353 | ||
1354 | if (lanes > 16) | 1354 | if (lanes > 16) |
1355 | return 0; | 1355 | return 0; |
1356 | 1356 | ||
1357 | return encoded_lanes[lanes]; | 1357 | return encoded_lanes[lanes]; |
1358 | } | 1358 | } |
1359 | 1359 |
drivers/gpu/drm/radeon/radeon_encoders.c
1 | /* | 1 | /* |
2 | * Copyright 2007-8 Advanced Micro Devices, Inc. | 2 | * Copyright 2007-8 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. | 3 | * Copyright 2008 Red Hat Inc. |
4 | * | 4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a | 5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the "Software"), | 6 | * copy of this software and associated documentation files (the "Software"), |
7 | * to deal in the Software without restriction, including without limitation | 7 | * to deal in the Software without restriction, including without limitation |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * and/or sell copies of the Software, and to permit persons to whom the | 9 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * Software is furnished to do so, subject to the following conditions: | 10 | * Software is furnished to do so, subject to the following conditions: |
11 | * | 11 | * |
12 | * The above copyright notice and this permission notice shall be included in | 12 | * The above copyright notice and this permission notice shall be included in |
13 | * all copies or substantial portions of the Software. | 13 | * all copies or substantial portions of the Software. |
14 | * | 14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | 18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | 19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | 20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * OTHER DEALINGS IN THE SOFTWARE. | 21 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * | 22 | * |
23 | * Authors: Dave Airlie | 23 | * Authors: Dave Airlie |
24 | * Alex Deucher | 24 | * Alex Deucher |
25 | */ | 25 | */ |
26 | #include <drm/drmP.h> | 26 | #include <drm/drmP.h> |
27 | #include <drm/drm_crtc_helper.h> | 27 | #include <drm/drm_crtc_helper.h> |
28 | #include <drm/radeon_drm.h> | 28 | #include <drm/radeon_drm.h> |
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "atom.h" | 30 | #include "atom.h" |
31 | 31 | ||
32 | extern void | 32 | extern void |
33 | radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, | 33 | radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, |
34 | struct drm_connector *drm_connector); | 34 | struct drm_connector *drm_connector); |
35 | extern void | 35 | extern void |
36 | radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, | 36 | radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, |
37 | struct drm_connector *drm_connector); | 37 | struct drm_connector *drm_connector); |
38 | 38 | ||
39 | 39 | ||
40 | static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) | 40 | static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) |
41 | { | 41 | { |
42 | struct drm_device *dev = encoder->dev; | 42 | struct drm_device *dev = encoder->dev; |
43 | struct radeon_device *rdev = dev->dev_private; | 43 | struct radeon_device *rdev = dev->dev_private; |
44 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 44 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
45 | struct drm_encoder *clone_encoder; | 45 | struct drm_encoder *clone_encoder; |
46 | uint32_t index_mask = 0; | 46 | uint32_t index_mask = 0; |
47 | int count; | 47 | int count; |
48 | 48 | ||
49 | /* DIG routing gets problematic */ | 49 | /* DIG routing gets problematic */ |
50 | if (rdev->family >= CHIP_R600) | 50 | if (rdev->family >= CHIP_R600) |
51 | return index_mask; | 51 | return index_mask; |
52 | /* LVDS/TV are too wacky */ | 52 | /* LVDS/TV are too wacky */ |
53 | if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) | 53 | if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) |
54 | return index_mask; | 54 | return index_mask; |
55 | /* DVO requires 2x ppll clocks depending on tmds chip */ | 55 | /* DVO requires 2x ppll clocks depending on tmds chip */ |
56 | if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) | 56 | if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) |
57 | return index_mask; | 57 | return index_mask; |
58 | 58 | ||
59 | count = -1; | 59 | count = -1; |
60 | list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) { | 60 | list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) { |
61 | struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder); | 61 | struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder); |
62 | count++; | 62 | count++; |
63 | 63 | ||
64 | if (clone_encoder == encoder) | 64 | if (clone_encoder == encoder) |
65 | continue; | 65 | continue; |
66 | if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT)) | 66 | if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT)) |
67 | continue; | 67 | continue; |
68 | if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT) | 68 | if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT) |
69 | continue; | 69 | continue; |
70 | else | 70 | else |
71 | index_mask |= (1 << count); | 71 | index_mask |= (1 << count); |
72 | } | 72 | } |
73 | return index_mask; | 73 | return index_mask; |
74 | } | 74 | } |
75 | 75 | ||
76 | void radeon_setup_encoder_clones(struct drm_device *dev) | 76 | void radeon_setup_encoder_clones(struct drm_device *dev) |
77 | { | 77 | { |
78 | struct drm_encoder *encoder; | 78 | struct drm_encoder *encoder; |
79 | 79 | ||
80 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 80 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
81 | encoder->possible_clones = radeon_encoder_clones(encoder); | 81 | encoder->possible_clones = radeon_encoder_clones(encoder); |
82 | } | 82 | } |
83 | } | 83 | } |
84 | 84 | ||
85 | uint32_t | 85 | uint32_t |
86 | radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac) | 86 | radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac) |
87 | { | 87 | { |
88 | struct radeon_device *rdev = dev->dev_private; | 88 | struct radeon_device *rdev = dev->dev_private; |
89 | uint32_t ret = 0; | 89 | uint32_t ret = 0; |
90 | 90 | ||
91 | switch (supported_device) { | 91 | switch (supported_device) { |
92 | case ATOM_DEVICE_CRT1_SUPPORT: | 92 | case ATOM_DEVICE_CRT1_SUPPORT: |
93 | case ATOM_DEVICE_TV1_SUPPORT: | 93 | case ATOM_DEVICE_TV1_SUPPORT: |
94 | case ATOM_DEVICE_TV2_SUPPORT: | 94 | case ATOM_DEVICE_TV2_SUPPORT: |
95 | case ATOM_DEVICE_CRT2_SUPPORT: | 95 | case ATOM_DEVICE_CRT2_SUPPORT: |
96 | case ATOM_DEVICE_CV_SUPPORT: | 96 | case ATOM_DEVICE_CV_SUPPORT: |
97 | switch (dac) { | 97 | switch (dac) { |
98 | case 1: /* dac a */ | 98 | case 1: /* dac a */ |
99 | if ((rdev->family == CHIP_RS300) || | 99 | if ((rdev->family == CHIP_RS300) || |
100 | (rdev->family == CHIP_RS400) || | 100 | (rdev->family == CHIP_RS400) || |
101 | (rdev->family == CHIP_RS480)) | 101 | (rdev->family == CHIP_RS480)) |
102 | ret = ENCODER_INTERNAL_DAC2_ENUM_ID1; | 102 | ret = ENCODER_INTERNAL_DAC2_ENUM_ID1; |
103 | else if (ASIC_IS_AVIVO(rdev)) | 103 | else if (ASIC_IS_AVIVO(rdev)) |
104 | ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1; | 104 | ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1; |
105 | else | 105 | else |
106 | ret = ENCODER_INTERNAL_DAC1_ENUM_ID1; | 106 | ret = ENCODER_INTERNAL_DAC1_ENUM_ID1; |
107 | break; | 107 | break; |
108 | case 2: /* dac b */ | 108 | case 2: /* dac b */ |
109 | if (ASIC_IS_AVIVO(rdev)) | 109 | if (ASIC_IS_AVIVO(rdev)) |
110 | ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1; | 110 | ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1; |
111 | else { | 111 | else { |
112 | /*if (rdev->family == CHIP_R200) | 112 | /*if (rdev->family == CHIP_R200) |
113 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; | 113 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; |
114 | else*/ | 114 | else*/ |
115 | ret = ENCODER_INTERNAL_DAC2_ENUM_ID1; | 115 | ret = ENCODER_INTERNAL_DAC2_ENUM_ID1; |
116 | } | 116 | } |
117 | break; | 117 | break; |
118 | case 3: /* external dac */ | 118 | case 3: /* external dac */ |
119 | if (ASIC_IS_AVIVO(rdev)) | 119 | if (ASIC_IS_AVIVO(rdev)) |
120 | ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1; | 120 | ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1; |
121 | else | 121 | else |
122 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; | 122 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; |
123 | break; | 123 | break; |
124 | } | 124 | } |
125 | break; | 125 | break; |
126 | case ATOM_DEVICE_LCD1_SUPPORT: | 126 | case ATOM_DEVICE_LCD1_SUPPORT: |
127 | if (ASIC_IS_AVIVO(rdev)) | 127 | if (ASIC_IS_AVIVO(rdev)) |
128 | ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1; | 128 | ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1; |
129 | else | 129 | else |
130 | ret = ENCODER_INTERNAL_LVDS_ENUM_ID1; | 130 | ret = ENCODER_INTERNAL_LVDS_ENUM_ID1; |
131 | break; | 131 | break; |
132 | case ATOM_DEVICE_DFP1_SUPPORT: | 132 | case ATOM_DEVICE_DFP1_SUPPORT: |
133 | if ((rdev->family == CHIP_RS300) || | 133 | if ((rdev->family == CHIP_RS300) || |
134 | (rdev->family == CHIP_RS400) || | 134 | (rdev->family == CHIP_RS400) || |
135 | (rdev->family == CHIP_RS480)) | 135 | (rdev->family == CHIP_RS480)) |
136 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; | 136 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; |
137 | else if (ASIC_IS_AVIVO(rdev)) | 137 | else if (ASIC_IS_AVIVO(rdev)) |
138 | ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1; | 138 | ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1; |
139 | else | 139 | else |
140 | ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1; | 140 | ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1; |
141 | break; | 141 | break; |
142 | case ATOM_DEVICE_LCD2_SUPPORT: | 142 | case ATOM_DEVICE_LCD2_SUPPORT: |
143 | case ATOM_DEVICE_DFP2_SUPPORT: | 143 | case ATOM_DEVICE_DFP2_SUPPORT: |
144 | if ((rdev->family == CHIP_RS600) || | 144 | if ((rdev->family == CHIP_RS600) || |
145 | (rdev->family == CHIP_RS690) || | 145 | (rdev->family == CHIP_RS690) || |
146 | (rdev->family == CHIP_RS740)) | 146 | (rdev->family == CHIP_RS740)) |
147 | ret = ENCODER_INTERNAL_DDI_ENUM_ID1; | 147 | ret = ENCODER_INTERNAL_DDI_ENUM_ID1; |
148 | else if (ASIC_IS_AVIVO(rdev)) | 148 | else if (ASIC_IS_AVIVO(rdev)) |
149 | ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1; | 149 | ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1; |
150 | else | 150 | else |
151 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; | 151 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; |
152 | break; | 152 | break; |
153 | case ATOM_DEVICE_DFP3_SUPPORT: | 153 | case ATOM_DEVICE_DFP3_SUPPORT: |
154 | ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1; | 154 | ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1; |
155 | break; | 155 | break; |
156 | } | 156 | } |
157 | 157 | ||
158 | return ret; | 158 | return ret; |
159 | } | 159 | } |
160 | 160 | ||
161 | static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, | 161 | static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, |
162 | struct drm_connector *connector) | 162 | struct drm_connector *connector) |
163 | { | 163 | { |
164 | struct drm_device *dev = radeon_encoder->base.dev; | 164 | struct drm_device *dev = radeon_encoder->base.dev; |
165 | struct radeon_device *rdev = dev->dev_private; | 165 | struct radeon_device *rdev = dev->dev_private; |
166 | bool use_bl = false; | 166 | bool use_bl = false; |
167 | 167 | ||
168 | if (!(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))) | 168 | if (!(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))) |
169 | return; | 169 | return; |
170 | 170 | ||
171 | if (radeon_backlight == 0) { | 171 | if (radeon_backlight == 0) { |
172 | return; | 172 | return; |
173 | } else if (radeon_backlight == 1) { | 173 | } else if (radeon_backlight == 1) { |
174 | use_bl = true; | 174 | use_bl = true; |
175 | } else if (radeon_backlight == -1) { | 175 | } else if (radeon_backlight == -1) { |
176 | /* Quirks */ | 176 | /* Quirks */ |
177 | /* Amilo Xi 2550 only works with acpi bl */ | 177 | /* Amilo Xi 2550 only works with acpi bl */ |
178 | if ((rdev->pdev->device == 0x9583) && | 178 | if ((rdev->pdev->device == 0x9583) && |
179 | (rdev->pdev->subsystem_vendor == 0x1734) && | 179 | (rdev->pdev->subsystem_vendor == 0x1734) && |
180 | (rdev->pdev->subsystem_device == 0x1107)) | 180 | (rdev->pdev->subsystem_device == 0x1107)) |
181 | use_bl = false; | 181 | use_bl = false; |
182 | /* disable native backlight control on older asics */ | ||
183 | else if (rdev->family < CHIP_R600) | ||
184 | use_bl = false; | ||
182 | else | 185 | else |
183 | use_bl = true; | 186 | use_bl = true; |
184 | } | 187 | } |
185 | 188 | ||
186 | if (use_bl) { | 189 | if (use_bl) { |
187 | if (rdev->is_atom_bios) | 190 | if (rdev->is_atom_bios) |
188 | radeon_atom_backlight_init(radeon_encoder, connector); | 191 | radeon_atom_backlight_init(radeon_encoder, connector); |
189 | else | 192 | else |
190 | radeon_legacy_backlight_init(radeon_encoder, connector); | 193 | radeon_legacy_backlight_init(radeon_encoder, connector); |
191 | rdev->mode_info.bl_encoder = radeon_encoder; | 194 | rdev->mode_info.bl_encoder = radeon_encoder; |
192 | } | 195 | } |
193 | } | 196 | } |
194 | 197 | ||
195 | void | 198 | void |
196 | radeon_link_encoder_connector(struct drm_device *dev) | 199 | radeon_link_encoder_connector(struct drm_device *dev) |
197 | { | 200 | { |
198 | struct drm_connector *connector; | 201 | struct drm_connector *connector; |
199 | struct radeon_connector *radeon_connector; | 202 | struct radeon_connector *radeon_connector; |
200 | struct drm_encoder *encoder; | 203 | struct drm_encoder *encoder; |
201 | struct radeon_encoder *radeon_encoder; | 204 | struct radeon_encoder *radeon_encoder; |
202 | 205 | ||
203 | /* walk the list and link encoders to connectors */ | 206 | /* walk the list and link encoders to connectors */ |
204 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 207 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
205 | radeon_connector = to_radeon_connector(connector); | 208 | radeon_connector = to_radeon_connector(connector); |
206 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 209 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
207 | radeon_encoder = to_radeon_encoder(encoder); | 210 | radeon_encoder = to_radeon_encoder(encoder); |
208 | if (radeon_encoder->devices & radeon_connector->devices) { | 211 | if (radeon_encoder->devices & radeon_connector->devices) { |
209 | drm_mode_connector_attach_encoder(connector, encoder); | 212 | drm_mode_connector_attach_encoder(connector, encoder); |
210 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | 213 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) |
211 | radeon_encoder_add_backlight(radeon_encoder, connector); | 214 | radeon_encoder_add_backlight(radeon_encoder, connector); |
212 | } | 215 | } |
213 | } | 216 | } |
214 | } | 217 | } |
215 | } | 218 | } |
216 | 219 | ||
217 | void radeon_encoder_set_active_device(struct drm_encoder *encoder) | 220 | void radeon_encoder_set_active_device(struct drm_encoder *encoder) |
218 | { | 221 | { |
219 | struct drm_device *dev = encoder->dev; | 222 | struct drm_device *dev = encoder->dev; |
220 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 223 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
221 | struct drm_connector *connector; | 224 | struct drm_connector *connector; |
222 | 225 | ||
223 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 226 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
224 | if (connector->encoder == encoder) { | 227 | if (connector->encoder == encoder) { |
225 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 228 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
226 | radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices; | 229 | radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices; |
227 | DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n", | 230 | DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n", |
228 | radeon_encoder->active_device, radeon_encoder->devices, | 231 | radeon_encoder->active_device, radeon_encoder->devices, |
229 | radeon_connector->devices, encoder->encoder_type); | 232 | radeon_connector->devices, encoder->encoder_type); |
230 | } | 233 | } |
231 | } | 234 | } |
232 | } | 235 | } |
233 | 236 | ||
234 | struct drm_connector * | 237 | struct drm_connector * |
235 | radeon_get_connector_for_encoder(struct drm_encoder *encoder) | 238 | radeon_get_connector_for_encoder(struct drm_encoder *encoder) |
236 | { | 239 | { |
237 | struct drm_device *dev = encoder->dev; | 240 | struct drm_device *dev = encoder->dev; |
238 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 241 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
239 | struct drm_connector *connector; | 242 | struct drm_connector *connector; |
240 | struct radeon_connector *radeon_connector; | 243 | struct radeon_connector *radeon_connector; |
241 | 244 | ||
242 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 245 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
243 | radeon_connector = to_radeon_connector(connector); | 246 | radeon_connector = to_radeon_connector(connector); |
244 | if (radeon_encoder->active_device & radeon_connector->devices) | 247 | if (radeon_encoder->active_device & radeon_connector->devices) |
245 | return connector; | 248 | return connector; |
246 | } | 249 | } |
247 | return NULL; | 250 | return NULL; |
248 | } | 251 | } |
249 | 252 | ||
250 | struct drm_connector * | 253 | struct drm_connector * |
251 | radeon_get_connector_for_encoder_init(struct drm_encoder *encoder) | 254 | radeon_get_connector_for_encoder_init(struct drm_encoder *encoder) |
252 | { | 255 | { |
253 | struct drm_device *dev = encoder->dev; | 256 | struct drm_device *dev = encoder->dev; |
254 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 257 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
255 | struct drm_connector *connector; | 258 | struct drm_connector *connector; |
256 | struct radeon_connector *radeon_connector; | 259 | struct radeon_connector *radeon_connector; |
257 | 260 | ||
258 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 261 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
259 | radeon_connector = to_radeon_connector(connector); | 262 | radeon_connector = to_radeon_connector(connector); |
260 | if (radeon_encoder->devices & radeon_connector->devices) | 263 | if (radeon_encoder->devices & radeon_connector->devices) |
261 | return connector; | 264 | return connector; |
262 | } | 265 | } |
263 | return NULL; | 266 | return NULL; |
264 | } | 267 | } |
265 | 268 | ||
266 | struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder) | 269 | struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder) |
267 | { | 270 | { |
268 | struct drm_device *dev = encoder->dev; | 271 | struct drm_device *dev = encoder->dev; |
269 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 272 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
270 | struct drm_encoder *other_encoder; | 273 | struct drm_encoder *other_encoder; |
271 | struct radeon_encoder *other_radeon_encoder; | 274 | struct radeon_encoder *other_radeon_encoder; |
272 | 275 | ||
273 | if (radeon_encoder->is_ext_encoder) | 276 | if (radeon_encoder->is_ext_encoder) |
274 | return NULL; | 277 | return NULL; |
275 | 278 | ||
276 | list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { | 279 | list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { |
277 | if (other_encoder == encoder) | 280 | if (other_encoder == encoder) |
278 | continue; | 281 | continue; |
279 | other_radeon_encoder = to_radeon_encoder(other_encoder); | 282 | other_radeon_encoder = to_radeon_encoder(other_encoder); |
280 | if (other_radeon_encoder->is_ext_encoder && | 283 | if (other_radeon_encoder->is_ext_encoder && |
281 | (radeon_encoder->devices & other_radeon_encoder->devices)) | 284 | (radeon_encoder->devices & other_radeon_encoder->devices)) |
282 | return other_encoder; | 285 | return other_encoder; |
283 | } | 286 | } |
284 | return NULL; | 287 | return NULL; |
285 | } | 288 | } |
286 | 289 | ||
287 | u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder) | 290 | u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder) |
288 | { | 291 | { |
289 | struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder); | 292 | struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder); |
290 | 293 | ||
291 | if (other_encoder) { | 294 | if (other_encoder) { |
292 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder); | 295 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder); |
293 | 296 | ||
294 | switch (radeon_encoder->encoder_id) { | 297 | switch (radeon_encoder->encoder_id) { |
295 | case ENCODER_OBJECT_ID_TRAVIS: | 298 | case ENCODER_OBJECT_ID_TRAVIS: |
296 | case ENCODER_OBJECT_ID_NUTMEG: | 299 | case ENCODER_OBJECT_ID_NUTMEG: |
297 | return radeon_encoder->encoder_id; | 300 | return radeon_encoder->encoder_id; |
298 | default: | 301 | default: |
299 | return ENCODER_OBJECT_ID_NONE; | 302 | return ENCODER_OBJECT_ID_NONE; |
300 | } | 303 | } |
301 | } | 304 | } |
302 | return ENCODER_OBJECT_ID_NONE; | 305 | return ENCODER_OBJECT_ID_NONE; |
303 | } | 306 | } |
304 | 307 | ||
305 | void radeon_panel_mode_fixup(struct drm_encoder *encoder, | 308 | void radeon_panel_mode_fixup(struct drm_encoder *encoder, |
306 | struct drm_display_mode *adjusted_mode) | 309 | struct drm_display_mode *adjusted_mode) |
307 | { | 310 | { |
308 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 311 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
309 | struct drm_device *dev = encoder->dev; | 312 | struct drm_device *dev = encoder->dev; |
310 | struct radeon_device *rdev = dev->dev_private; | 313 | struct radeon_device *rdev = dev->dev_private; |
311 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; | 314 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; |
312 | unsigned hblank = native_mode->htotal - native_mode->hdisplay; | 315 | unsigned hblank = native_mode->htotal - native_mode->hdisplay; |
313 | unsigned vblank = native_mode->vtotal - native_mode->vdisplay; | 316 | unsigned vblank = native_mode->vtotal - native_mode->vdisplay; |
314 | unsigned hover = native_mode->hsync_start - native_mode->hdisplay; | 317 | unsigned hover = native_mode->hsync_start - native_mode->hdisplay; |
315 | unsigned vover = native_mode->vsync_start - native_mode->vdisplay; | 318 | unsigned vover = native_mode->vsync_start - native_mode->vdisplay; |
316 | unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start; | 319 | unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start; |
317 | unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start; | 320 | unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start; |
318 | 321 | ||
319 | adjusted_mode->clock = native_mode->clock; | 322 | adjusted_mode->clock = native_mode->clock; |
320 | adjusted_mode->flags = native_mode->flags; | 323 | adjusted_mode->flags = native_mode->flags; |
321 | 324 | ||
322 | if (ASIC_IS_AVIVO(rdev)) { | 325 | if (ASIC_IS_AVIVO(rdev)) { |
323 | adjusted_mode->hdisplay = native_mode->hdisplay; | 326 | adjusted_mode->hdisplay = native_mode->hdisplay; |
324 | adjusted_mode->vdisplay = native_mode->vdisplay; | 327 | adjusted_mode->vdisplay = native_mode->vdisplay; |
325 | } | 328 | } |
326 | 329 | ||
327 | adjusted_mode->htotal = native_mode->hdisplay + hblank; | 330 | adjusted_mode->htotal = native_mode->hdisplay + hblank; |
328 | adjusted_mode->hsync_start = native_mode->hdisplay + hover; | 331 | adjusted_mode->hsync_start = native_mode->hdisplay + hover; |
329 | adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width; | 332 | adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width; |
330 | 333 | ||
331 | adjusted_mode->vtotal = native_mode->vdisplay + vblank; | 334 | adjusted_mode->vtotal = native_mode->vdisplay + vblank; |
332 | adjusted_mode->vsync_start = native_mode->vdisplay + vover; | 335 | adjusted_mode->vsync_start = native_mode->vdisplay + vover; |
333 | adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width; | 336 | adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width; |
334 | 337 | ||
335 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | 338 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); |
336 | 339 | ||
337 | if (ASIC_IS_AVIVO(rdev)) { | 340 | if (ASIC_IS_AVIVO(rdev)) { |
338 | adjusted_mode->crtc_hdisplay = native_mode->hdisplay; | 341 | adjusted_mode->crtc_hdisplay = native_mode->hdisplay; |
339 | adjusted_mode->crtc_vdisplay = native_mode->vdisplay; | 342 | adjusted_mode->crtc_vdisplay = native_mode->vdisplay; |
340 | } | 343 | } |
341 | 344 | ||
342 | adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank; | 345 | adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank; |
343 | adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover; | 346 | adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover; |
344 | adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width; | 347 | adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width; |
345 | 348 | ||
346 | adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank; | 349 | adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank; |
347 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover; | 350 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover; |
348 | adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width; | 351 | adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width; |
349 | 352 | ||
350 | } | 353 | } |
351 | 354 | ||
352 | bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder, | 355 | bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder, |
353 | u32 pixel_clock) | 356 | u32 pixel_clock) |
354 | { | 357 | { |
355 | struct drm_device *dev = encoder->dev; | 358 | struct drm_device *dev = encoder->dev; |
356 | struct radeon_device *rdev = dev->dev_private; | 359 | struct radeon_device *rdev = dev->dev_private; |
357 | struct drm_connector *connector; | 360 | struct drm_connector *connector; |
358 | struct radeon_connector *radeon_connector; | 361 | struct radeon_connector *radeon_connector; |
359 | struct radeon_connector_atom_dig *dig_connector; | 362 | struct radeon_connector_atom_dig *dig_connector; |
360 | 363 | ||
361 | connector = radeon_get_connector_for_encoder(encoder); | 364 | connector = radeon_get_connector_for_encoder(encoder); |
362 | /* if we don't have an active device yet, just use one of | 365 | /* if we don't have an active device yet, just use one of |
363 | * the connectors tied to the encoder. | 366 | * the connectors tied to the encoder. |
364 | */ | 367 | */ |
365 | if (!connector) | 368 | if (!connector) |
366 | connector = radeon_get_connector_for_encoder_init(encoder); | 369 | connector = radeon_get_connector_for_encoder_init(encoder); |
367 | radeon_connector = to_radeon_connector(connector); | 370 | radeon_connector = to_radeon_connector(connector); |
368 | 371 | ||
369 | switch (connector->connector_type) { | 372 | switch (connector->connector_type) { |
370 | case DRM_MODE_CONNECTOR_DVII: | 373 | case DRM_MODE_CONNECTOR_DVII: |
371 | case DRM_MODE_CONNECTOR_HDMIB: | 374 | case DRM_MODE_CONNECTOR_HDMIB: |
372 | if (radeon_connector->use_digital) { | 375 | if (radeon_connector->use_digital) { |
373 | /* HDMI 1.3 supports up to 340 Mhz over single link */ | 376 | /* HDMI 1.3 supports up to 340 Mhz over single link */ |
374 | if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { | 377 | if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { |
375 | if (pixel_clock > 340000) | 378 | if (pixel_clock > 340000) |
376 | return true; | 379 | return true; |
377 | else | 380 | else |
378 | return false; | 381 | return false; |
379 | } else { | 382 | } else { |
380 | if (pixel_clock > 165000) | 383 | if (pixel_clock > 165000) |
381 | return true; | 384 | return true; |
382 | else | 385 | else |
383 | return false; | 386 | return false; |
384 | } | 387 | } |
385 | } else | 388 | } else |
386 | return false; | 389 | return false; |
387 | case DRM_MODE_CONNECTOR_DVID: | 390 | case DRM_MODE_CONNECTOR_DVID: |
388 | case DRM_MODE_CONNECTOR_HDMIA: | 391 | case DRM_MODE_CONNECTOR_HDMIA: |
389 | case DRM_MODE_CONNECTOR_DisplayPort: | 392 | case DRM_MODE_CONNECTOR_DisplayPort: |
390 | dig_connector = radeon_connector->con_priv; | 393 | dig_connector = radeon_connector->con_priv; |
391 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | 394 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
392 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) | 395 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) |
393 | return false; | 396 | return false; |
394 | else { | 397 | else { |
395 | /* HDMI 1.3 supports up to 340 Mhz over single link */ | 398 | /* HDMI 1.3 supports up to 340 Mhz over single link */ |
396 | if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { | 399 | if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { |
397 | if (pixel_clock > 340000) | 400 | if (pixel_clock > 340000) |
398 | return true; | 401 | return true; |
399 | else | 402 | else |
400 | return false; | 403 | return false; |
401 | } else { | 404 | } else { |
402 | if (pixel_clock > 165000) | 405 | if (pixel_clock > 165000) |
403 | return true; | 406 | return true; |
404 | else | 407 | else |
405 | return false; | 408 | return false; |
406 | } | 409 | } |
407 | } | 410 | } |
408 | default: | 411 | default: |
409 | return false; | 412 | return false; |
410 | } | 413 | } |
411 | } | 414 | } |
412 | 415 | ||
413 | bool radeon_encoder_is_digital(struct drm_encoder *encoder) | 416 | bool radeon_encoder_is_digital(struct drm_encoder *encoder) |
414 | { | 417 | { |
415 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 418 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
416 | switch (radeon_encoder->encoder_id) { | 419 | switch (radeon_encoder->encoder_id) { |
417 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 420 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
418 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | 421 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: |
419 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | 422 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: |
420 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | 423 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: |
421 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 424 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
422 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | 425 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: |
423 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 426 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
424 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 427 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
425 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 428 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
426 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 429 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
427 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 430 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
428 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | 431 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: |
429 | return true; | 432 | return true; |
430 | default: | 433 | default: |
431 | return false; | 434 | return false; |
432 | } | 435 | } |
433 | } | 436 | } |
434 | 437 |