Commit 6548be50135e2a406c767cfef246b7d5a1c1cc66

Authored by Dan Murphy

Merge branch 'master' of http://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into ti-linux-3.15.y

* 'master' of http://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux:
  intel_pstate: Improve initial busy calculation
  intel_pstate: add sample time scaling
  intel_pstate: Correct rounding in busy calculation
  intel_pstate: Remove C0 tracking
  drm/radeon: use the CP DMA on CIK
  drm/radeon: sync page table updates
  drm/radeon: fix vm buffer size estimation
  drm/crtc-helper: skip locking checks in panicking path
  drm/radeon/dpm: resume fixes for some systems

Signed-off-by: Dan Murphy <DMurphy@ti.com>
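
For context on the rounding changes in the diff below: intel_pstate does its
busy arithmetic in fixed point with FRAC_BITS fraction bits (raised from 6 to
8 by this series). Truncating with fp_toint() always rounds toward zero, and
the old FP_ROUNDUP() added a whole 1.0 (1 << FRAC_BITS) rather than a half
step, so pid_calc() now biases the result by half an integer step before
truncating. A minimal standalone sketch of that arithmetic, for illustration
only: the macros mirror the driver's, but fp_round_to_int() is a hypothetical
helper, not driver code.

	#include <stdint.h>
	#include <stdio.h>

	#define FRAC_BITS 8
	#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
	#define fp_toint(X) ((X) >> FRAC_BITS)

	/* Round to the nearest integer instead of truncating toward zero. */
	static int32_t fp_round_to_int(int32_t v)
	{
		if (v >= 0)
			v += 1 << (FRAC_BITS - 1);
		else
			v -= 1 << (FRAC_BITS - 1);
		return (int32_t)fp_toint(v);
	}

	int main(void)
	{
		int32_t v = (int32_t)int_tofp(97) + 200;	/* ~97.78 in Q24.8 */

		printf("truncated: %d, rounded: %d\n",
		       (int)fp_toint(v), (int)fp_round_to_int(v));	/* 97, 98 */
		return 0;
	}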

Showing 7 changed files

drivers/cpufreq/intel_pstate.c
 /*
  * intel_pstate.c: Native P state management for Intel processors
  *
  * (C) Copyright 2012 Intel Corporation
  * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; version 2
  * of the License.
  */

 #include <linux/kernel.h>
 #include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/ktime.h>
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/list.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/sysfs.h>
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/debugfs.h>
 #include <linux/acpi.h>
 #include <trace/events/power.h>

 #include <asm/div64.h>
 #include <asm/msr.h>
 #include <asm/cpu_device_id.h>

 #define SAMPLE_COUNT 3

 #define BYT_RATIOS 0x66a
 #define BYT_VIDS 0x66b
 #define BYT_TURBO_RATIOS 0x66c
 #define BYT_TURBO_VIDS 0x66d


-#define FRAC_BITS 6
+#define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
-#define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS)
+

 static inline int32_t mul_fp(int32_t x, int32_t y)
 {
 	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
 }

 static inline int32_t div_fp(int32_t x, int32_t y)
 {
 	return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
 }

 struct sample {
 	int32_t core_pct_busy;
 	u64 aperf;
 	u64 mperf;
-	unsigned long long tsc;
 	int freq;
+	ktime_t time;
 };

 struct pstate_data {
 	int current_pstate;
 	int min_pstate;
 	int max_pstate;
 	int turbo_pstate;
 };

 struct vid_data {
 	int min;
 	int max;
 	int turbo;
 	int32_t ratio;
 };

 struct _pid {
 	int setpoint;
 	int32_t integral;
 	int32_t p_gain;
 	int32_t i_gain;
 	int32_t d_gain;
 	int deadband;
 	int32_t last_err;
 };

 struct cpudata {
 	int cpu;

 	char name[64];

 	struct timer_list timer;

 	struct pstate_data pstate;
 	struct vid_data vid;
 	struct _pid pid;

+	ktime_t last_sample_time;
 	u64 prev_aperf;
 	u64 prev_mperf;
-	unsigned long long prev_tsc;
 	struct sample sample;
 };

 static struct cpudata **all_cpu_data;
 struct pstate_adjust_policy {
 	int sample_rate_ms;
 	int deadband;
 	int setpoint;
 	int p_gain_pct;
 	int d_gain_pct;
 	int i_gain_pct;
 };

 struct pstate_funcs {
 	int (*get_max)(void);
 	int (*get_min)(void);
 	int (*get_turbo)(void);
 	void (*set)(struct cpudata*, int pstate);
 	void (*get_vid)(struct cpudata *);
 };

 struct cpu_defaults {
 	struct pstate_adjust_policy pid_policy;
 	struct pstate_funcs funcs;
 };

 static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;

 struct perf_limits {
 	int no_turbo;
 	int max_perf_pct;
 	int min_perf_pct;
 	int32_t max_perf;
 	int32_t min_perf;
 	int max_policy_pct;
 	int max_sysfs_pct;
 };

 static struct perf_limits limits = {
 	.no_turbo = 0,
 	.max_perf_pct = 100,
 	.max_perf = int_tofp(1),
 	.min_perf_pct = 0,
 	.min_perf = 0,
 	.max_policy_pct = 100,
 	.max_sysfs_pct = 100,
 };

 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
 			int deadband, int integral) {
 	pid->setpoint = setpoint;
 	pid->deadband = deadband;
 	pid->integral = int_tofp(integral);
 	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
 }

 static inline void pid_p_gain_set(struct _pid *pid, int percent)
 {
 	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
 }

 static inline void pid_i_gain_set(struct _pid *pid, int percent)
 {
 	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
 }

 static inline void pid_d_gain_set(struct _pid *pid, int percent)
 {

 	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
 }

 static signed int pid_calc(struct _pid *pid, int32_t busy)
 {
 	signed int result;
 	int32_t pterm, dterm, fp_error;
 	int32_t integral_limit;

 	fp_error = int_tofp(pid->setpoint) - busy;

 	if (abs(fp_error) <= int_tofp(pid->deadband))
 		return 0;

 	pterm = mul_fp(pid->p_gain, fp_error);

 	pid->integral += fp_error;

 	/* limit the integral term */
 	integral_limit = int_tofp(30);
 	if (pid->integral > integral_limit)
 		pid->integral = integral_limit;
 	if (pid->integral < -integral_limit)
 		pid->integral = -integral_limit;

 	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
 	pid->last_err = fp_error;

 	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
-
+	if (result >= 0)
+		result = result + (1 << (FRAC_BITS-1));
+	else
+		result = result - (1 << (FRAC_BITS-1));
 	return (signed int)fp_toint(result);
 }

 static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
 {
 	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
 	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
 	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

 	pid_reset(&cpu->pid,
 		pid_params.setpoint,
 		100,
 		pid_params.deadband,
 		0);
 }

 static inline void intel_pstate_reset_all_pid(void)
 {
 	unsigned int cpu;
 	for_each_online_cpu(cpu) {
 		if (all_cpu_data[cpu])
 			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
 	}
 }

 /************************** debugfs begin ************************/
 static int pid_param_set(void *data, u64 val)
 {
 	*(u32 *)data = val;
 	intel_pstate_reset_all_pid();
 	return 0;
 }
 static int pid_param_get(void *data, u64 *val)
 {
 	*val = *(u32 *)data;
 	return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
 			pid_param_set, "%llu\n");

 struct pid_param {
 	char *name;
 	void *value;
 };

 static struct pid_param pid_files[] = {
 	{"sample_rate_ms", &pid_params.sample_rate_ms},
 	{"d_gain_pct", &pid_params.d_gain_pct},
 	{"i_gain_pct", &pid_params.i_gain_pct},
 	{"deadband", &pid_params.deadband},
 	{"setpoint", &pid_params.setpoint},
 	{"p_gain_pct", &pid_params.p_gain_pct},
 	{NULL, NULL}
 };

 static struct dentry *debugfs_parent;
 static void intel_pstate_debug_expose_params(void)
 {
 	int i = 0;

 	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
 	if (IS_ERR_OR_NULL(debugfs_parent))
 		return;
 	while (pid_files[i].name) {
 		debugfs_create_file(pid_files[i].name, 0660,
 				debugfs_parent, pid_files[i].value,
 				&fops_pid_param);
 		i++;
 	}
 }

 /************************** debugfs end ************************/

 /************************** sysfs begin ************************/
 #define show_one(file_name, object) \
 static ssize_t show_##file_name \
 (struct kobject *kobj, struct attribute *attr, char *buf) \
 { \
 	return sprintf(buf, "%u\n", limits.object); \
 }

 static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 				const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
 	limits.no_turbo = clamp_t(int, input, 0 , 1);

 	return count;
 }

 static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 				const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;

 	limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 	return count;
 }

 static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 				const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
 	limits.min_perf_pct = clamp_t(int, input, 0 , 100);
 	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

 	return count;
 }

 show_one(no_turbo, no_turbo);
 show_one(max_perf_pct, max_perf_pct);
 show_one(min_perf_pct, min_perf_pct);

 define_one_global_rw(no_turbo);
 define_one_global_rw(max_perf_pct);
 define_one_global_rw(min_perf_pct);

 static struct attribute *intel_pstate_attributes[] = {
 	&no_turbo.attr,
 	&max_perf_pct.attr,
 	&min_perf_pct.attr,
 	NULL
 };

 static struct attribute_group intel_pstate_attr_group = {
 	.attrs = intel_pstate_attributes,
 };
 static struct kobject *intel_pstate_kobject;

 static void intel_pstate_sysfs_expose_params(void)
 {
 	int rc;

 	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
 					&cpu_subsys.dev_root->kobj);
 	BUG_ON(!intel_pstate_kobject);
 	rc = sysfs_create_group(intel_pstate_kobject,
 				&intel_pstate_attr_group);
 	BUG_ON(rc);
 }

 /************************** sysfs end ************************/
 static int byt_get_min_pstate(void)
 {
 	u64 value;
 	rdmsrl(BYT_RATIOS, value);
 	return (value >> 8) & 0x3F;
 }

 static int byt_get_max_pstate(void)
 {
 	u64 value;
 	rdmsrl(BYT_RATIOS, value);
 	return (value >> 16) & 0x3F;
 }

 static int byt_get_turbo_pstate(void)
 {
 	u64 value;
 	rdmsrl(BYT_TURBO_RATIOS, value);
 	return value & 0x3F;
 }

 static void byt_set_pstate(struct cpudata *cpudata, int pstate)
 {
 	u64 val;
 	int32_t vid_fp;
 	u32 vid;

 	val = pstate << 8;
 	if (limits.no_turbo)
 		val |= (u64)1 << 32;

 	vid_fp = cpudata->vid.min + mul_fp(
 		int_tofp(pstate - cpudata->pstate.min_pstate),
 		cpudata->vid.ratio);

 	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
 	vid = fp_toint(vid_fp);

 	if (pstate > cpudata->pstate.max_pstate)
 		vid = cpudata->vid.turbo;

 	val |= vid;

 	wrmsrl(MSR_IA32_PERF_CTL, val);
 }

 static void byt_get_vid(struct cpudata *cpudata)
 {
 	u64 value;


 	rdmsrl(BYT_VIDS, value);
 	cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
 	cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
 	cpudata->vid.ratio = div_fp(
 		cpudata->vid.max - cpudata->vid.min,
 		int_tofp(cpudata->pstate.max_pstate -
 			cpudata->pstate.min_pstate));

 	rdmsrl(BYT_TURBO_VIDS, value);
 	cpudata->vid.turbo = value & 0x7f;
 }


 static int core_get_min_pstate(void)
 {
 	u64 value;
 	rdmsrl(MSR_PLATFORM_INFO, value);
 	return (value >> 40) & 0xFF;
 }

 static int core_get_max_pstate(void)
 {
 	u64 value;
 	rdmsrl(MSR_PLATFORM_INFO, value);
 	return (value >> 8) & 0xFF;
 }

 static int core_get_turbo_pstate(void)
 {
 	u64 value;
 	int nont, ret;
 	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
 	nont = core_get_max_pstate();
 	ret = ((value) & 255);
 	if (ret <= nont)
 		ret = nont;
 	return ret;
 }

 static void core_set_pstate(struct cpudata *cpudata, int pstate)
 {
 	u64 val;

 	val = pstate << 8;
 	if (limits.no_turbo)
 		val |= (u64)1 << 32;

 	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
 }

 static struct cpu_defaults core_params = {
 	.pid_policy = {
 		.sample_rate_ms = 10,
 		.deadband = 0,
 		.setpoint = 97,
 		.p_gain_pct = 20,
 		.d_gain_pct = 0,
 		.i_gain_pct = 0,
 	},
 	.funcs = {
 		.get_max = core_get_max_pstate,
 		.get_min = core_get_min_pstate,
 		.get_turbo = core_get_turbo_pstate,
 		.set = core_set_pstate,
 	},
 };

 static struct cpu_defaults byt_params = {
 	.pid_policy = {
 		.sample_rate_ms = 10,
 		.deadband = 0,
 		.setpoint = 97,
 		.p_gain_pct = 14,
 		.d_gain_pct = 0,
 		.i_gain_pct = 4,
 	},
 	.funcs = {
 		.get_max = byt_get_max_pstate,
 		.get_min = byt_get_min_pstate,
 		.get_turbo = byt_get_turbo_pstate,
 		.set = byt_set_pstate,
 		.get_vid = byt_get_vid,
 	},
 };


 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 {
 	int max_perf = cpu->pstate.turbo_pstate;
 	int max_perf_adj;
 	int min_perf;
 	if (limits.no_turbo)
 		max_perf = cpu->pstate.max_pstate;

 	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
 	*max = clamp_t(int, max_perf_adj,
 			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

 	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
 	*min = clamp_t(int, min_perf,
 			cpu->pstate.min_pstate, max_perf);
 }

 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 {
 	int max_perf, min_perf;

 	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

 	pstate = clamp_t(int, pstate, min_perf, max_perf);

 	if (pstate == cpu->pstate.current_pstate)
 		return;

 	trace_cpu_frequency(pstate * 100000, cpu->cpu);

 	cpu->pstate.current_pstate = pstate;

 	pstate_funcs.set(cpu, pstate);
 }

 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
 {
 	int target;
 	target = cpu->pstate.current_pstate + steps;

 	intel_pstate_set_pstate(cpu, target);
 }

 static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
 {
 	int target;
 	target = cpu->pstate.current_pstate - steps;
 	intel_pstate_set_pstate(cpu, target);
 }

 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
 	sprintf(cpu->name, "Intel 2nd generation core");

 	cpu->pstate.min_pstate = pstate_funcs.get_min();
 	cpu->pstate.max_pstate = pstate_funcs.get_max();
 	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();

 	if (pstate_funcs.get_vid)
 		pstate_funcs.get_vid(cpu);
 	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
 }

 static inline void intel_pstate_calc_busy(struct cpudata *cpu,
 					struct sample *sample)
 {
-	int32_t core_pct;
-	int32_t c0_pct;
+	int64_t core_pct;
+	int32_t rem;

-	core_pct = div_fp(int_tofp((sample->aperf)),
-			int_tofp((sample->mperf)));
-	core_pct = mul_fp(core_pct, int_tofp(100));
-	FP_ROUNDUP(core_pct);
+	core_pct = int_tofp(sample->aperf) * int_tofp(100);
+	core_pct = div_u64_rem(core_pct, int_tofp(sample->mperf), &rem);

-	c0_pct = div_fp(int_tofp(sample->mperf), int_tofp(sample->tsc));
+	if ((rem << 1) >= int_tofp(sample->mperf))
+		core_pct += 1;

 	sample->freq = fp_toint(
 		mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));

-	sample->core_pct_busy = mul_fp(core_pct, c0_pct);
+	sample->core_pct_busy = (int32_t)core_pct;
 }

 static inline void intel_pstate_sample(struct cpudata *cpu)
 {
 	u64 aperf, mperf;
-	unsigned long long tsc;

 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
-	tsc = native_read_tsc();

 	aperf = aperf >> FRAC_BITS;
 	mperf = mperf >> FRAC_BITS;
-	tsc = tsc >> FRAC_BITS;

+	cpu->last_sample_time = cpu->sample.time;
+	cpu->sample.time = ktime_get();
 	cpu->sample.aperf = aperf;
 	cpu->sample.mperf = mperf;
-	cpu->sample.tsc = tsc;
 	cpu->sample.aperf -= cpu->prev_aperf;
 	cpu->sample.mperf -= cpu->prev_mperf;
-	cpu->sample.tsc -= cpu->prev_tsc;

 	intel_pstate_calc_busy(cpu, &cpu->sample);

 	cpu->prev_aperf = aperf;
 	cpu->prev_mperf = mperf;
-	cpu->prev_tsc = tsc;
 }

 static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 {
 	int sample_time, delay;

 	sample_time = pid_params.sample_rate_ms;
 	delay = msecs_to_jiffies(sample_time);
 	mod_timer_pinned(&cpu->timer, jiffies + delay);
 }

 static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
-	int32_t core_busy, max_pstate, current_pstate;
+	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
+	u32 duration_us;
+	u32 sample_time;

 	core_busy = cpu->sample.core_pct_busy;
 	max_pstate = int_tofp(cpu->pstate.max_pstate);
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
 	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
-	return FP_ROUNDUP(core_busy);
+
+	sample_time = (pid_params.sample_rate_ms * USEC_PER_MSEC);
+	duration_us = (u32) ktime_us_delta(cpu->sample.time,
+					cpu->last_sample_time);
+	if (duration_us > sample_time * 3) {
+		sample_ratio = div_fp(int_tofp(sample_time),
+				int_tofp(duration_us));
+		core_busy = mul_fp(core_busy, sample_ratio);
+	}
+
+	return core_busy;
 }

 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 {
 	int32_t busy_scaled;
 	struct _pid *pid;
 	signed int ctl = 0;
 	int steps;

 	pid = &cpu->pid;
 	busy_scaled = intel_pstate_get_scaled_busy(cpu);

 	ctl = pid_calc(pid, busy_scaled);

 	steps = abs(ctl);

 	if (ctl < 0)
 		intel_pstate_pstate_increase(cpu, steps);
 	else
 		intel_pstate_pstate_decrease(cpu, steps);
 }

 static void intel_pstate_timer_func(unsigned long __data)
 {
 	struct cpudata *cpu = (struct cpudata *) __data;
 	struct sample *sample;

 	intel_pstate_sample(cpu);

 	sample = &cpu->sample;

 	intel_pstate_adjust_busy_pstate(cpu);

 	trace_pstate_sample(fp_toint(sample->core_pct_busy),
 			fp_toint(intel_pstate_get_scaled_busy(cpu)),
 			cpu->pstate.current_pstate,
 			sample->mperf,
 			sample->aperf,
 			sample->freq);

 	intel_pstate_set_sample_time(cpu);
 }

 #define ICPU(model, policy) \
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
 		(unsigned long)&policy }

 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x2a, core_params),
 	ICPU(0x2d, core_params),
 	ICPU(0x37, byt_params),
 	ICPU(0x3a, core_params),
 	ICPU(0x3c, core_params),
 	ICPU(0x3e, core_params),
 	ICPU(0x3f, core_params),
 	ICPU(0x45, core_params),
 	ICPU(0x46, core_params),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

 static int intel_pstate_init_cpu(unsigned int cpunum)
 {

 	const struct x86_cpu_id *id;
 	struct cpudata *cpu;

 	id = x86_match_cpu(intel_pstate_cpu_ids);
 	if (!id)
 		return -ENODEV;

 	all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
 	if (!all_cpu_data[cpunum])
 		return -ENOMEM;

 	cpu = all_cpu_data[cpunum];

 	intel_pstate_get_cpu_pstates(cpu);

 	cpu->cpu = cpunum;

 	init_timer_deferrable(&cpu->timer);
 	cpu->timer.function = intel_pstate_timer_func;
 	cpu->timer.data =
 		(unsigned long)cpu;
 	cpu->timer.expires = jiffies + HZ/100;
 	intel_pstate_busy_pid_reset(cpu);
 	intel_pstate_sample(cpu);

 	add_timer_on(&cpu->timer, cpunum);

 	pr_info("Intel pstate controlling: cpu %d\n", cpunum);

 	return 0;
 }

 static unsigned int intel_pstate_get(unsigned int cpu_num)
 {
 	struct sample *sample;
 	struct cpudata *cpu;

 	cpu = all_cpu_data[cpu_num];
 	if (!cpu)
 		return 0;
 	sample = &cpu->sample;
 	return sample->freq;
 }

 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu;

 	cpu = all_cpu_data[policy->cpu];

 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;

 	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
 		limits.min_perf_pct = 100;
 		limits.min_perf = int_tofp(1);
 		limits.max_perf_pct = 100;
 		limits.max_perf = int_tofp(1);
 		limits.no_turbo = 0;
 		return 0;
 	}
 	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
 	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
 	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

 	limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
 	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

 	return 0;
 }

 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 {
 	cpufreq_verify_within_cpu_limits(policy);

 	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
 		(policy->policy != CPUFREQ_POLICY_PERFORMANCE))
 		return -EINVAL;

 	return 0;
 }

 static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
 {
 	int cpu_num = policy->cpu;
 	struct cpudata *cpu = all_cpu_data[cpu_num];

 	pr_info("intel_pstate CPU %d exiting\n", cpu_num);

 	del_timer_sync(&all_cpu_data[cpu_num]->timer);
 	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
 	kfree(all_cpu_data[cpu_num]);
 	all_cpu_data[cpu_num] = NULL;
 }

 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu;
 	int rc;

 	rc = intel_pstate_init_cpu(policy->cpu);
 	if (rc)
 		return rc;

 	cpu = all_cpu_data[policy->cpu];

 	if (!limits.no_turbo &&
 		limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
 		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
 	else
 		policy->policy = CPUFREQ_POLICY_POWERSAVE;

 	policy->min = cpu->pstate.min_pstate * 100000;
 	policy->max = cpu->pstate.turbo_pstate * 100000;

 	/* cpuinfo and default policy values */
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
 	policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	cpumask_set_cpu(policy->cpu, policy->cpus);

 	return 0;
 }

 static struct cpufreq_driver intel_pstate_driver = {
 	.flags = CPUFREQ_CONST_LOOPS,
 	.verify = intel_pstate_verify_policy,
 	.setpolicy = intel_pstate_set_policy,
 	.get = intel_pstate_get,
 	.init = intel_pstate_cpu_init,
 	.stop_cpu = intel_pstate_stop_cpu,
 	.name = "intel_pstate",
 };

 static int __initdata no_load;

 static int intel_pstate_msrs_not_valid(void)
 {
 	/* Check that all the msr's we are using are valid. */
 	u64 aperf, mperf, tmp;

 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);

 	if (!pstate_funcs.get_max() ||
 		!pstate_funcs.get_min() ||
 		!pstate_funcs.get_turbo())
 		return -ENODEV;

 	rdmsrl(MSR_IA32_APERF, tmp);
 	if (!(tmp - aperf))
 		return -ENODEV;

 	rdmsrl(MSR_IA32_MPERF, tmp);
 	if (!(tmp - mperf))
 		return -ENODEV;

 	return 0;
 }

 static void copy_pid_params(struct pstate_adjust_policy *policy)
 {
 	pid_params.sample_rate_ms = policy->sample_rate_ms;
 	pid_params.p_gain_pct = policy->p_gain_pct;
 	pid_params.i_gain_pct = policy->i_gain_pct;
 	pid_params.d_gain_pct = policy->d_gain_pct;
 	pid_params.deadband = policy->deadband;
 	pid_params.setpoint = policy->setpoint;
 }

 static void copy_cpu_funcs(struct pstate_funcs *funcs)
 {
 	pstate_funcs.get_max = funcs->get_max;
 	pstate_funcs.get_min = funcs->get_min;
 	pstate_funcs.get_turbo = funcs->get_turbo;
 	pstate_funcs.set = funcs->set;
 	pstate_funcs.get_vid = funcs->get_vid;
 }

 #if IS_ENABLED(CONFIG_ACPI)
 #include <acpi/processor.h>

 static bool intel_pstate_no_acpi_pss(void)
 {
 	int i;

 	for_each_possible_cpu(i) {
 		acpi_status status;
 		union acpi_object *pss;
 		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 		struct acpi_processor *pr = per_cpu(processors, i);

 		if (!pr)
 			continue;

 		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
 		if (ACPI_FAILURE(status))
 			continue;

 		pss = buffer.pointer;
 		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
 			kfree(pss);
 			return false;
 		}

 		kfree(pss);
 	}

 	return true;
 }

 struct hw_vendor_info {
 	u16 valid;
 	char oem_id[ACPI_OEM_ID_SIZE];
 	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
 };

 /* Hardware vendor-specific info that has its own power management modes */
 static struct hw_vendor_info vendor_info[] = {
 	{1, "HP ", "ProLiant"},
 	{0, "", ""},
 };

 static bool intel_pstate_platform_pwr_mgmt_exists(void)
 {
 	struct acpi_table_header hdr;
 	struct hw_vendor_info *v_info;

 	if (acpi_disabled
 	    || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
 		return false;

 	for (v_info = vendor_info; v_info->valid; v_info++) {
 		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
 		    && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
 		    && intel_pstate_no_acpi_pss())
 			return true;
 	}

 	return false;
 }
 #else /* CONFIG_ACPI not enabled */
 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
 #endif /* CONFIG_ACPI */

 static int __init intel_pstate_init(void)
 {
 	int cpu, rc = 0;
 	const struct x86_cpu_id *id;
 	struct cpu_defaults *cpu_info;

 	if (no_load)
 		return -ENODEV;

 	id = x86_match_cpu(intel_pstate_cpu_ids);
 	if (!id)
 		return -ENODEV;

 	/*
 	 * The Intel pstate driver will be ignored if the platform
 	 * firmware has its own power management modes.
 	 */
 	if (intel_pstate_platform_pwr_mgmt_exists())
 		return -ENODEV;

 	cpu_info = (struct cpu_defaults *)id->driver_data;

 	copy_pid_params(&cpu_info->pid_policy);
 	copy_cpu_funcs(&cpu_info->funcs);

 	if (intel_pstate_msrs_not_valid())
 		return -ENODEV;

 	pr_info("Intel P-state driver initializing.\n");

 	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
 	if (!all_cpu_data)
 		return -ENOMEM;

 	rc = cpufreq_register_driver(&intel_pstate_driver);
 	if (rc)
 		goto out;

 	intel_pstate_debug_expose_params();
 	intel_pstate_sysfs_expose_params();

 	return rc;
 out:
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		if (all_cpu_data[cpu]) {
 			del_timer_sync(&all_cpu_data[cpu]->timer);
 			kfree(all_cpu_data[cpu]);
 		}
 	}

 	put_online_cpus();
 	vfree(all_cpu_data);
 	return -ENODEV;
 }
 device_initcall(intel_pstate_init);

 static int __init intel_pstate_setup(char *str)
 {
 	if (!str)
 		return -EINVAL;
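
The sample time scaling added above addresses deferrable-timer stretch: on an
idle CPU the 10 ms sampling timer can fire much later than scheduled, and the
APERF/MPERF deltas then describe a short burst spread over a long window. When
the observed interval exceeds three nominal periods,
intel_pstate_get_scaled_busy() now scales the busy figure by
sample_time / duration. A standalone sketch of that arithmetic, as a userspace
illustration only: the fixed-point helpers mirror the driver's, and the
numbers are made up.

	#include <stdint.h>
	#include <stdio.h>

	#define FRAC_BITS 8
	#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
	#define fp_toint(X) ((X) >> FRAC_BITS)

	static int32_t mul_fp(int32_t x, int32_t y)
	{
		return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
	}

	static int32_t div_fp(int32_t x, int32_t y)
	{
		return (int32_t)(((int64_t)x << FRAC_BITS) / y);
	}

	int main(void)
	{
		uint32_t sample_time = 10 * 1000;	/* nominal period, in us */
		uint32_t duration_us = 80 * 1000;	/* timer actually fired after 80 ms */
		int32_t core_busy = (int32_t)int_tofp(100);	/* 100% busy while sampled */

		/* Same 3x-late threshold as the driver uses. */
		if (duration_us > sample_time * 3) {
			int32_t ratio = div_fp((int32_t)int_tofp(sample_time),
					       (int32_t)int_tofp(duration_us));
			core_busy = mul_fp(core_busy, ratio);
		}
		printf("scaled busy: %d%%\n", (int)fp_toint(core_busy));	/* prints 12 */
		return 0;
	}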
drivers/gpu/drm/drm_crtc_helper.c
1 /* 1 /*
2 * Copyright (c) 2006-2008 Intel Corporation 2 * Copyright (c) 2006-2008 Intel Corporation
3 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> 3 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
4 * 4 *
5 * DRM core CRTC related functions 5 * DRM core CRTC related functions
6 * 6 *
7 * Permission to use, copy, modify, distribute, and sell this software and its 7 * Permission to use, copy, modify, distribute, and sell this software and its
8 * documentation for any purpose is hereby granted without fee, provided that 8 * documentation for any purpose is hereby granted without fee, provided that
9 * the above copyright notice appear in all copies and that both that copyright 9 * the above copyright notice appear in all copies and that both that copyright
10 * notice and this permission notice appear in supporting documentation, and 10 * notice and this permission notice appear in supporting documentation, and
11 * that the name of the copyright holders not be used in advertising or 11 * that the name of the copyright holders not be used in advertising or
12 * publicity pertaining to distribution of the software without specific, 12 * publicity pertaining to distribution of the software without specific,
13 * written prior permission. The copyright holders make no representations 13 * written prior permission. The copyright holders make no representations
14 * about the suitability of this software for any purpose. It is provided "as 14 * about the suitability of this software for any purpose. It is provided "as
15 * is" without express or implied warranty. 15 * is" without express or implied warranty.
16 * 16 *
17 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, 17 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
18 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO 18 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
19 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR 19 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
20 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, 20 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
21 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 21 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
22 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE 22 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
23 * OF THIS SOFTWARE. 23 * OF THIS SOFTWARE.
24 * 24 *
25 * Authors: 25 * Authors:
26 * Keith Packard 26 * Keith Packard
27 * Eric Anholt <eric@anholt.net> 27 * Eric Anholt <eric@anholt.net>
28 * Dave Airlie <airlied@linux.ie> 28 * Dave Airlie <airlied@linux.ie>
29 * Jesse Barnes <jesse.barnes@intel.com> 29 * Jesse Barnes <jesse.barnes@intel.com>
30 */ 30 */
31 31
32 #include <linux/kernel.h>
32 #include <linux/export.h> 33 #include <linux/export.h>
33 #include <linux/moduleparam.h> 34 #include <linux/moduleparam.h>
34 35
35 #include <drm/drmP.h> 36 #include <drm/drmP.h>
36 #include <drm/drm_crtc.h> 37 #include <drm/drm_crtc.h>
37 #include <drm/drm_fourcc.h> 38 #include <drm/drm_fourcc.h>
38 #include <drm/drm_crtc_helper.h> 39 #include <drm/drm_crtc_helper.h>
39 #include <drm/drm_fb_helper.h> 40 #include <drm/drm_fb_helper.h>
40 #include <drm/drm_edid.h> 41 #include <drm/drm_edid.h>
41 42
42 MODULE_AUTHOR("David Airlie, Jesse Barnes"); 43 MODULE_AUTHOR("David Airlie, Jesse Barnes");
43 MODULE_DESCRIPTION("DRM KMS helper"); 44 MODULE_DESCRIPTION("DRM KMS helper");
44 MODULE_LICENSE("GPL and additional rights"); 45 MODULE_LICENSE("GPL and additional rights");
45 46
46 /** 47 /**
47 * drm_helper_move_panel_connectors_to_head() - move panels to the front in the 48 * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
48 * connector list 49 * connector list
49 * @dev: drm device to operate on 50 * @dev: drm device to operate on
50 * 51 *
51 * Some userspace presumes that the first connected connector is the main 52 * Some userspace presumes that the first connected connector is the main
52 * display, where it's supposed to display e.g. the login screen. For 53 * display, where it's supposed to display e.g. the login screen. For
53 * laptops, this should be the main panel. Use this function to sort all 54 * laptops, this should be the main panel. Use this function to sort all
54 * (eDP/LVDS) panels to the front of the connector list, instead of 55 * (eDP/LVDS) panels to the front of the connector list, instead of
55 * painstakingly trying to initialize them in the right order. 56 * painstakingly trying to initialize them in the right order.
56 */ 57 */
57 void drm_helper_move_panel_connectors_to_head(struct drm_device *dev) 58 void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
58 { 59 {
59 struct drm_connector *connector, *tmp; 60 struct drm_connector *connector, *tmp;
60 struct list_head panel_list; 61 struct list_head panel_list;
61 62
62 INIT_LIST_HEAD(&panel_list); 63 INIT_LIST_HEAD(&panel_list);
63 64
64 list_for_each_entry_safe(connector, tmp, 65 list_for_each_entry_safe(connector, tmp,
65 &dev->mode_config.connector_list, head) { 66 &dev->mode_config.connector_list, head) {
66 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS || 67 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
67 connector->connector_type == DRM_MODE_CONNECTOR_eDP) 68 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
68 list_move_tail(&connector->head, &panel_list); 69 list_move_tail(&connector->head, &panel_list);
69 } 70 }
70 71
71 list_splice(&panel_list, &dev->mode_config.connector_list); 72 list_splice(&panel_list, &dev->mode_config.connector_list);
72 } 73 }
73 EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head); 74 EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
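A minimal usage sketch (the foo_* driver names are hypothetical): a KMS driver would call this helper once at init time, after all connectors are registered, so that userspace which only looks at the first connector picks up the panel. Later sketches in this section assume the same includes and hypothetical driver context.

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

/* Hypothetical init path: register all outputs, then sort panels first. */
static int foo_modeset_init(struct drm_device *dev)
{
	int ret;

	ret = foo_create_connectors(dev);	/* hypothetical driver helper */
	if (ret)
		return ret;

	/* Moves all eDP/LVDS connectors to the head of connector_list. */
	drm_helper_move_panel_connectors_to_head(dev);
	return 0;
}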
74 75
75 /** 76 /**
76 * drm_helper_encoder_in_use - check if a given encoder is in use 77 * drm_helper_encoder_in_use - check if a given encoder is in use
77 * @encoder: encoder to check 78 * @encoder: encoder to check
78 * 79 *
79 * Checks whether @encoder is, with the current mode setting output configuration, 80 * Checks whether @encoder is, with the current mode setting output configuration,
80 * in use by any connector. This doesn't mean that it is actually enabled since 81 * in use by any connector. This doesn't mean that it is actually enabled since
81 * the DPMS state is tracked separately. 82 * the DPMS state is tracked separately.
82 * 83 *
83 * Returns: 84 * Returns:
84 * True if @encoder is used, false otherwise. 85 * True if @encoder is used, false otherwise.
85 */ 86 */
86 bool drm_helper_encoder_in_use(struct drm_encoder *encoder) 87 bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
87 { 88 {
88 struct drm_connector *connector; 89 struct drm_connector *connector;
89 struct drm_device *dev = encoder->dev; 90 struct drm_device *dev = encoder->dev;
90 91
91 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 92 /*
93 * We can expect this mutex to be locked if we are not panicking.
94 * Locking is currently fubar in the panic handler.
95 */
96 if (!oops_in_progress)
97 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
98
92 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 99 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
93 if (connector->encoder == encoder) 100 if (connector->encoder == encoder)
94 return true; 101 return true;
95 return false; 102 return false;
96 } 103 }
97 EXPORT_SYMBOL(drm_helper_encoder_in_use); 104 EXPORT_SYMBOL(drm_helper_encoder_in_use);
98 105
99 /** 106 /**
100 * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config 107 * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
101 * @crtc: CRTC to check 108 * @crtc: CRTC to check
102 * 109 *
103 * Checks whether @crtc is, with the current mode setting output configuration, 110 * Checks whether @crtc is, with the current mode setting output configuration,
104 * in use by any connector. This doesn't mean that it is actually enabled since 111 * in use by any connector. This doesn't mean that it is actually enabled since
105 * the DPMS state is tracked separately. 112 * the DPMS state is tracked separately.
106 * 113 *
107 * Returns: 114 * Returns:
108 * True if @crtc is used, false otherwise. 115 * True if @crtc is used, false otherwise.
109 */ 116 */
110 bool drm_helper_crtc_in_use(struct drm_crtc *crtc) 117 bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
111 { 118 {
112 struct drm_encoder *encoder; 119 struct drm_encoder *encoder;
113 struct drm_device *dev = crtc->dev; 120 struct drm_device *dev = crtc->dev;
114 121
115 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 122 /*
123 * We can expect this mutex to be locked if we are not panicking.
124 * Locking is currently fubar in the panic handler.
125 */
126 if (!oops_in_progress)
127 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
128
116 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) 129 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
117 if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder)) 130 if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
118 return true; 131 return true;
119 return false; 132 return false;
120 } 133 }
121 EXPORT_SYMBOL(drm_helper_crtc_in_use); 134 EXPORT_SYMBOL(drm_helper_crtc_in_use);
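The caveat added by this commit applies to both in_use helpers above: outside the panic path they expect the modeset locks to be held. A small sketch under that assumption (foo_dump_routing is hypothetical):

/* Hypothetical debug helper: report which objects are currently routed. */
static void foo_dump_routing(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;

	drm_modeset_lock_all(dev);	/* satisfies the WARN_ON lock checks */

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		DRM_INFO("[CRTC:%d] in use: %d\n",
			 crtc->base.id, drm_helper_crtc_in_use(crtc));

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		DRM_INFO("[ENCODER:%d] in use: %d\n",
			 encoder->base.id, drm_helper_encoder_in_use(encoder));

	drm_modeset_unlock_all(dev);
}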
122 135
123 static void 136 static void
124 drm_encoder_disable(struct drm_encoder *encoder) 137 drm_encoder_disable(struct drm_encoder *encoder)
125 { 138 {
126 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 139 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
127 140
128 if (encoder->bridge) 141 if (encoder->bridge)
129 encoder->bridge->funcs->disable(encoder->bridge); 142 encoder->bridge->funcs->disable(encoder->bridge);
130 143
131 if (encoder_funcs->disable) 144 if (encoder_funcs->disable)
132 (*encoder_funcs->disable)(encoder); 145 (*encoder_funcs->disable)(encoder);
133 else 146 else
134 (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); 147 (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
135 148
136 if (encoder->bridge) 149 if (encoder->bridge)
137 encoder->bridge->funcs->post_disable(encoder->bridge); 150 encoder->bridge->funcs->post_disable(encoder->bridge);
138 } 151 }
139 152
140 static void __drm_helper_disable_unused_functions(struct drm_device *dev) 153 static void __drm_helper_disable_unused_functions(struct drm_device *dev)
141 { 154 {
142 struct drm_encoder *encoder; 155 struct drm_encoder *encoder;
143 struct drm_connector *connector; 156 struct drm_connector *connector;
144 struct drm_crtc *crtc; 157 struct drm_crtc *crtc;
145 158
146 drm_warn_on_modeset_not_all_locked(dev); 159 drm_warn_on_modeset_not_all_locked(dev);
147 160
148 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 161 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
149 if (!connector->encoder) 162 if (!connector->encoder)
150 continue; 163 continue;
151 } 164 }
152 165
153 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 166 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
154 if (!drm_helper_encoder_in_use(encoder)) { 167 if (!drm_helper_encoder_in_use(encoder)) {
155 drm_encoder_disable(encoder); 168 drm_encoder_disable(encoder);
156 /* disconnect the encoder from any connector */ 169 /* disconnect the encoder from any connector */
157 encoder->crtc = NULL; 170 encoder->crtc = NULL;
158 } 171 }
159 } 172 }
160 173
161 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 174 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
162 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 175 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
163 crtc->enabled = drm_helper_crtc_in_use(crtc); 176 crtc->enabled = drm_helper_crtc_in_use(crtc);
164 if (!crtc->enabled) { 177 if (!crtc->enabled) {
165 if (crtc_funcs->disable) 178 if (crtc_funcs->disable)
166 (*crtc_funcs->disable)(crtc); 179 (*crtc_funcs->disable)(crtc);
167 else 180 else
168 (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF); 181 (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
169 crtc->primary->fb = NULL; 182 crtc->primary->fb = NULL;
170 } 183 }
171 } 184 }
172 } 185 }
173 186
174 /** 187 /**
175 * drm_helper_disable_unused_functions - disable unused objects 188 * drm_helper_disable_unused_functions - disable unused objects
176 * @dev: DRM device 189 * @dev: DRM device
177 * 190 *
178 * This function walks through the entire mode setting configuration of @dev. It 191 * This function walks through the entire mode setting configuration of @dev. It
179 * will remove any crtc links of unused encoders and encoder links of 192 * will remove any crtc links of unused encoders and encoder links of
180 * disconnected connectors. Then it will disable all unused encoders and crtcs 193 * disconnected connectors. Then it will disable all unused encoders and crtcs
181 * either by calling their disable callback if available or by calling their 194 * either by calling their disable callback if available or by calling their
182 * dpms callback with DRM_MODE_DPMS_OFF. 195 * dpms callback with DRM_MODE_DPMS_OFF.
183 */ 196 */
184 void drm_helper_disable_unused_functions(struct drm_device *dev) 197 void drm_helper_disable_unused_functions(struct drm_device *dev)
185 { 198 {
186 drm_modeset_lock_all(dev); 199 drm_modeset_lock_all(dev);
187 __drm_helper_disable_unused_functions(dev); 200 __drm_helper_disable_unused_functions(dev);
188 drm_modeset_unlock_all(dev); 201 drm_modeset_unlock_all(dev);
189 } 202 }
190 EXPORT_SYMBOL(drm_helper_disable_unused_functions); 203 EXPORT_SYMBOL(drm_helper_disable_unused_functions);
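As a usage sketch (the load path is hypothetical, building on foo_modeset_init() above): drivers typically call this after initial modeset setup, or on resume, to quiesce whatever the firmware left enabled. Note the exported variant takes the modeset locks itself.

static int foo_driver_load(struct drm_device *dev, unsigned long flags)
{
	int ret;

	ret = foo_modeset_init(dev);	/* hypothetical, see sketch above */
	if (ret)
		return ret;

	/* Shut off anything not referenced by the current configuration. */
	drm_helper_disable_unused_functions(dev);
	return 0;
}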
191 204
192 /* 205 /*
193 * Check the CRTC we're going to map each output to vs. its current 206 * Check the CRTC we're going to map each output to vs. its current
194 * CRTC. If they don't match, we have to disable the output and the CRTC 207 * CRTC. If they don't match, we have to disable the output and the CRTC
195 * since the driver will have to re-route things. 208 * since the driver will have to re-route things.
196 */ 209 */
197 static void 210 static void
198 drm_crtc_prepare_encoders(struct drm_device *dev) 211 drm_crtc_prepare_encoders(struct drm_device *dev)
199 { 212 {
200 struct drm_encoder_helper_funcs *encoder_funcs; 213 struct drm_encoder_helper_funcs *encoder_funcs;
201 struct drm_encoder *encoder; 214 struct drm_encoder *encoder;
202 215
203 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 216 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
204 encoder_funcs = encoder->helper_private; 217 encoder_funcs = encoder->helper_private;
205 /* Disable unused encoders */ 218 /* Disable unused encoders */
206 if (encoder->crtc == NULL) 219 if (encoder->crtc == NULL)
207 drm_encoder_disable(encoder); 220 drm_encoder_disable(encoder);
208 /* Disable encoders whose CRTC is about to change */ 221 /* Disable encoders whose CRTC is about to change */
209 if (encoder_funcs->get_crtc && 222 if (encoder_funcs->get_crtc &&
210 encoder->crtc != (*encoder_funcs->get_crtc)(encoder)) 223 encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
211 drm_encoder_disable(encoder); 224 drm_encoder_disable(encoder);
212 } 225 }
213 } 226 }
214 227
215 /** 228 /**
216 * drm_crtc_helper_set_mode - internal helper to set a mode 229 * drm_crtc_helper_set_mode - internal helper to set a mode
217 * @crtc: CRTC to program 230 * @crtc: CRTC to program
218 * @mode: mode to use 231 * @mode: mode to use
219 * @x: horizontal offset into the surface 232 * @x: horizontal offset into the surface
220 * @y: vertical offset into the surface 233 * @y: vertical offset into the surface
221 * @old_fb: old framebuffer, for cleanup 234 * @old_fb: old framebuffer, for cleanup
222 * 235 *
223 * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance 236 * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
224 * to fixup or reject the mode prior to trying to set it. This is an internal 237 * to fixup or reject the mode prior to trying to set it. This is an internal
225 * helper that drivers could e.g. use to update properties that require the 238 * helper that drivers could e.g. use to update properties that require the
226 * entire output pipe to be disabled and re-enabled in a new configuration. For 239 * entire output pipe to be disabled and re-enabled in a new configuration. For
227 * example for changing whether audio is enabled on an HDMI link or for changing 240 * example for changing whether audio is enabled on an HDMI link or for changing
228 * panel fitter or dither attributes. It is also called by the 241 * panel fitter or dither attributes. It is also called by the
229 * drm_crtc_helper_set_config() helper function to drive the mode setting 242 * drm_crtc_helper_set_config() helper function to drive the mode setting
230 * sequence. 243 * sequence.
231 * 244 *
232 * Returns: 245 * Returns:
233 * True if the mode was set successfully, false otherwise. 246 * True if the mode was set successfully, false otherwise.
234 */ 247 */
235 bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, 248 bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
236 struct drm_display_mode *mode, 249 struct drm_display_mode *mode,
237 int x, int y, 250 int x, int y,
238 struct drm_framebuffer *old_fb) 251 struct drm_framebuffer *old_fb)
239 { 252 {
240 struct drm_device *dev = crtc->dev; 253 struct drm_device *dev = crtc->dev;
241 struct drm_display_mode *adjusted_mode, saved_mode; 254 struct drm_display_mode *adjusted_mode, saved_mode;
242 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 255 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
243 struct drm_encoder_helper_funcs *encoder_funcs; 256 struct drm_encoder_helper_funcs *encoder_funcs;
244 int saved_x, saved_y; 257 int saved_x, saved_y;
245 bool saved_enabled; 258 bool saved_enabled;
246 struct drm_encoder *encoder; 259 struct drm_encoder *encoder;
247 bool ret = true; 260 bool ret = true;
248 261
249 drm_warn_on_modeset_not_all_locked(dev); 262 drm_warn_on_modeset_not_all_locked(dev);
250 263
251 saved_enabled = crtc->enabled; 264 saved_enabled = crtc->enabled;
252 crtc->enabled = drm_helper_crtc_in_use(crtc); 265 crtc->enabled = drm_helper_crtc_in_use(crtc);
253 if (!crtc->enabled) 266 if (!crtc->enabled)
254 return true; 267 return true;
255 268
256 adjusted_mode = drm_mode_duplicate(dev, mode); 269 adjusted_mode = drm_mode_duplicate(dev, mode);
257 if (!adjusted_mode) { 270 if (!adjusted_mode) {
258 crtc->enabled = saved_enabled; 271 crtc->enabled = saved_enabled;
259 return false; 272 return false;
260 } 273 }
261 274
262 saved_mode = crtc->mode; 275 saved_mode = crtc->mode;
263 saved_x = crtc->x; 276 saved_x = crtc->x;
264 saved_y = crtc->y; 277 saved_y = crtc->y;
265 278
266 /* Update crtc values up front so the driver can rely on them for mode 279 /* Update crtc values up front so the driver can rely on them for mode
267 * setting. 280 * setting.
268 */ 281 */
269 crtc->mode = *mode; 282 crtc->mode = *mode;
270 crtc->x = x; 283 crtc->x = x;
271 crtc->y = y; 284 crtc->y = y;
272 285
273 /* Pass our mode to the connectors and the CRTC to give them a chance to 286 /* Pass our mode to the connectors and the CRTC to give them a chance to
274 * adjust it according to limitations or connector properties, and also 287 * adjust it according to limitations or connector properties, and also
275 * a chance to reject the mode entirely. 288 * a chance to reject the mode entirely.
276 */ 289 */
277 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 290 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
278 291
279 if (encoder->crtc != crtc) 292 if (encoder->crtc != crtc)
280 continue; 293 continue;
281 294
282 if (encoder->bridge && encoder->bridge->funcs->mode_fixup) { 295 if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
283 ret = encoder->bridge->funcs->mode_fixup( 296 ret = encoder->bridge->funcs->mode_fixup(
284 encoder->bridge, mode, adjusted_mode); 297 encoder->bridge, mode, adjusted_mode);
285 if (!ret) { 298 if (!ret) {
286 DRM_DEBUG_KMS("Bridge fixup failed\n"); 299 DRM_DEBUG_KMS("Bridge fixup failed\n");
287 goto done; 300 goto done;
288 } 301 }
289 } 302 }
290 303
291 encoder_funcs = encoder->helper_private; 304 encoder_funcs = encoder->helper_private;
292 if (!(ret = encoder_funcs->mode_fixup(encoder, mode, 305 if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
293 adjusted_mode))) { 306 adjusted_mode))) {
294 DRM_DEBUG_KMS("Encoder fixup failed\n"); 307 DRM_DEBUG_KMS("Encoder fixup failed\n");
295 goto done; 308 goto done;
296 } 309 }
297 } 310 }
298 311
299 if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) { 312 if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
300 DRM_DEBUG_KMS("CRTC fixup failed\n"); 313 DRM_DEBUG_KMS("CRTC fixup failed\n");
301 goto done; 314 goto done;
302 } 315 }
303 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 316 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
304 317
305 /* Prepare the encoders and CRTCs before setting the mode. */ 318 /* Prepare the encoders and CRTCs before setting the mode. */
306 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 319 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
307 320
308 if (encoder->crtc != crtc) 321 if (encoder->crtc != crtc)
309 continue; 322 continue;
310 323
311 if (encoder->bridge) 324 if (encoder->bridge)
312 encoder->bridge->funcs->disable(encoder->bridge); 325 encoder->bridge->funcs->disable(encoder->bridge);
313 326
314 encoder_funcs = encoder->helper_private; 327 encoder_funcs = encoder->helper_private;
315 /* Disable the encoders as the first thing we do. */ 328 /* Disable the encoders as the first thing we do. */
316 encoder_funcs->prepare(encoder); 329 encoder_funcs->prepare(encoder);
317 330
318 if (encoder->bridge) 331 if (encoder->bridge)
319 encoder->bridge->funcs->post_disable(encoder->bridge); 332 encoder->bridge->funcs->post_disable(encoder->bridge);
320 } 333 }
321 334
322 drm_crtc_prepare_encoders(dev); 335 drm_crtc_prepare_encoders(dev);
323 336
324 crtc_funcs->prepare(crtc); 337 crtc_funcs->prepare(crtc);
325 338
326 /* Set up the DPLL and any encoders state that needs to adjust or depend 339 /* Set up the DPLL and any encoders state that needs to adjust or depend
327 * on the DPLL. 340 * on the DPLL.
328 */ 341 */
329 ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb); 342 ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
330 if (!ret) 343 if (!ret)
331 goto done; 344 goto done;
332 345
333 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 346 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
334 347
335 if (encoder->crtc != crtc) 348 if (encoder->crtc != crtc)
336 continue; 349 continue;
337 350
338 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", 351 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
339 encoder->base.id, drm_get_encoder_name(encoder), 352 encoder->base.id, drm_get_encoder_name(encoder),
340 mode->base.id, mode->name); 353 mode->base.id, mode->name);
341 encoder_funcs = encoder->helper_private; 354 encoder_funcs = encoder->helper_private;
342 encoder_funcs->mode_set(encoder, mode, adjusted_mode); 355 encoder_funcs->mode_set(encoder, mode, adjusted_mode);
343 356
344 if (encoder->bridge && encoder->bridge->funcs->mode_set) 357 if (encoder->bridge && encoder->bridge->funcs->mode_set)
345 encoder->bridge->funcs->mode_set(encoder->bridge, mode, 358 encoder->bridge->funcs->mode_set(encoder->bridge, mode,
346 adjusted_mode); 359 adjusted_mode);
347 } 360 }
348 361
349 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 362 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
350 crtc_funcs->commit(crtc); 363 crtc_funcs->commit(crtc);
351 364
352 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 365 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
353 366
354 if (encoder->crtc != crtc) 367 if (encoder->crtc != crtc)
355 continue; 368 continue;
356 369
357 if (encoder->bridge) 370 if (encoder->bridge)
358 encoder->bridge->funcs->pre_enable(encoder->bridge); 371 encoder->bridge->funcs->pre_enable(encoder->bridge);
359 372
360 encoder_funcs = encoder->helper_private; 373 encoder_funcs = encoder->helper_private;
361 encoder_funcs->commit(encoder); 374 encoder_funcs->commit(encoder);
362 375
363 if (encoder->bridge) 376 if (encoder->bridge)
364 encoder->bridge->funcs->enable(encoder->bridge); 377 encoder->bridge->funcs->enable(encoder->bridge);
365 } 378 }
366 379
367 /* Store real post-adjustment hardware mode. */ 380 /* Store real post-adjustment hardware mode. */
368 crtc->hwmode = *adjusted_mode; 381 crtc->hwmode = *adjusted_mode;
369 382
370 /* Calculate and store various constants which 383 /* Calculate and store various constants which
371 * are later needed by vblank and swap-completion 384 * are later needed by vblank and swap-completion
372 * timestamping. They are derived from true hwmode. 385 * timestamping. They are derived from true hwmode.
373 */ 386 */
374 drm_calc_timestamping_constants(crtc, &crtc->hwmode); 387 drm_calc_timestamping_constants(crtc, &crtc->hwmode);
375 388
376 /* FIXME: add subpixel order */ 389 /* FIXME: add subpixel order */
377 done: 390 done:
378 drm_mode_destroy(dev, adjusted_mode); 391 drm_mode_destroy(dev, adjusted_mode);
379 if (!ret) { 392 if (!ret) {
380 crtc->enabled = saved_enabled; 393 crtc->enabled = saved_enabled;
381 crtc->mode = saved_mode; 394 crtc->mode = saved_mode;
382 crtc->x = saved_x; 395 crtc->x = saved_x;
383 crtc->y = saved_y; 396 crtc->y = saved_y;
384 } 397 }
385 398
386 return ret; 399 return ret;
387 } 400 }
388 EXPORT_SYMBOL(drm_crtc_helper_set_mode); 401 EXPORT_SYMBOL(drm_crtc_helper_set_mode);
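Most callers reach this function indirectly through drm_crtc_helper_set_config() or drm_helper_resume_force_mode(), but it can be called directly while holding all modeset locks. A sketch that re-programs a CRTC with its current state (foo_refresh_crtc is hypothetical):

static bool foo_refresh_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	bool ok;

	drm_modeset_lock_all(dev);
	/* Re-run the full prepare/mode_set/commit sequence with current state. */
	ok = drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
				      crtc->primary->fb);
	drm_modeset_unlock_all(dev);

	return ok;
}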
389 402
390 403
391 static int 404 static int
392 drm_crtc_helper_disable(struct drm_crtc *crtc) 405 drm_crtc_helper_disable(struct drm_crtc *crtc)
393 { 406 {
394 struct drm_device *dev = crtc->dev; 407 struct drm_device *dev = crtc->dev;
395 struct drm_connector *connector; 408 struct drm_connector *connector;
396 struct drm_encoder *encoder; 409 struct drm_encoder *encoder;
397 410
398 /* Decouple all encoders and their attached connectors from this crtc */ 411 /* Decouple all encoders and their attached connectors from this crtc */
399 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 412 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
400 if (encoder->crtc != crtc) 413 if (encoder->crtc != crtc)
401 continue; 414 continue;
402 415
403 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 416 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
404 if (connector->encoder != encoder) 417 if (connector->encoder != encoder)
405 continue; 418 continue;
406 419
407 connector->encoder = NULL; 420 connector->encoder = NULL;
408 421
409 /* 422 /*
410 * drm_helper_disable_unused_functions() ought to be 423 * drm_helper_disable_unused_functions() ought to be
411 * doing this, but since we've decoupled the encoder 424 * doing this, but since we've decoupled the encoder
412 * from the connector above, the required connection 425 * from the connector above, the required connection
413 * between them is henceforth no longer available. 426 * between them is henceforth no longer available.
414 */ 427 */
415 connector->dpms = DRM_MODE_DPMS_OFF; 428 connector->dpms = DRM_MODE_DPMS_OFF;
416 } 429 }
417 } 430 }
418 431
419 __drm_helper_disable_unused_functions(dev); 432 __drm_helper_disable_unused_functions(dev);
420 return 0; 433 return 0;
421 } 434 }
422 435
423 /** 436 /**
424 * drm_crtc_helper_set_config - set a new config from userspace 437 * drm_crtc_helper_set_config - set a new config from userspace
425 * @set: mode set configuration 438 * @set: mode set configuration
426 * 439 *
427 * Set up a new configuration, provided by the upper layers (either an ioctl call 440 * Set up a new configuration, provided by the upper layers (either an ioctl call
428 * from userspace or internally e.g. from the fbdev support code) in @set, and 441 * from userspace or internally e.g. from the fbdev support code) in @set, and
429 * enable it. This is the main helper function for drivers that implement 442 * enable it. This is the main helper function for drivers that implement
430 * kernel mode setting with the crtc helper functions and the assorted 443 * kernel mode setting with the crtc helper functions and the assorted
431 * ->prepare(), ->modeset() and ->commit() helper callbacks. 444 * ->prepare(), ->modeset() and ->commit() helper callbacks.
432 * 445 *
433 * Returns: 446 * Returns:
434 * Returns 0 on success, negative errno numbers on failure. 447 * Returns 0 on success, negative errno numbers on failure.
435 */ 448 */
436 int drm_crtc_helper_set_config(struct drm_mode_set *set) 449 int drm_crtc_helper_set_config(struct drm_mode_set *set)
437 { 450 {
438 struct drm_device *dev; 451 struct drm_device *dev;
439 struct drm_crtc *new_crtc; 452 struct drm_crtc *new_crtc;
440 struct drm_encoder *save_encoders, *new_encoder, *encoder; 453 struct drm_encoder *save_encoders, *new_encoder, *encoder;
441 bool mode_changed = false; /* if true do a full mode set */ 454 bool mode_changed = false; /* if true do a full mode set */
442 bool fb_changed = false; /* if true and !mode_changed just do a flip */ 455 bool fb_changed = false; /* if true and !mode_changed just do a flip */
443 struct drm_connector *save_connectors, *connector; 456 struct drm_connector *save_connectors, *connector;
444 int count = 0, ro, fail = 0; 457 int count = 0, ro, fail = 0;
445 struct drm_crtc_helper_funcs *crtc_funcs; 458 struct drm_crtc_helper_funcs *crtc_funcs;
446 struct drm_mode_set save_set; 459 struct drm_mode_set save_set;
447 int ret; 460 int ret;
448 int i; 461 int i;
449 462
450 DRM_DEBUG_KMS("\n"); 463 DRM_DEBUG_KMS("\n");
451 464
452 BUG_ON(!set); 465 BUG_ON(!set);
453 BUG_ON(!set->crtc); 466 BUG_ON(!set->crtc);
454 BUG_ON(!set->crtc->helper_private); 467 BUG_ON(!set->crtc->helper_private);
455 468
456 /* Enforce sane interface api - has been abused by the fb helper. */ 469 /* Enforce sane interface api - has been abused by the fb helper. */
457 BUG_ON(!set->mode && set->fb); 470 BUG_ON(!set->mode && set->fb);
458 BUG_ON(set->fb && set->num_connectors == 0); 471 BUG_ON(set->fb && set->num_connectors == 0);
459 472
460 crtc_funcs = set->crtc->helper_private; 473 crtc_funcs = set->crtc->helper_private;
461 474
462 if (!set->mode) 475 if (!set->mode)
463 set->fb = NULL; 476 set->fb = NULL;
464 477
465 if (set->fb) { 478 if (set->fb) {
466 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", 479 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
467 set->crtc->base.id, set->fb->base.id, 480 set->crtc->base.id, set->fb->base.id,
468 (int)set->num_connectors, set->x, set->y); 481 (int)set->num_connectors, set->x, set->y);
469 } else { 482 } else {
470 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); 483 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
471 return drm_crtc_helper_disable(set->crtc); 484 return drm_crtc_helper_disable(set->crtc);
472 } 485 }
473 486
474 dev = set->crtc->dev; 487 dev = set->crtc->dev;
475 488
476 drm_warn_on_modeset_not_all_locked(dev); 489 drm_warn_on_modeset_not_all_locked(dev);
477 490
478 /* 491 /*
479 * Allocate space for the backup of all (non-pointer) encoder and 492 * Allocate space for the backup of all (non-pointer) encoder and
480 * connector data. 493 * connector data.
481 */ 494 */
482 save_encoders = kzalloc(dev->mode_config.num_encoder * 495 save_encoders = kzalloc(dev->mode_config.num_encoder *
483 sizeof(struct drm_encoder), GFP_KERNEL); 496 sizeof(struct drm_encoder), GFP_KERNEL);
484 if (!save_encoders) 497 if (!save_encoders)
485 return -ENOMEM; 498 return -ENOMEM;
486 499
487 save_connectors = kzalloc(dev->mode_config.num_connector * 500 save_connectors = kzalloc(dev->mode_config.num_connector *
488 sizeof(struct drm_connector), GFP_KERNEL); 501 sizeof(struct drm_connector), GFP_KERNEL);
489 if (!save_connectors) { 502 if (!save_connectors) {
490 kfree(save_encoders); 503 kfree(save_encoders);
491 return -ENOMEM; 504 return -ENOMEM;
492 } 505 }
493 506
494 /* 507 /*
495 * Copy data. Note that driver private data is not affected. 508 * Copy data. Note that driver private data is not affected.
496 * Should anything bad happen only the expected state is 509 * Should anything bad happen only the expected state is
497 * restored, not the driver's personal bookkeeping. 510 * restored, not the driver's personal bookkeeping.
498 */ 511 */
499 count = 0; 512 count = 0;
500 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 513 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
501 save_encoders[count++] = *encoder; 514 save_encoders[count++] = *encoder;
502 } 515 }
503 516
504 count = 0; 517 count = 0;
505 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 518 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
506 save_connectors[count++] = *connector; 519 save_connectors[count++] = *connector;
507 } 520 }
508 521
509 save_set.crtc = set->crtc; 522 save_set.crtc = set->crtc;
510 save_set.mode = &set->crtc->mode; 523 save_set.mode = &set->crtc->mode;
511 save_set.x = set->crtc->x; 524 save_set.x = set->crtc->x;
512 save_set.y = set->crtc->y; 525 save_set.y = set->crtc->y;
513 save_set.fb = set->crtc->primary->fb; 526 save_set.fb = set->crtc->primary->fb;
514 527
515 /* We should be able to check here if the fb has the same properties 528 /* We should be able to check here if the fb has the same properties
516 * and then just flip_or_move it */ 529 * and then just flip_or_move it */
517 if (set->crtc->primary->fb != set->fb) { 530 if (set->crtc->primary->fb != set->fb) {
518 /* If we have no fb then treat it as a full mode set */ 531 /* If we have no fb then treat it as a full mode set */
519 if (set->crtc->primary->fb == NULL) { 532 if (set->crtc->primary->fb == NULL) {
520 DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); 533 DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
521 mode_changed = true; 534 mode_changed = true;
522 } else if (set->fb == NULL) { 535 } else if (set->fb == NULL) {
523 mode_changed = true; 536 mode_changed = true;
524 } else if (set->fb->pixel_format != 537 } else if (set->fb->pixel_format !=
525 set->crtc->primary->fb->pixel_format) { 538 set->crtc->primary->fb->pixel_format) {
526 mode_changed = true; 539 mode_changed = true;
527 } else 540 } else
528 fb_changed = true; 541 fb_changed = true;
529 } 542 }
530 543
531 if (set->x != set->crtc->x || set->y != set->crtc->y) 544 if (set->x != set->crtc->x || set->y != set->crtc->y)
532 fb_changed = true; 545 fb_changed = true;
533 546
534 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { 547 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
535 DRM_DEBUG_KMS("modes are different, full mode set\n"); 548 DRM_DEBUG_KMS("modes are different, full mode set\n");
536 drm_mode_debug_printmodeline(&set->crtc->mode); 549 drm_mode_debug_printmodeline(&set->crtc->mode);
537 drm_mode_debug_printmodeline(set->mode); 550 drm_mode_debug_printmodeline(set->mode);
538 mode_changed = true; 551 mode_changed = true;
539 } 552 }
540 553
541 /* a) traverse passed in connector list and get encoders for them */ 554 /* a) traverse passed in connector list and get encoders for them */
542 count = 0; 555 count = 0;
543 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 556 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
544 struct drm_connector_helper_funcs *connector_funcs = 557 struct drm_connector_helper_funcs *connector_funcs =
545 connector->helper_private; 558 connector->helper_private;
546 new_encoder = connector->encoder; 559 new_encoder = connector->encoder;
547 for (ro = 0; ro < set->num_connectors; ro++) { 560 for (ro = 0; ro < set->num_connectors; ro++) {
548 if (set->connectors[ro] == connector) { 561 if (set->connectors[ro] == connector) {
549 new_encoder = connector_funcs->best_encoder(connector); 562 new_encoder = connector_funcs->best_encoder(connector);
550 /* if we can't get an encoder for a connector 563 /* if we can't get an encoder for a connector
551 we are setting up now - then fail */ 564 we are setting up now - then fail */
552 if (new_encoder == NULL) 565 if (new_encoder == NULL)
553 /* don't break so the fail path works correctly */ 566 /* don't break so the fail path works correctly */
554 fail = 1; 567 fail = 1;
555 568
556 if (connector->dpms != DRM_MODE_DPMS_ON) { 569 if (connector->dpms != DRM_MODE_DPMS_ON) {
557 DRM_DEBUG_KMS("connector dpms not on, full mode switch\n"); 570 DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
558 mode_changed = true; 571 mode_changed = true;
559 } 572 }
560 573
561 break; 574 break;
562 } 575 }
563 } 576 }
564 577
565 if (new_encoder != connector->encoder) { 578 if (new_encoder != connector->encoder) {
566 DRM_DEBUG_KMS("encoder changed, full mode switch\n"); 579 DRM_DEBUG_KMS("encoder changed, full mode switch\n");
567 mode_changed = true; 580 mode_changed = true;
568 /* If the encoder is reused for another connector, then 581 /* If the encoder is reused for another connector, then
569 * the appropriate crtc will be set later. 582 * the appropriate crtc will be set later.
570 */ 583 */
571 if (connector->encoder) 584 if (connector->encoder)
572 connector->encoder->crtc = NULL; 585 connector->encoder->crtc = NULL;
573 connector->encoder = new_encoder; 586 connector->encoder = new_encoder;
574 } 587 }
575 } 588 }
576 589
577 if (fail) { 590 if (fail) {
578 ret = -EINVAL; 591 ret = -EINVAL;
579 goto fail; 592 goto fail;
580 } 593 }
581 594
582 count = 0; 595 count = 0;
583 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 596 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
584 if (!connector->encoder) 597 if (!connector->encoder)
585 continue; 598 continue;
586 599
587 if (connector->encoder->crtc == set->crtc) 600 if (connector->encoder->crtc == set->crtc)
588 new_crtc = NULL; 601 new_crtc = NULL;
589 else 602 else
590 new_crtc = connector->encoder->crtc; 603 new_crtc = connector->encoder->crtc;
591 604
592 for (ro = 0; ro < set->num_connectors; ro++) { 605 for (ro = 0; ro < set->num_connectors; ro++) {
593 if (set->connectors[ro] == connector) 606 if (set->connectors[ro] == connector)
594 new_crtc = set->crtc; 607 new_crtc = set->crtc;
595 } 608 }
596 609
597 /* Make sure the new CRTC will work with the encoder */ 610 /* Make sure the new CRTC will work with the encoder */
598 if (new_crtc && 611 if (new_crtc &&
599 !drm_encoder_crtc_ok(connector->encoder, new_crtc)) { 612 !drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
600 ret = -EINVAL; 613 ret = -EINVAL;
601 goto fail; 614 goto fail;
602 } 615 }
603 if (new_crtc != connector->encoder->crtc) { 616 if (new_crtc != connector->encoder->crtc) {
604 DRM_DEBUG_KMS("crtc changed, full mode switch\n"); 617 DRM_DEBUG_KMS("crtc changed, full mode switch\n");
605 mode_changed = true; 618 mode_changed = true;
606 connector->encoder->crtc = new_crtc; 619 connector->encoder->crtc = new_crtc;
607 } 620 }
608 if (new_crtc) { 621 if (new_crtc) {
609 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", 622 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
610 connector->base.id, drm_get_connector_name(connector), 623 connector->base.id, drm_get_connector_name(connector),
611 new_crtc->base.id); 624 new_crtc->base.id);
612 } else { 625 } else {
613 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", 626 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
614 connector->base.id, drm_get_connector_name(connector)); 627 connector->base.id, drm_get_connector_name(connector));
615 } 628 }
616 } 629 }
617 630
618 /* mode_set_base is not a required function */ 631 /* mode_set_base is not a required function */
619 if (fb_changed && !crtc_funcs->mode_set_base) 632 if (fb_changed && !crtc_funcs->mode_set_base)
620 mode_changed = true; 633 mode_changed = true;
621 634
622 if (mode_changed) { 635 if (mode_changed) {
623 if (drm_helper_crtc_in_use(set->crtc)) { 636 if (drm_helper_crtc_in_use(set->crtc)) {
624 DRM_DEBUG_KMS("attempting to set mode from" 637 DRM_DEBUG_KMS("attempting to set mode from"
625 " userspace\n"); 638 " userspace\n");
626 drm_mode_debug_printmodeline(set->mode); 639 drm_mode_debug_printmodeline(set->mode);
627 set->crtc->primary->fb = set->fb; 640 set->crtc->primary->fb = set->fb;
628 if (!drm_crtc_helper_set_mode(set->crtc, set->mode, 641 if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
629 set->x, set->y, 642 set->x, set->y,
630 save_set.fb)) { 643 save_set.fb)) {
631 DRM_ERROR("failed to set mode on [CRTC:%d]\n", 644 DRM_ERROR("failed to set mode on [CRTC:%d]\n",
632 set->crtc->base.id); 645 set->crtc->base.id);
633 set->crtc->primary->fb = save_set.fb; 646 set->crtc->primary->fb = save_set.fb;
634 ret = -EINVAL; 647 ret = -EINVAL;
635 goto fail; 648 goto fail;
636 } 649 }
637 DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); 650 DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
638 for (i = 0; i < set->num_connectors; i++) { 651 for (i = 0; i < set->num_connectors; i++) {
639 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, 652 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
640 drm_get_connector_name(set->connectors[i])); 653 drm_get_connector_name(set->connectors[i]));
641 set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON); 654 set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
642 } 655 }
643 } 656 }
644 __drm_helper_disable_unused_functions(dev); 657 __drm_helper_disable_unused_functions(dev);
645 } else if (fb_changed) { 658 } else if (fb_changed) {
646 set->crtc->x = set->x; 659 set->crtc->x = set->x;
647 set->crtc->y = set->y; 660 set->crtc->y = set->y;
648 set->crtc->primary->fb = set->fb; 661 set->crtc->primary->fb = set->fb;
649 ret = crtc_funcs->mode_set_base(set->crtc, 662 ret = crtc_funcs->mode_set_base(set->crtc,
650 set->x, set->y, save_set.fb); 663 set->x, set->y, save_set.fb);
651 if (ret != 0) { 664 if (ret != 0) {
652 set->crtc->x = save_set.x; 665 set->crtc->x = save_set.x;
653 set->crtc->y = save_set.y; 666 set->crtc->y = save_set.y;
654 set->crtc->primary->fb = save_set.fb; 667 set->crtc->primary->fb = save_set.fb;
655 goto fail; 668 goto fail;
656 } 669 }
657 } 670 }
658 671
659 kfree(save_connectors); 672 kfree(save_connectors);
660 kfree(save_encoders); 673 kfree(save_encoders);
661 return 0; 674 return 0;
662 675
663 fail: 676 fail:
664 /* Restore all previous data. */ 677 /* Restore all previous data. */
665 count = 0; 678 count = 0;
666 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 679 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
667 *encoder = save_encoders[count++]; 680 *encoder = save_encoders[count++];
668 } 681 }
669 682
670 count = 0; 683 count = 0;
671 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 684 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
672 *connector = save_connectors[count++]; 685 *connector = save_connectors[count++];
673 } 686 }
674 687
675 /* Try to restore the config */ 688 /* Try to restore the config */
676 if (mode_changed && 689 if (mode_changed &&
677 !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x, 690 !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
678 save_set.y, save_set.fb)) 691 save_set.y, save_set.fb))
679 DRM_ERROR("failed to restore config after modeset failure\n"); 692 DRM_ERROR("failed to restore config after modeset failure\n");
680 693
681 kfree(save_connectors); 694 kfree(save_connectors);
682 kfree(save_encoders); 695 kfree(save_encoders);
683 return ret; 696 return ret;
684 } 697 }
685 EXPORT_SYMBOL(drm_crtc_helper_set_config); 698 EXPORT_SYMBOL(drm_crtc_helper_set_config);
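A sketch of a caller (foo_set_single_output is hypothetical): fill a struct drm_mode_set with the CRTC, mode, framebuffer and connector list, then hand it to the helper under the modeset locks:

static int foo_set_single_output(struct drm_crtc *crtc,
				 struct drm_connector *connector,
				 struct drm_display_mode *mode,
				 struct drm_framebuffer *fb)
{
	struct drm_mode_set set = {
		.crtc = crtc,
		.x = 0,
		.y = 0,
		.mode = mode,
		.connectors = &connector,
		.num_connectors = 1,
		.fb = fb,
	};
	int ret;

	drm_modeset_lock_all(crtc->dev);
	ret = drm_crtc_helper_set_config(&set);	/* 0 on success, -errno on failure */
	drm_modeset_unlock_all(crtc->dev);

	return ret;
}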
686 699
687 static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) 700 static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
688 { 701 {
689 int dpms = DRM_MODE_DPMS_OFF; 702 int dpms = DRM_MODE_DPMS_OFF;
690 struct drm_connector *connector; 703 struct drm_connector *connector;
691 struct drm_device *dev = encoder->dev; 704 struct drm_device *dev = encoder->dev;
692 705
693 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 706 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
694 if (connector->encoder == encoder) 707 if (connector->encoder == encoder)
695 if (connector->dpms < dpms) 708 if (connector->dpms < dpms)
696 dpms = connector->dpms; 709 dpms = connector->dpms;
697 return dpms; 710 return dpms;
698 } 711 }
699 712
700 /* Helper which handles bridge ordering around encoder dpms */ 713 /* Helper which handles bridge ordering around encoder dpms */
701 static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode) 714 static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
702 { 715 {
703 struct drm_bridge *bridge = encoder->bridge; 716 struct drm_bridge *bridge = encoder->bridge;
704 struct drm_encoder_helper_funcs *encoder_funcs; 717 struct drm_encoder_helper_funcs *encoder_funcs;
705 718
706 if (bridge) { 719 if (bridge) {
707 if (mode == DRM_MODE_DPMS_ON) 720 if (mode == DRM_MODE_DPMS_ON)
708 bridge->funcs->pre_enable(bridge); 721 bridge->funcs->pre_enable(bridge);
709 else 722 else
710 bridge->funcs->disable(bridge); 723 bridge->funcs->disable(bridge);
711 } 724 }
712 725
713 encoder_funcs = encoder->helper_private; 726 encoder_funcs = encoder->helper_private;
714 if (encoder_funcs->dpms) 727 if (encoder_funcs->dpms)
715 encoder_funcs->dpms(encoder, mode); 728 encoder_funcs->dpms(encoder, mode);
716 729
717 if (bridge) { 730 if (bridge) {
718 if (mode == DRM_MODE_DPMS_ON) 731 if (mode == DRM_MODE_DPMS_ON)
719 bridge->funcs->enable(bridge); 732 bridge->funcs->enable(bridge);
720 else 733 else
721 bridge->funcs->post_disable(bridge); 734 bridge->funcs->post_disable(bridge);
722 } 735 }
723 } 736 }
724 737
725 static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) 738 static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
726 { 739 {
727 int dpms = DRM_MODE_DPMS_OFF; 740 int dpms = DRM_MODE_DPMS_OFF;
728 struct drm_connector *connector; 741 struct drm_connector *connector;
729 struct drm_device *dev = crtc->dev; 742 struct drm_device *dev = crtc->dev;
730 743
731 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 744 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
732 if (connector->encoder && connector->encoder->crtc == crtc) 745 if (connector->encoder && connector->encoder->crtc == crtc)
733 if (connector->dpms < dpms) 746 if (connector->dpms < dpms)
734 dpms = connector->dpms; 747 dpms = connector->dpms;
735 return dpms; 748 return dpms;
736 } 749 }
737 750
738 /** 751 /**
739 * drm_helper_connector_dpms() - connector dpms helper implementation 752 * drm_helper_connector_dpms() - connector dpms helper implementation
740 * @connector: affected connector 753 * @connector: affected connector
741 * @mode: DPMS mode 754 * @mode: DPMS mode
742 * 755 *
743 * This is the main helper function provided by the crtc helper framework for 756 * This is the main helper function provided by the crtc helper framework for
744 * implementing the DPMS connector attribute. It computes the new desired DPMS 757 * implementing the DPMS connector attribute. It computes the new desired DPMS
745 * state for all encoders and crtcs in the output mesh and calls the ->dpms() 758 * state for all encoders and crtcs in the output mesh and calls the ->dpms()
746 * callback provided by the driver appropriately. 759 * callback provided by the driver appropriately.
747 */ 760 */
748 void drm_helper_connector_dpms(struct drm_connector *connector, int mode) 761 void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
749 { 762 {
750 struct drm_encoder *encoder = connector->encoder; 763 struct drm_encoder *encoder = connector->encoder;
751 struct drm_crtc *crtc = encoder ? encoder->crtc : NULL; 764 struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
752 int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF; 765 int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
753 766
754 if (mode == connector->dpms) 767 if (mode == connector->dpms)
755 return; 768 return;
756 769
757 old_dpms = connector->dpms; 770 old_dpms = connector->dpms;
758 connector->dpms = mode; 771 connector->dpms = mode;
759 772
760 if (encoder) 773 if (encoder)
761 encoder_dpms = drm_helper_choose_encoder_dpms(encoder); 774 encoder_dpms = drm_helper_choose_encoder_dpms(encoder);
762 775
763 /* from off to on, do crtc then encoder */ 776 /* from off to on, do crtc then encoder */
764 if (mode < old_dpms) { 777 if (mode < old_dpms) {
765 if (crtc) { 778 if (crtc) {
766 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 779 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
767 if (crtc_funcs->dpms) 780 if (crtc_funcs->dpms)
768 (*crtc_funcs->dpms) (crtc, 781 (*crtc_funcs->dpms) (crtc,
769 drm_helper_choose_crtc_dpms(crtc)); 782 drm_helper_choose_crtc_dpms(crtc));
770 } 783 }
771 if (encoder) 784 if (encoder)
772 drm_helper_encoder_dpms(encoder, encoder_dpms); 785 drm_helper_encoder_dpms(encoder, encoder_dpms);
773 } 786 }
774 787
775 /* from on to off, do encoder then crtc */ 788 /* from on to off, do encoder then crtc */
776 if (mode > old_dpms) { 789 if (mode > old_dpms) {
777 if (encoder) 790 if (encoder)
778 drm_helper_encoder_dpms(encoder, encoder_dpms); 791 drm_helper_encoder_dpms(encoder, encoder_dpms);
779 if (crtc) { 792 if (crtc) {
780 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 793 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
781 if (crtc_funcs->dpms) 794 if (crtc_funcs->dpms)
782 (*crtc_funcs->dpms) (crtc, 795 (*crtc_funcs->dpms) (crtc,
783 drm_helper_choose_crtc_dpms(crtc)); 796 drm_helper_choose_crtc_dpms(crtc));
784 } 797 }
785 } 798 }
786 799
787 return; 800 return;
788 } 801 }
789 EXPORT_SYMBOL(drm_helper_connector_dpms); 802 EXPORT_SYMBOL(drm_helper_connector_dpms);
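Drivers normally don't call this directly; they plug it into their connector funcs as the ->dpms() callback. A sketch (foo_connector_detect is a hypothetical probe callback; the others are standard helpers):

static const struct drm_connector_funcs foo_connector_funcs = {
	.dpms = drm_helper_connector_dpms,	/* handles crtc/encoder ordering */
	.detect = foo_connector_detect,		/* hypothetical */
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
};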
790 803
791 /** 804 /**
792 * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata 805 * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
793 * @fb: drm_framebuffer object to fill out 806 * @fb: drm_framebuffer object to fill out
794 * @mode_cmd: metadata from the userspace fb creation request 807 * @mode_cmd: metadata from the userspace fb creation request
795 * 808 *
796 * This helper can be used in a driver's fb_create callback to pre-fill the fb's 809 * This helper can be used in a driver's fb_create callback to pre-fill the fb's
797 * metadata fields. 810 * metadata fields.
798 */ 811 */
799 void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, 812 void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
800 struct drm_mode_fb_cmd2 *mode_cmd) 813 struct drm_mode_fb_cmd2 *mode_cmd)
801 { 814 {
802 int i; 815 int i;
803 816
804 fb->width = mode_cmd->width; 817 fb->width = mode_cmd->width;
805 fb->height = mode_cmd->height; 818 fb->height = mode_cmd->height;
806 for (i = 0; i < 4; i++) { 819 for (i = 0; i < 4; i++) {
807 fb->pitches[i] = mode_cmd->pitches[i]; 820 fb->pitches[i] = mode_cmd->pitches[i];
808 fb->offsets[i] = mode_cmd->offsets[i]; 821 fb->offsets[i] = mode_cmd->offsets[i];
809 } 822 }
810 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth, 823 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
811 &fb->bits_per_pixel); 824 &fb->bits_per_pixel);
812 fb->pixel_format = mode_cmd->pixel_format; 825 fb->pixel_format = mode_cmd->pixel_format;
813 } 826 }
814 EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); 827 EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
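A sketch of a driver's fb_create path using this helper (struct foo_framebuffer and foo_fb_funcs are hypothetical):

struct foo_framebuffer {
	struct drm_framebuffer base;
	/* driver-private state (backing object, tiling, ...) would go here */
};

/* Hypothetical minimal ops table; a real driver frees its state in destroy. */
static const struct drm_framebuffer_funcs foo_fb_funcs = {
	.destroy = drm_framebuffer_cleanup,
};

static int foo_framebuffer_init(struct drm_device *dev,
				struct foo_framebuffer *foo_fb,
				struct drm_mode_fb_cmd2 *mode_cmd)
{
	/* Copy width/height/pitches/offsets/format from the userspace request. */
	drm_helper_mode_fill_fb_struct(&foo_fb->base, mode_cmd);
	return drm_framebuffer_init(dev, &foo_fb->base, &foo_fb_funcs);
}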
815 828
816 /** 829 /**
817 * drm_helper_resume_force_mode - force-restore mode setting configuration 830 * drm_helper_resume_force_mode - force-restore mode setting configuration
818 * @dev: drm_device which should be restored 831 * @dev: drm_device which should be restored
819 * 832 *
820 * Drivers which use the mode setting helpers can use this function to 833 * Drivers which use the mode setting helpers can use this function to
821 * force-restore the mode setting configuration e.g. on resume or when something 834 * force-restore the mode setting configuration e.g. on resume or when something
822 * else might have trampled over the hw state (like some overzealous old BIOSen 835 * else might have trampled over the hw state (like some overzealous old BIOSen
823 * tended to do). 836 * tended to do).
824 * 837 *
825 * This helper doesn't provide an error return value since restoring the old 838 * This helper doesn't provide an error return value since restoring the old
826 * config should never fail due to resource allocation issues since the driver 839 * config should never fail due to resource allocation issues since the driver
827 * has successfully set the restored configuration already. Hence this should 840 * has successfully set the restored configuration already. Hence this should
828 * boil down to the equivalent of a few dpms on calls, which also don't provide 841 * boil down to the equivalent of a few dpms on calls, which also don't provide
829 * an error code. 842 * an error code.
830 * 843 *
831 * Drivers where simply restoring an old configuration again might fail (e.g. 844 * Drivers where simply restoring an old configuration again might fail (e.g.
832 * due to slight differences in allocating shared resources when the 845 * due to slight differences in allocating shared resources when the
833 * configuration is restored in a different order than when userspace set it up) 846 * configuration is restored in a different order than when userspace set it up)
834 * need to use their own restore logic. 847 * need to use their own restore logic.
835 */ 848 */
836 void drm_helper_resume_force_mode(struct drm_device *dev) 849 void drm_helper_resume_force_mode(struct drm_device *dev)
837 { 850 {
838 struct drm_crtc *crtc; 851 struct drm_crtc *crtc;
839 struct drm_encoder *encoder; 852 struct drm_encoder *encoder;
840 struct drm_crtc_helper_funcs *crtc_funcs; 853 struct drm_crtc_helper_funcs *crtc_funcs;
841 int encoder_dpms; 854 int encoder_dpms;
842 bool ret; 855 bool ret;
843 856
844 drm_modeset_lock_all(dev); 857 drm_modeset_lock_all(dev);
845 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 858 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
846 859
847 if (!crtc->enabled) 860 if (!crtc->enabled)
848 continue; 861 continue;
849 862
850 ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, 863 ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
851 crtc->x, crtc->y, crtc->primary->fb); 864 crtc->x, crtc->y, crtc->primary->fb);
852 865
853 /* Restoring the old config should never fail! */ 866 /* Restoring the old config should never fail! */
854 if (ret == false) 867 if (ret == false)
855 DRM_ERROR("failed to set mode on crtc %p\n", crtc); 868 DRM_ERROR("failed to set mode on crtc %p\n", crtc);
856 869
857 /* Turn off outputs that were already powered off */ 870 /* Turn off outputs that were already powered off */
858 if (drm_helper_choose_crtc_dpms(crtc)) { 871 if (drm_helper_choose_crtc_dpms(crtc)) {
859 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 872 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
860 873
861 if (encoder->crtc != crtc) 874 if (encoder->crtc != crtc)
862 continue; 875 continue;
863 876
864 encoder_dpms = drm_helper_choose_encoder_dpms( 877 encoder_dpms = drm_helper_choose_encoder_dpms(
865 encoder); 878 encoder);
866 879
867 drm_helper_encoder_dpms(encoder, encoder_dpms); 880 drm_helper_encoder_dpms(encoder, encoder_dpms);
868 } 881 }
869 882
870 crtc_funcs = crtc->helper_private; 883 crtc_funcs = crtc->helper_private;
871 if (crtc_funcs->dpms) 884 if (crtc_funcs->dpms)
872 (*crtc_funcs->dpms) (crtc, 885 (*crtc_funcs->dpms) (crtc,
873 drm_helper_choose_crtc_dpms(crtc)); 886 drm_helper_choose_crtc_dpms(crtc));
874 } 887 }
875 } 888 }
876 889
877 /* disable the unused connectors while restoring the modesetting */ 890 /* disable the unused connectors while restoring the modesetting */
878 __drm_helper_disable_unused_functions(dev); 891 __drm_helper_disable_unused_functions(dev);
879 drm_modeset_unlock_all(dev); 892 drm_modeset_unlock_all(dev);
880 } 893 }
881 EXPORT_SYMBOL(drm_helper_resume_force_mode); 894 EXPORT_SYMBOL(drm_helper_resume_force_mode);
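A sketch of a resume hook using it (foo_hw_init() is hypothetical): re-initialize the hardware first, then force-restore the configuration; this helper takes the modeset locks itself:

static int foo_pm_resume(struct drm_device *dev)
{
	foo_hw_init(dev);	/* hypothetical: bring the hardware back up */
	drm_helper_resume_force_mode(dev);
	return 0;
}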
882 895
drivers/gpu/drm/radeon/atombios_crtc.c
1 /* 1 /*
2 * Copyright 2007-8 Advanced Micro Devices, Inc. 2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc. 3 * Copyright 2008 Red Hat Inc.
4 * 4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a 5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"), 6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation 7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the 9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions: 10 * Software is furnished to do so, subject to the following conditions:
11 * 11 *
12 * The above copyright notice and this permission notice shall be included in 12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software. 13 * all copies or substantial portions of the Software.
14 * 14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE. 21 * OTHER DEALINGS IN THE SOFTWARE.
22 * 22 *
23 * Authors: Dave Airlie 23 * Authors: Dave Airlie
24 * Alex Deucher 24 * Alex Deucher
25 */ 25 */
26 #include <drm/drmP.h> 26 #include <drm/drmP.h>
27 #include <drm/drm_crtc_helper.h> 27 #include <drm/drm_crtc_helper.h>
28 #include <drm/radeon_drm.h> 28 #include <drm/radeon_drm.h>
29 #include <drm/drm_fixed.h> 29 #include <drm/drm_fixed.h>
30 #include "radeon.h" 30 #include "radeon.h"
31 #include "atom.h" 31 #include "atom.h"
32 #include "atom-bits.h" 32 #include "atom-bits.h"
33 33
34 static void atombios_overscan_setup(struct drm_crtc *crtc, 34 static void atombios_overscan_setup(struct drm_crtc *crtc,
35 struct drm_display_mode *mode, 35 struct drm_display_mode *mode,
36 struct drm_display_mode *adjusted_mode) 36 struct drm_display_mode *adjusted_mode)
37 { 37 {
38 struct drm_device *dev = crtc->dev; 38 struct drm_device *dev = crtc->dev;
39 struct radeon_device *rdev = dev->dev_private; 39 struct radeon_device *rdev = dev->dev_private;
40 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 40 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
41 SET_CRTC_OVERSCAN_PS_ALLOCATION args; 41 SET_CRTC_OVERSCAN_PS_ALLOCATION args;
42 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); 42 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
43 int a1, a2; 43 int a1, a2;
44 44
45 memset(&args, 0, sizeof(args)); 45 memset(&args, 0, sizeof(args));
46 46
47 args.ucCRTC = radeon_crtc->crtc_id; 47 args.ucCRTC = radeon_crtc->crtc_id;
48 48
49 switch (radeon_crtc->rmx_type) { 49 switch (radeon_crtc->rmx_type) {
50 case RMX_CENTER: 50 case RMX_CENTER:
51 args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); 51 args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
52 args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); 52 args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
53 args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); 53 args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
54 args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); 54 args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
55 break; 55 break;
56 case RMX_ASPECT: 56 case RMX_ASPECT:
57 a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; 57 a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
58 a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; 58 a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
59 59
60 if (a1 > a2) { 60 if (a1 > a2) {
61 args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); 61 args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
62 args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); 62 args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
63 } else if (a2 > a1) { 63 } else if (a2 > a1) {
64 args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); 64 args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
65 args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); 65 args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
66 } 66 }
67 break; 67 break;
68 case RMX_FULL: 68 case RMX_FULL:
69 default: 69 default:
70 args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border); 70 args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
71 args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border); 71 args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
72 args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border); 72 args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
73 args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border); 73 args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
74 break; 74 break;
75 } 75 }
76 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 76 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
77 } 77 }
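A quick worked example of the RMX_ASPECT arithmetic above may help; the numbers are illustrative and not taken from the patch. Cross-multiplying the two aspect ratios (a1 vs. a2) decides between pillarboxing and letterboxing, and the border width falls straight out of the comparison:

/* Centering a 4:3 1024x768 mode on a 16:9 1920x1080 panel:
 *   a1 = 768 * 1920 = 1474560
 *   a2 = 1080 * 1024 = 1105920
 * a1 > a2, so the source is narrower than the panel (pillarbox):
 *   usOverscanLeft = usOverscanRight
 *     = (1920 - 1105920 / 768) / 2 = (1920 - 1440) / 2 = 240 pixels
 */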
78 78
79 static void atombios_scaler_setup(struct drm_crtc *crtc) 79 static void atombios_scaler_setup(struct drm_crtc *crtc)
80 { 80 {
81 struct drm_device *dev = crtc->dev; 81 struct drm_device *dev = crtc->dev;
82 struct radeon_device *rdev = dev->dev_private; 82 struct radeon_device *rdev = dev->dev_private;
83 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 83 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
84 ENABLE_SCALER_PS_ALLOCATION args; 84 ENABLE_SCALER_PS_ALLOCATION args;
85 int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); 85 int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
86 struct radeon_encoder *radeon_encoder = 86 struct radeon_encoder *radeon_encoder =
87 to_radeon_encoder(radeon_crtc->encoder); 87 to_radeon_encoder(radeon_crtc->encoder);
88 /* fixme - fill in enc_priv for atom dac */ 88 /* fixme - fill in enc_priv for atom dac */
89 enum radeon_tv_std tv_std = TV_STD_NTSC; 89 enum radeon_tv_std tv_std = TV_STD_NTSC;
90 bool is_tv = false, is_cv = false; 90 bool is_tv = false, is_cv = false;
91 91
92 if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) 92 if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
93 return; 93 return;
94 94
95 if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) { 95 if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
96 struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; 96 struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
97 tv_std = tv_dac->tv_std; 97 tv_std = tv_dac->tv_std;
98 is_tv = true; 98 is_tv = true;
99 } 99 }
100 100
101 memset(&args, 0, sizeof(args)); 101 memset(&args, 0, sizeof(args));
102 102
103 args.ucScaler = radeon_crtc->crtc_id; 103 args.ucScaler = radeon_crtc->crtc_id;
104 104
105 if (is_tv) { 105 if (is_tv) {
106 switch (tv_std) { 106 switch (tv_std) {
107 case TV_STD_NTSC: 107 case TV_STD_NTSC:
108 default: 108 default:
109 args.ucTVStandard = ATOM_TV_NTSC; 109 args.ucTVStandard = ATOM_TV_NTSC;
110 break; 110 break;
111 case TV_STD_PAL: 111 case TV_STD_PAL:
112 args.ucTVStandard = ATOM_TV_PAL; 112 args.ucTVStandard = ATOM_TV_PAL;
113 break; 113 break;
114 case TV_STD_PAL_M: 114 case TV_STD_PAL_M:
115 args.ucTVStandard = ATOM_TV_PALM; 115 args.ucTVStandard = ATOM_TV_PALM;
116 break; 116 break;
117 case TV_STD_PAL_60: 117 case TV_STD_PAL_60:
118 args.ucTVStandard = ATOM_TV_PAL60; 118 args.ucTVStandard = ATOM_TV_PAL60;
119 break; 119 break;
120 case TV_STD_NTSC_J: 120 case TV_STD_NTSC_J:
121 args.ucTVStandard = ATOM_TV_NTSCJ; 121 args.ucTVStandard = ATOM_TV_NTSCJ;
122 break; 122 break;
123 case TV_STD_SCART_PAL: 123 case TV_STD_SCART_PAL:
124 args.ucTVStandard = ATOM_TV_PAL; /* ??? */ 124 args.ucTVStandard = ATOM_TV_PAL; /* ??? */
125 break; 125 break;
126 case TV_STD_SECAM: 126 case TV_STD_SECAM:
127 args.ucTVStandard = ATOM_TV_SECAM; 127 args.ucTVStandard = ATOM_TV_SECAM;
128 break; 128 break;
129 case TV_STD_PAL_CN: 129 case TV_STD_PAL_CN:
130 args.ucTVStandard = ATOM_TV_PALCN; 130 args.ucTVStandard = ATOM_TV_PALCN;
131 break; 131 break;
132 } 132 }
133 args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; 133 args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
134 } else if (is_cv) { 134 } else if (is_cv) {
135 args.ucTVStandard = ATOM_TV_CV; 135 args.ucTVStandard = ATOM_TV_CV;
136 args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; 136 args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
137 } else { 137 } else {
138 switch (radeon_crtc->rmx_type) { 138 switch (radeon_crtc->rmx_type) {
139 case RMX_FULL: 139 case RMX_FULL:
140 args.ucEnable = ATOM_SCALER_EXPANSION; 140 args.ucEnable = ATOM_SCALER_EXPANSION;
141 break; 141 break;
142 case RMX_CENTER: 142 case RMX_CENTER:
143 args.ucEnable = ATOM_SCALER_CENTER; 143 args.ucEnable = ATOM_SCALER_CENTER;
144 break; 144 break;
145 case RMX_ASPECT: 145 case RMX_ASPECT:
146 args.ucEnable = ATOM_SCALER_EXPANSION; 146 args.ucEnable = ATOM_SCALER_EXPANSION;
147 break; 147 break;
148 default: 148 default:
149 if (ASIC_IS_AVIVO(rdev)) 149 if (ASIC_IS_AVIVO(rdev))
150 args.ucEnable = ATOM_SCALER_DISABLE; 150 args.ucEnable = ATOM_SCALER_DISABLE;
151 else 151 else
152 args.ucEnable = ATOM_SCALER_CENTER; 152 args.ucEnable = ATOM_SCALER_CENTER;
153 break; 153 break;
154 } 154 }
155 } 155 }
156 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 156 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
157 if ((is_tv || is_cv) 157 if ((is_tv || is_cv)
158 && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_R580) { 158 && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_R580) {
159 atom_rv515_force_tv_scaler(rdev, radeon_crtc); 159 atom_rv515_force_tv_scaler(rdev, radeon_crtc);
160 } 160 }
161 } 161 }
162 162
163 static void atombios_lock_crtc(struct drm_crtc *crtc, int lock) 163 static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
164 { 164 {
165 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 165 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
166 struct drm_device *dev = crtc->dev; 166 struct drm_device *dev = crtc->dev;
167 struct radeon_device *rdev = dev->dev_private; 167 struct radeon_device *rdev = dev->dev_private;
168 int index = 168 int index =
169 GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters); 169 GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
170 ENABLE_CRTC_PS_ALLOCATION args; 170 ENABLE_CRTC_PS_ALLOCATION args;
171 171
172 memset(&args, 0, sizeof(args)); 172 memset(&args, 0, sizeof(args));
173 173
174 args.ucCRTC = radeon_crtc->crtc_id; 174 args.ucCRTC = radeon_crtc->crtc_id;
175 args.ucEnable = lock; 175 args.ucEnable = lock;
176 176
177 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 177 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
178 } 178 }
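The "lock" argument drives the UpdateCRTC_DoubleBufferRegisters table: while locked, writes to double-buffered CRTC registers are held back and latched as a group on unlock. A minimal usage sketch, mirroring how the prepare/commit helpers in this file appear to bracket a mode set:

	atombios_lock_crtc(crtc, ATOM_ENABLE);   /* freeze double-buffered regs */
	/* ... reprogram timing, scaler, base address ... */
	atombios_lock_crtc(crtc, ATOM_DISABLE);  /* latch everything at once */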
179 179
180 static void atombios_enable_crtc(struct drm_crtc *crtc, int state) 180 static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
181 { 181 {
182 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 182 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
183 struct drm_device *dev = crtc->dev; 183 struct drm_device *dev = crtc->dev;
184 struct radeon_device *rdev = dev->dev_private; 184 struct radeon_device *rdev = dev->dev_private;
185 int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC); 185 int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
186 ENABLE_CRTC_PS_ALLOCATION args; 186 ENABLE_CRTC_PS_ALLOCATION args;
187 187
188 memset(&args, 0, sizeof(args)); 188 memset(&args, 0, sizeof(args));
189 189
190 args.ucCRTC = radeon_crtc->crtc_id; 190 args.ucCRTC = radeon_crtc->crtc_id;
191 args.ucEnable = state; 191 args.ucEnable = state;
192 192
193 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 193 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
194 } 194 }
195 195
196 static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state) 196 static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
197 { 197 {
198 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 198 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
199 struct drm_device *dev = crtc->dev; 199 struct drm_device *dev = crtc->dev;
200 struct radeon_device *rdev = dev->dev_private; 200 struct radeon_device *rdev = dev->dev_private;
201 int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq); 201 int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq);
202 ENABLE_CRTC_PS_ALLOCATION args; 202 ENABLE_CRTC_PS_ALLOCATION args;
203 203
204 memset(&args, 0, sizeof(args)); 204 memset(&args, 0, sizeof(args));
205 205
206 args.ucCRTC = radeon_crtc->crtc_id; 206 args.ucCRTC = radeon_crtc->crtc_id;
207 args.ucEnable = state; 207 args.ucEnable = state;
208 208
209 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 209 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
210 } 210 }
211 211
212 static const u32 vga_control_regs[6] = 212 static const u32 vga_control_regs[6] =
213 { 213 {
214 AVIVO_D1VGA_CONTROL, 214 AVIVO_D1VGA_CONTROL,
215 AVIVO_D2VGA_CONTROL, 215 AVIVO_D2VGA_CONTROL,
216 EVERGREEN_D3VGA_CONTROL, 216 EVERGREEN_D3VGA_CONTROL,
217 EVERGREEN_D4VGA_CONTROL, 217 EVERGREEN_D4VGA_CONTROL,
218 EVERGREEN_D5VGA_CONTROL, 218 EVERGREEN_D5VGA_CONTROL,
219 EVERGREEN_D6VGA_CONTROL, 219 EVERGREEN_D6VGA_CONTROL,
220 }; 220 };
221 221
222 static void atombios_blank_crtc(struct drm_crtc *crtc, int state) 222 static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
223 { 223 {
224 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 224 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
225 struct drm_device *dev = crtc->dev; 225 struct drm_device *dev = crtc->dev;
226 struct radeon_device *rdev = dev->dev_private; 226 struct radeon_device *rdev = dev->dev_private;
227 int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC); 227 int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
228 BLANK_CRTC_PS_ALLOCATION args; 228 BLANK_CRTC_PS_ALLOCATION args;
229 u32 vga_control = 0; 229 u32 vga_control = 0;
230 230
231 memset(&args, 0, sizeof(args)); 231 memset(&args, 0, sizeof(args));
232 232
233 if (ASIC_IS_DCE8(rdev)) { 233 if (ASIC_IS_DCE8(rdev)) {
234 vga_control = RREG32(vga_control_regs[radeon_crtc->crtc_id]); 234 vga_control = RREG32(vga_control_regs[radeon_crtc->crtc_id]);
235 WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control | 1); 235 WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control | 1);
236 } 236 }
237 237
238 args.ucCRTC = radeon_crtc->crtc_id; 238 args.ucCRTC = radeon_crtc->crtc_id;
239 args.ucBlanking = state; 239 args.ucBlanking = state;
240 240
241 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 241 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
242 242
243 if (ASIC_IS_DCE8(rdev)) { 243 if (ASIC_IS_DCE8(rdev)) {
244 WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control); 244 WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
245 } 245 }
246 } 246 }
247 247
248 static void atombios_powergate_crtc(struct drm_crtc *crtc, int state) 248 static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
249 { 249 {
250 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 250 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
251 struct drm_device *dev = crtc->dev; 251 struct drm_device *dev = crtc->dev;
252 struct radeon_device *rdev = dev->dev_private; 252 struct radeon_device *rdev = dev->dev_private;
253 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); 253 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
254 ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; 254 ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
255 255
256 memset(&args, 0, sizeof(args)); 256 memset(&args, 0, sizeof(args));
257 257
258 args.ucDispPipeId = radeon_crtc->crtc_id; 258 args.ucDispPipeId = radeon_crtc->crtc_id;
259 args.ucEnable = state; 259 args.ucEnable = state;
260 260
261 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 261 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
262 } 262 }
263 263
264 void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) 264 void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
265 { 265 {
266 struct drm_device *dev = crtc->dev; 266 struct drm_device *dev = crtc->dev;
267 struct radeon_device *rdev = dev->dev_private; 267 struct radeon_device *rdev = dev->dev_private;
268 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 268 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
269 269
270 switch (mode) { 270 switch (mode) {
271 case DRM_MODE_DPMS_ON: 271 case DRM_MODE_DPMS_ON:
272 radeon_crtc->enabled = true; 272 radeon_crtc->enabled = true;
273 /* adjust pm to dpms changes BEFORE enabling crtcs */
274 radeon_pm_compute_clocks(rdev);
275 atombios_enable_crtc(crtc, ATOM_ENABLE); 273 atombios_enable_crtc(crtc, ATOM_ENABLE);
276 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) 274 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
277 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); 275 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
278 atombios_blank_crtc(crtc, ATOM_DISABLE); 276 atombios_blank_crtc(crtc, ATOM_DISABLE);
279 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); 277 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
280 radeon_crtc_load_lut(crtc); 278 radeon_crtc_load_lut(crtc);
281 break; 279 break;
282 case DRM_MODE_DPMS_STANDBY: 280 case DRM_MODE_DPMS_STANDBY:
283 case DRM_MODE_DPMS_SUSPEND: 281 case DRM_MODE_DPMS_SUSPEND:
284 case DRM_MODE_DPMS_OFF: 282 case DRM_MODE_DPMS_OFF:
285 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); 283 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
286 if (radeon_crtc->enabled) 284 if (radeon_crtc->enabled)
287 atombios_blank_crtc(crtc, ATOM_ENABLE); 285 atombios_blank_crtc(crtc, ATOM_ENABLE);
288 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) 286 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
289 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); 287 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
290 atombios_enable_crtc(crtc, ATOM_DISABLE); 288 atombios_enable_crtc(crtc, ATOM_DISABLE);
291 radeon_crtc->enabled = false; 289 radeon_crtc->enabled = false;
292 /* adjust pm to dpms changes AFTER disabling crtcs */
293 radeon_pm_compute_clocks(rdev);
294 break; 290 break;
295 } 291 }
292 /* adjust pm to dpms */
293 radeon_pm_compute_clocks(rdev);
296 } 294 }
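Note the inverted polarity in the DPMS path above: atombios_blank_crtc() takes the desired blanking state, so powering the crtc on passes ATOM_DISABLE (unblank) and powering it off passes ATOM_ENABLE (blank). Assuming the usual atombios encoding of ATOM_DISABLE=0/ATOM_ENABLE=1, the sequence condenses to:

/* DPMS_ON : enable crtc -> enable memreq -> blank(ATOM_DISABLE)  (unblank)
 * DPMS_OFF: blank(ATOM_ENABLE) -> disable memreq -> disable crtc
 */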
297 295
298 static void 296 static void
299 atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, 297 atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
300 struct drm_display_mode *mode) 298 struct drm_display_mode *mode)
301 { 299 {
302 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 300 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
303 struct drm_device *dev = crtc->dev; 301 struct drm_device *dev = crtc->dev;
304 struct radeon_device *rdev = dev->dev_private; 302 struct radeon_device *rdev = dev->dev_private;
305 SET_CRTC_USING_DTD_TIMING_PARAMETERS args; 303 SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
306 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming); 304 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
307 u16 misc = 0; 305 u16 misc = 0;
308 306
309 memset(&args, 0, sizeof(args)); 307 memset(&args, 0, sizeof(args));
310 args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (radeon_crtc->h_border * 2)); 308 args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (radeon_crtc->h_border * 2));
311 args.usH_Blanking_Time = 309 args.usH_Blanking_Time =
312 cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (radeon_crtc->h_border * 2)); 310 cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (radeon_crtc->h_border * 2));
313 args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (radeon_crtc->v_border * 2)); 311 args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (radeon_crtc->v_border * 2));
314 args.usV_Blanking_Time = 312 args.usV_Blanking_Time =
315 cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (radeon_crtc->v_border * 2)); 313 cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (radeon_crtc->v_border * 2));
316 args.usH_SyncOffset = 314 args.usH_SyncOffset =
317 cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + radeon_crtc->h_border); 315 cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + radeon_crtc->h_border);
318 args.usH_SyncWidth = 316 args.usH_SyncWidth =
319 cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start); 317 cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
320 args.usV_SyncOffset = 318 args.usV_SyncOffset =
321 cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + radeon_crtc->v_border); 319 cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + radeon_crtc->v_border);
322 args.usV_SyncWidth = 320 args.usV_SyncWidth =
323 cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); 321 cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
324 args.ucH_Border = radeon_crtc->h_border; 322 args.ucH_Border = radeon_crtc->h_border;
325 args.ucV_Border = radeon_crtc->v_border; 323 args.ucV_Border = radeon_crtc->v_border;
326 324
327 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 325 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
328 misc |= ATOM_VSYNC_POLARITY; 326 misc |= ATOM_VSYNC_POLARITY;
329 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 327 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
330 misc |= ATOM_HSYNC_POLARITY; 328 misc |= ATOM_HSYNC_POLARITY;
331 if (mode->flags & DRM_MODE_FLAG_CSYNC) 329 if (mode->flags & DRM_MODE_FLAG_CSYNC)
332 misc |= ATOM_COMPOSITESYNC; 330 misc |= ATOM_COMPOSITESYNC;
333 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 331 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
334 misc |= ATOM_INTERLACE; 332 misc |= ATOM_INTERLACE;
335 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 333 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
336 misc |= ATOM_DOUBLE_CLOCK_MODE; 334 misc |= ATOM_DOUBLE_CLOCK_MODE;
337 335
338 args.susModeMiscInfo.usAccess = cpu_to_le16(misc); 336 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
339 args.ucCRTC = radeon_crtc->crtc_id; 337 args.ucCRTC = radeon_crtc->crtc_id;
340 338
341 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 339 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
342 } 340 }
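For concreteness, here is how the DTD fields above work out for a standard CEA-861 1080p60 timing with zero borders (a hand-worked example, not part of the change):

/* crtc_hdisplay = 1920, crtc_hsync_start = 2008, crtc_hsync_end = 2052,
 * crtc_hblank_end = crtc_htotal = 2200, h_border = 0:
 *   usH_Size          = 1920
 *   usH_Blanking_Time = 2200 - 1920 = 280
 *   usH_SyncOffset    = 2008 - 1920 = 88
 *   usH_SyncWidth     = 2052 - 2008 = 44
 */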
343 341
344 static void atombios_crtc_set_timing(struct drm_crtc *crtc, 342 static void atombios_crtc_set_timing(struct drm_crtc *crtc,
345 struct drm_display_mode *mode) 343 struct drm_display_mode *mode)
346 { 344 {
347 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 345 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
348 struct drm_device *dev = crtc->dev; 346 struct drm_device *dev = crtc->dev;
349 struct radeon_device *rdev = dev->dev_private; 347 struct radeon_device *rdev = dev->dev_private;
350 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION args; 348 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION args;
351 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing); 349 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
352 u16 misc = 0; 350 u16 misc = 0;
353 351
354 memset(&args, 0, sizeof(args)); 352 memset(&args, 0, sizeof(args));
355 args.usH_Total = cpu_to_le16(mode->crtc_htotal); 353 args.usH_Total = cpu_to_le16(mode->crtc_htotal);
356 args.usH_Disp = cpu_to_le16(mode->crtc_hdisplay); 354 args.usH_Disp = cpu_to_le16(mode->crtc_hdisplay);
357 args.usH_SyncStart = cpu_to_le16(mode->crtc_hsync_start); 355 args.usH_SyncStart = cpu_to_le16(mode->crtc_hsync_start);
358 args.usH_SyncWidth = 356 args.usH_SyncWidth =
359 cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start); 357 cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
360 args.usV_Total = cpu_to_le16(mode->crtc_vtotal); 358 args.usV_Total = cpu_to_le16(mode->crtc_vtotal);
361 args.usV_Disp = cpu_to_le16(mode->crtc_vdisplay); 359 args.usV_Disp = cpu_to_le16(mode->crtc_vdisplay);
362 args.usV_SyncStart = cpu_to_le16(mode->crtc_vsync_start); 360 args.usV_SyncStart = cpu_to_le16(mode->crtc_vsync_start);
363 args.usV_SyncWidth = 361 args.usV_SyncWidth =
364 cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); 362 cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
365 363
366 args.ucOverscanRight = radeon_crtc->h_border; 364 args.ucOverscanRight = radeon_crtc->h_border;
367 args.ucOverscanLeft = radeon_crtc->h_border; 365 args.ucOverscanLeft = radeon_crtc->h_border;
368 args.ucOverscanBottom = radeon_crtc->v_border; 366 args.ucOverscanBottom = radeon_crtc->v_border;
369 args.ucOverscanTop = radeon_crtc->v_border; 367 args.ucOverscanTop = radeon_crtc->v_border;
370 368
371 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 369 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
372 misc |= ATOM_VSYNC_POLARITY; 370 misc |= ATOM_VSYNC_POLARITY;
373 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 371 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
374 misc |= ATOM_HSYNC_POLARITY; 372 misc |= ATOM_HSYNC_POLARITY;
375 if (mode->flags & DRM_MODE_FLAG_CSYNC) 373 if (mode->flags & DRM_MODE_FLAG_CSYNC)
376 misc |= ATOM_COMPOSITESYNC; 374 misc |= ATOM_COMPOSITESYNC;
377 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 375 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
378 misc |= ATOM_INTERLACE; 376 misc |= ATOM_INTERLACE;
379 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 377 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
380 misc |= ATOM_DOUBLE_CLOCK_MODE; 378 misc |= ATOM_DOUBLE_CLOCK_MODE;
381 379
382 args.susModeMiscInfo.usAccess = cpu_to_le16(misc); 380 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
383 args.ucCRTC = radeon_crtc->crtc_id; 381 args.ucCRTC = radeon_crtc->crtc_id;
384 382
385 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 383 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
386 } 384 }
387 385
388 static void atombios_disable_ss(struct radeon_device *rdev, int pll_id) 386 static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
389 { 387 {
390 u32 ss_cntl; 388 u32 ss_cntl;
391 389
392 if (ASIC_IS_DCE4(rdev)) { 390 if (ASIC_IS_DCE4(rdev)) {
393 switch (pll_id) { 391 switch (pll_id) {
394 case ATOM_PPLL1: 392 case ATOM_PPLL1:
395 ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL); 393 ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
396 ss_cntl &= ~EVERGREEN_PxPLL_SS_EN; 394 ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
397 WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl); 395 WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl);
398 break; 396 break;
399 case ATOM_PPLL2: 397 case ATOM_PPLL2:
400 ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL); 398 ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL);
401 ss_cntl &= ~EVERGREEN_PxPLL_SS_EN; 399 ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
402 WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl); 400 WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl);
403 break; 401 break;
404 case ATOM_DCPLL: 402 case ATOM_DCPLL:
405 case ATOM_PPLL_INVALID: 403 case ATOM_PPLL_INVALID:
406 return; 404 return;
407 } 405 }
408 } else if (ASIC_IS_AVIVO(rdev)) { 406 } else if (ASIC_IS_AVIVO(rdev)) {
409 switch (pll_id) { 407 switch (pll_id) {
410 case ATOM_PPLL1: 408 case ATOM_PPLL1:
411 ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL); 409 ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
412 ss_cntl &= ~1; 410 ss_cntl &= ~1;
413 WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl); 411 WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl);
414 break; 412 break;
415 case ATOM_PPLL2: 413 case ATOM_PPLL2:
416 ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL); 414 ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
417 ss_cntl &= ~1; 415 ss_cntl &= ~1;
418 WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl); 416 WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl);
419 break; 417 break;
420 case ATOM_DCPLL: 418 case ATOM_DCPLL:
421 case ATOM_PPLL_INVALID: 419 case ATOM_PPLL_INVALID:
422 return; 420 return;
423 } 421 }
424 } 422 }
425 } 423 }
426 424
427 425
428 union atom_enable_ss { 426 union atom_enable_ss {
429 ENABLE_LVDS_SS_PARAMETERS lvds_ss; 427 ENABLE_LVDS_SS_PARAMETERS lvds_ss;
430 ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2; 428 ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
431 ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; 429 ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
432 ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2; 430 ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
433 ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3; 431 ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
434 }; 432 };
435 433
436 static void atombios_crtc_program_ss(struct radeon_device *rdev, 434 static void atombios_crtc_program_ss(struct radeon_device *rdev,
437 int enable, 435 int enable,
438 int pll_id, 436 int pll_id,
439 int crtc_id, 437 int crtc_id,
440 struct radeon_atom_ss *ss) 438 struct radeon_atom_ss *ss)
441 { 439 {
442 unsigned i; 440 unsigned i;
443 int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); 441 int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
444 union atom_enable_ss args; 442 union atom_enable_ss args;
445 443
446 if (enable) { 444 if (enable) {
447 /* Don't mess with SS if the percentage is 0 or the SS is external. 445 /* Don't mess with SS if the percentage is 0 or the SS is external.
448 * SS was already disabled earlier, and disabling it 446 * SS was already disabled earlier, and disabling it
449 * again can cause display problems if the pll is already 447 * again can cause display problems if the pll is already
450 * programmed. 448 * programmed.
451 */ 449 */
452 if (ss->percentage == 0) 450 if (ss->percentage == 0)
453 return; 451 return;
454 if (ss->type & ATOM_EXTERNAL_SS_MASK) 452 if (ss->type & ATOM_EXTERNAL_SS_MASK)
455 return; 453 return;
456 } else { 454 } else {
457 for (i = 0; i < rdev->num_crtc; i++) { 455 for (i = 0; i < rdev->num_crtc; i++) {
458 if (rdev->mode_info.crtcs[i] && 456 if (rdev->mode_info.crtcs[i] &&
459 rdev->mode_info.crtcs[i]->enabled && 457 rdev->mode_info.crtcs[i]->enabled &&
460 i != crtc_id && 458 i != crtc_id &&
461 pll_id == rdev->mode_info.crtcs[i]->pll_id) { 459 pll_id == rdev->mode_info.crtcs[i]->pll_id) {
462 /* another crtc is using this pll; don't turn 460 /* another crtc is using this pll; don't turn
463 * off spread spectrum, as that might blank the 461 * off spread spectrum, as that might blank the
464 * display on the active crtc 462 * display on the active crtc
465 */ 463 */
466 return; 464 return;
467 } 465 }
468 } 466 }
469 } 467 }
470 468
471 memset(&args, 0, sizeof(args)); 469 memset(&args, 0, sizeof(args));
472 470
473 if (ASIC_IS_DCE5(rdev)) { 471 if (ASIC_IS_DCE5(rdev)) {
474 args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0); 472 args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
475 args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; 473 args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
476 switch (pll_id) { 474 switch (pll_id) {
477 case ATOM_PPLL1: 475 case ATOM_PPLL1:
478 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; 476 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
479 break; 477 break;
480 case ATOM_PPLL2: 478 case ATOM_PPLL2:
481 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL; 479 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
482 break; 480 break;
483 case ATOM_DCPLL: 481 case ATOM_DCPLL:
484 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL; 482 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
485 break; 483 break;
486 case ATOM_PPLL_INVALID: 484 case ATOM_PPLL_INVALID:
487 return; 485 return;
488 } 486 }
489 args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); 487 args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
490 args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); 488 args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
491 args.v3.ucEnable = enable; 489 args.v3.ucEnable = enable;
492 } else if (ASIC_IS_DCE4(rdev)) { 490 } else if (ASIC_IS_DCE4(rdev)) {
493 args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 491 args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
494 args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; 492 args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
495 switch (pll_id) { 493 switch (pll_id) {
496 case ATOM_PPLL1: 494 case ATOM_PPLL1:
497 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; 495 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
498 break; 496 break;
499 case ATOM_PPLL2: 497 case ATOM_PPLL2:
500 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL; 498 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
501 break; 499 break;
502 case ATOM_DCPLL: 500 case ATOM_DCPLL:
503 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL; 501 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
504 break; 502 break;
505 case ATOM_PPLL_INVALID: 503 case ATOM_PPLL_INVALID:
506 return; 504 return;
507 } 505 }
508 args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); 506 args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
509 args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); 507 args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
510 args.v2.ucEnable = enable; 508 args.v2.ucEnable = enable;
511 } else if (ASIC_IS_DCE3(rdev)) { 509 } else if (ASIC_IS_DCE3(rdev)) {
512 args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 510 args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
513 args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; 511 args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
514 args.v1.ucSpreadSpectrumStep = ss->step; 512 args.v1.ucSpreadSpectrumStep = ss->step;
515 args.v1.ucSpreadSpectrumDelay = ss->delay; 513 args.v1.ucSpreadSpectrumDelay = ss->delay;
516 args.v1.ucSpreadSpectrumRange = ss->range; 514 args.v1.ucSpreadSpectrumRange = ss->range;
517 args.v1.ucPpll = pll_id; 515 args.v1.ucPpll = pll_id;
518 args.v1.ucEnable = enable; 516 args.v1.ucEnable = enable;
519 } else if (ASIC_IS_AVIVO(rdev)) { 517 } else if (ASIC_IS_AVIVO(rdev)) {
520 if ((enable == ATOM_DISABLE) || (ss->percentage == 0) || 518 if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
521 (ss->type & ATOM_EXTERNAL_SS_MASK)) { 519 (ss->type & ATOM_EXTERNAL_SS_MASK)) {
522 atombios_disable_ss(rdev, pll_id); 520 atombios_disable_ss(rdev, pll_id);
523 return; 521 return;
524 } 522 }
525 args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 523 args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
526 args.lvds_ss_2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; 524 args.lvds_ss_2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
527 args.lvds_ss_2.ucSpreadSpectrumStep = ss->step; 525 args.lvds_ss_2.ucSpreadSpectrumStep = ss->step;
528 args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay; 526 args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay;
529 args.lvds_ss_2.ucSpreadSpectrumRange = ss->range; 527 args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
530 args.lvds_ss_2.ucEnable = enable; 528 args.lvds_ss_2.ucEnable = enable;
531 } else { 529 } else {
532 if (enable == ATOM_DISABLE) { 530 if (enable == ATOM_DISABLE) {
533 atombios_disable_ss(rdev, pll_id); 531 atombios_disable_ss(rdev, pll_id);
534 return; 532 return;
535 } 533 }
536 args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 534 args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
537 args.lvds_ss.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; 535 args.lvds_ss.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
538 args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2; 536 args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2;
539 args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4; 537 args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
540 args.lvds_ss.ucEnable = enable; 538 args.lvds_ss.ucEnable = enable;
541 } 539 }
542 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 540 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
543 } 541 }
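Callers pair this routine around PLL programming: spread spectrum is switched off before the dividers change and re-enabled afterwards, which is exactly why the shared-PLL guard above refuses to disable SS while another active crtc still uses the pll. A minimal sketch of that sequence (names as used elsewhere in this file; the surrounding mode-set code is elided):

	atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id,
				 radeon_crtc->crtc_id, &radeon_crtc->ss);
	/* ... program the PLL dividers via SetPixelClock ... */
	if (radeon_crtc->ss_enabled)
		atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id,
					 radeon_crtc->crtc_id, &radeon_crtc->ss);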
544 542
545 union adjust_pixel_clock { 543 union adjust_pixel_clock {
546 ADJUST_DISPLAY_PLL_PS_ALLOCATION v1; 544 ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
547 ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3; 545 ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
548 }; 546 };
549 547
550 static u32 atombios_adjust_pll(struct drm_crtc *crtc, 548 static u32 atombios_adjust_pll(struct drm_crtc *crtc,
551 struct drm_display_mode *mode) 549 struct drm_display_mode *mode)
552 { 550 {
553 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 551 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
554 struct drm_device *dev = crtc->dev; 552 struct drm_device *dev = crtc->dev;
555 struct radeon_device *rdev = dev->dev_private; 553 struct radeon_device *rdev = dev->dev_private;
556 struct drm_encoder *encoder = radeon_crtc->encoder; 554 struct drm_encoder *encoder = radeon_crtc->encoder;
557 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 555 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
558 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 556 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
559 u32 adjusted_clock = mode->clock; 557 u32 adjusted_clock = mode->clock;
560 int encoder_mode = atombios_get_encoder_mode(encoder); 558 int encoder_mode = atombios_get_encoder_mode(encoder);
561 u32 dp_clock = mode->clock; 559 u32 dp_clock = mode->clock;
562 int bpc = radeon_crtc->bpc; 560 int bpc = radeon_crtc->bpc;
563 bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); 561 bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
564 562
565 /* reset the pll flags */ 563 /* reset the pll flags */
566 radeon_crtc->pll_flags = 0; 564 radeon_crtc->pll_flags = 0;
567 565
568 if (ASIC_IS_AVIVO(rdev)) { 566 if (ASIC_IS_AVIVO(rdev)) {
569 if ((rdev->family == CHIP_RS600) || 567 if ((rdev->family == CHIP_RS600) ||
570 (rdev->family == CHIP_RS690) || 568 (rdev->family == CHIP_RS690) ||
571 (rdev->family == CHIP_RS740)) 569 (rdev->family == CHIP_RS740))
572 radeon_crtc->pll_flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ 570 radeon_crtc->pll_flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
573 RADEON_PLL_PREFER_CLOSEST_LOWER); 571 RADEON_PLL_PREFER_CLOSEST_LOWER);
574 572
575 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ 573 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
576 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 574 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
577 else 575 else
578 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 576 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
579 577
580 if (rdev->family < CHIP_RV770) 578 if (rdev->family < CHIP_RV770)
581 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 579 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
582 /* use frac fb div on APUs */ 580 /* use frac fb div on APUs */
583 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev)) 581 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
584 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 582 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
585 /* use frac fb div on RS780/RS880 */ 583 /* use frac fb div on RS780/RS880 */
586 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) 584 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
587 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 585 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
588 if (ASIC_IS_DCE32(rdev) && mode->clock > 165000) 586 if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
589 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 587 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
590 } else { 588 } else {
591 radeon_crtc->pll_flags |= RADEON_PLL_LEGACY; 589 radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
592 590
593 if (mode->clock > 200000) /* range limits??? */ 591 if (mode->clock > 200000) /* range limits??? */
594 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 592 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
595 else 593 else
596 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 594 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
597 } 595 }
598 596
599 if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || 597 if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
600 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { 598 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
601 if (connector) { 599 if (connector) {
602 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 600 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
603 struct radeon_connector_atom_dig *dig_connector = 601 struct radeon_connector_atom_dig *dig_connector =
604 radeon_connector->con_priv; 602 radeon_connector->con_priv;
605 603
606 dp_clock = dig_connector->dp_clock; 604 dp_clock = dig_connector->dp_clock;
607 } 605 }
608 } 606 }
609 607
610 /* use recommended ref_div for ss */ 608 /* use recommended ref_div for ss */
611 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 609 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
612 if (radeon_crtc->ss_enabled) { 610 if (radeon_crtc->ss_enabled) {
613 if (radeon_crtc->ss.refdiv) { 611 if (radeon_crtc->ss.refdiv) {
614 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; 612 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
615 radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; 613 radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
616 if (ASIC_IS_AVIVO(rdev)) 614 if (ASIC_IS_AVIVO(rdev))
617 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 615 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
618 } 616 }
619 } 617 }
620 } 618 }
621 619
622 if (ASIC_IS_AVIVO(rdev)) { 620 if (ASIC_IS_AVIVO(rdev)) {
623 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 621 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
624 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) 622 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
625 adjusted_clock = mode->clock * 2; 623 adjusted_clock = mode->clock * 2;
626 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) 624 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
627 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; 625 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
628 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 626 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
629 radeon_crtc->pll_flags |= RADEON_PLL_IS_LCD; 627 radeon_crtc->pll_flags |= RADEON_PLL_IS_LCD;
630 } else { 628 } else {
631 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 629 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
632 radeon_crtc->pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; 630 radeon_crtc->pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
633 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) 631 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
634 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; 632 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
635 } 633 }
636 634
637 /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock 635 /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
638 * as needed, based on the encoder/transmitter, to work around 636 * as needed, based on the encoder/transmitter, to work around
639 * special hw requirements. 637 * special hw requirements.
640 */ 638 */
641 if (ASIC_IS_DCE3(rdev)) { 639 if (ASIC_IS_DCE3(rdev)) {
642 union adjust_pixel_clock args; 640 union adjust_pixel_clock args;
643 u8 frev, crev; 641 u8 frev, crev;
644 int index; 642 int index;
645 643
646 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); 644 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
647 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 645 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
648 &crev)) 646 &crev))
649 return adjusted_clock; 647 return adjusted_clock;
650 648
651 memset(&args, 0, sizeof(args)); 649 memset(&args, 0, sizeof(args));
652 650
653 switch (frev) { 651 switch (frev) {
654 case 1: 652 case 1:
655 switch (crev) { 653 switch (crev) {
656 case 1: 654 case 1:
657 case 2: 655 case 2:
658 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); 656 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
659 args.v1.ucTransmitterID = radeon_encoder->encoder_id; 657 args.v1.ucTransmitterID = radeon_encoder->encoder_id;
660 args.v1.ucEncodeMode = encoder_mode; 658 args.v1.ucEncodeMode = encoder_mode;
661 if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage) 659 if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
662 args.v1.ucConfig |= 660 args.v1.ucConfig |=
663 ADJUST_DISPLAY_CONFIG_SS_ENABLE; 661 ADJUST_DISPLAY_CONFIG_SS_ENABLE;
664 662
665 atom_execute_table(rdev->mode_info.atom_context, 663 atom_execute_table(rdev->mode_info.atom_context,
666 index, (uint32_t *)&args); 664 index, (uint32_t *)&args);
667 adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; 665 adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
668 break; 666 break;
669 case 3: 667 case 3:
670 args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10); 668 args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
671 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; 669 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
672 args.v3.sInput.ucEncodeMode = encoder_mode; 670 args.v3.sInput.ucEncodeMode = encoder_mode;
673 args.v3.sInput.ucDispPllConfig = 0; 671 args.v3.sInput.ucDispPllConfig = 0;
674 if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage) 672 if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
675 args.v3.sInput.ucDispPllConfig |= 673 args.v3.sInput.ucDispPllConfig |=
676 DISPPLL_CONFIG_SS_ENABLE; 674 DISPPLL_CONFIG_SS_ENABLE;
677 if (ENCODER_MODE_IS_DP(encoder_mode)) { 675 if (ENCODER_MODE_IS_DP(encoder_mode)) {
678 args.v3.sInput.ucDispPllConfig |= 676 args.v3.sInput.ucDispPllConfig |=
679 DISPPLL_CONFIG_COHERENT_MODE; 677 DISPPLL_CONFIG_COHERENT_MODE;
680 /* 16200 or 27000 */ 678 /* 16200 or 27000 */
681 args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); 679 args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
682 } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 680 } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
683 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 681 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
684 if (encoder_mode == ATOM_ENCODER_MODE_HDMI) 682 if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
685 /* deep color support */ 683 /* deep color support */
686 args.v3.sInput.usPixelClock = 684 args.v3.sInput.usPixelClock =
687 cpu_to_le16((mode->clock * bpc / 8) / 10); 685 cpu_to_le16((mode->clock * bpc / 8) / 10);
688 if (dig->coherent_mode) 686 if (dig->coherent_mode)
689 args.v3.sInput.ucDispPllConfig |= 687 args.v3.sInput.ucDispPllConfig |=
690 DISPPLL_CONFIG_COHERENT_MODE; 688 DISPPLL_CONFIG_COHERENT_MODE;
691 if (is_duallink) 689 if (is_duallink)
692 args.v3.sInput.ucDispPllConfig |= 690 args.v3.sInput.ucDispPllConfig |=
693 DISPPLL_CONFIG_DUAL_LINK; 691 DISPPLL_CONFIG_DUAL_LINK;
694 } 692 }
695 if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != 693 if (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
696 ENCODER_OBJECT_ID_NONE) 694 ENCODER_OBJECT_ID_NONE)
697 args.v3.sInput.ucExtTransmitterID = 695 args.v3.sInput.ucExtTransmitterID =
698 radeon_encoder_get_dp_bridge_encoder_id(encoder); 696 radeon_encoder_get_dp_bridge_encoder_id(encoder);
699 else 697 else
700 args.v3.sInput.ucExtTransmitterID = 0; 698 args.v3.sInput.ucExtTransmitterID = 0;
701 699
702 atom_execute_table(rdev->mode_info.atom_context, 700 atom_execute_table(rdev->mode_info.atom_context,
703 index, (uint32_t *)&args); 701 index, (uint32_t *)&args);
704 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; 702 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
705 if (args.v3.sOutput.ucRefDiv) { 703 if (args.v3.sOutput.ucRefDiv) {
706 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 704 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
707 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; 705 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
708 radeon_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv; 706 radeon_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
709 } 707 }
710 if (args.v3.sOutput.ucPostDiv) { 708 if (args.v3.sOutput.ucPostDiv) {
711 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 709 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
712 radeon_crtc->pll_flags |= RADEON_PLL_USE_POST_DIV; 710 radeon_crtc->pll_flags |= RADEON_PLL_USE_POST_DIV;
713 radeon_crtc->pll_post_div = args.v3.sOutput.ucPostDiv; 711 radeon_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
714 } 712 }
715 break; 713 break;
716 default: 714 default:
717 DRM_ERROR("Unknown table version %d %d\n", frev, crev); 715 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
718 return adjusted_clock; 716 return adjusted_clock;
719 } 717 }
720 break; 718 break;
721 default: 719 default:
722 DRM_ERROR("Unknown table version %d %d\n", frev, crev); 720 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
723 return adjusted_clock; 721 return adjusted_clock;
724 } 722 }
725 } 723 }
726 return adjusted_clock; 724 return adjusted_clock;
727 } 725 }
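Two unit conventions in the routine above are easy to trip over: DRM mode clocks are in kHz while the ATOM tables take 10 kHz units (hence the "/ 10"), and the HDMI deep-color case scales the clock by bpc/8 first. An illustrative calculation:

/* 1080p60 at 12 bpc: mode->clock = 148500 (kHz)
 *   usPixelClock = (148500 * 12 / 8) / 10 = 222750 / 10 = 22275
 * i.e. the display PLL is asked for 222.75 MHz instead of 148.5 MHz.
 */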
728 726
729 union set_pixel_clock { 727 union set_pixel_clock {
730 SET_PIXEL_CLOCK_PS_ALLOCATION base; 728 SET_PIXEL_CLOCK_PS_ALLOCATION base;
731 PIXEL_CLOCK_PARAMETERS v1; 729 PIXEL_CLOCK_PARAMETERS v1;
732 PIXEL_CLOCK_PARAMETERS_V2 v2; 730 PIXEL_CLOCK_PARAMETERS_V2 v2;
733 PIXEL_CLOCK_PARAMETERS_V3 v3; 731 PIXEL_CLOCK_PARAMETERS_V3 v3;
734 PIXEL_CLOCK_PARAMETERS_V5 v5; 732 PIXEL_CLOCK_PARAMETERS_V5 v5;
735 PIXEL_CLOCK_PARAMETERS_V6 v6; 733 PIXEL_CLOCK_PARAMETERS_V6 v6;
736 }; 734 };
737 735
738 /* on DCE5, make sure the voltage is high enough to support the 736 /* on DCE5, make sure the voltage is high enough to support the
739 * required disp clk. 737 * required disp clk.
740 */ 738 */
741 static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev, 739 static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
742 u32 dispclk) 740 u32 dispclk)
743 { 741 {
744 u8 frev, crev; 742 u8 frev, crev;
745 int index; 743 int index;
746 union set_pixel_clock args; 744 union set_pixel_clock args;
747 745
748 memset(&args, 0, sizeof(args)); 746 memset(&args, 0, sizeof(args));
749 747
750 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 748 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
751 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 749 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
752 &crev)) 750 &crev))
753 return; 751 return;
754 752
755 switch (frev) { 753 switch (frev) {
756 case 1: 754 case 1:
757 switch (crev) { 755 switch (crev) {
758 case 5: 756 case 5:
759 /* if the default dcpll clock is specified, 757 /* if the default dcpll clock is specified,
760 * SetPixelClock provides the dividers 758 * SetPixelClock provides the dividers
761 */ 759 */
762 args.v5.ucCRTC = ATOM_CRTC_INVALID; 760 args.v5.ucCRTC = ATOM_CRTC_INVALID;
763 args.v5.usPixelClock = cpu_to_le16(dispclk); 761 args.v5.usPixelClock = cpu_to_le16(dispclk);
764 args.v5.ucPpll = ATOM_DCPLL; 762 args.v5.ucPpll = ATOM_DCPLL;
765 break; 763 break;
766 case 6: 764 case 6:
767 /* if the default dcpll clock is specified, 765 /* if the default dcpll clock is specified,
768 * SetPixelClock provides the dividers 766 * SetPixelClock provides the dividers
769 */ 767 */
770 args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk); 768 args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
771 if (ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev)) 769 if (ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
772 args.v6.ucPpll = ATOM_EXT_PLL1; 770 args.v6.ucPpll = ATOM_EXT_PLL1;
773 else if (ASIC_IS_DCE6(rdev)) 771 else if (ASIC_IS_DCE6(rdev))
774 args.v6.ucPpll = ATOM_PPLL0; 772 args.v6.ucPpll = ATOM_PPLL0;
775 else 773 else
776 args.v6.ucPpll = ATOM_DCPLL; 774 args.v6.ucPpll = ATOM_DCPLL;
777 break; 775 break;
778 default: 776 default:
779 DRM_ERROR("Unknown table version %d %d\n", frev, crev); 777 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
780 return; 778 return;
781 } 779 }
782 break; 780 break;
783 default: 781 default:
784 DRM_ERROR("Unknown table version %d %d\n", frev, crev); 782 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
785 return; 783 return;
786 } 784 }
787 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 785 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
788 } 786 }
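The dispclk passed in appears to use the same 10 kHz units (so 54000 would mean 540 MHz), which is why the v5 layout can still fit it into the 16-bit usPixelClock field. A hedged usage sketch, assuming the caller feeds it the firmware's default display clock:

	atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);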
789 787
790 static void atombios_crtc_program_pll(struct drm_crtc *crtc, 788 static void atombios_crtc_program_pll(struct drm_crtc *crtc,
791 u32 crtc_id, 789 u32 crtc_id,
792 int pll_id, 790 int pll_id,
793 u32 encoder_mode, 791 u32 encoder_mode,
794 u32 encoder_id, 792 u32 encoder_id,
795 u32 clock, 793 u32 clock,
796 u32 ref_div, 794 u32 ref_div,
797 u32 fb_div, 795 u32 fb_div,
798 u32 frac_fb_div, 796 u32 frac_fb_div,
799 u32 post_div, 797 u32 post_div,
800 int bpc, 798 int bpc,
801 bool ss_enabled, 799 bool ss_enabled,
802 struct radeon_atom_ss *ss) 800 struct radeon_atom_ss *ss)
803 { 801 {
804 struct drm_device *dev = crtc->dev; 802 struct drm_device *dev = crtc->dev;
805 struct radeon_device *rdev = dev->dev_private; 803 struct radeon_device *rdev = dev->dev_private;
806 u8 frev, crev; 804 u8 frev, crev;
807 int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 805 int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
808 union set_pixel_clock args; 806 union set_pixel_clock args;
809 807
810 memset(&args, 0, sizeof(args)); 808 memset(&args, 0, sizeof(args));
811 809
812 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 810 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
813 &crev)) 811 &crev))
814 return; 812 return;
815 813
816 switch (frev) { 814 switch (frev) {
817 case 1: 815 case 1:
818 switch (crev) { 816 switch (crev) {
819 case 1: 817 case 1:
820 if (clock == ATOM_DISABLE) 818 if (clock == ATOM_DISABLE)
821 return; 819 return;
822 args.v1.usPixelClock = cpu_to_le16(clock / 10); 820 args.v1.usPixelClock = cpu_to_le16(clock / 10);
823 args.v1.usRefDiv = cpu_to_le16(ref_div); 821 args.v1.usRefDiv = cpu_to_le16(ref_div);
824 args.v1.usFbDiv = cpu_to_le16(fb_div); 822 args.v1.usFbDiv = cpu_to_le16(fb_div);
825 args.v1.ucFracFbDiv = frac_fb_div; 823 args.v1.ucFracFbDiv = frac_fb_div;
826 args.v1.ucPostDiv = post_div; 824 args.v1.ucPostDiv = post_div;
827 args.v1.ucPpll = pll_id; 825 args.v1.ucPpll = pll_id;
828 args.v1.ucCRTC = crtc_id; 826 args.v1.ucCRTC = crtc_id;
829 args.v1.ucRefDivSrc = 1; 827 args.v1.ucRefDivSrc = 1;
830 break; 828 break;
831 case 2: 829 case 2:
832 args.v2.usPixelClock = cpu_to_le16(clock / 10); 830 args.v2.usPixelClock = cpu_to_le16(clock / 10);
833 args.v2.usRefDiv = cpu_to_le16(ref_div); 831 args.v2.usRefDiv = cpu_to_le16(ref_div);
834 args.v2.usFbDiv = cpu_to_le16(fb_div); 832 args.v2.usFbDiv = cpu_to_le16(fb_div);
835 args.v2.ucFracFbDiv = frac_fb_div; 833 args.v2.ucFracFbDiv = frac_fb_div;
836 args.v2.ucPostDiv = post_div; 834 args.v2.ucPostDiv = post_div;
837 args.v2.ucPpll = pll_id; 835 args.v2.ucPpll = pll_id;
838 args.v2.ucCRTC = crtc_id; 836 args.v2.ucCRTC = crtc_id;
839 args.v2.ucRefDivSrc = 1; 837 args.v2.ucRefDivSrc = 1;
840 break; 838 break;
841 case 3: 839 case 3:
842 args.v3.usPixelClock = cpu_to_le16(clock / 10); 840 args.v3.usPixelClock = cpu_to_le16(clock / 10);
843 args.v3.usRefDiv = cpu_to_le16(ref_div); 841 args.v3.usRefDiv = cpu_to_le16(ref_div);
844 args.v3.usFbDiv = cpu_to_le16(fb_div); 842 args.v3.usFbDiv = cpu_to_le16(fb_div);
845 args.v3.ucFracFbDiv = frac_fb_div; 843 args.v3.ucFracFbDiv = frac_fb_div;
846 args.v3.ucPostDiv = post_div; 844 args.v3.ucPostDiv = post_div;
847 args.v3.ucPpll = pll_id; 845 args.v3.ucPpll = pll_id;
848 if (crtc_id == ATOM_CRTC2) 846 if (crtc_id == ATOM_CRTC2)
849 args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2; 847 args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
850 else 848 else
851 args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1; 849 args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
852 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) 850 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
853 args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC; 851 args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
854 args.v3.ucTransmitterId = encoder_id; 852 args.v3.ucTransmitterId = encoder_id;
855 args.v3.ucEncoderMode = encoder_mode; 853 args.v3.ucEncoderMode = encoder_mode;
856 break; 854 break;
857 case 5: 855 case 5:
858 args.v5.ucCRTC = crtc_id; 856 args.v5.ucCRTC = crtc_id;
859 args.v5.usPixelClock = cpu_to_le16(clock / 10); 857 args.v5.usPixelClock = cpu_to_le16(clock / 10);
860 args.v5.ucRefDiv = ref_div; 858 args.v5.ucRefDiv = ref_div;
861 args.v5.usFbDiv = cpu_to_le16(fb_div); 859 args.v5.usFbDiv = cpu_to_le16(fb_div);
862 args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); 860 args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
863 args.v5.ucPostDiv = post_div; 861 args.v5.ucPostDiv = post_div;
864 args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */ 862 args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
865 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) 863 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
866 args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC; 864 args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
867 switch (bpc) { 865 switch (bpc) {
868 case 8: 866 case 8:
869 default: 867 default:
870 args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP; 868 args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
871 break; 869 break;
872 case 10: 870 case 10:
873 args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP; 871 args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
874 break; 872 break;
875 } 873 }
876 args.v5.ucTransmitterID = encoder_id; 874 args.v5.ucTransmitterID = encoder_id;
877 args.v5.ucEncoderMode = encoder_mode; 875 args.v5.ucEncoderMode = encoder_mode;
878 args.v5.ucPpll = pll_id; 876 args.v5.ucPpll = pll_id;
879 break; 877 break;
880 case 6: 878 case 6:
881 args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10); 879 args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10);
882 args.v6.ucRefDiv = ref_div; 880 args.v6.ucRefDiv = ref_div;
883 args.v6.usFbDiv = cpu_to_le16(fb_div); 881 args.v6.usFbDiv = cpu_to_le16(fb_div);
884 args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); 882 args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
885 args.v6.ucPostDiv = post_div; 883 args.v6.ucPostDiv = post_div;
886 args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */ 884 args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
887 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) 885 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
888 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC; 886 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
889 switch (bpc) { 887 switch (bpc) {
890 case 8: 888 case 8:
891 default: 889 default:
892 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP; 890 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
893 break; 891 break;
894 case 10: 892 case 10:
895 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP; 893 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
896 break; 894 break;
897 case 12: 895 case 12:
898 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP; 896 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
899 break; 897 break;
900 case 16: 898 case 16:
901 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP; 899 args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
902 break; 900 break;
903 } 901 }
904 args.v6.ucTransmitterID = encoder_id; 902 args.v6.ucTransmitterID = encoder_id;
905 args.v6.ucEncoderMode = encoder_mode; 903 args.v6.ucEncoderMode = encoder_mode;
906 args.v6.ucPpll = pll_id; 904 args.v6.ucPpll = pll_id;
907 break; 905 break;
908 default: 906 default:
909 DRM_ERROR("Unknown table version %d %d\n", frev, crev); 907 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
910 return; 908 return;
911 } 909 }
912 break; 910 break;
913 default: 911 default:
914 DRM_ERROR("Unknown table version %d %d\n", frev, crev); 912 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
915 return; 913 return;
916 } 914 }
917 915
918 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 916 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
919 } 917 }
920 918
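The switch above packs the same divider set into a version-specific parameter block before handing it to atom_execute_table(), with (frev, crev) selecting the layout. A minimal sketch of that pattern follows; the struct and field names are hypothetical stand-ins, not the real AtomBIOS SET_PIXEL_CLOCK parameter blocks.

/* Illustrative sketch only: the "one union, per-revision layout" pattern
 * used by atombios_crtc_program_pll() above. Struct names and fields are
 * hypothetical, not the real AtomBIOS parameter blocks. */
#include <stdint.h>
#include <stdio.h>

struct v5_args { uint16_t fb_div; uint8_t post_div; uint8_t ppll; };
struct v6_args { uint32_t disp_clk; uint16_t fb_div; uint8_t post_div; uint8_t ppll; };

union pixel_clock_args {
	struct v5_args v5;
	struct v6_args v6;
};

static void pack_args(union pixel_clock_args *args, int crev, uint32_t crtc_id,
		      uint32_t clock, uint16_t fb_div, uint8_t post_div, uint8_t ppll)
{
	switch (crev) {
	case 5:
		args->v5.fb_div = fb_div;
		args->v5.post_div = post_div;
		args->v5.ppll = ppll;
		break;
	case 6:
		/* v6 additionally carries the display engine clock in 10 kHz
		 * units, with the crtc id packed into the top byte, as above. */
		args->v6.disp_clk = (crtc_id << 24) | (clock / 10);
		args->v6.fb_div = fb_div;
		args->v6.post_div = post_div;
		args->v6.ppll = ppll;
		break;
	default:
		fprintf(stderr, "unknown table revision %d\n", crev);
		break;
	}
}
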
921 static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) 919 static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
922 { 920 {
923 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 921 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
924 struct drm_device *dev = crtc->dev; 922 struct drm_device *dev = crtc->dev;
925 struct radeon_device *rdev = dev->dev_private; 923 struct radeon_device *rdev = dev->dev_private;
926 struct radeon_encoder *radeon_encoder = 924 struct radeon_encoder *radeon_encoder =
927 to_radeon_encoder(radeon_crtc->encoder); 925 to_radeon_encoder(radeon_crtc->encoder);
928 int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder); 926 int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
929 927
930 radeon_crtc->bpc = 8; 928 radeon_crtc->bpc = 8;
931 radeon_crtc->ss_enabled = false; 929 radeon_crtc->ss_enabled = false;
932 930
933 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || 931 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
934 (radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) { 932 (radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
935 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 933 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
936 struct drm_connector *connector = 934 struct drm_connector *connector =
937 radeon_get_connector_for_encoder(radeon_crtc->encoder); 935 radeon_get_connector_for_encoder(radeon_crtc->encoder);
938 struct radeon_connector *radeon_connector = 936 struct radeon_connector *radeon_connector =
939 to_radeon_connector(connector); 937 to_radeon_connector(connector);
940 struct radeon_connector_atom_dig *dig_connector = 938 struct radeon_connector_atom_dig *dig_connector =
941 radeon_connector->con_priv; 939 radeon_connector->con_priv;
942 int dp_clock; 940 int dp_clock;
943 radeon_crtc->bpc = radeon_get_monitor_bpc(connector); 941 radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
944 942
945 switch (encoder_mode) { 943 switch (encoder_mode) {
946 case ATOM_ENCODER_MODE_DP_MST: 944 case ATOM_ENCODER_MODE_DP_MST:
947 case ATOM_ENCODER_MODE_DP: 945 case ATOM_ENCODER_MODE_DP:
948 /* DP/eDP */ 946 /* DP/eDP */
949 dp_clock = dig_connector->dp_clock / 10; 947 dp_clock = dig_connector->dp_clock / 10;
950 if (ASIC_IS_DCE4(rdev)) 948 if (ASIC_IS_DCE4(rdev))
951 radeon_crtc->ss_enabled = 949 radeon_crtc->ss_enabled =
952 radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss, 950 radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
953 ASIC_INTERNAL_SS_ON_DP, 951 ASIC_INTERNAL_SS_ON_DP,
954 dp_clock); 952 dp_clock);
955 else { 953 else {
956 if (dp_clock == 16200) { 954 if (dp_clock == 16200) {
957 radeon_crtc->ss_enabled = 955 radeon_crtc->ss_enabled =
958 radeon_atombios_get_ppll_ss_info(rdev, 956 radeon_atombios_get_ppll_ss_info(rdev,
959 &radeon_crtc->ss, 957 &radeon_crtc->ss,
960 ATOM_DP_SS_ID2); 958 ATOM_DP_SS_ID2);
961 if (!radeon_crtc->ss_enabled) 959 if (!radeon_crtc->ss_enabled)
962 radeon_crtc->ss_enabled = 960 radeon_crtc->ss_enabled =
963 radeon_atombios_get_ppll_ss_info(rdev, 961 radeon_atombios_get_ppll_ss_info(rdev,
964 &radeon_crtc->ss, 962 &radeon_crtc->ss,
965 ATOM_DP_SS_ID1); 963 ATOM_DP_SS_ID1);
966 } else { 964 } else {
967 radeon_crtc->ss_enabled = 965 radeon_crtc->ss_enabled =
968 radeon_atombios_get_ppll_ss_info(rdev, 966 radeon_atombios_get_ppll_ss_info(rdev,
969 &radeon_crtc->ss, 967 &radeon_crtc->ss,
970 ATOM_DP_SS_ID1); 968 ATOM_DP_SS_ID1);
971 } 969 }
972 /* disable spread spectrum on DCE3 DP */ 970 /* disable spread spectrum on DCE3 DP */
973 radeon_crtc->ss_enabled = false; 971 radeon_crtc->ss_enabled = false;
974 } 972 }
975 break; 973 break;
976 case ATOM_ENCODER_MODE_LVDS: 974 case ATOM_ENCODER_MODE_LVDS:
977 if (ASIC_IS_DCE4(rdev)) 975 if (ASIC_IS_DCE4(rdev))
978 radeon_crtc->ss_enabled = 976 radeon_crtc->ss_enabled =
979 radeon_atombios_get_asic_ss_info(rdev, 977 radeon_atombios_get_asic_ss_info(rdev,
980 &radeon_crtc->ss, 978 &radeon_crtc->ss,
981 dig->lcd_ss_id, 979 dig->lcd_ss_id,
982 mode->clock / 10); 980 mode->clock / 10);
983 else 981 else
984 radeon_crtc->ss_enabled = 982 radeon_crtc->ss_enabled =
985 radeon_atombios_get_ppll_ss_info(rdev, 983 radeon_atombios_get_ppll_ss_info(rdev,
986 &radeon_crtc->ss, 984 &radeon_crtc->ss,
987 dig->lcd_ss_id); 985 dig->lcd_ss_id);
988 break; 986 break;
989 case ATOM_ENCODER_MODE_DVI: 987 case ATOM_ENCODER_MODE_DVI:
990 if (ASIC_IS_DCE4(rdev)) 988 if (ASIC_IS_DCE4(rdev))
991 radeon_crtc->ss_enabled = 989 radeon_crtc->ss_enabled =
992 radeon_atombios_get_asic_ss_info(rdev, 990 radeon_atombios_get_asic_ss_info(rdev,
993 &radeon_crtc->ss, 991 &radeon_crtc->ss,
994 ASIC_INTERNAL_SS_ON_TMDS, 992 ASIC_INTERNAL_SS_ON_TMDS,
995 mode->clock / 10); 993 mode->clock / 10);
996 break; 994 break;
997 case ATOM_ENCODER_MODE_HDMI: 995 case ATOM_ENCODER_MODE_HDMI:
998 if (ASIC_IS_DCE4(rdev)) 996 if (ASIC_IS_DCE4(rdev))
999 radeon_crtc->ss_enabled = 997 radeon_crtc->ss_enabled =
1000 radeon_atombios_get_asic_ss_info(rdev, 998 radeon_atombios_get_asic_ss_info(rdev,
1001 &radeon_crtc->ss, 999 &radeon_crtc->ss,
1002 ASIC_INTERNAL_SS_ON_HDMI, 1000 ASIC_INTERNAL_SS_ON_HDMI,
1003 mode->clock / 10); 1001 mode->clock / 10);
1004 break; 1002 break;
1005 default: 1003 default:
1006 break; 1004 break;
1007 } 1005 }
1008 } 1006 }
1009 1007
1010 /* adjust pixel clock as needed */ 1008 /* adjust pixel clock as needed */
1011 radeon_crtc->adjusted_clock = atombios_adjust_pll(crtc, mode); 1009 radeon_crtc->adjusted_clock = atombios_adjust_pll(crtc, mode);
1012 1010
1013 return true; 1011 return true;
1014 } 1012 }
1015 1013
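In the DP branch above, dp_clock is the link rate in 10 kHz units, so 16200 is the 1.62 GHz reduced bit rate; that rate tries ATOM_DP_SS_ID2 first and falls back to ATOM_DP_SS_ID1, while the 2.70 GHz rate uses ID1 only, and the result is then forced off anyway on pre-DCE4 parts. A compact restatement, with lookup_ss() a hypothetical stand-in for radeon_atombios_get_ppll_ss_info():

/* Restates the pre-DCE4 DP spread-spectrum id selection above.
 * lookup_ss() is a dummy stand-in for the real table lookup. */
#include <stdbool.h>
#include <stdio.h>

enum { DP_SS_ID1 = 1, DP_SS_ID2 = 2 };

static bool lookup_ss(int id)
{
	return id == DP_SS_ID1;		/* dummy table: only ID1 present */
}

static bool pick_dp_ss(unsigned dp_clock)	/* 10 kHz units */
{
	bool found;

	if (dp_clock == 16200) {	/* 1.62 GHz: prefer ID2, fall back to ID1 */
		found = lookup_ss(DP_SS_ID2);
		if (!found)
			found = lookup_ss(DP_SS_ID1);
	} else {			/* 2.70 GHz: ID1 only */
		found = lookup_ss(DP_SS_ID1);
	}
	return found;			/* the driver then forces this off on DCE3 DP */
}

int main(void)
{
	printf("ss for 1.62 GHz link: %d\n", pick_dp_ss(16200));
	return 0;
}
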
1016 static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) 1014 static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
1017 { 1015 {
1018 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1016 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1019 struct drm_device *dev = crtc->dev; 1017 struct drm_device *dev = crtc->dev;
1020 struct radeon_device *rdev = dev->dev_private; 1018 struct radeon_device *rdev = dev->dev_private;
1021 struct radeon_encoder *radeon_encoder = 1019 struct radeon_encoder *radeon_encoder =
1022 to_radeon_encoder(radeon_crtc->encoder); 1020 to_radeon_encoder(radeon_crtc->encoder);
1023 u32 pll_clock = mode->clock; 1021 u32 pll_clock = mode->clock;
1024 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; 1022 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
1025 struct radeon_pll *pll; 1023 struct radeon_pll *pll;
1026 int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder); 1024 int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
1027 1025
1028 switch (radeon_crtc->pll_id) { 1026 switch (radeon_crtc->pll_id) {
1029 case ATOM_PPLL1: 1027 case ATOM_PPLL1:
1030 pll = &rdev->clock.p1pll; 1028 pll = &rdev->clock.p1pll;
1031 break; 1029 break;
1032 case ATOM_PPLL2: 1030 case ATOM_PPLL2:
1033 pll = &rdev->clock.p2pll; 1031 pll = &rdev->clock.p2pll;
1034 break; 1032 break;
1035 case ATOM_DCPLL: 1033 case ATOM_DCPLL:
1036 case ATOM_PPLL_INVALID: 1034 case ATOM_PPLL_INVALID:
1037 default: 1035 default:
1038 pll = &rdev->clock.dcpll; 1036 pll = &rdev->clock.dcpll;
1039 break; 1037 break;
1040 } 1038 }
1041 1039
1042 /* update pll params */ 1040 /* update pll params */
1043 pll->flags = radeon_crtc->pll_flags; 1041 pll->flags = radeon_crtc->pll_flags;
1044 pll->reference_div = radeon_crtc->pll_reference_div; 1042 pll->reference_div = radeon_crtc->pll_reference_div;
1045 pll->post_div = radeon_crtc->pll_post_div; 1043 pll->post_div = radeon_crtc->pll_post_div;
1046 1044
1047 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) 1045 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
1048 /* TV seems to prefer the legacy algo on some boards */ 1046 /* TV seems to prefer the legacy algo on some boards */
1049 radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock, 1047 radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
1050 &fb_div, &frac_fb_div, &ref_div, &post_div); 1048 &fb_div, &frac_fb_div, &ref_div, &post_div);
1051 else if (ASIC_IS_AVIVO(rdev)) 1049 else if (ASIC_IS_AVIVO(rdev))
1052 radeon_compute_pll_avivo(pll, radeon_crtc->adjusted_clock, &pll_clock, 1050 radeon_compute_pll_avivo(pll, radeon_crtc->adjusted_clock, &pll_clock,
1053 &fb_div, &frac_fb_div, &ref_div, &post_div); 1051 &fb_div, &frac_fb_div, &ref_div, &post_div);
1054 else 1052 else
1055 radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock, 1053 radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
1056 &fb_div, &frac_fb_div, &ref_div, &post_div); 1054 &fb_div, &frac_fb_div, &ref_div, &post_div);
1057 1055
1058 atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, 1056 atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id,
1059 radeon_crtc->crtc_id, &radeon_crtc->ss); 1057 radeon_crtc->crtc_id, &radeon_crtc->ss);
1060 1058
1061 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 1059 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
1062 encoder_mode, radeon_encoder->encoder_id, mode->clock, 1060 encoder_mode, radeon_encoder->encoder_id, mode->clock,
1063 ref_div, fb_div, frac_fb_div, post_div, 1061 ref_div, fb_div, frac_fb_div, post_div,
1064 radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss); 1062 radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
1065 1063
1066 if (radeon_crtc->ss_enabled) { 1064 if (radeon_crtc->ss_enabled) {
1067 /* calculate ss amount and step size */ 1065 /* calculate ss amount and step size */
1068 if (ASIC_IS_DCE4(rdev)) { 1066 if (ASIC_IS_DCE4(rdev)) {
1069 u32 step_size; 1067 u32 step_size;
1070 u32 amount = (((fb_div * 10) + frac_fb_div) * 1068 u32 amount = (((fb_div * 10) + frac_fb_div) *
1071 (u32)radeon_crtc->ss.percentage) / 1069 (u32)radeon_crtc->ss.percentage) /
1072 (100 * (u32)radeon_crtc->ss.percentage_divider); 1070 (100 * (u32)radeon_crtc->ss.percentage_divider);
1073 radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; 1071 radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
1074 radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & 1072 radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
1075 ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK; 1073 ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
1076 if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) 1074 if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
1077 step_size = (4 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) / 1075 step_size = (4 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
1078 (125 * 25 * pll->reference_freq / 100); 1076 (125 * 25 * pll->reference_freq / 100);
1079 else 1077 else
1080 step_size = (2 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) / 1078 step_size = (2 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
1081 (125 * 25 * pll->reference_freq / 100); 1079 (125 * 25 * pll->reference_freq / 100);
1082 radeon_crtc->ss.step = step_size; 1080 radeon_crtc->ss.step = step_size;
1083 } 1081 }
1084 1082
1085 atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, 1083 atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id,
1086 radeon_crtc->crtc_id, &radeon_crtc->ss); 1084 radeon_crtc->crtc_id, &radeon_crtc->ss);
1087 } 1085 }
1088 } 1086 }
1089 1087
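The DCE4 spread-spectrum amount above is derived from the feedback divider expressed in tenths (fb_div * 10 + frac_fb_div), scaled by the SS percentage, then split across two register fields: the whole part (amount / 10) goes in the FBDIV field and the remainder of the split in the NFRAC field. A self-contained worked example, with hypothetical divider and percentage values; the mask constants are restated here to match the ATOM_PPLL_SS_AMOUNT_V2_* layout and should be treated as illustrative:

/* Worked example of the DCE4 spread-spectrum "amount" packing above.
 * All input values are hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define SS_AMOUNT_FBDIV_MASK  0x00ff	/* mirrors ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK */
#define SS_AMOUNT_NFRAC_SHIFT 8	/* mirrors ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT */
#define SS_AMOUNT_NFRAC_MASK  0x0f00	/* mirrors ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK */

int main(void)
{
	uint32_t fb_div = 80, frac_fb_div = 4;	/* hypothetical divider: 80.4 */
	uint32_t percentage = 50, divider = 10;	/* hypothetical SS spec */

	/* amount = (fb_div in tenths) * percentage / (100 * divider) */
	uint32_t amount = (((fb_div * 10) + frac_fb_div) * percentage) /
			  (100 * divider);
	uint16_t packed = (amount / 10) & SS_AMOUNT_FBDIV_MASK;

	packed |= ((amount - (amount / 10)) << SS_AMOUNT_NFRAC_SHIFT) &
		  SS_AMOUNT_NFRAC_MASK;

	printf("amount=%u packed=0x%04x\n", amount, packed);
	return 0;
}
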
1090 static int dce4_crtc_do_set_base(struct drm_crtc *crtc, 1088 static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1091 struct drm_framebuffer *fb, 1089 struct drm_framebuffer *fb,
1092 int x, int y, int atomic) 1090 int x, int y, int atomic)
1093 { 1091 {
1094 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1092 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1095 struct drm_device *dev = crtc->dev; 1093 struct drm_device *dev = crtc->dev;
1096 struct radeon_device *rdev = dev->dev_private; 1094 struct radeon_device *rdev = dev->dev_private;
1097 struct radeon_framebuffer *radeon_fb; 1095 struct radeon_framebuffer *radeon_fb;
1098 struct drm_framebuffer *target_fb; 1096 struct drm_framebuffer *target_fb;
1099 struct drm_gem_object *obj; 1097 struct drm_gem_object *obj;
1100 struct radeon_bo *rbo; 1098 struct radeon_bo *rbo;
1101 uint64_t fb_location; 1099 uint64_t fb_location;
1102 uint32_t fb_format, fb_pitch_pixels, tiling_flags; 1100 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
1103 unsigned bankw, bankh, mtaspect, tile_split; 1101 unsigned bankw, bankh, mtaspect, tile_split;
1104 u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE); 1102 u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
1105 u32 tmp, viewport_w, viewport_h; 1103 u32 tmp, viewport_w, viewport_h;
1106 int r; 1104 int r;
1107 1105
1108 /* no fb bound */ 1106 /* no fb bound */
1109 if (!atomic && !crtc->primary->fb) { 1107 if (!atomic && !crtc->primary->fb) {
1110 DRM_DEBUG_KMS("No FB bound\n"); 1108 DRM_DEBUG_KMS("No FB bound\n");
1111 return 0; 1109 return 0;
1112 } 1110 }
1113 1111
1114 if (atomic) { 1112 if (atomic) {
1115 radeon_fb = to_radeon_framebuffer(fb); 1113 radeon_fb = to_radeon_framebuffer(fb);
1116 target_fb = fb; 1114 target_fb = fb;
1117 } 1115 }
1118 else { 1116 else {
1119 radeon_fb = to_radeon_framebuffer(crtc->primary->fb); 1117 radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
1120 target_fb = crtc->primary->fb; 1118 target_fb = crtc->primary->fb;
1121 } 1119 }
1122 1120
1123 /* If atomic, assume fb object is pinned & idle & fenced and 1121 /* If atomic, assume fb object is pinned & idle & fenced and
1124 * just update base pointers 1122 * just update base pointers
1125 */ 1123 */
1126 obj = radeon_fb->obj; 1124 obj = radeon_fb->obj;
1127 rbo = gem_to_radeon_bo(obj); 1125 rbo = gem_to_radeon_bo(obj);
1128 r = radeon_bo_reserve(rbo, false); 1126 r = radeon_bo_reserve(rbo, false);
1129 if (unlikely(r != 0)) 1127 if (unlikely(r != 0))
1130 return r; 1128 return r;
1131 1129
1132 if (atomic) 1130 if (atomic)
1133 fb_location = radeon_bo_gpu_offset(rbo); 1131 fb_location = radeon_bo_gpu_offset(rbo);
1134 else { 1132 else {
1135 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); 1133 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
1136 if (unlikely(r != 0)) { 1134 if (unlikely(r != 0)) {
1137 radeon_bo_unreserve(rbo); 1135 radeon_bo_unreserve(rbo);
1138 return -EINVAL; 1136 return -EINVAL;
1139 } 1137 }
1140 } 1138 }
1141 1139
1142 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); 1140 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
1143 radeon_bo_unreserve(rbo); 1141 radeon_bo_unreserve(rbo);
1144 1142
1145 switch (target_fb->bits_per_pixel) { 1143 switch (target_fb->bits_per_pixel) {
1146 case 8: 1144 case 8:
1147 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) | 1145 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
1148 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED)); 1146 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
1149 break; 1147 break;
1150 case 15: 1148 case 15:
1151 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | 1149 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
1152 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555)); 1150 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
1153 break; 1151 break;
1154 case 16: 1152 case 16:
1155 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | 1153 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
1156 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); 1154 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
1157 #ifdef __BIG_ENDIAN 1155 #ifdef __BIG_ENDIAN
1158 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); 1156 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
1159 #endif 1157 #endif
1160 break; 1158 break;
1161 case 24: 1159 case 24:
1162 case 32: 1160 case 32:
1163 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | 1161 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
1164 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); 1162 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
1165 #ifdef __BIG_ENDIAN 1163 #ifdef __BIG_ENDIAN
1166 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); 1164 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
1167 #endif 1165 #endif
1168 break; 1166 break;
1169 default: 1167 default:
1170 DRM_ERROR("Unsupported screen depth %d\n", 1168 DRM_ERROR("Unsupported screen depth %d\n",
1171 target_fb->bits_per_pixel); 1169 target_fb->bits_per_pixel);
1172 return -EINVAL; 1170 return -EINVAL;
1173 } 1171 }
1174 1172
1175 if (tiling_flags & RADEON_TILING_MACRO) { 1173 if (tiling_flags & RADEON_TILING_MACRO) {
1176 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); 1174 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
1177 1175
1178 /* Set NUM_BANKS. */ 1176 /* Set NUM_BANKS. */
1179 if (rdev->family >= CHIP_TAHITI) { 1177 if (rdev->family >= CHIP_TAHITI) {
1180 unsigned index, num_banks; 1178 unsigned index, num_banks;
1181 1179
1182 if (rdev->family >= CHIP_BONAIRE) { 1180 if (rdev->family >= CHIP_BONAIRE) {
1183 unsigned tileb, tile_split_bytes; 1181 unsigned tileb, tile_split_bytes;
1184 1182
1185 /* Calculate the macrotile mode index. */ 1183 /* Calculate the macrotile mode index. */
1186 tile_split_bytes = 64 << tile_split; 1184 tile_split_bytes = 64 << tile_split;
1187 tileb = 8 * 8 * target_fb->bits_per_pixel / 8; 1185 tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
1188 tileb = min(tile_split_bytes, tileb); 1186 tileb = min(tile_split_bytes, tileb);
1189 1187
1190 for (index = 0; tileb > 64; index++) 1188 for (index = 0; tileb > 64; index++)
1191 tileb >>= 1; 1189 tileb >>= 1;
1192 1190
1193 if (index >= 16) { 1191 if (index >= 16) {
1194 DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", 1192 DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
1195 target_fb->bits_per_pixel, tile_split); 1193 target_fb->bits_per_pixel, tile_split);
1196 return -EINVAL; 1194 return -EINVAL;
1197 } 1195 }
1198 1196
1199 num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; 1197 num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
1200 } else { 1198 } else {
1201 switch (target_fb->bits_per_pixel) { 1199 switch (target_fb->bits_per_pixel) {
1202 case 8: 1200 case 8:
1203 index = 10; 1201 index = 10;
1204 break; 1202 break;
1205 case 16: 1203 case 16:
1206 index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP; 1204 index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP;
1207 break; 1205 break;
1208 default: 1206 default:
1209 case 32: 1207 case 32:
1210 index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP; 1208 index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP;
1211 break; 1209 break;
1212 } 1210 }
1213 1211
1214 num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; 1212 num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
1215 } 1213 }
1216 1214
1217 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); 1215 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
1218 } else { 1216 } else {
1219 /* NI and older. */ 1217 /* NI and older. */
1220 if (rdev->family >= CHIP_CAYMAN) 1218 if (rdev->family >= CHIP_CAYMAN)
1221 tmp = rdev->config.cayman.tile_config; 1219 tmp = rdev->config.cayman.tile_config;
1222 else 1220 else
1223 tmp = rdev->config.evergreen.tile_config; 1221 tmp = rdev->config.evergreen.tile_config;
1224 1222
1225 switch ((tmp & 0xf0) >> 4) { 1223 switch ((tmp & 0xf0) >> 4) {
1226 case 0: /* 4 banks */ 1224 case 0: /* 4 banks */
1227 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); 1225 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
1228 break; 1226 break;
1229 case 1: /* 8 banks */ 1227 case 1: /* 8 banks */
1230 default: 1228 default:
1231 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); 1229 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
1232 break; 1230 break;
1233 case 2: /* 16 banks */ 1231 case 2: /* 16 banks */
1234 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); 1232 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
1235 break; 1233 break;
1236 } 1234 }
1237 } 1235 }
1238 1236
1239 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); 1237 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
1240 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); 1238 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
1241 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); 1239 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
1242 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); 1240 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
1243 fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect); 1241 fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
1244 if (rdev->family >= CHIP_BONAIRE) { 1242 if (rdev->family >= CHIP_BONAIRE) {
1245 /* XXX need to know more about the surface tiling mode */ 1243 /* XXX need to know more about the surface tiling mode */
1246 fb_format |= CIK_GRPH_MICRO_TILE_MODE(CIK_DISPLAY_MICRO_TILING); 1244 fb_format |= CIK_GRPH_MICRO_TILE_MODE(CIK_DISPLAY_MICRO_TILING);
1247 } 1245 }
1248 } else if (tiling_flags & RADEON_TILING_MICRO) 1246 } else if (tiling_flags & RADEON_TILING_MICRO)
1249 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1247 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1250 1248
1251 if (rdev->family >= CHIP_BONAIRE) { 1249 if (rdev->family >= CHIP_BONAIRE) {
1252 /* Read the pipe config from the 2D TILED SCANOUT mode. 1250 /* Read the pipe config from the 2D TILED SCANOUT mode.
1253 * It should be the same for the other modes too, but not all 1251 * It should be the same for the other modes too, but not all
1254 * modes set the pipe config field. */ 1252 * modes set the pipe config field. */
1255 u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f; 1253 u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
1256 1254
1257 fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config); 1255 fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
1258 } else if ((rdev->family == CHIP_TAHITI) || 1256 } else if ((rdev->family == CHIP_TAHITI) ||
1259 (rdev->family == CHIP_PITCAIRN)) 1257 (rdev->family == CHIP_PITCAIRN))
1260 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); 1258 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
1261 else if ((rdev->family == CHIP_VERDE) || 1259 else if ((rdev->family == CHIP_VERDE) ||
1262 (rdev->family == CHIP_OLAND) || 1260 (rdev->family == CHIP_OLAND) ||
1263 (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */ 1261 (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
1264 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); 1262 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
1265 1263
1266 switch (radeon_crtc->crtc_id) { 1264 switch (radeon_crtc->crtc_id) {
1267 case 0: 1265 case 0:
1268 WREG32(AVIVO_D1VGA_CONTROL, 0); 1266 WREG32(AVIVO_D1VGA_CONTROL, 0);
1269 break; 1267 break;
1270 case 1: 1268 case 1:
1271 WREG32(AVIVO_D2VGA_CONTROL, 0); 1269 WREG32(AVIVO_D2VGA_CONTROL, 0);
1272 break; 1270 break;
1273 case 2: 1271 case 2:
1274 WREG32(EVERGREEN_D3VGA_CONTROL, 0); 1272 WREG32(EVERGREEN_D3VGA_CONTROL, 0);
1275 break; 1273 break;
1276 case 3: 1274 case 3:
1277 WREG32(EVERGREEN_D4VGA_CONTROL, 0); 1275 WREG32(EVERGREEN_D4VGA_CONTROL, 0);
1278 break; 1276 break;
1279 case 4: 1277 case 4:
1280 WREG32(EVERGREEN_D5VGA_CONTROL, 0); 1278 WREG32(EVERGREEN_D5VGA_CONTROL, 0);
1281 break; 1279 break;
1282 case 5: 1280 case 5:
1283 WREG32(EVERGREEN_D6VGA_CONTROL, 0); 1281 WREG32(EVERGREEN_D6VGA_CONTROL, 0);
1284 break; 1282 break;
1285 default: 1283 default:
1286 break; 1284 break;
1287 } 1285 }
1288 1286
1289 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 1287 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
1290 upper_32_bits(fb_location)); 1288 upper_32_bits(fb_location));
1291 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 1289 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
1292 upper_32_bits(fb_location)); 1290 upper_32_bits(fb_location));
1293 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, 1291 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
1294 (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); 1292 (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
1295 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, 1293 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
1296 (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); 1294 (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
1297 WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); 1295 WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
1298 WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); 1296 WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
1299 1297
1300 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); 1298 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
1301 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); 1299 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
1302 WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0); 1300 WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
1303 WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0); 1301 WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
1304 WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); 1302 WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
1305 WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height); 1303 WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
1306 1304
1307 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); 1305 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
1308 WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); 1306 WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
1309 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); 1307 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
1310 1308
1311 if (rdev->family >= CHIP_BONAIRE) 1309 if (rdev->family >= CHIP_BONAIRE)
1312 WREG32(CIK_LB_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, 1310 WREG32(CIK_LB_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
1313 target_fb->height); 1311 target_fb->height);
1314 else 1312 else
1315 WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, 1313 WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
1316 target_fb->height); 1314 target_fb->height);
1317 x &= ~3; 1315 x &= ~3;
1318 y &= ~1; 1316 y &= ~1;
1319 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, 1317 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
1320 (x << 16) | y); 1318 (x << 16) | y);
1321 viewport_w = crtc->mode.hdisplay; 1319 viewport_w = crtc->mode.hdisplay;
1322 viewport_h = (crtc->mode.vdisplay + 1) & ~1; 1320 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1323 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, 1321 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
1324 (viewport_w << 16) | viewport_h); 1322 (viewport_w << 16) | viewport_h);
1325 1323
1326 /* pageflip setup */ 1324 /* pageflip setup */
1327 /* make sure flip is at vb rather than hb */ 1325 /* make sure flip is at vb rather than hb */
1328 tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset); 1326 tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
1329 tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN; 1327 tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
1330 WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); 1328 WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
1331 1329
1332 /* set pageflip to happen anywhere in vblank interval */ 1330 /* set pageflip to happen anywhere in vblank interval */
1333 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); 1331 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
1334 1332
1335 if (!atomic && fb && fb != crtc->primary->fb) { 1333 if (!atomic && fb && fb != crtc->primary->fb) {
1336 radeon_fb = to_radeon_framebuffer(fb); 1334 radeon_fb = to_radeon_framebuffer(fb);
1337 rbo = gem_to_radeon_bo(radeon_fb->obj); 1335 rbo = gem_to_radeon_bo(radeon_fb->obj);
1338 r = radeon_bo_reserve(rbo, false); 1336 r = radeon_bo_reserve(rbo, false);
1339 if (unlikely(r != 0)) 1337 if (unlikely(r != 0))
1340 return r; 1338 return r;
1341 radeon_bo_unpin(rbo); 1339 radeon_bo_unpin(rbo);
1342 radeon_bo_unreserve(rbo); 1340 radeon_bo_unreserve(rbo);
1343 } 1341 }
1344 1342
1345 /* Bytes per pixel may have changed */ 1343 /* Bytes per pixel may have changed */
1346 radeon_bandwidth_update(rdev); 1344 radeon_bandwidth_update(rdev);
1347 1345
1348 return 0; 1346 return 0;
1349 } 1347 }
1350 1348
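The CIK branch above computes the macrotile mode index as log2 of the bytes in one 8x8 micro tile, clamped by the tile split and offset so that 64 bytes maps to index 0. Restated as a standalone helper with hypothetical example inputs:

/* Restates the CIK macrotile-mode index computation from
 * dce4_crtc_do_set_base() as a standalone helper; the inputs in main()
 * are hypothetical examples, not values read from hardware. */
#include <stdio.h>

static int macrotile_index(unsigned bpp, unsigned tile_split)
{
	unsigned tile_split_bytes = 64u << tile_split;
	unsigned tileb = 8 * 8 * bpp / 8;	/* bytes in one 8x8 micro tile */
	unsigned index;

	if (tileb > tile_split_bytes)
		tileb = tile_split_bytes;

	/* index = log2(tileb) - log2(64) */
	for (index = 0; tileb > 64; index++)
		tileb >>= 1;

	return index < 16 ? (int)index : -1;	/* -1: bad bpp / tile split */
}

int main(void)
{
	/* 32bpp, tile_split 2 (256 bytes): 8*8*4 = 256 bytes -> index 2 */
	printf("index = %d\n", macrotile_index(32, 2));
	return 0;
}
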
1351 static int avivo_crtc_do_set_base(struct drm_crtc *crtc, 1349 static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1352 struct drm_framebuffer *fb, 1350 struct drm_framebuffer *fb,
1353 int x, int y, int atomic) 1351 int x, int y, int atomic)
1354 { 1352 {
1355 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1353 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1356 struct drm_device *dev = crtc->dev; 1354 struct drm_device *dev = crtc->dev;
1357 struct radeon_device *rdev = dev->dev_private; 1355 struct radeon_device *rdev = dev->dev_private;
1358 struct radeon_framebuffer *radeon_fb; 1356 struct radeon_framebuffer *radeon_fb;
1359 struct drm_gem_object *obj; 1357 struct drm_gem_object *obj;
1360 struct radeon_bo *rbo; 1358 struct radeon_bo *rbo;
1361 struct drm_framebuffer *target_fb; 1359 struct drm_framebuffer *target_fb;
1362 uint64_t fb_location; 1360 uint64_t fb_location;
1363 uint32_t fb_format, fb_pitch_pixels, tiling_flags; 1361 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
1364 u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE; 1362 u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
1365 u32 tmp, viewport_w, viewport_h; 1363 u32 tmp, viewport_w, viewport_h;
1366 int r; 1364 int r;
1367 1365
1368 /* no fb bound */ 1366 /* no fb bound */
1369 if (!atomic && !crtc->primary->fb) { 1367 if (!atomic && !crtc->primary->fb) {
1370 DRM_DEBUG_KMS("No FB bound\n"); 1368 DRM_DEBUG_KMS("No FB bound\n");
1371 return 0; 1369 return 0;
1372 } 1370 }
1373 1371
1374 if (atomic) { 1372 if (atomic) {
1375 radeon_fb = to_radeon_framebuffer(fb); 1373 radeon_fb = to_radeon_framebuffer(fb);
1376 target_fb = fb; 1374 target_fb = fb;
1377 } 1375 }
1378 else { 1376 else {
1379 radeon_fb = to_radeon_framebuffer(crtc->primary->fb); 1377 radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
1380 target_fb = crtc->primary->fb; 1378 target_fb = crtc->primary->fb;
1381 } 1379 }
1382 1380
1383 obj = radeon_fb->obj; 1381 obj = radeon_fb->obj;
1384 rbo = gem_to_radeon_bo(obj); 1382 rbo = gem_to_radeon_bo(obj);
1385 r = radeon_bo_reserve(rbo, false); 1383 r = radeon_bo_reserve(rbo, false);
1386 if (unlikely(r != 0)) 1384 if (unlikely(r != 0))
1387 return r; 1385 return r;
1388 1386
1389 /* If atomic, assume fb object is pinned & idle & fenced and 1387 /* If atomic, assume fb object is pinned & idle & fenced and
1390 * just update base pointers 1388 * just update base pointers
1391 */ 1389 */
1392 if (atomic) 1390 if (atomic)
1393 fb_location = radeon_bo_gpu_offset(rbo); 1391 fb_location = radeon_bo_gpu_offset(rbo);
1394 else { 1392 else {
1395 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); 1393 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
1396 if (unlikely(r != 0)) { 1394 if (unlikely(r != 0)) {
1397 radeon_bo_unreserve(rbo); 1395 radeon_bo_unreserve(rbo);
1398 return -EINVAL; 1396 return -EINVAL;
1399 } 1397 }
1400 } 1398 }
1401 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); 1399 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
1402 radeon_bo_unreserve(rbo); 1400 radeon_bo_unreserve(rbo);
1403 1401
1404 switch (target_fb->bits_per_pixel) { 1402 switch (target_fb->bits_per_pixel) {
1405 case 8: 1403 case 8:
1406 fb_format = 1404 fb_format =
1407 AVIVO_D1GRPH_CONTROL_DEPTH_8BPP | 1405 AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
1408 AVIVO_D1GRPH_CONTROL_8BPP_INDEXED; 1406 AVIVO_D1GRPH_CONTROL_8BPP_INDEXED;
1409 break; 1407 break;
1410 case 15: 1408 case 15:
1411 fb_format = 1409 fb_format =
1412 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | 1410 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
1413 AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555; 1411 AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
1414 break; 1412 break;
1415 case 16: 1413 case 16:
1416 fb_format = 1414 fb_format =
1417 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | 1415 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
1418 AVIVO_D1GRPH_CONTROL_16BPP_RGB565; 1416 AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
1419 #ifdef __BIG_ENDIAN 1417 #ifdef __BIG_ENDIAN
1420 fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT; 1418 fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
1421 #endif 1419 #endif
1422 break; 1420 break;
1423 case 24: 1421 case 24:
1424 case 32: 1422 case 32:
1425 fb_format = 1423 fb_format =
1426 AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | 1424 AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
1427 AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; 1425 AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
1428 #ifdef __BIG_ENDIAN 1426 #ifdef __BIG_ENDIAN
1429 fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT; 1427 fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
1430 #endif 1428 #endif
1431 break; 1429 break;
1432 default: 1430 default:
1433 DRM_ERROR("Unsupported screen depth %d\n", 1431 DRM_ERROR("Unsupported screen depth %d\n",
1434 target_fb->bits_per_pixel); 1432 target_fb->bits_per_pixel);
1435 return -EINVAL; 1433 return -EINVAL;
1436 } 1434 }
1437 1435
1438 if (rdev->family >= CHIP_R600) { 1436 if (rdev->family >= CHIP_R600) {
1439 if (tiling_flags & RADEON_TILING_MACRO) 1437 if (tiling_flags & RADEON_TILING_MACRO)
1440 fb_format |= R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1; 1438 fb_format |= R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1;
1441 else if (tiling_flags & RADEON_TILING_MICRO) 1439 else if (tiling_flags & RADEON_TILING_MICRO)
1442 fb_format |= R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1; 1440 fb_format |= R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1;
1443 } else { 1441 } else {
1444 if (tiling_flags & RADEON_TILING_MACRO) 1442 if (tiling_flags & RADEON_TILING_MACRO)
1445 fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; 1443 fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
1446 1444
1447 if (tiling_flags & RADEON_TILING_MICRO) 1445 if (tiling_flags & RADEON_TILING_MICRO)
1448 fb_format |= AVIVO_D1GRPH_TILED; 1446 fb_format |= AVIVO_D1GRPH_TILED;
1449 } 1447 }
1450 1448
1451 if (radeon_crtc->crtc_id == 0) 1449 if (radeon_crtc->crtc_id == 0)
1452 WREG32(AVIVO_D1VGA_CONTROL, 0); 1450 WREG32(AVIVO_D1VGA_CONTROL, 0);
1453 else 1451 else
1454 WREG32(AVIVO_D2VGA_CONTROL, 0); 1452 WREG32(AVIVO_D2VGA_CONTROL, 0);
1455 1453
1456 if (rdev->family >= CHIP_RV770) { 1454 if (rdev->family >= CHIP_RV770) {
1457 if (radeon_crtc->crtc_id) { 1455 if (radeon_crtc->crtc_id) {
1458 WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); 1456 WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1459 WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); 1457 WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1460 } else { 1458 } else {
1461 WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); 1459 WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1462 WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); 1460 WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1463 } 1461 }
1464 } 1462 }
1465 WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, 1463 WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
1466 (u32) fb_location); 1464 (u32) fb_location);
1467 WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + 1465 WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
1468 radeon_crtc->crtc_offset, (u32) fb_location); 1466 radeon_crtc->crtc_offset, (u32) fb_location);
1469 WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); 1467 WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
1470 if (rdev->family >= CHIP_R600) 1468 if (rdev->family >= CHIP_R600)
1471 WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); 1469 WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
1472 1470
1473 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); 1471 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
1474 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); 1472 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
1475 WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0); 1473 WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
1476 WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0); 1474 WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
1477 WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); 1475 WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
1478 WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height); 1476 WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
1479 1477
1480 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); 1478 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
1481 WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); 1479 WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
1482 WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1); 1480 WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
1483 1481
1484 WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, 1482 WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
1485 target_fb->height); 1483 target_fb->height);
1486 x &= ~3; 1484 x &= ~3;
1487 y &= ~1; 1485 y &= ~1;
1488 WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset, 1486 WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
1489 (x << 16) | y); 1487 (x << 16) | y);
1490 viewport_w = crtc->mode.hdisplay; 1488 viewport_w = crtc->mode.hdisplay;
1491 viewport_h = (crtc->mode.vdisplay + 1) & ~1; 1489 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1492 WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, 1490 WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
1493 (viewport_w << 16) | viewport_h); 1491 (viewport_w << 16) | viewport_h);
1494 1492
1495 /* pageflip setup */ 1493 /* pageflip setup */
1496 /* make sure flip is at vb rather than hb */ 1494 /* make sure flip is at vb rather than hb */
1497 tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset); 1495 tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
1498 tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN; 1496 tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
1499 WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); 1497 WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
1500 1498
1501 /* set pageflip to happen anywhere in vblank interval */ 1499 /* set pageflip to happen anywhere in vblank interval */
1502 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); 1500 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
1503 1501
1504 if (!atomic && fb && fb != crtc->primary->fb) { 1502 if (!atomic && fb && fb != crtc->primary->fb) {
1505 radeon_fb = to_radeon_framebuffer(fb); 1503 radeon_fb = to_radeon_framebuffer(fb);
1506 rbo = gem_to_radeon_bo(radeon_fb->obj); 1504 rbo = gem_to_radeon_bo(radeon_fb->obj);
1507 r = radeon_bo_reserve(rbo, false); 1505 r = radeon_bo_reserve(rbo, false);
1508 if (unlikely(r != 0)) 1506 if (unlikely(r != 0))
1509 return r; 1507 return r;
1510 radeon_bo_unpin(rbo); 1508 radeon_bo_unpin(rbo);
1511 radeon_bo_unreserve(rbo); 1509 radeon_bo_unreserve(rbo);
1512 } 1510 }
1513 1511
1514 /* Bytes per pixel may have changed */ 1512 /* Bytes per pixel may have changed */
1515 radeon_bandwidth_update(rdev); 1513 radeon_bandwidth_update(rdev);
1516 1514
1517 return 0; 1515 return 0;
1518 } 1516 }
1519 1517
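Both set_base paths convert the framebuffer pitch from bytes per row (as stored in drm_framebuffer) to pixels before writing the GRPH pitch register. A one-line worked example with hypothetical numbers:

/* Pitch conversion used by both dce4_ and avivo_crtc_do_set_base():
 * the register wants pixels, the framebuffer stores bytes per row.
 * Numbers below are hypothetical. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pitch_bytes = 7680;	/* e.g. a 1920-pixel-wide 32bpp row */
	uint32_t bpp = 32;
	uint32_t fb_pitch_pixels = pitch_bytes / (bpp / 8);

	printf("GRPH pitch = %u pixels\n", fb_pitch_pixels);	/* 1920 */
	return 0;
}
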
1520 int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, 1518 int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
1521 struct drm_framebuffer *old_fb) 1519 struct drm_framebuffer *old_fb)
1522 { 1520 {
1523 struct drm_device *dev = crtc->dev; 1521 struct drm_device *dev = crtc->dev;
1524 struct radeon_device *rdev = dev->dev_private; 1522 struct radeon_device *rdev = dev->dev_private;
1525 1523
1526 if (ASIC_IS_DCE4(rdev)) 1524 if (ASIC_IS_DCE4(rdev))
1527 return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0); 1525 return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
1528 else if (ASIC_IS_AVIVO(rdev)) 1526 else if (ASIC_IS_AVIVO(rdev))
1529 return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0); 1527 return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
1530 else 1528 else
1531 return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0); 1529 return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
1532 } 1530 }
1533 1531
1534 int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, 1532 int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
1535 struct drm_framebuffer *fb, 1533 struct drm_framebuffer *fb,
1536 int x, int y, enum mode_set_atomic state) 1534 int x, int y, enum mode_set_atomic state)
1537 { 1535 {
1538 struct drm_device *dev = crtc->dev; 1536 struct drm_device *dev = crtc->dev;
1539 struct radeon_device *rdev = dev->dev_private; 1537 struct radeon_device *rdev = dev->dev_private;
1540 1538
1541 if (ASIC_IS_DCE4(rdev)) 1539 if (ASIC_IS_DCE4(rdev))
1542 return dce4_crtc_do_set_base(crtc, fb, x, y, 1); 1540 return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
1543 else if (ASIC_IS_AVIVO(rdev)) 1541 else if (ASIC_IS_AVIVO(rdev))
1544 return avivo_crtc_do_set_base(crtc, fb, x, y, 1); 1542 return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
1545 else 1543 else
1546 return radeon_crtc_do_set_base(crtc, fb, x, y, 1); 1544 return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
1547 } 1545 }
1548 1546
1549 /* properly set additional regs when using atombios */ 1547 /* properly set additional regs when using atombios */
1550 static void radeon_legacy_atom_fixup(struct drm_crtc *crtc) 1548 static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
1551 { 1549 {
1552 struct drm_device *dev = crtc->dev; 1550 struct drm_device *dev = crtc->dev;
1553 struct radeon_device *rdev = dev->dev_private; 1551 struct radeon_device *rdev = dev->dev_private;
1554 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1552 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1555 u32 disp_merge_cntl; 1553 u32 disp_merge_cntl;
1556 1554
1557 switch (radeon_crtc->crtc_id) { 1555 switch (radeon_crtc->crtc_id) {
1558 case 0: 1556 case 0:
1559 disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL); 1557 disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
1560 disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN; 1558 disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
1561 WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl); 1559 WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
1562 break; 1560 break;
1563 case 1: 1561 case 1:
1564 disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); 1562 disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
1565 disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; 1563 disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
1566 WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl); 1564 WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
1567 WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID)); 1565 WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
1568 WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID)); 1566 WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
1569 break; 1567 break;
1570 } 1568 }
1571 } 1569 }
1572 1570
1573 /** 1571 /**
1574 * radeon_get_pll_use_mask - look up a mask of which pplls are in use 1572 * radeon_get_pll_use_mask - look up a mask of which pplls are in use
1575 * 1573 *
1576 * @crtc: drm crtc 1574 * @crtc: drm crtc
1577 * 1575 *
1578 * Returns the mask of which PPLLs (Pixel PLLs) are in use. 1576 * Returns the mask of which PPLLs (Pixel PLLs) are in use.
1579 */ 1577 */
1580 static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc) 1578 static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
1581 { 1579 {
1582 struct drm_device *dev = crtc->dev; 1580 struct drm_device *dev = crtc->dev;
1583 struct drm_crtc *test_crtc; 1581 struct drm_crtc *test_crtc;
1584 struct radeon_crtc *test_radeon_crtc; 1582 struct radeon_crtc *test_radeon_crtc;
1585 u32 pll_in_use = 0; 1583 u32 pll_in_use = 0;
1586 1584
1587 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { 1585 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
1588 if (crtc == test_crtc) 1586 if (crtc == test_crtc)
1589 continue; 1587 continue;
1590 1588
1591 test_radeon_crtc = to_radeon_crtc(test_crtc); 1589 test_radeon_crtc = to_radeon_crtc(test_crtc);
1592 if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) 1590 if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
1593 pll_in_use |= (1 << test_radeon_crtc->pll_id); 1591 pll_in_use |= (1 << test_radeon_crtc->pll_id);
1594 } 1592 }
1595 return pll_in_use; 1593 return pll_in_use;
1596 } 1594 }
1597 1595
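radeon_atom_pick_pll() below consumes this mask by testing candidate ids in preference order and taking the first clear bit. A minimal illustration of that allocation idiom, with symbolic ids standing in for the ATOM_PPLL* values:

/* Minimal illustration of how the pll_in_use mask is consumed: try a
 * preference-ordered list of PLL ids and take the first whose bit is
 * clear. Ids and the example mask are hypothetical. */
#include <stdio.h>

enum { PPLL0, PPLL1, PPLL2, PPLL_INVALID = -1 };

static int pick_free_pll(unsigned pll_in_use)
{
	static const int prefer[] = { PPLL2, PPLL1, PPLL0 };
	unsigned i;

	for (i = 0; i < sizeof(prefer) / sizeof(prefer[0]); i++)
		if (!(pll_in_use & (1u << prefer[i])))
			return prefer[i];
	return PPLL_INVALID;	/* all in use: skip PLL programming */
}

int main(void)
{
	/* PPLL2 and PPLL1 taken -> falls through to PPLL0 */
	printf("picked PPLL%d\n", pick_free_pll((1u << PPLL2) | (1u << PPLL1)));
	return 0;
}
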
1598 /** 1596 /**
1599 * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP 1597 * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
1600 * 1598 *
1601 * @crtc: drm crtc 1599 * @crtc: drm crtc
1602 * 1600 *
1603 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is 1601 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
1604 * also in DP mode. For DP, a single PPLL can be used for all DP 1602 * also in DP mode. For DP, a single PPLL can be used for all DP
1605 * crtcs/encoders. 1603 * crtcs/encoders.
1606 */ 1604 */
1607 static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc) 1605 static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
1608 { 1606 {
1609 struct drm_device *dev = crtc->dev; 1607 struct drm_device *dev = crtc->dev;
1610 struct drm_crtc *test_crtc; 1608 struct drm_crtc *test_crtc;
1611 struct radeon_crtc *test_radeon_crtc; 1609 struct radeon_crtc *test_radeon_crtc;
1612 1610
1613 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { 1611 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
1614 if (crtc == test_crtc) 1612 if (crtc == test_crtc)
1615 continue; 1613 continue;
1616 test_radeon_crtc = to_radeon_crtc(test_crtc); 1614 test_radeon_crtc = to_radeon_crtc(test_crtc);
1617 if (test_radeon_crtc->encoder && 1615 if (test_radeon_crtc->encoder &&
1618 ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) { 1616 ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
1619 /* for DP use the same PLL for all */ 1617 /* for DP use the same PLL for all */
1620 if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) 1618 if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
1621 return test_radeon_crtc->pll_id; 1619 return test_radeon_crtc->pll_id;
1622 } 1620 }
1623 } 1621 }
1624 return ATOM_PPLL_INVALID; 1622 return ATOM_PPLL_INVALID;
1625 } 1623 }
1626 1624
1627 /** 1625 /**
1628 * radeon_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc 1626 * radeon_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
1629 * 1627 *
1630 * @crtc: drm crtc 1628 * @crtc: drm crtc
1631 * @encoder: drm encoder 1629 * @encoder: drm encoder
1632 * 1630 *
1633 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can 1631 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
1634 * be shared (i.e., same clock). 1632 * be shared (i.e., same clock).
1635 */ 1633 */
1636 static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc) 1634 static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
1637 { 1635 {
1638 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1636 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1639 struct drm_device *dev = crtc->dev; 1637 struct drm_device *dev = crtc->dev;
1640 struct drm_crtc *test_crtc; 1638 struct drm_crtc *test_crtc;
1641 struct radeon_crtc *test_radeon_crtc; 1639 struct radeon_crtc *test_radeon_crtc;
1642 u32 adjusted_clock, test_adjusted_clock; 1640 u32 adjusted_clock, test_adjusted_clock;
1643 1641
1644 adjusted_clock = radeon_crtc->adjusted_clock; 1642 adjusted_clock = radeon_crtc->adjusted_clock;
1645 1643
1646 if (adjusted_clock == 0) 1644 if (adjusted_clock == 0)
1647 return ATOM_PPLL_INVALID; 1645 return ATOM_PPLL_INVALID;
1648 1646
1649 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { 1647 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
1650 if (crtc == test_crtc) 1648 if (crtc == test_crtc)
1651 continue; 1649 continue;
1652 test_radeon_crtc = to_radeon_crtc(test_crtc); 1650 test_radeon_crtc = to_radeon_crtc(test_crtc);
1653 if (test_radeon_crtc->encoder && 1651 if (test_radeon_crtc->encoder &&
1654 !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) { 1652 !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
1655 /* check if we are already driving this connector with another crtc */ 1653 /* check if we are already driving this connector with another crtc */
1656 if (test_radeon_crtc->connector == radeon_crtc->connector) { 1654 if (test_radeon_crtc->connector == radeon_crtc->connector) {
1657 /* if we are, return that pll */ 1655 /* if we are, return that pll */
1658 if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) 1656 if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
1659 return test_radeon_crtc->pll_id; 1657 return test_radeon_crtc->pll_id;
1660 } 1658 }
1661 /* for non-DP check the clock */ 1659 /* for non-DP check the clock */
1662 test_adjusted_clock = test_radeon_crtc->adjusted_clock; 1660 test_adjusted_clock = test_radeon_crtc->adjusted_clock;
1663 if ((crtc->mode.clock == test_crtc->mode.clock) && 1661 if ((crtc->mode.clock == test_crtc->mode.clock) &&
1664 (adjusted_clock == test_adjusted_clock) && 1662 (adjusted_clock == test_adjusted_clock) &&
1665 (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) && 1663 (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
1666 (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)) 1664 (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
1667 return test_radeon_crtc->pll_id; 1665 return test_radeon_crtc->pll_id;
1668 } 1666 }
1669 } 1667 }
1670 return ATOM_PPLL_INVALID; 1668 return ATOM_PPLL_INVALID;
1671 } 1669 }
1672 1670
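The loop above reduces to a sharing predicate: a non-DP crtc may reuse another crtc's PPLL if both drive the same connector, or if their mode clock, adjusted clock, and spread-spectrum state all match. As a standalone sketch (the struct is a stand-in for radeon_crtc, not the real type):

/* Hedged restatement of the non-DP PPLL sharing rule above as a
 * predicate over a simplified, hypothetical crtc state. */
#include <stdbool.h>
#include <stdio.h>

struct crtc_state {
	const void *connector;
	unsigned mode_clock;		/* kHz */
	unsigned adjusted_clock;	/* kHz */
	bool ss_enabled;
};

static bool can_share_nondp_ppll(const struct crtc_state *a,
				 const struct crtc_state *b)
{
	if (a->connector == b->connector)
		return true;	/* already driving the same connector */
	return a->mode_clock == b->mode_clock &&
	       a->adjusted_clock == b->adjusted_clock &&
	       a->ss_enabled == b->ss_enabled;
}

int main(void)
{
	struct crtc_state a = { "dvi-0", 148500, 148500, false };
	struct crtc_state b = { "dvi-1", 148500, 148500, false };

	printf("shareable: %d\n", can_share_nondp_ppll(&a, &b));	/* 1 */
	return 0;
}
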
1673 /** 1671 /**
1674 * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc. 1672 * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
1675 * 1673 *
1676 * @crtc: drm crtc 1674 * @crtc: drm crtc
1677 * 1675 *
1678 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors 1676 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
1679 * a single PPLL can be used for all DP crtcs/encoders. For non-DP 1677 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
1680 * monitors a dedicated PPLL must be used. If a particular board has 1678 * monitors a dedicated PPLL must be used. If a particular board has
1681 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming 1679 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
1682 * as there is no need to program the PLL itself. If we are not able to 1680 * as there is no need to program the PLL itself. If we are not able to
1683 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to 1681 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
1684 * avoid messing up an existing monitor. 1682 * avoid messing up an existing monitor.
1685 * 1683 *
1686 * Asic specific PLL information 1684 * Asic specific PLL information
1687 * 1685 *
1688 * DCE 8.x 1686 * DCE 8.x
1689 * KB/KV 1687 * KB/KV
1690 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) 1688 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
1691 * CI 1689 * CI
1692 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC 1690 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
1693 * 1691 *
1694 * DCE 6.1 1692 * DCE 6.1
1695 * - PPLL2 is only available to UNIPHYA (both DP and non-DP) 1693 * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
1696 * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP) 1694 * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
1697 * 1695 *
1698 * DCE 6.0 1696 * DCE 6.0
1699 * - PPLL0 is available to all UNIPHY (DP only) 1697 * - PPLL0 is available to all UNIPHY (DP only)
1700 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC 1698 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
1701 * 1699 *
1702 * DCE 5.0 1700 * DCE 5.0
1703 * - DCPLL is available to all UNIPHY (DP only) 1701 * - DCPLL is available to all UNIPHY (DP only)
1704 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC 1702 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
1705 * 1703 *
1706 * DCE 3.0/4.0/4.1 1704 * DCE 3.0/4.0/4.1
1707 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC 1705 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
1708 * 1706 *
1709 */ 1707 */
static int radeon_atom_pick_pll(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder =
		to_radeon_encoder(radeon_crtc->encoder);
	u32 pll_in_use;
	int pll;

	if (ASIC_IS_DCE8(rdev)) {
		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
			if (rdev->clock.dp_extclk)
				/* skip PPLL programming if using ext clock */
				return ATOM_PPLL_INVALID;
			else {
				/* use the same PPLL for all DP monitors */
				pll = radeon_get_shared_dp_ppll(crtc);
				if (pll != ATOM_PPLL_INVALID)
					return pll;
			}
		} else {
			/* use the same PPLL for all monitors with the same clock */
			pll = radeon_get_shared_nondp_ppll(crtc);
			if (pll != ATOM_PPLL_INVALID)
				return pll;
		}
		/* otherwise, pick one of the plls */
		if ((rdev->family == CHIP_KAVERI) ||
		    (rdev->family == CHIP_KABINI) ||
		    (rdev->family == CHIP_MULLINS)) {
			/* KB/KV/ML has PPLL1 and PPLL2 */
			pll_in_use = radeon_get_pll_use_mask(crtc);
			if (!(pll_in_use & (1 << ATOM_PPLL2)))
				return ATOM_PPLL2;
			if (!(pll_in_use & (1 << ATOM_PPLL1)))
				return ATOM_PPLL1;
			DRM_ERROR("unable to allocate a PPLL\n");
			return ATOM_PPLL_INVALID;
		} else {
			/* CI has PPLL0, PPLL1, and PPLL2 */
			pll_in_use = radeon_get_pll_use_mask(crtc);
			if (!(pll_in_use & (1 << ATOM_PPLL2)))
				return ATOM_PPLL2;
			if (!(pll_in_use & (1 << ATOM_PPLL1)))
				return ATOM_PPLL1;
			if (!(pll_in_use & (1 << ATOM_PPLL0)))
				return ATOM_PPLL0;
			DRM_ERROR("unable to allocate a PPLL\n");
			return ATOM_PPLL_INVALID;
		}
	} else if (ASIC_IS_DCE61(rdev)) {
		struct radeon_encoder_atom_dig *dig =
			radeon_encoder->enc_priv;

		if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
		    (dig->linkb == false))
			/* UNIPHY A uses PPLL2 */
			return ATOM_PPLL2;
		else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
			/* UNIPHY B/C/D/E/F */
			if (rdev->clock.dp_extclk)
				/* skip PPLL programming if using ext clock */
				return ATOM_PPLL_INVALID;
			else {
				/* use the same PPLL for all DP monitors */
				pll = radeon_get_shared_dp_ppll(crtc);
				if (pll != ATOM_PPLL_INVALID)
					return pll;
			}
		} else {
			/* use the same PPLL for all monitors with the same clock */
			pll = radeon_get_shared_nondp_ppll(crtc);
			if (pll != ATOM_PPLL_INVALID)
				return pll;
		}
		/* UNIPHY B/C/D/E/F */
		pll_in_use = radeon_get_pll_use_mask(crtc);
		if (!(pll_in_use & (1 << ATOM_PPLL0)))
			return ATOM_PPLL0;
		if (!(pll_in_use & (1 << ATOM_PPLL1)))
			return ATOM_PPLL1;
		DRM_ERROR("unable to allocate a PPLL\n");
		return ATOM_PPLL_INVALID;
	} else if (ASIC_IS_DCE41(rdev)) {
		/* Don't share PLLs on DCE4.1 chips */
		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
			if (rdev->clock.dp_extclk)
				/* skip PPLL programming if using ext clock */
				return ATOM_PPLL_INVALID;
		}
		pll_in_use = radeon_get_pll_use_mask(crtc);
		if (!(pll_in_use & (1 << ATOM_PPLL1)))
			return ATOM_PPLL1;
		if (!(pll_in_use & (1 << ATOM_PPLL2)))
			return ATOM_PPLL2;
		DRM_ERROR("unable to allocate a PPLL\n");
		return ATOM_PPLL_INVALID;
	} else if (ASIC_IS_DCE4(rdev)) {
		/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
		 * depending on the asic:
		 * DCE4: PPLL or ext clock
		 * DCE5: PPLL, DCPLL, or ext clock
		 * DCE6: PPLL, PPLL0, or ext clock
		 *
		 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
		 * PPLL/DCPLL programming and only program the DP DTO for the
		 * crtc virtual pixel clock.
		 */
		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
			if (rdev->clock.dp_extclk)
				/* skip PPLL programming if using ext clock */
				return ATOM_PPLL_INVALID;
			else if (ASIC_IS_DCE6(rdev))
				/* use PPLL0 for all DP */
				return ATOM_PPLL0;
			else if (ASIC_IS_DCE5(rdev))
				/* use DCPLL for all DP */
				return ATOM_DCPLL;
			else {
				/* use the same PPLL for all DP monitors */
				pll = radeon_get_shared_dp_ppll(crtc);
				if (pll != ATOM_PPLL_INVALID)
					return pll;
			}
		} else {
			/* use the same PPLL for all monitors with the same clock */
			pll = radeon_get_shared_nondp_ppll(crtc);
			if (pll != ATOM_PPLL_INVALID)
				return pll;
		}
		/* all other cases */
		pll_in_use = radeon_get_pll_use_mask(crtc);
		if (!(pll_in_use & (1 << ATOM_PPLL1)))
			return ATOM_PPLL1;
		if (!(pll_in_use & (1 << ATOM_PPLL2)))
			return ATOM_PPLL2;
		DRM_ERROR("unable to allocate a PPLL\n");
		return ATOM_PPLL_INVALID;
	} else {
		/* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
		/* some atombios code (observed on some DCE2/DCE3 boards) has a
		 * bug: the matching between pll and crtc is done through
		 * PCLK_CRTC[1|2]_CNTL (0x480/0x484), but the atombios code uses
		 * the pll id (1 or 2) to select which register to write, i.e.
		 * if using pll1 it writes PCLK_CRTC1_CNTL (0x480) and if using
		 * pll2 it writes PCLK_CRTC2_CNTL (0x484); it then uses the crtc
		 * id to choose which value to write. That is the reverse of the
		 * register logic, so the only cases that work are when the pll
		 * id matches the crtc id, or when both plls and crtcs are
		 * enabled and driven by the same clock.
		 *
		 * So just return the crtc id, as if crtc and pll were hard
		 * linked together, even if they aren't.
		 */
		return radeon_crtc->crtc_id;
	}
}

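The fallback paths above all share one small idiom: radeon_get_pll_use_mask() returns a bitmask of the PPLLs other crtcs currently hold, and the first clear bit in preference order wins, with ATOM_PPLL_INVALID telling SetPixelClock to leave the PLLs alone. A minimal standalone sketch of that allocation pattern (the enum values and preference order below are illustrative stand-ins, not the driver's real ATOM definitions):

#include <stdio.h>

/* illustrative ids only; the real values live in the ATOM headers */
enum { PPLL0 = 0, PPLL1 = 1, PPLL2 = 2, PPLL_INVALID = 0xff };

/* pick the first PLL from 'candidates' whose bit is clear in 'in_use' */
static int pick_free_pll(unsigned int in_use, const int *candidates, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!(in_use & (1u << candidates[i])))
			return candidates[i];
	return PPLL_INVALID;	/* caller then skips PLL programming */
}

int main(void)
{
	/* pretend another crtc already holds PPLL2 */
	unsigned int in_use = 1u << PPLL2;
	/* preference order used by the CI branch above: PPLL2, PPLL1, PPLL0 */
	const int order[] = { PPLL2, PPLL1, PPLL0 };

	printf("picked PPLL%d\n", pick_free_pll(in_use, order, 3));
	return 0;
}

Keeping the per-generation knowledge in the candidate list is what lets one loop stand in for the KB/KV, CI, and DCE6.1 branches above.
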
void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
{
	/* always set DCPLL */
	if (ASIC_IS_DCE6(rdev))
		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
	else if (ASIC_IS_DCE4(rdev)) {
		struct radeon_atom_ss ss;
		bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
								   ASIC_INTERNAL_SS_ON_DCPLL,
								   rdev->clock.default_dispclk);
		if (ss_enabled)
			atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
		/* XXX: DCE5, make sure voltage, dispclk is high enough */
		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
		if (ss_enabled)
			atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
	}

}

int atombios_crtc_mode_set(struct drm_crtc *crtc,
			   struct drm_display_mode *mode,
			   struct drm_display_mode *adjusted_mode,
			   int x, int y, struct drm_framebuffer *old_fb)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder =
		to_radeon_encoder(radeon_crtc->encoder);
	bool is_tvcv = false;

	if (radeon_encoder->active_device &
	    (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
		is_tvcv = true;

	if (!radeon_crtc->adjusted_clock)
		return -EINVAL;

	atombios_crtc_set_pll(crtc, adjusted_mode);

	if (ASIC_IS_DCE4(rdev))
		atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
	else if (ASIC_IS_AVIVO(rdev)) {
		if (is_tvcv)
			atombios_crtc_set_timing(crtc, adjusted_mode);
		else
			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
	} else {
		atombios_crtc_set_timing(crtc, adjusted_mode);
		if (radeon_crtc->crtc_id == 0)
			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
		radeon_legacy_atom_fixup(crtc);
	}
	atombios_crtc_set_base(crtc, x, y, old_fb);
	atombios_overscan_setup(crtc, mode, adjusted_mode);
	atombios_scaler_setup(crtc);
	/* update the hw mode for dpm */
	radeon_crtc->hw_mode = *adjusted_mode;

	return 0;
}

static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the radeon crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			radeon_crtc->encoder = encoder;
			radeon_crtc->connector = radeon_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((radeon_crtc->encoder == NULL) || (radeon_crtc->connector == NULL)) {
		radeon_crtc->encoder = NULL;
		radeon_crtc->connector = NULL;
		return false;
	}
	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;
	/* pick pll */
	radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail */
	if ((radeon_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder)))
		return false;

	return true;
}

static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;

	/* disable crtc pair power gating before programming */
	if (ASIC_IS_DCE6(rdev))
		atombios_powergate_crtc(crtc, ATOM_DISABLE);

	atombios_lock_crtc(crtc, ATOM_ENABLE);
	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void atombios_crtc_commit(struct drm_crtc *crtc)
{
	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	atombios_lock_crtc(crtc, ATOM_DISABLE);
}

static void atombios_crtc_disable(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_atom_ss ss;
	int i;

	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	if (crtc->primary->fb) {
		int r;
		struct radeon_framebuffer *radeon_fb;
		struct radeon_bo *rbo;

		radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
		rbo = gem_to_radeon_bo(radeon_fb->obj);
		r = radeon_bo_reserve(rbo, false);
		if (unlikely(r))
			DRM_ERROR("failed to reserve rbo before unpin\n");
		else {
			radeon_bo_unpin(rbo);
			radeon_bo_unreserve(rbo);
		}
	}
	/* disable the GRPH */
	if (ASIC_IS_DCE4(rdev))
		WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
	else if (ASIC_IS_AVIVO(rdev))
		WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 0);

	if (ASIC_IS_DCE6(rdev))
		atombios_powergate_crtc(crtc, ATOM_ENABLE);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i] &&
		    rdev->mode_info.crtcs[i]->enabled &&
		    i != radeon_crtc->crtc_id &&
		    radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
			/* another crtc is still using this pll; don't turn
			 * it off
			 */
			goto done;
		}
	}

	switch (radeon_crtc->pll_id) {
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	case ATOM_PPLL0:
		/* disable the ppll */
		if ((rdev->family == CHIP_ARUBA) ||
		    (rdev->family == CHIP_BONAIRE) ||
		    (rdev->family == CHIP_HAWAII))
			atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	radeon_crtc->pll_id = ATOM_PPLL_INVALID;
	radeon_crtc->adjusted_clock = 0;
	radeon_crtc->encoder = NULL;
	radeon_crtc->connector = NULL;
}

static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
	.dpms = atombios_crtc_dpms,
	.mode_fixup = atombios_crtc_mode_fixup,
	.mode_set = atombios_crtc_mode_set,
	.mode_set_base = atombios_crtc_set_base,
	.mode_set_base_atomic = atombios_crtc_set_base_atomic,
	.prepare = atombios_crtc_prepare,
	.commit = atombios_crtc_commit,
	.load_lut = radeon_crtc_load_lut,
	.disable = atombios_crtc_disable,
};

void radeon_atombios_init_crtc(struct drm_device *dev,
			       struct radeon_crtc *radeon_crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (ASIC_IS_DCE4(rdev)) {
		switch (radeon_crtc->crtc_id) {
		case 0:
		default:
			radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
			break;
		case 1:
			radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
			break;
		case 2:
			radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
			break;
		case 3:
			radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
			break;
		case 4:
			radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
			break;
		case 5:
			radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
			break;
		}
	} else {
		if (radeon_crtc->crtc_id == 1)
			radeon_crtc->crtc_offset =
				AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
		else
			radeon_crtc->crtc_offset = 0;
	}
	radeon_crtc->pll_id = ATOM_PPLL_INVALID;
	radeon_crtc->adjusted_clock = 0;
	radeon_crtc->encoder = NULL;
	radeon_crtc->connector = NULL;
	drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}
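
Note how little per-crtc state the init above records: chiefly crtc_offset, the distance of this crtc's register block from crtc 0's, which routines such as atombios_crtc_disable() then add to a base register, as in WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0). A standalone sketch of that banked-register idiom, with made-up addresses and a plain array standing in for the real mmio mapping:

#include <stdint.h>
#include <stdio.h>

/* illustrative register layout: each crtc block is 0x100 bytes apart */
#define CRTC0_BASE	0x0000
#define CRTC_BLOCK_SIZE	0x0100
#define GRPH_ENABLE	0x0010	/* offset of one register within a block */

static uint32_t mmio[0x1000 / 4];	/* stand-in for the real BAR mapping */

static void wreg32(uint32_t reg, uint32_t v)
{
	mmio[reg / 4] = v;
}

int main(void)
{
	int crtc_id = 2;
	/* same computation the driver does once at init time */
	uint32_t crtc_offset = CRTC0_BASE + crtc_id * CRTC_BLOCK_SIZE;

	/* one routine serves any crtc: crtc 0's register plus the offset */
	wreg32(GRPH_ENABLE + crtc_offset, 0);
	printf("wrote GRPH_ENABLE for crtc %d at 0x%04x\n",
	       crtc_id, (unsigned int)(GRPH_ENABLE + crtc_offset));
	return 0;
}
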
drivers/gpu/drm/radeon/radeon_asic.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/console.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"

/*
 * Register accessor functions.
 */
/**
 * radeon_invalid_rreg - dummy reg read function
 *
 * @rdev: radeon device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG_ON(1);
	return 0;
}

/**
 * radeon_invalid_wreg - dummy reg write function
 *
 * @rdev: radeon device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG_ON(1);
}

/**
 * radeon_register_accessor_init - sets up the register accessor callbacks
 *
 * @rdev: radeon device pointer
 *
 * Sets up the register accessor callbacks for various register
 * apertures. Not all asics have all apertures (all asics).
 */
static void radeon_register_accessor_init(struct radeon_device *rdev)
{
	rdev->mc_rreg = &radeon_invalid_rreg;
	rdev->mc_wreg = &radeon_invalid_wreg;
	rdev->pll_rreg = &radeon_invalid_rreg;
	rdev->pll_wreg = &radeon_invalid_wreg;
	rdev->pciep_rreg = &radeon_invalid_rreg;
	rdev->pciep_wreg = &radeon_invalid_wreg;

	/* Don't change the order, as we are overriding the accessors. */
	if (rdev->family < CHIP_RV515) {
		rdev->pcie_reg_mask = 0xff;
	} else {
		rdev->pcie_reg_mask = 0x7ff;
	}
	/* FIXME: not sure here */
	if (rdev->family <= CHIP_R580) {
		rdev->pll_rreg = &r100_pll_rreg;
		rdev->pll_wreg = &r100_pll_wreg;
	}
	if (rdev->family >= CHIP_R420) {
		rdev->mc_rreg = &r420_mc_rreg;
		rdev->mc_wreg = &r420_mc_wreg;
	}
	if (rdev->family >= CHIP_RV515) {
		rdev->mc_rreg = &rv515_mc_rreg;
		rdev->mc_wreg = &rv515_mc_wreg;
	}
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
		rdev->mc_rreg = &rs400_mc_rreg;
		rdev->mc_wreg = &rs400_mc_wreg;
	}
	if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		rdev->mc_rreg = &rs690_mc_rreg;
		rdev->mc_wreg = &rs690_mc_wreg;
	}
	if (rdev->family == CHIP_RS600) {
		rdev->mc_rreg = &rs600_mc_rreg;
		rdev->mc_wreg = &rs600_mc_wreg;
	}
	if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
		rdev->mc_rreg = &rs780_mc_rreg;
		rdev->mc_wreg = &rs780_mc_wreg;
	}

	if (rdev->family >= CHIP_BONAIRE) {
		rdev->pciep_rreg = &cik_pciep_rreg;
		rdev->pciep_wreg = &cik_pciep_wreg;
	} else if (rdev->family >= CHIP_R600) {
		rdev->pciep_rreg = &r600_pciep_rreg;
		rdev->pciep_wreg = &r600_pciep_wreg;
	}
}


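The setup above is a "pessimistic default, then override" scheme: every aperture starts out pointing at a stub that BUGs, and the family checks run from generic to specific so the last match wins, which is why the ordering comment matters. A self-contained sketch of the idiom under invented types and family ids:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* illustrative family ordering, mirroring the rdev->family comparisons */
enum chip_family { CHIP_A = 1, CHIP_B = 2, CHIP_C = 3 };

struct device_ctx {
	enum chip_family family;
	uint32_t (*mc_rreg)(struct device_ctx *dev, uint32_t reg);
};

/* default stub: using an aperture the asic lacks is a driver bug */
static uint32_t invalid_rreg(struct device_ctx *dev, uint32_t reg)
{
	fprintf(stderr, "invalid read of reg 0x%04x\n", reg);
	abort();
}

static uint32_t chip_b_rreg(struct device_ctx *dev, uint32_t reg)
{
	return 0xb0 + reg;	/* pretend hardware access */
}

static uint32_t chip_c_rreg(struct device_ctx *dev, uint32_t reg)
{
	return 0xc0 + reg;
}

static void accessor_init(struct device_ctx *dev)
{
	dev->mc_rreg = &invalid_rreg;	/* pessimistic default */
	/* generic-to-specific order: the last matching branch wins */
	if (dev->family >= CHIP_B)
		dev->mc_rreg = &chip_b_rreg;
	if (dev->family >= CHIP_C)
		dev->mc_rreg = &chip_c_rreg;
}

int main(void)
{
	struct device_ctx dev = { .family = CHIP_C };

	accessor_init(&dev);
	printf("read: 0x%02x\n", dev.mc_rreg(&dev, 4));
	return 0;
}
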
/* helper to disable agp */
/**
 * radeon_agp_disable - AGP disable helper function
 *
 * @rdev: radeon device pointer
 *
 * Removes AGP flags and changes the gart callbacks on AGP
 * cards when using the internal gart rather than AGP (all asics).
 */
void radeon_agp_disable(struct radeon_device *rdev)
{
	rdev->flags &= ~RADEON_IS_AGP;
	if (rdev->family >= CHIP_R600) {
		DRM_INFO("Forcing AGP to PCIE mode\n");
		rdev->flags |= RADEON_IS_PCIE;
	} else if (rdev->family >= CHIP_RV515 ||
		   rdev->family == CHIP_RV380 ||
		   rdev->family == CHIP_RV410 ||
		   rdev->family == CHIP_R423) {
		DRM_INFO("Forcing AGP to PCIE mode\n");
		rdev->flags |= RADEON_IS_PCIE;
		rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
		rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
	} else {
		DRM_INFO("Forcing AGP to PCI mode\n");
		rdev->flags |= RADEON_IS_PCI;
		rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
		rdev->asic->gart.set_page = &r100_pci_gart_set_page;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}

/*
 * ASIC
 */

static struct radeon_asic_ring r100_gfx_ring = {
	.ib_execute = &r100_ring_ib_execute,
	.emit_fence = &r100_fence_ring_emit,
	.emit_semaphore = &r100_semaphore_ring_emit,
	.cs_parse = &r100_cs_parse,
	.ring_start = &r100_ring_start,
	.ring_test = &r100_ring_test,
	.ib_test = &r100_ib_test,
	.is_lockup = &r100_gpu_is_lockup,
	.get_rptr = &r100_gfx_get_rptr,
	.get_wptr = &r100_gfx_get_wptr,
	.set_wptr = &r100_gfx_set_wptr,
};

static struct radeon_asic r100_asic = {
	.init = &r100_init,
	.fini = &r100_fini,
	.suspend = &r100_suspend,
	.resume = &r100_resume,
	.vga_set_state = &r100_vga_set_state,
	.asic_reset = &r100_asic_reset,
	.ioctl_wait_idle = NULL,
	.gui_idle = &r100_gui_idle,
	.mc_wait_for_idle = &r100_mc_wait_for_idle,
	.gart = {
		.tlb_flush = &r100_pci_gart_tlb_flush,
		.set_page = &r100_pci_gart_set_page,
	},
	.ring = {
		[RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
	},
	.irq = {
		.set = &r100_irq_set,
		.process = &r100_irq_process,
	},
	.display = {
		.bandwidth_update = &r100_bandwidth_update,
		.get_vblank_counter = &r100_get_vblank_counter,
		.wait_for_vblank = &r100_wait_for_vblank,
		.set_backlight_level = &radeon_legacy_set_backlight_level,
		.get_backlight_level = &radeon_legacy_get_backlight_level,
	},
	.copy = {
		.blit = &r100_copy_blit,
		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
		.dma = NULL,
		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
		.copy = &r100_copy_blit,
		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
	},
	.surface = {
		.set_reg = r100_set_surface_reg,
		.clear_reg = r100_clear_surface_reg,
	},
	.hpd = {
		.init = &r100_hpd_init,
		.fini = &r100_hpd_fini,
		.sense = &r100_hpd_sense,
		.set_polarity = &r100_hpd_set_polarity,
	},
	.pm = {
		.misc = &r100_pm_misc,
		.prepare = &r100_pm_prepare,
		.finish = &r100_pm_finish,
		.init_profile = &r100_pm_init_profile,
		.get_dynpm_state = &r100_pm_get_dynpm_state,
		.get_engine_clock = &radeon_legacy_get_engine_clock,
		.set_engine_clock = &radeon_legacy_set_engine_clock,
		.get_memory_clock = &radeon_legacy_get_memory_clock,
		.set_memory_clock = NULL,
		.get_pcie_lanes = NULL,
		.set_pcie_lanes = NULL,
		.set_clock_gating = &radeon_legacy_set_clock_gating,
	},
	.pflip = {
		.pre_page_flip = &r100_pre_page_flip,
		.page_flip = &r100_page_flip,
		.post_page_flip = &r100_post_page_flip,
	},
};

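These per-family tables are hand-rolled vtables: one radeon_asic instance per chip generation, selected once at probe time, after which the core never re-tests the family and simply calls through the pointers (radeon.h wraps the common entry points in macros of roughly the form (rdev)->asic->asic_reset((rdev))). A minimal sketch of the dispatch pattern, with invented struct and function names:

#include <stdio.h>

struct gpu;				/* forward-declared device handle */

struct gpu_asic {
	int (*init)(struct gpu *g);
	int (*asic_reset)(struct gpu *g);
};

struct gpu {
	const char *name;
	const struct gpu_asic *asic;	/* picked once, at probe time */
};

static int r100_like_init(struct gpu *g)
{
	printf("%s: r100-style init\n", g->name);
	return 0;
}

static int r100_like_reset(struct gpu *g)
{
	printf("%s: r100-style reset\n", g->name);
	return 0;
}

static const struct gpu_asic r100_like_asic = {
	.init = r100_like_init,
	.asic_reset = r100_like_reset,
};

/* the core never tests the family again; it just calls through the table */
#define gpu_asic_reset(g) ((g)->asic->asic_reset((g)))

int main(void)
{
	struct gpu g = { .name = "demo", .asic = &r100_like_asic };

	g.asic->init(&g);
	gpu_asic_reset(&g);
	return 0;
}
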
257 static struct radeon_asic r200_asic = { 257 static struct radeon_asic r200_asic = {
258 .init = &r100_init, 258 .init = &r100_init,
259 .fini = &r100_fini, 259 .fini = &r100_fini,
260 .suspend = &r100_suspend, 260 .suspend = &r100_suspend,
261 .resume = &r100_resume, 261 .resume = &r100_resume,
262 .vga_set_state = &r100_vga_set_state, 262 .vga_set_state = &r100_vga_set_state,
263 .asic_reset = &r100_asic_reset, 263 .asic_reset = &r100_asic_reset,
264 .ioctl_wait_idle = NULL, 264 .ioctl_wait_idle = NULL,
265 .gui_idle = &r100_gui_idle, 265 .gui_idle = &r100_gui_idle,
266 .mc_wait_for_idle = &r100_mc_wait_for_idle, 266 .mc_wait_for_idle = &r100_mc_wait_for_idle,
267 .gart = { 267 .gart = {
268 .tlb_flush = &r100_pci_gart_tlb_flush, 268 .tlb_flush = &r100_pci_gart_tlb_flush,
269 .set_page = &r100_pci_gart_set_page, 269 .set_page = &r100_pci_gart_set_page,
270 }, 270 },
271 .ring = { 271 .ring = {
272 [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring 272 [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
273 }, 273 },
274 .irq = { 274 .irq = {
275 .set = &r100_irq_set, 275 .set = &r100_irq_set,
276 .process = &r100_irq_process, 276 .process = &r100_irq_process,
277 }, 277 },
278 .display = { 278 .display = {
279 .bandwidth_update = &r100_bandwidth_update, 279 .bandwidth_update = &r100_bandwidth_update,
280 .get_vblank_counter = &r100_get_vblank_counter, 280 .get_vblank_counter = &r100_get_vblank_counter,
281 .wait_for_vblank = &r100_wait_for_vblank, 281 .wait_for_vblank = &r100_wait_for_vblank,
282 .set_backlight_level = &radeon_legacy_set_backlight_level, 282 .set_backlight_level = &radeon_legacy_set_backlight_level,
283 .get_backlight_level = &radeon_legacy_get_backlight_level, 283 .get_backlight_level = &radeon_legacy_get_backlight_level,
284 }, 284 },
285 .copy = { 285 .copy = {
286 .blit = &r100_copy_blit, 286 .blit = &r100_copy_blit,
287 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 287 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
288 .dma = &r200_copy_dma, 288 .dma = &r200_copy_dma,
289 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 289 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
290 .copy = &r100_copy_blit, 290 .copy = &r100_copy_blit,
291 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 291 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
292 }, 292 },
293 .surface = { 293 .surface = {
294 .set_reg = r100_set_surface_reg, 294 .set_reg = r100_set_surface_reg,
295 .clear_reg = r100_clear_surface_reg, 295 .clear_reg = r100_clear_surface_reg,
296 }, 296 },
297 .hpd = { 297 .hpd = {
298 .init = &r100_hpd_init, 298 .init = &r100_hpd_init,
299 .fini = &r100_hpd_fini, 299 .fini = &r100_hpd_fini,
300 .sense = &r100_hpd_sense, 300 .sense = &r100_hpd_sense,
301 .set_polarity = &r100_hpd_set_polarity, 301 .set_polarity = &r100_hpd_set_polarity,
302 }, 302 },
303 .pm = { 303 .pm = {
304 .misc = &r100_pm_misc, 304 .misc = &r100_pm_misc,
305 .prepare = &r100_pm_prepare, 305 .prepare = &r100_pm_prepare,
306 .finish = &r100_pm_finish, 306 .finish = &r100_pm_finish,
307 .init_profile = &r100_pm_init_profile, 307 .init_profile = &r100_pm_init_profile,
308 .get_dynpm_state = &r100_pm_get_dynpm_state, 308 .get_dynpm_state = &r100_pm_get_dynpm_state,
309 .get_engine_clock = &radeon_legacy_get_engine_clock, 309 .get_engine_clock = &radeon_legacy_get_engine_clock,
310 .set_engine_clock = &radeon_legacy_set_engine_clock, 310 .set_engine_clock = &radeon_legacy_set_engine_clock,
311 .get_memory_clock = &radeon_legacy_get_memory_clock, 311 .get_memory_clock = &radeon_legacy_get_memory_clock,
312 .set_memory_clock = NULL, 312 .set_memory_clock = NULL,
313 .get_pcie_lanes = NULL, 313 .get_pcie_lanes = NULL,
314 .set_pcie_lanes = NULL, 314 .set_pcie_lanes = NULL,
315 .set_clock_gating = &radeon_legacy_set_clock_gating, 315 .set_clock_gating = &radeon_legacy_set_clock_gating,
316 }, 316 },
317 .pflip = { 317 .pflip = {
318 .pre_page_flip = &r100_pre_page_flip, 318 .pre_page_flip = &r100_pre_page_flip,
319 .page_flip = &r100_page_flip, 319 .page_flip = &r100_page_flip,
320 .post_page_flip = &r100_post_page_flip, 320 .post_page_flip = &r100_post_page_flip,
321 }, 321 },
322 }; 322 };
323 323
324 static struct radeon_asic_ring r300_gfx_ring = { 324 static struct radeon_asic_ring r300_gfx_ring = {
325 .ib_execute = &r100_ring_ib_execute, 325 .ib_execute = &r100_ring_ib_execute,
326 .emit_fence = &r300_fence_ring_emit, 326 .emit_fence = &r300_fence_ring_emit,
327 .emit_semaphore = &r100_semaphore_ring_emit, 327 .emit_semaphore = &r100_semaphore_ring_emit,
328 .cs_parse = &r300_cs_parse, 328 .cs_parse = &r300_cs_parse,
329 .ring_start = &r300_ring_start, 329 .ring_start = &r300_ring_start,
330 .ring_test = &r100_ring_test, 330 .ring_test = &r100_ring_test,
331 .ib_test = &r100_ib_test, 331 .ib_test = &r100_ib_test,
332 .is_lockup = &r100_gpu_is_lockup, 332 .is_lockup = &r100_gpu_is_lockup,
333 .get_rptr = &r100_gfx_get_rptr, 333 .get_rptr = &r100_gfx_get_rptr,
334 .get_wptr = &r100_gfx_get_wptr, 334 .get_wptr = &r100_gfx_get_wptr,
335 .set_wptr = &r100_gfx_set_wptr, 335 .set_wptr = &r100_gfx_set_wptr,
336 }; 336 };
337 337
338 static struct radeon_asic r300_asic = { 338 static struct radeon_asic r300_asic = {
339 .init = &r300_init, 339 .init = &r300_init,
340 .fini = &r300_fini, 340 .fini = &r300_fini,
341 .suspend = &r300_suspend, 341 .suspend = &r300_suspend,
342 .resume = &r300_resume, 342 .resume = &r300_resume,
343 .vga_set_state = &r100_vga_set_state, 343 .vga_set_state = &r100_vga_set_state,
344 .asic_reset = &r300_asic_reset, 344 .asic_reset = &r300_asic_reset,
345 .ioctl_wait_idle = NULL, 345 .ioctl_wait_idle = NULL,
346 .gui_idle = &r100_gui_idle, 346 .gui_idle = &r100_gui_idle,
347 .mc_wait_for_idle = &r300_mc_wait_for_idle, 347 .mc_wait_for_idle = &r300_mc_wait_for_idle,
348 .gart = { 348 .gart = {
349 .tlb_flush = &r100_pci_gart_tlb_flush, 349 .tlb_flush = &r100_pci_gart_tlb_flush,
350 .set_page = &r100_pci_gart_set_page, 350 .set_page = &r100_pci_gart_set_page,
351 }, 351 },
352 .ring = { 352 .ring = {
353 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring 353 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
354 }, 354 },
355 .irq = { 355 .irq = {
356 .set = &r100_irq_set, 356 .set = &r100_irq_set,
357 .process = &r100_irq_process, 357 .process = &r100_irq_process,
358 }, 358 },
359 .display = { 359 .display = {
360 .bandwidth_update = &r100_bandwidth_update, 360 .bandwidth_update = &r100_bandwidth_update,
361 .get_vblank_counter = &r100_get_vblank_counter, 361 .get_vblank_counter = &r100_get_vblank_counter,
362 .wait_for_vblank = &r100_wait_for_vblank, 362 .wait_for_vblank = &r100_wait_for_vblank,
363 .set_backlight_level = &radeon_legacy_set_backlight_level, 363 .set_backlight_level = &radeon_legacy_set_backlight_level,
364 .get_backlight_level = &radeon_legacy_get_backlight_level, 364 .get_backlight_level = &radeon_legacy_get_backlight_level,
365 }, 365 },
366 .copy = { 366 .copy = {
367 .blit = &r100_copy_blit, 367 .blit = &r100_copy_blit,
368 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 368 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
369 .dma = &r200_copy_dma, 369 .dma = &r200_copy_dma,
370 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 370 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
371 .copy = &r100_copy_blit, 371 .copy = &r100_copy_blit,
372 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 372 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
373 }, 373 },
374 .surface = { 374 .surface = {
375 .set_reg = r100_set_surface_reg, 375 .set_reg = r100_set_surface_reg,
376 .clear_reg = r100_clear_surface_reg, 376 .clear_reg = r100_clear_surface_reg,
377 }, 377 },
378 .hpd = { 378 .hpd = {
379 .init = &r100_hpd_init, 379 .init = &r100_hpd_init,
380 .fini = &r100_hpd_fini, 380 .fini = &r100_hpd_fini,
381 .sense = &r100_hpd_sense, 381 .sense = &r100_hpd_sense,
382 .set_polarity = &r100_hpd_set_polarity, 382 .set_polarity = &r100_hpd_set_polarity,
383 }, 383 },
384 .pm = { 384 .pm = {
385 .misc = &r100_pm_misc, 385 .misc = &r100_pm_misc,
386 .prepare = &r100_pm_prepare, 386 .prepare = &r100_pm_prepare,
387 .finish = &r100_pm_finish, 387 .finish = &r100_pm_finish,
388 .init_profile = &r100_pm_init_profile, 388 .init_profile = &r100_pm_init_profile,
389 .get_dynpm_state = &r100_pm_get_dynpm_state, 389 .get_dynpm_state = &r100_pm_get_dynpm_state,
390 .get_engine_clock = &radeon_legacy_get_engine_clock, 390 .get_engine_clock = &radeon_legacy_get_engine_clock,
391 .set_engine_clock = &radeon_legacy_set_engine_clock, 391 .set_engine_clock = &radeon_legacy_set_engine_clock,
392 .get_memory_clock = &radeon_legacy_get_memory_clock, 392 .get_memory_clock = &radeon_legacy_get_memory_clock,
393 .set_memory_clock = NULL, 393 .set_memory_clock = NULL,
394 .get_pcie_lanes = &rv370_get_pcie_lanes, 394 .get_pcie_lanes = &rv370_get_pcie_lanes,
395 .set_pcie_lanes = &rv370_set_pcie_lanes, 395 .set_pcie_lanes = &rv370_set_pcie_lanes,
396 .set_clock_gating = &radeon_legacy_set_clock_gating, 396 .set_clock_gating = &radeon_legacy_set_clock_gating,
397 }, 397 },
398 .pflip = { 398 .pflip = {
399 .pre_page_flip = &r100_pre_page_flip, 399 .pre_page_flip = &r100_pre_page_flip,
400 .page_flip = &r100_page_flip, 400 .page_flip = &r100_page_flip,
401 .post_page_flip = &r100_post_page_flip, 401 .post_page_flip = &r100_post_page_flip,
402 }, 402 },
403 }; 403 };
404 404
405 static struct radeon_asic r300_asic_pcie = { 405 static struct radeon_asic r300_asic_pcie = {
406 .init = &r300_init, 406 .init = &r300_init,
407 .fini = &r300_fini, 407 .fini = &r300_fini,
408 .suspend = &r300_suspend, 408 .suspend = &r300_suspend,
409 .resume = &r300_resume, 409 .resume = &r300_resume,
410 .vga_set_state = &r100_vga_set_state, 410 .vga_set_state = &r100_vga_set_state,
411 .asic_reset = &r300_asic_reset, 411 .asic_reset = &r300_asic_reset,
412 .ioctl_wait_idle = NULL, 412 .ioctl_wait_idle = NULL,
413 .gui_idle = &r100_gui_idle, 413 .gui_idle = &r100_gui_idle,
414 .mc_wait_for_idle = &r300_mc_wait_for_idle, 414 .mc_wait_for_idle = &r300_mc_wait_for_idle,
415 .gart = { 415 .gart = {
416 .tlb_flush = &rv370_pcie_gart_tlb_flush, 416 .tlb_flush = &rv370_pcie_gart_tlb_flush,
417 .set_page = &rv370_pcie_gart_set_page, 417 .set_page = &rv370_pcie_gart_set_page,
418 }, 418 },
419 .ring = { 419 .ring = {
420 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring 420 [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
421 }, 421 },
422 .irq = { 422 .irq = {
423 .set = &r100_irq_set, 423 .set = &r100_irq_set,
424 .process = &r100_irq_process, 424 .process = &r100_irq_process,
425 }, 425 },
426 .display = { 426 .display = {
427 .bandwidth_update = &r100_bandwidth_update, 427 .bandwidth_update = &r100_bandwidth_update,
428 .get_vblank_counter = &r100_get_vblank_counter, 428 .get_vblank_counter = &r100_get_vblank_counter,
429 .wait_for_vblank = &r100_wait_for_vblank, 429 .wait_for_vblank = &r100_wait_for_vblank,
430 .set_backlight_level = &radeon_legacy_set_backlight_level, 430 .set_backlight_level = &radeon_legacy_set_backlight_level,
431 .get_backlight_level = &radeon_legacy_get_backlight_level, 431 .get_backlight_level = &radeon_legacy_get_backlight_level,
432 }, 432 },
433 .copy = { 433 .copy = {
434 .blit = &r100_copy_blit, 434 .blit = &r100_copy_blit,
435 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 435 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
436 .dma = &r200_copy_dma, 436 .dma = &r200_copy_dma,
437 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 437 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
438 .copy = &r100_copy_blit, 438 .copy = &r100_copy_blit,
439 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 439 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
440 }, 440 },
441 .surface = { 441 .surface = {
442 .set_reg = r100_set_surface_reg, 442 .set_reg = r100_set_surface_reg,
443 .clear_reg = r100_clear_surface_reg, 443 .clear_reg = r100_clear_surface_reg,
444 }, 444 },
445 .hpd = { 445 .hpd = {
446 .init = &r100_hpd_init, 446 .init = &r100_hpd_init,
447 .fini = &r100_hpd_fini, 447 .fini = &r100_hpd_fini,
448 .sense = &r100_hpd_sense, 448 .sense = &r100_hpd_sense,
449 .set_polarity = &r100_hpd_set_polarity, 449 .set_polarity = &r100_hpd_set_polarity,
450 }, 450 },
451 .pm = { 451 .pm = {
452 .misc = &r100_pm_misc, 452 .misc = &r100_pm_misc,
453 .prepare = &r100_pm_prepare, 453 .prepare = &r100_pm_prepare,
454 .finish = &r100_pm_finish, 454 .finish = &r100_pm_finish,
455 .init_profile = &r100_pm_init_profile, 455 .init_profile = &r100_pm_init_profile,
456 .get_dynpm_state = &r100_pm_get_dynpm_state, 456 .get_dynpm_state = &r100_pm_get_dynpm_state,
457 .get_engine_clock = &radeon_legacy_get_engine_clock, 457 .get_engine_clock = &radeon_legacy_get_engine_clock,
458 .set_engine_clock = &radeon_legacy_set_engine_clock, 458 .set_engine_clock = &radeon_legacy_set_engine_clock,
459 .get_memory_clock = &radeon_legacy_get_memory_clock, 459 .get_memory_clock = &radeon_legacy_get_memory_clock,
460 .set_memory_clock = NULL, 460 .set_memory_clock = NULL,
461 .get_pcie_lanes = &rv370_get_pcie_lanes, 461 .get_pcie_lanes = &rv370_get_pcie_lanes,
462 .set_pcie_lanes = &rv370_set_pcie_lanes, 462 .set_pcie_lanes = &rv370_set_pcie_lanes,
463 .set_clock_gating = &radeon_legacy_set_clock_gating, 463 .set_clock_gating = &radeon_legacy_set_clock_gating,
464 }, 464 },
465 .pflip = { 465 .pflip = {
466 .pre_page_flip = &r100_pre_page_flip, 466 .pre_page_flip = &r100_pre_page_flip,
467 .page_flip = &r100_page_flip, 467 .page_flip = &r100_page_flip,
468 .post_page_flip = &r100_post_page_flip, 468 .post_page_flip = &r100_post_page_flip,
469 }, 469 },
470 }; 470 };

static struct radeon_asic r420_asic = {
        .init = &r420_init,
        .fini = &r420_fini,
        .suspend = &r420_suspend,
        .resume = &r420_resume,
        .vga_set_state = &r100_vga_set_state,
        .asic_reset = &r300_asic_reset,
        .ioctl_wait_idle = NULL,
        .gui_idle = &r100_gui_idle,
        .mc_wait_for_idle = &r300_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
        },
        .irq = {
                .set = &r100_irq_set,
                .process = &r100_irq_process,
        },
        .display = {
                .bandwidth_update = &r100_bandwidth_update,
                .get_vblank_counter = &r100_get_vblank_counter,
                .wait_for_vblank = &r100_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
        },
        .copy = {
                .blit = &r100_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .dma = &r200_copy_dma,
                .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .copy = &r100_copy_blit,
                .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
        },
        .surface = {
                .set_reg = r100_set_surface_reg,
                .clear_reg = r100_clear_surface_reg,
        },
        .hpd = {
                .init = &r100_hpd_init,
                .fini = &r100_hpd_fini,
                .sense = &r100_hpd_sense,
                .set_polarity = &r100_hpd_set_polarity,
        },
        .pm = {
                .misc = &r100_pm_misc,
                .prepare = &r100_pm_prepare,
                .finish = &r100_pm_finish,
                .init_profile = &r420_pm_init_profile,
                .get_dynpm_state = &r100_pm_get_dynpm_state,
                .get_engine_clock = &radeon_atom_get_engine_clock,
                .set_engine_clock = &radeon_atom_set_engine_clock,
                .get_memory_clock = &radeon_atom_get_memory_clock,
                .set_memory_clock = &radeon_atom_set_memory_clock,
                .get_pcie_lanes = &rv370_get_pcie_lanes,
                .set_pcie_lanes = &rv370_set_pcie_lanes,
                .set_clock_gating = &radeon_atom_set_clock_gating,
        },
        .pflip = {
                .pre_page_flip = &r100_pre_page_flip,
                .page_flip = &r100_page_flip,
                .post_page_flip = &r100_post_page_flip,
        },
};

static struct radeon_asic rs400_asic = {
        .init = &rs400_init,
        .fini = &rs400_fini,
        .suspend = &rs400_suspend,
        .resume = &rs400_resume,
        .vga_set_state = &r100_vga_set_state,
        .asic_reset = &r300_asic_reset,
        .ioctl_wait_idle = NULL,
        .gui_idle = &r100_gui_idle,
        .mc_wait_for_idle = &rs400_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs400_gart_tlb_flush,
                .set_page = &rs400_gart_set_page,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
        },
        .irq = {
                .set = &r100_irq_set,
                .process = &r100_irq_process,
        },
        .display = {
                .bandwidth_update = &r100_bandwidth_update,
                .get_vblank_counter = &r100_get_vblank_counter,
                .wait_for_vblank = &r100_wait_for_vblank,
                .set_backlight_level = &radeon_legacy_set_backlight_level,
                .get_backlight_level = &radeon_legacy_get_backlight_level,
        },
        .copy = {
                .blit = &r100_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .dma = &r200_copy_dma,
                .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .copy = &r100_copy_blit,
                .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
        },
        .surface = {
                .set_reg = r100_set_surface_reg,
                .clear_reg = r100_clear_surface_reg,
        },
        .hpd = {
                .init = &r100_hpd_init,
                .fini = &r100_hpd_fini,
                .sense = &r100_hpd_sense,
                .set_polarity = &r100_hpd_set_polarity,
        },
        .pm = {
                .misc = &r100_pm_misc,
                .prepare = &r100_pm_prepare,
                .finish = &r100_pm_finish,
                .init_profile = &r100_pm_init_profile,
                .get_dynpm_state = &r100_pm_get_dynpm_state,
                .get_engine_clock = &radeon_legacy_get_engine_clock,
                .set_engine_clock = &radeon_legacy_set_engine_clock,
                .get_memory_clock = &radeon_legacy_get_memory_clock,
                .set_memory_clock = NULL,
                .get_pcie_lanes = NULL,
                .set_pcie_lanes = NULL,
                .set_clock_gating = &radeon_legacy_set_clock_gating,
        },
        .pflip = {
                .pre_page_flip = &r100_pre_page_flip,
                .page_flip = &r100_page_flip,
                .post_page_flip = &r100_post_page_flip,
        },
};
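
rs400 above leaves .set_memory_clock, .get_pcie_lanes and .set_pcie_lanes as NULL: an integrated part with no dedicated memory clock or PCIe lane control has no implementation to plug in. A NULL entry is the table's way of saying "unsupported", and callers test the pointer before dispatching. A hedged sketch of that convention, with illustrative names rather than the driver's real wrappers:

/* Sketch of the optional-callback convention: NULL marks an operation
 * the ASIC does not support. Illustrative names only. */
struct demo_pm_ops {
        void (*set_memory_clock)(unsigned int khz);
};

static void demo_set_memory_clock(const struct demo_pm_ops *pm, unsigned int khz)
{
        if (pm->set_memory_clock)       /* quietly a no-op when unsupported */
                pm->set_memory_clock(khz);
}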

static struct radeon_asic rs600_asic = {
        .init = &rs600_init,
        .fini = &rs600_fini,
        .suspend = &rs600_suspend,
        .resume = &rs600_resume,
        .vga_set_state = &r100_vga_set_state,
        .asic_reset = &rs600_asic_reset,
        .ioctl_wait_idle = NULL,
        .gui_idle = &r100_gui_idle,
        .mc_wait_for_idle = &rs600_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs600_gart_tlb_flush,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
        },
        .irq = {
                .set = &rs600_irq_set,
                .process = &rs600_irq_process,
        },
        .display = {
                .bandwidth_update = &rs600_bandwidth_update,
                .get_vblank_counter = &rs600_get_vblank_counter,
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
                .hdmi_enable = &r600_hdmi_enable,
                .hdmi_setmode = &r600_hdmi_setmode,
        },
        .copy = {
                .blit = &r100_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .dma = &r200_copy_dma,
                .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .copy = &r100_copy_blit,
                .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
        },
        .surface = {
                .set_reg = r100_set_surface_reg,
                .clear_reg = r100_clear_surface_reg,
        },
        .hpd = {
                .init = &rs600_hpd_init,
                .fini = &rs600_hpd_fini,
                .sense = &rs600_hpd_sense,
                .set_polarity = &rs600_hpd_set_polarity,
        },
        .pm = {
                .misc = &rs600_pm_misc,
                .prepare = &rs600_pm_prepare,
                .finish = &rs600_pm_finish,
                .init_profile = &r420_pm_init_profile,
                .get_dynpm_state = &r100_pm_get_dynpm_state,
                .get_engine_clock = &radeon_atom_get_engine_clock,
                .set_engine_clock = &radeon_atom_set_engine_clock,
                .get_memory_clock = &radeon_atom_get_memory_clock,
                .set_memory_clock = &radeon_atom_set_memory_clock,
                .get_pcie_lanes = NULL,
                .set_pcie_lanes = NULL,
                .set_clock_gating = &radeon_atom_set_clock_gating,
        },
        .pflip = {
                .pre_page_flip = &rs600_pre_page_flip,
                .page_flip = &rs600_page_flip,
                .post_page_flip = &rs600_post_page_flip,
        },
};

static struct radeon_asic rs690_asic = {
        .init = &rs690_init,
        .fini = &rs690_fini,
        .suspend = &rs690_suspend,
        .resume = &rs690_resume,
        .vga_set_state = &r100_vga_set_state,
        .asic_reset = &rs600_asic_reset,
        .ioctl_wait_idle = NULL,
        .gui_idle = &r100_gui_idle,
        .mc_wait_for_idle = &rs690_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs400_gart_tlb_flush,
                .set_page = &rs400_gart_set_page,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
        },
        .irq = {
                .set = &rs600_irq_set,
                .process = &rs600_irq_process,
        },
        .display = {
                .get_vblank_counter = &rs600_get_vblank_counter,
                .bandwidth_update = &rs690_bandwidth_update,
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
                .hdmi_enable = &r600_hdmi_enable,
                .hdmi_setmode = &r600_hdmi_setmode,
        },
        .copy = {
                .blit = &r100_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .dma = &r200_copy_dma,
                .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .copy = &r200_copy_dma,
                .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
        },
        .surface = {
                .set_reg = r100_set_surface_reg,
                .clear_reg = r100_clear_surface_reg,
        },
        .hpd = {
                .init = &rs600_hpd_init,
                .fini = &rs600_hpd_fini,
                .sense = &rs600_hpd_sense,
                .set_polarity = &rs600_hpd_set_polarity,
        },
        .pm = {
                .misc = &rs600_pm_misc,
                .prepare = &rs600_pm_prepare,
                .finish = &rs600_pm_finish,
                .init_profile = &r420_pm_init_profile,
                .get_dynpm_state = &r100_pm_get_dynpm_state,
                .get_engine_clock = &radeon_atom_get_engine_clock,
                .set_engine_clock = &radeon_atom_set_engine_clock,
                .get_memory_clock = &radeon_atom_get_memory_clock,
                .set_memory_clock = &radeon_atom_set_memory_clock,
                .get_pcie_lanes = NULL,
                .set_pcie_lanes = NULL,
                .set_clock_gating = &radeon_atom_set_clock_gating,
        },
        .pflip = {
                .pre_page_flip = &rs600_pre_page_flip,
                .page_flip = &rs600_page_flip,
                .post_page_flip = &rs600_post_page_flip,
        },
};
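
Unlike the families above, rs690 routes the generic .copy entry to the DMA path (&r200_copy_dma) rather than the blitter. Note how every copy method travels with the ring index it runs on, which is why .blit/.blit_ring_index, .dma/.dma_ring_index and .copy/.copy_ring_index always appear as pairs. A sketch of why the pairing matters, under illustrative names:

/* Sketch: a copy method is only meaningful together with the ring that
 * executes it, so the table keeps the function and ring index paired.
 * Illustrative names only. */
enum demo_ring_index { DEMO_GFX_RING, DEMO_DMA_RING };

struct demo_copy_ops {
        int (*copy)(unsigned long src, unsigned long dst, unsigned int pages);
        enum demo_ring_index copy_ring_index;   /* ring the copy is scheduled on */
};

static int demo_buffer_copy(const struct demo_copy_ops *ops,
                            unsigned long src, unsigned long dst,
                            unsigned int pages)
{
        /* A real driver would lock the ring named by copy_ring_index and
         * wait on its fence; here we only dispatch. */
        return ops->copy(src, dst, pages);
}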

static struct radeon_asic rv515_asic = {
        .init = &rv515_init,
        .fini = &rv515_fini,
        .suspend = &rv515_suspend,
        .resume = &rv515_resume,
        .vga_set_state = &r100_vga_set_state,
        .asic_reset = &rs600_asic_reset,
        .ioctl_wait_idle = NULL,
        .gui_idle = &r100_gui_idle,
        .mc_wait_for_idle = &rv515_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
        },
        .irq = {
                .set = &rs600_irq_set,
                .process = &rs600_irq_process,
        },
        .display = {
                .get_vblank_counter = &rs600_get_vblank_counter,
                .bandwidth_update = &rv515_bandwidth_update,
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
        },
        .copy = {
                .blit = &r100_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .dma = &r200_copy_dma,
                .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .copy = &r100_copy_blit,
                .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
        },
        .surface = {
                .set_reg = r100_set_surface_reg,
                .clear_reg = r100_clear_surface_reg,
        },
        .hpd = {
                .init = &rs600_hpd_init,
                .fini = &rs600_hpd_fini,
                .sense = &rs600_hpd_sense,
                .set_polarity = &rs600_hpd_set_polarity,
        },
        .pm = {
                .misc = &rs600_pm_misc,
                .prepare = &rs600_pm_prepare,
                .finish = &rs600_pm_finish,
                .init_profile = &r420_pm_init_profile,
                .get_dynpm_state = &r100_pm_get_dynpm_state,
                .get_engine_clock = &radeon_atom_get_engine_clock,
                .set_engine_clock = &radeon_atom_set_engine_clock,
                .get_memory_clock = &radeon_atom_get_memory_clock,
                .set_memory_clock = &radeon_atom_set_memory_clock,
                .get_pcie_lanes = &rv370_get_pcie_lanes,
                .set_pcie_lanes = &rv370_set_pcie_lanes,
                .set_clock_gating = &radeon_atom_set_clock_gating,
        },
        .pflip = {
                .pre_page_flip = &rs600_pre_page_flip,
                .page_flip = &rs600_page_flip,
                .post_page_flip = &rs600_post_page_flip,
        },
};

static struct radeon_asic r520_asic = {
        .init = &r520_init,
        .fini = &rv515_fini,
        .suspend = &rv515_suspend,
        .resume = &r520_resume,
        .vga_set_state = &r100_vga_set_state,
        .asic_reset = &rs600_asic_reset,
        .ioctl_wait_idle = NULL,
        .gui_idle = &r100_gui_idle,
        .mc_wait_for_idle = &r520_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
        },
        .irq = {
                .set = &rs600_irq_set,
                .process = &rs600_irq_process,
        },
        .display = {
                .bandwidth_update = &rv515_bandwidth_update,
                .get_vblank_counter = &rs600_get_vblank_counter,
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
        },
        .copy = {
                .blit = &r100_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .dma = &r200_copy_dma,
                .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .copy = &r100_copy_blit,
                .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
        },
        .surface = {
                .set_reg = r100_set_surface_reg,
                .clear_reg = r100_clear_surface_reg,
        },
        .hpd = {
                .init = &rs600_hpd_init,
                .fini = &rs600_hpd_fini,
                .sense = &rs600_hpd_sense,
                .set_polarity = &rs600_hpd_set_polarity,
        },
        .pm = {
                .misc = &rs600_pm_misc,
                .prepare = &rs600_pm_prepare,
                .finish = &rs600_pm_finish,
                .init_profile = &r420_pm_init_profile,
                .get_dynpm_state = &r100_pm_get_dynpm_state,
                .get_engine_clock = &radeon_atom_get_engine_clock,
                .set_engine_clock = &radeon_atom_set_engine_clock,
                .get_memory_clock = &radeon_atom_get_memory_clock,
                .set_memory_clock = &radeon_atom_set_memory_clock,
                .get_pcie_lanes = &rv370_get_pcie_lanes,
                .set_pcie_lanes = &rv370_set_pcie_lanes,
                .set_clock_gating = &radeon_atom_set_clock_gating,
        },
        .pflip = {
                .pre_page_flip = &rs600_pre_page_flip,
                .page_flip = &rs600_page_flip,
                .post_page_flip = &rs600_post_page_flip,
        },
};

static struct radeon_asic_ring r600_gfx_ring = {
        .ib_execute = &r600_ring_ib_execute,
        .emit_fence = &r600_fence_ring_emit,
        .emit_semaphore = &r600_semaphore_ring_emit,
        .cs_parse = &r600_cs_parse,
        .ring_test = &r600_ring_test,
        .ib_test = &r600_ib_test,
        .is_lockup = &r600_gfx_is_lockup,
        .get_rptr = &r600_gfx_get_rptr,
        .get_wptr = &r600_gfx_get_wptr,
        .set_wptr = &r600_gfx_set_wptr,
};

static struct radeon_asic_ring r600_dma_ring = {
        .ib_execute = &r600_dma_ring_ib_execute,
        .emit_fence = &r600_dma_fence_ring_emit,
        .emit_semaphore = &r600_dma_semaphore_ring_emit,
        .cs_parse = &r600_dma_cs_parse,
        .ring_test = &r600_dma_ring_test,
        .ib_test = &r600_dma_ib_test,
        .is_lockup = &r600_dma_is_lockup,
        .get_rptr = &r600_dma_get_rptr,
        .get_wptr = &r600_dma_get_wptr,
        .set_wptr = &r600_dma_set_wptr,
};
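
From r600 onwards, per-ring behaviour is factored out into struct radeon_asic_ring, and the ASIC table's .ring array is filled in with designated array initializers keyed by ring-type constants (RADEON_RING_TYPE_GFX_INDEX, R600_RING_TYPE_DMA_INDEX, and so on). Lookups then go by index, and rings a chip lacks are simply left NULL. A compact sketch under those assumptions, with stand-in constant and type names:

/* Sketch of the indexed ring-ops table; constants and names are
 * illustrative stand-ins for the driver's ring-type indices. */
#define DEMO_GFX_RING  0
#define DEMO_DMA_RING  1
#define DEMO_NUM_RINGS 2

struct demo_ring_ops {
        int (*ring_test)(void);
};

struct demo_asic_rings {
        const struct demo_ring_ops *ring[DEMO_NUM_RINGS];
};

static int demo_test_ring(const struct demo_asic_rings *asic, int idx)
{
        /* Rings a chip does not implement stay NULL in the table. */
        if (!asic->ring[idx] || !asic->ring[idx]->ring_test)
                return -1;
        return asic->ring[idx]->ring_test();
}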

static struct radeon_asic r600_asic = {
        .init = &r600_init,
        .fini = &r600_fini,
        .suspend = &r600_suspend,
        .resume = &r600_resume,
        .vga_set_state = &r600_vga_set_state,
        .asic_reset = &r600_asic_reset,
        .ioctl_wait_idle = r600_ioctl_wait_idle,
        .gui_idle = &r600_gui_idle,
        .mc_wait_for_idle = &r600_mc_wait_for_idle,
        .get_xclk = &r600_get_xclk,
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
                [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
        },
        .irq = {
                .set = &r600_irq_set,
                .process = &r600_irq_process,
        },
        .display = {
                .bandwidth_update = &rv515_bandwidth_update,
                .get_vblank_counter = &rs600_get_vblank_counter,
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
                .hdmi_enable = &r600_hdmi_enable,
                .hdmi_setmode = &r600_hdmi_setmode,
        },
        .copy = {
                .blit = &r600_copy_cpdma,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .dma = &r600_copy_dma,
                .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
                .copy = &r600_copy_cpdma,
                .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
                .clear_reg = r600_clear_surface_reg,
        },
        .hpd = {
                .init = &r600_hpd_init,
                .fini = &r600_hpd_fini,
                .sense = &r600_hpd_sense,
                .set_polarity = &r600_hpd_set_polarity,
        },
        .pm = {
                .misc = &r600_pm_misc,
                .prepare = &rs600_pm_prepare,
                .finish = &rs600_pm_finish,
                .init_profile = &r600_pm_init_profile,
                .get_dynpm_state = &r600_pm_get_dynpm_state,
                .get_engine_clock = &radeon_atom_get_engine_clock,
                .set_engine_clock = &radeon_atom_set_engine_clock,
                .get_memory_clock = &radeon_atom_get_memory_clock,
                .set_memory_clock = &radeon_atom_set_memory_clock,
                .get_pcie_lanes = &r600_get_pcie_lanes,
                .set_pcie_lanes = &r600_set_pcie_lanes,
                .set_clock_gating = NULL,
                .get_temperature = &rv6xx_get_temp,
        },
        .pflip = {
                .pre_page_flip = &rs600_pre_page_flip,
                .page_flip = &rs600_page_flip,
                .post_page_flip = &rs600_post_page_flip,
        },
};

static struct radeon_asic rv6xx_asic = {
        .init = &r600_init,
        .fini = &r600_fini,
        .suspend = &r600_suspend,
        .resume = &r600_resume,
        .vga_set_state = &r600_vga_set_state,
        .asic_reset = &r600_asic_reset,
        .ioctl_wait_idle = r600_ioctl_wait_idle,
        .gui_idle = &r600_gui_idle,
        .mc_wait_for_idle = &r600_mc_wait_for_idle,
        .get_xclk = &r600_get_xclk,
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
                [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
        },
        .irq = {
                .set = &r600_irq_set,
                .process = &r600_irq_process,
        },
        .display = {
                .bandwidth_update = &rv515_bandwidth_update,
                .get_vblank_counter = &rs600_get_vblank_counter,
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
                .hdmi_enable = &r600_hdmi_enable,
                .hdmi_setmode = &r600_hdmi_setmode,
        },
        .copy = {
                .blit = &r600_copy_cpdma,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .dma = &r600_copy_dma,
                .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
                .copy = &r600_copy_cpdma,
                .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
                .clear_reg = r600_clear_surface_reg,
        },
        .hpd = {
                .init = &r600_hpd_init,
                .fini = &r600_hpd_fini,
                .sense = &r600_hpd_sense,
                .set_polarity = &r600_hpd_set_polarity,
        },
        .pm = {
                .misc = &r600_pm_misc,
                .prepare = &rs600_pm_prepare,
                .finish = &rs600_pm_finish,
                .init_profile = &r600_pm_init_profile,
                .get_dynpm_state = &r600_pm_get_dynpm_state,
                .get_engine_clock = &radeon_atom_get_engine_clock,
                .set_engine_clock = &radeon_atom_set_engine_clock,
                .get_memory_clock = &radeon_atom_get_memory_clock,
                .set_memory_clock = &radeon_atom_set_memory_clock,
                .get_pcie_lanes = &r600_get_pcie_lanes,
                .set_pcie_lanes = &r600_set_pcie_lanes,
                .set_clock_gating = NULL,
                .get_temperature = &rv6xx_get_temp,
                .set_uvd_clocks = &r600_set_uvd_clocks,
        },
        .dpm = {
                .init = &rv6xx_dpm_init,
                .setup_asic = &rv6xx_setup_asic,
                .enable = &rv6xx_dpm_enable,
                .late_enable = &r600_dpm_late_enable,
                .disable = &rv6xx_dpm_disable,
                .pre_set_power_state = &r600_dpm_pre_set_power_state,
                .set_power_state = &rv6xx_dpm_set_power_state,
                .post_set_power_state = &r600_dpm_post_set_power_state,
                .display_configuration_changed = &rv6xx_dpm_display_configuration_changed,
                .fini = &rv6xx_dpm_fini,
                .get_sclk = &rv6xx_dpm_get_sclk,
                .get_mclk = &rv6xx_dpm_get_mclk,
                .print_power_state = &rv6xx_dpm_print_power_state,
                .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
                .force_performance_level = &rv6xx_dpm_force_performance_level,
        },
        .pflip = {
                .pre_page_flip = &rs600_pre_page_flip,
                .page_flip = &rs600_page_flip,
                .post_page_flip = &rs600_post_page_flip,
        },
};
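
rv6xx is the first table here to carry a nested .dpm group next to the older .pm callbacks: .pm serves the legacy profile-based power code, while .dpm wires up the newer dynamic power-management state machine (init/enable/set_power_state/fini and friends). Nested designated initializers keep each group self-labelling, as in this reduced sketch with illustrative names:

/* Sketch: nested designated initializers group related callbacks,
 * mirroring the .pm/.dpm split above. Illustrative names only. */
struct demo_dpm_ops {
        int  (*enable)(void);
        void (*disable)(void);
};

struct demo_asic_pm {
        struct demo_dpm_ops dpm;
};

static int demo_dpm_enable(void) { return 0; }
static void demo_dpm_disable(void) { }

static const struct demo_asic_pm demo_rv6xx_like = {
        .dpm = {
                .enable  = &demo_dpm_enable,
                .disable = &demo_dpm_disable,
        },
};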

static struct radeon_asic rs780_asic = {
        .init = &r600_init,
        .fini = &r600_fini,
        .suspend = &r600_suspend,
        .resume = &r600_resume,
        .vga_set_state = &r600_vga_set_state,
        .asic_reset = &r600_asic_reset,
        .ioctl_wait_idle = r600_ioctl_wait_idle,
        .gui_idle = &r600_gui_idle,
        .mc_wait_for_idle = &r600_mc_wait_for_idle,
        .get_xclk = &r600_get_xclk,
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
                [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
        },
        .irq = {
                .set = &r600_irq_set,
                .process = &r600_irq_process,
        },
        .display = {
                .bandwidth_update = &rs690_bandwidth_update,
                .get_vblank_counter = &rs600_get_vblank_counter,
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
                .hdmi_enable = &r600_hdmi_enable,
                .hdmi_setmode = &r600_hdmi_setmode,
        },
        .copy = {
                .blit = &r600_copy_cpdma,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .dma = &r600_copy_dma,
                .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
                .copy = &r600_copy_cpdma,
                .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
                .clear_reg = r600_clear_surface_reg,
        },
        .hpd = {
                .init = &r600_hpd_init,
                .fini = &r600_hpd_fini,
                .sense = &r600_hpd_sense,
                .set_polarity = &r600_hpd_set_polarity,
        },
        .pm = {
                .misc = &r600_pm_misc,
                .prepare = &rs600_pm_prepare,
                .finish = &rs600_pm_finish,
                .init_profile = &rs780_pm_init_profile,
                .get_dynpm_state = &r600_pm_get_dynpm_state,
                .get_engine_clock = &radeon_atom_get_engine_clock,
                .set_engine_clock = &radeon_atom_set_engine_clock,
                .get_memory_clock = NULL,
                .set_memory_clock = NULL,
                .get_pcie_lanes = NULL,
                .set_pcie_lanes = NULL,
                .set_clock_gating = NULL,
                .get_temperature = &rv6xx_get_temp,
                .set_uvd_clocks = &r600_set_uvd_clocks,
        },
        .dpm = {
                .init = &rs780_dpm_init,
                .setup_asic = &rs780_dpm_setup_asic,
                .enable = &rs780_dpm_enable,
                .late_enable = &r600_dpm_late_enable,
                .disable = &rs780_dpm_disable,
                .pre_set_power_state = &r600_dpm_pre_set_power_state,
                .set_power_state = &rs780_dpm_set_power_state,
                .post_set_power_state = &r600_dpm_post_set_power_state,
                .display_configuration_changed = &rs780_dpm_display_configuration_changed,
                .fini = &rs780_dpm_fini,
                .get_sclk = &rs780_dpm_get_sclk,
                .get_mclk = &rs780_dpm_get_mclk,
                .print_power_state = &rs780_dpm_print_power_state,
                .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
                .force_performance_level = &rs780_dpm_force_performance_level,
        },
        .pflip = {
                .pre_page_flip = &rs600_pre_page_flip,
                .page_flip = &rs600_page_flip,
                .post_page_flip = &rs600_post_page_flip,
        },
};

static struct radeon_asic_ring rv770_uvd_ring = {
        .ib_execute = &uvd_v1_0_ib_execute,
        .emit_fence = &uvd_v2_2_fence_emit,
        .emit_semaphore = &uvd_v1_0_semaphore_emit,
        .cs_parse = &radeon_uvd_cs_parse,
        .ring_test = &uvd_v1_0_ring_test,
        .ib_test = &uvd_v1_0_ib_test,
        .is_lockup = &radeon_ring_test_lockup,
        .get_rptr = &uvd_v1_0_get_rptr,
        .get_wptr = &uvd_v1_0_get_wptr,
        .set_wptr = &uvd_v1_0_set_wptr,
};

static struct radeon_asic rv770_asic = {
        .init = &rv770_init,
        .fini = &rv770_fini,
        .suspend = &rv770_suspend,
        .resume = &rv770_resume,
        .asic_reset = &r600_asic_reset,
        .vga_set_state = &r600_vga_set_state,
        .ioctl_wait_idle = r600_ioctl_wait_idle,
        .gui_idle = &r600_gui_idle,
        .mc_wait_for_idle = &r600_mc_wait_for_idle,
        .get_xclk = &rv770_get_xclk,
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
                [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
                [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
        },
        .irq = {
                .set = &r600_irq_set,
                .process = &r600_irq_process,
        },
        .display = {
                .bandwidth_update = &rv515_bandwidth_update,
                .get_vblank_counter = &rs600_get_vblank_counter,
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
                .hdmi_enable = &r600_hdmi_enable,
                .hdmi_setmode = &r600_hdmi_setmode,
        },
        .copy = {
                .blit = &r600_copy_cpdma,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .dma = &rv770_copy_dma,
                .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
                .copy = &rv770_copy_dma,
                .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
                .clear_reg = r600_clear_surface_reg,
        },
        .hpd = {
                .init = &r600_hpd_init,
                .fini = &r600_hpd_fini,
                .sense = &r600_hpd_sense,
                .set_polarity = &r600_hpd_set_polarity,
        },
        .pm = {
                .misc = &rv770_pm_misc,
                .prepare = &rs600_pm_prepare,
                .finish = &rs600_pm_finish,
                .init_profile = &r600_pm_init_profile,
                .get_dynpm_state = &r600_pm_get_dynpm_state,
                .get_engine_clock = &radeon_atom_get_engine_clock,
                .set_engine_clock = &radeon_atom_set_engine_clock,
                .get_memory_clock = &radeon_atom_get_memory_clock,
                .set_memory_clock = &radeon_atom_set_memory_clock,
                .get_pcie_lanes = &r600_get_pcie_lanes,
                .set_pcie_lanes = &r600_set_pcie_lanes,
                .set_clock_gating = &radeon_atom_set_clock_gating,
                .set_uvd_clocks = &rv770_set_uvd_clocks,
                .get_temperature = &rv770_get_temp,
        },
        .dpm = {
                .init = &rv770_dpm_init,
                .setup_asic = &rv770_dpm_setup_asic,
                .enable = &rv770_dpm_enable,
                .late_enable = &rv770_dpm_late_enable,
                .disable = &rv770_dpm_disable,
                .pre_set_power_state = &r600_dpm_pre_set_power_state,
                .set_power_state = &rv770_dpm_set_power_state,
                .post_set_power_state = &r600_dpm_post_set_power_state,
                .display_configuration_changed = &rv770_dpm_display_configuration_changed,
                .fini = &rv770_dpm_fini,
                .get_sclk = &rv770_dpm_get_sclk,
                .get_mclk = &rv770_dpm_get_mclk,
                .print_power_state = &rv770_dpm_print_power_state,
                .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
                .force_performance_level = &rv770_dpm_force_performance_level,
                .vblank_too_short = &rv770_dpm_vblank_too_short,
        },
        .pflip = {
                .pre_page_flip = &rs600_pre_page_flip,
                .page_flip = &rv770_page_flip,
                .post_page_flip = &rs600_post_page_flip,
        },
};

static struct radeon_asic_ring evergreen_gfx_ring = {
        .ib_execute = &evergreen_ring_ib_execute,
        .emit_fence = &r600_fence_ring_emit,
        .emit_semaphore = &r600_semaphore_ring_emit,
        .cs_parse = &evergreen_cs_parse,
        .ring_test = &r600_ring_test,
        .ib_test = &r600_ib_test,
        .is_lockup = &evergreen_gfx_is_lockup,
        .get_rptr = &r600_gfx_get_rptr,
        .get_wptr = &r600_gfx_get_wptr,
        .set_wptr = &r600_gfx_set_wptr,
};

static struct radeon_asic_ring evergreen_dma_ring = {
        .ib_execute = &evergreen_dma_ring_ib_execute,
        .emit_fence = &evergreen_dma_fence_ring_emit,
        .emit_semaphore = &r600_dma_semaphore_ring_emit,
        .cs_parse = &evergreen_dma_cs_parse,
        .ring_test = &r600_dma_ring_test,
        .ib_test = &r600_dma_ib_test,
        .is_lockup = &evergreen_dma_is_lockup,
        .get_rptr = &r600_dma_get_rptr,
        .get_wptr = &r600_dma_get_wptr,
        .set_wptr = &r600_dma_set_wptr,
};

static struct radeon_asic evergreen_asic = {
        .init = &evergreen_init,
        .fini = &evergreen_fini,
        .suspend = &evergreen_suspend,
        .resume = &evergreen_resume,
        .asic_reset = &evergreen_asic_reset,
        .vga_set_state = &r600_vga_set_state,
        .ioctl_wait_idle = r600_ioctl_wait_idle,
        .gui_idle = &r600_gui_idle,
        .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
        .get_xclk = &rv770_get_xclk,
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &evergreen_pcie_gart_tlb_flush,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
                [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
                [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
        },
        .irq = {
                .set = &evergreen_irq_set,
                .process = &evergreen_irq_process,
        },
        .display = {
                .bandwidth_update = &evergreen_bandwidth_update,
                .get_vblank_counter = &evergreen_get_vblank_counter,
                .wait_for_vblank = &dce4_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
                .hdmi_enable = &evergreen_hdmi_enable,
                .hdmi_setmode = &evergreen_hdmi_setmode,
        },
        .copy = {
                .blit = &r600_copy_cpdma,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .dma = &evergreen_copy_dma,
                .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
                .copy = &evergreen_copy_dma,
                .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
                .clear_reg = r600_clear_surface_reg,
        },
        .hpd = {
                .init = &evergreen_hpd_init,
                .fini = &evergreen_hpd_fini,
                .sense = &evergreen_hpd_sense,
                .set_polarity = &evergreen_hpd_set_polarity,
        },
        .pm = {
                .misc = &evergreen_pm_misc,
                .prepare = &evergreen_pm_prepare,
                .finish = &evergreen_pm_finish,
                .init_profile = &r600_pm_init_profile,
                .get_dynpm_state = &r600_pm_get_dynpm_state,
                .get_engine_clock = &radeon_atom_get_engine_clock,
                .set_engine_clock = &radeon_atom_set_engine_clock,
                .get_memory_clock = &radeon_atom_get_memory_clock,
                .set_memory_clock = &radeon_atom_set_memory_clock,
                .get_pcie_lanes = &r600_get_pcie_lanes,
                .set_pcie_lanes = &r600_set_pcie_lanes,
                .set_clock_gating = NULL,
                .set_uvd_clocks = &evergreen_set_uvd_clocks,
                .get_temperature = &evergreen_get_temp,
        },
        .dpm = {
                .init = &cypress_dpm_init,
                .setup_asic = &cypress_dpm_setup_asic,
                .enable = &cypress_dpm_enable,
                .late_enable = &rv770_dpm_late_enable,
                .disable = &cypress_dpm_disable,
                .pre_set_power_state = &r600_dpm_pre_set_power_state,
                .set_power_state = &cypress_dpm_set_power_state,
                .post_set_power_state = &r600_dpm_post_set_power_state,
                .display_configuration_changed = &cypress_dpm_display_configuration_changed,
                .fini = &cypress_dpm_fini,
                .get_sclk = &rv770_dpm_get_sclk,
                .get_mclk = &rv770_dpm_get_mclk,
                .print_power_state = &rv770_dpm_print_power_state,
                .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
1374 .force_performance_level = &rv770_dpm_force_performance_level, 1374 .force_performance_level = &rv770_dpm_force_performance_level,
1375 .vblank_too_short = &cypress_dpm_vblank_too_short, 1375 .vblank_too_short = &cypress_dpm_vblank_too_short,
1376 }, 1376 },
1377 .pflip = { 1377 .pflip = {
1378 .pre_page_flip = &evergreen_pre_page_flip, 1378 .pre_page_flip = &evergreen_pre_page_flip,
1379 .page_flip = &evergreen_page_flip, 1379 .page_flip = &evergreen_page_flip,
1380 .post_page_flip = &evergreen_post_page_flip, 1380 .post_page_flip = &evergreen_post_page_flip,
1381 }, 1381 },
1382 }; 1382 };
1383 1383
1384 static struct radeon_asic sumo_asic = { 1384 static struct radeon_asic sumo_asic = {
1385 .init = &evergreen_init, 1385 .init = &evergreen_init,
1386 .fini = &evergreen_fini, 1386 .fini = &evergreen_fini,
1387 .suspend = &evergreen_suspend, 1387 .suspend = &evergreen_suspend,
1388 .resume = &evergreen_resume, 1388 .resume = &evergreen_resume,
1389 .asic_reset = &evergreen_asic_reset, 1389 .asic_reset = &evergreen_asic_reset,
1390 .vga_set_state = &r600_vga_set_state, 1390 .vga_set_state = &r600_vga_set_state,
1391 .ioctl_wait_idle = r600_ioctl_wait_idle, 1391 .ioctl_wait_idle = r600_ioctl_wait_idle,
1392 .gui_idle = &r600_gui_idle, 1392 .gui_idle = &r600_gui_idle,
1393 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1393 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1394 .get_xclk = &r600_get_xclk, 1394 .get_xclk = &r600_get_xclk,
1395 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1395 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1396 .gart = { 1396 .gart = {
1397 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1397 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
1398 .set_page = &rs600_gart_set_page, 1398 .set_page = &rs600_gart_set_page,
1399 }, 1399 },
1400 .ring = { 1400 .ring = {
1401 [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring, 1401 [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
1402 [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring, 1402 [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
1403 [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring, 1403 [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
1404 }, 1404 },
1405 .irq = { 1405 .irq = {
1406 .set = &evergreen_irq_set, 1406 .set = &evergreen_irq_set,
1407 .process = &evergreen_irq_process, 1407 .process = &evergreen_irq_process,
1408 }, 1408 },
1409 .display = { 1409 .display = {
1410 .bandwidth_update = &evergreen_bandwidth_update, 1410 .bandwidth_update = &evergreen_bandwidth_update,
1411 .get_vblank_counter = &evergreen_get_vblank_counter, 1411 .get_vblank_counter = &evergreen_get_vblank_counter,
1412 .wait_for_vblank = &dce4_wait_for_vblank, 1412 .wait_for_vblank = &dce4_wait_for_vblank,
1413 .set_backlight_level = &atombios_set_backlight_level, 1413 .set_backlight_level = &atombios_set_backlight_level,
1414 .get_backlight_level = &atombios_get_backlight_level, 1414 .get_backlight_level = &atombios_get_backlight_level,
1415 .hdmi_enable = &evergreen_hdmi_enable, 1415 .hdmi_enable = &evergreen_hdmi_enable,
1416 .hdmi_setmode = &evergreen_hdmi_setmode, 1416 .hdmi_setmode = &evergreen_hdmi_setmode,
1417 }, 1417 },
1418 .copy = { 1418 .copy = {
1419 .blit = &r600_copy_cpdma, 1419 .blit = &r600_copy_cpdma,
1420 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1420 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1421 .dma = &evergreen_copy_dma, 1421 .dma = &evergreen_copy_dma,
1422 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1422 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1423 .copy = &evergreen_copy_dma, 1423 .copy = &evergreen_copy_dma,
1424 .copy_ring_index = R600_RING_TYPE_DMA_INDEX, 1424 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1425 }, 1425 },
1426 .surface = { 1426 .surface = {
1427 .set_reg = r600_set_surface_reg, 1427 .set_reg = r600_set_surface_reg,
1428 .clear_reg = r600_clear_surface_reg, 1428 .clear_reg = r600_clear_surface_reg,
1429 }, 1429 },
1430 .hpd = { 1430 .hpd = {
1431 .init = &evergreen_hpd_init, 1431 .init = &evergreen_hpd_init,
1432 .fini = &evergreen_hpd_fini, 1432 .fini = &evergreen_hpd_fini,
1433 .sense = &evergreen_hpd_sense, 1433 .sense = &evergreen_hpd_sense,
1434 .set_polarity = &evergreen_hpd_set_polarity, 1434 .set_polarity = &evergreen_hpd_set_polarity,
1435 }, 1435 },
1436 .pm = { 1436 .pm = {
1437 .misc = &evergreen_pm_misc, 1437 .misc = &evergreen_pm_misc,
1438 .prepare = &evergreen_pm_prepare, 1438 .prepare = &evergreen_pm_prepare,
1439 .finish = &evergreen_pm_finish, 1439 .finish = &evergreen_pm_finish,
1440 .init_profile = &sumo_pm_init_profile, 1440 .init_profile = &sumo_pm_init_profile,
1441 .get_dynpm_state = &r600_pm_get_dynpm_state, 1441 .get_dynpm_state = &r600_pm_get_dynpm_state,
1442 .get_engine_clock = &radeon_atom_get_engine_clock, 1442 .get_engine_clock = &radeon_atom_get_engine_clock,
1443 .set_engine_clock = &radeon_atom_set_engine_clock, 1443 .set_engine_clock = &radeon_atom_set_engine_clock,
1444 .get_memory_clock = NULL, 1444 .get_memory_clock = NULL,
1445 .set_memory_clock = NULL, 1445 .set_memory_clock = NULL,
1446 .get_pcie_lanes = NULL, 1446 .get_pcie_lanes = NULL,
1447 .set_pcie_lanes = NULL, 1447 .set_pcie_lanes = NULL,
1448 .set_clock_gating = NULL, 1448 .set_clock_gating = NULL,
1449 .set_uvd_clocks = &sumo_set_uvd_clocks, 1449 .set_uvd_clocks = &sumo_set_uvd_clocks,
1450 .get_temperature = &sumo_get_temp, 1450 .get_temperature = &sumo_get_temp,
1451 }, 1451 },
1452 .dpm = { 1452 .dpm = {
1453 .init = &sumo_dpm_init, 1453 .init = &sumo_dpm_init,
1454 .setup_asic = &sumo_dpm_setup_asic, 1454 .setup_asic = &sumo_dpm_setup_asic,
1455 .enable = &sumo_dpm_enable, 1455 .enable = &sumo_dpm_enable,
1456 .late_enable = &sumo_dpm_late_enable, 1456 .late_enable = &sumo_dpm_late_enable,
1457 .disable = &sumo_dpm_disable, 1457 .disable = &sumo_dpm_disable,
1458 .pre_set_power_state = &sumo_dpm_pre_set_power_state, 1458 .pre_set_power_state = &sumo_dpm_pre_set_power_state,
1459 .set_power_state = &sumo_dpm_set_power_state, 1459 .set_power_state = &sumo_dpm_set_power_state,
1460 .post_set_power_state = &sumo_dpm_post_set_power_state, 1460 .post_set_power_state = &sumo_dpm_post_set_power_state,
1461 .display_configuration_changed = &sumo_dpm_display_configuration_changed, 1461 .display_configuration_changed = &sumo_dpm_display_configuration_changed,
1462 .fini = &sumo_dpm_fini, 1462 .fini = &sumo_dpm_fini,
1463 .get_sclk = &sumo_dpm_get_sclk, 1463 .get_sclk = &sumo_dpm_get_sclk,
1464 .get_mclk = &sumo_dpm_get_mclk, 1464 .get_mclk = &sumo_dpm_get_mclk,
1465 .print_power_state = &sumo_dpm_print_power_state, 1465 .print_power_state = &sumo_dpm_print_power_state,
1466 .debugfs_print_current_performance_level = &sumo_dpm_debugfs_print_current_performance_level, 1466 .debugfs_print_current_performance_level = &sumo_dpm_debugfs_print_current_performance_level,
1467 .force_performance_level = &sumo_dpm_force_performance_level, 1467 .force_performance_level = &sumo_dpm_force_performance_level,
1468 }, 1468 },
1469 .pflip = { 1469 .pflip = {
1470 .pre_page_flip = &evergreen_pre_page_flip, 1470 .pre_page_flip = &evergreen_pre_page_flip,
1471 .page_flip = &evergreen_page_flip, 1471 .page_flip = &evergreen_page_flip,
1472 .post_page_flip = &evergreen_post_page_flip, 1472 .post_page_flip = &evergreen_post_page_flip,
1473 }, 1473 },
1474 }; 1474 };
1475 1475
1476 static struct radeon_asic btc_asic = { 1476 static struct radeon_asic btc_asic = {
1477 .init = &evergreen_init, 1477 .init = &evergreen_init,
1478 .fini = &evergreen_fini, 1478 .fini = &evergreen_fini,
1479 .suspend = &evergreen_suspend, 1479 .suspend = &evergreen_suspend,
1480 .resume = &evergreen_resume, 1480 .resume = &evergreen_resume,
1481 .asic_reset = &evergreen_asic_reset, 1481 .asic_reset = &evergreen_asic_reset,
1482 .vga_set_state = &r600_vga_set_state, 1482 .vga_set_state = &r600_vga_set_state,
1483 .ioctl_wait_idle = r600_ioctl_wait_idle, 1483 .ioctl_wait_idle = r600_ioctl_wait_idle,
1484 .gui_idle = &r600_gui_idle, 1484 .gui_idle = &r600_gui_idle,
1485 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1485 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1486 .get_xclk = &rv770_get_xclk, 1486 .get_xclk = &rv770_get_xclk,
1487 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1487 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1488 .gart = { 1488 .gart = {
1489 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1489 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
1490 .set_page = &rs600_gart_set_page, 1490 .set_page = &rs600_gart_set_page,
1491 }, 1491 },
1492 .ring = { 1492 .ring = {
1493 [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring, 1493 [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
1494 [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring, 1494 [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
1495 [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring, 1495 [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
1496 }, 1496 },
1497 .irq = { 1497 .irq = {
1498 .set = &evergreen_irq_set, 1498 .set = &evergreen_irq_set,
1499 .process = &evergreen_irq_process, 1499 .process = &evergreen_irq_process,
1500 }, 1500 },
1501 .display = { 1501 .display = {
1502 .bandwidth_update = &evergreen_bandwidth_update, 1502 .bandwidth_update = &evergreen_bandwidth_update,
1503 .get_vblank_counter = &evergreen_get_vblank_counter, 1503 .get_vblank_counter = &evergreen_get_vblank_counter,
1504 .wait_for_vblank = &dce4_wait_for_vblank, 1504 .wait_for_vblank = &dce4_wait_for_vblank,
1505 .set_backlight_level = &atombios_set_backlight_level, 1505 .set_backlight_level = &atombios_set_backlight_level,
1506 .get_backlight_level = &atombios_get_backlight_level, 1506 .get_backlight_level = &atombios_get_backlight_level,
1507 .hdmi_enable = &evergreen_hdmi_enable, 1507 .hdmi_enable = &evergreen_hdmi_enable,
1508 .hdmi_setmode = &evergreen_hdmi_setmode, 1508 .hdmi_setmode = &evergreen_hdmi_setmode,
1509 }, 1509 },
1510 .copy = { 1510 .copy = {
1511 .blit = &r600_copy_cpdma, 1511 .blit = &r600_copy_cpdma,
1512 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1512 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1513 .dma = &evergreen_copy_dma, 1513 .dma = &evergreen_copy_dma,
1514 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1514 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1515 .copy = &evergreen_copy_dma, 1515 .copy = &evergreen_copy_dma,
1516 .copy_ring_index = R600_RING_TYPE_DMA_INDEX, 1516 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1517 }, 1517 },
1518 .surface = { 1518 .surface = {
1519 .set_reg = r600_set_surface_reg, 1519 .set_reg = r600_set_surface_reg,
1520 .clear_reg = r600_clear_surface_reg, 1520 .clear_reg = r600_clear_surface_reg,
1521 }, 1521 },
1522 .hpd = { 1522 .hpd = {
1523 .init = &evergreen_hpd_init, 1523 .init = &evergreen_hpd_init,
1524 .fini = &evergreen_hpd_fini, 1524 .fini = &evergreen_hpd_fini,
1525 .sense = &evergreen_hpd_sense, 1525 .sense = &evergreen_hpd_sense,
1526 .set_polarity = &evergreen_hpd_set_polarity, 1526 .set_polarity = &evergreen_hpd_set_polarity,
1527 }, 1527 },
1528 .pm = { 1528 .pm = {
1529 .misc = &evergreen_pm_misc, 1529 .misc = &evergreen_pm_misc,
1530 .prepare = &evergreen_pm_prepare, 1530 .prepare = &evergreen_pm_prepare,
1531 .finish = &evergreen_pm_finish, 1531 .finish = &evergreen_pm_finish,
1532 .init_profile = &btc_pm_init_profile, 1532 .init_profile = &btc_pm_init_profile,
1533 .get_dynpm_state = &r600_pm_get_dynpm_state, 1533 .get_dynpm_state = &r600_pm_get_dynpm_state,
1534 .get_engine_clock = &radeon_atom_get_engine_clock, 1534 .get_engine_clock = &radeon_atom_get_engine_clock,
1535 .set_engine_clock = &radeon_atom_set_engine_clock, 1535 .set_engine_clock = &radeon_atom_set_engine_clock,
1536 .get_memory_clock = &radeon_atom_get_memory_clock, 1536 .get_memory_clock = &radeon_atom_get_memory_clock,
1537 .set_memory_clock = &radeon_atom_set_memory_clock, 1537 .set_memory_clock = &radeon_atom_set_memory_clock,
1538 .get_pcie_lanes = &r600_get_pcie_lanes, 1538 .get_pcie_lanes = &r600_get_pcie_lanes,
1539 .set_pcie_lanes = &r600_set_pcie_lanes, 1539 .set_pcie_lanes = &r600_set_pcie_lanes,
1540 .set_clock_gating = NULL, 1540 .set_clock_gating = NULL,
1541 .set_uvd_clocks = &evergreen_set_uvd_clocks, 1541 .set_uvd_clocks = &evergreen_set_uvd_clocks,
1542 .get_temperature = &evergreen_get_temp, 1542 .get_temperature = &evergreen_get_temp,
1543 }, 1543 },
1544 .dpm = { 1544 .dpm = {
1545 .init = &btc_dpm_init, 1545 .init = &btc_dpm_init,
1546 .setup_asic = &btc_dpm_setup_asic, 1546 .setup_asic = &btc_dpm_setup_asic,
1547 .enable = &btc_dpm_enable, 1547 .enable = &btc_dpm_enable,
1548 .late_enable = &rv770_dpm_late_enable, 1548 .late_enable = &rv770_dpm_late_enable,
1549 .disable = &btc_dpm_disable, 1549 .disable = &btc_dpm_disable,
1550 .pre_set_power_state = &btc_dpm_pre_set_power_state, 1550 .pre_set_power_state = &btc_dpm_pre_set_power_state,
1551 .set_power_state = &btc_dpm_set_power_state, 1551 .set_power_state = &btc_dpm_set_power_state,
1552 .post_set_power_state = &btc_dpm_post_set_power_state, 1552 .post_set_power_state = &btc_dpm_post_set_power_state,
1553 .display_configuration_changed = &cypress_dpm_display_configuration_changed, 1553 .display_configuration_changed = &cypress_dpm_display_configuration_changed,
1554 .fini = &btc_dpm_fini, 1554 .fini = &btc_dpm_fini,
1555 .get_sclk = &btc_dpm_get_sclk, 1555 .get_sclk = &btc_dpm_get_sclk,
1556 .get_mclk = &btc_dpm_get_mclk, 1556 .get_mclk = &btc_dpm_get_mclk,
1557 .print_power_state = &rv770_dpm_print_power_state, 1557 .print_power_state = &rv770_dpm_print_power_state,
1558 .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level, 1558 .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
1559 .force_performance_level = &rv770_dpm_force_performance_level, 1559 .force_performance_level = &rv770_dpm_force_performance_level,
1560 .vblank_too_short = &btc_dpm_vblank_too_short, 1560 .vblank_too_short = &btc_dpm_vblank_too_short,
1561 }, 1561 },
1562 .pflip = { 1562 .pflip = {
1563 .pre_page_flip = &evergreen_pre_page_flip, 1563 .pre_page_flip = &evergreen_pre_page_flip,
1564 .page_flip = &evergreen_page_flip, 1564 .page_flip = &evergreen_page_flip,
1565 .post_page_flip = &evergreen_post_page_flip, 1565 .post_page_flip = &evergreen_post_page_flip,
1566 }, 1566 },
1567 }; 1567 };
1568 1568
1569 static struct radeon_asic_ring cayman_gfx_ring = { 1569 static struct radeon_asic_ring cayman_gfx_ring = {
1570 .ib_execute = &cayman_ring_ib_execute, 1570 .ib_execute = &cayman_ring_ib_execute,
1571 .ib_parse = &evergreen_ib_parse, 1571 .ib_parse = &evergreen_ib_parse,
1572 .emit_fence = &cayman_fence_ring_emit, 1572 .emit_fence = &cayman_fence_ring_emit,
1573 .emit_semaphore = &r600_semaphore_ring_emit, 1573 .emit_semaphore = &r600_semaphore_ring_emit,
1574 .cs_parse = &evergreen_cs_parse, 1574 .cs_parse = &evergreen_cs_parse,
1575 .ring_test = &r600_ring_test, 1575 .ring_test = &r600_ring_test,
1576 .ib_test = &r600_ib_test, 1576 .ib_test = &r600_ib_test,
1577 .is_lockup = &cayman_gfx_is_lockup, 1577 .is_lockup = &cayman_gfx_is_lockup,
1578 .vm_flush = &cayman_vm_flush, 1578 .vm_flush = &cayman_vm_flush,
1579 .get_rptr = &cayman_gfx_get_rptr, 1579 .get_rptr = &cayman_gfx_get_rptr,
1580 .get_wptr = &cayman_gfx_get_wptr, 1580 .get_wptr = &cayman_gfx_get_wptr,
1581 .set_wptr = &cayman_gfx_set_wptr, 1581 .set_wptr = &cayman_gfx_set_wptr,
1582 }; 1582 };
1583 1583
1584 static struct radeon_asic_ring cayman_dma_ring = { 1584 static struct radeon_asic_ring cayman_dma_ring = {
1585 .ib_execute = &cayman_dma_ring_ib_execute, 1585 .ib_execute = &cayman_dma_ring_ib_execute,
1586 .ib_parse = &evergreen_dma_ib_parse, 1586 .ib_parse = &evergreen_dma_ib_parse,
1587 .emit_fence = &evergreen_dma_fence_ring_emit, 1587 .emit_fence = &evergreen_dma_fence_ring_emit,
1588 .emit_semaphore = &r600_dma_semaphore_ring_emit, 1588 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1589 .cs_parse = &evergreen_dma_cs_parse, 1589 .cs_parse = &evergreen_dma_cs_parse,
1590 .ring_test = &r600_dma_ring_test, 1590 .ring_test = &r600_dma_ring_test,
1591 .ib_test = &r600_dma_ib_test, 1591 .ib_test = &r600_dma_ib_test,
1592 .is_lockup = &cayman_dma_is_lockup, 1592 .is_lockup = &cayman_dma_is_lockup,
1593 .vm_flush = &cayman_dma_vm_flush, 1593 .vm_flush = &cayman_dma_vm_flush,
1594 .get_rptr = &cayman_dma_get_rptr, 1594 .get_rptr = &cayman_dma_get_rptr,
1595 .get_wptr = &cayman_dma_get_wptr, 1595 .get_wptr = &cayman_dma_get_wptr,
1596 .set_wptr = &cayman_dma_set_wptr 1596 .set_wptr = &cayman_dma_set_wptr
1597 }; 1597 };
1598 1598
1599 static struct radeon_asic_ring cayman_uvd_ring = { 1599 static struct radeon_asic_ring cayman_uvd_ring = {
1600 .ib_execute = &uvd_v1_0_ib_execute, 1600 .ib_execute = &uvd_v1_0_ib_execute,
1601 .emit_fence = &uvd_v2_2_fence_emit, 1601 .emit_fence = &uvd_v2_2_fence_emit,
1602 .emit_semaphore = &uvd_v3_1_semaphore_emit, 1602 .emit_semaphore = &uvd_v3_1_semaphore_emit,
1603 .cs_parse = &radeon_uvd_cs_parse, 1603 .cs_parse = &radeon_uvd_cs_parse,
1604 .ring_test = &uvd_v1_0_ring_test, 1604 .ring_test = &uvd_v1_0_ring_test,
1605 .ib_test = &uvd_v1_0_ib_test, 1605 .ib_test = &uvd_v1_0_ib_test,
1606 .is_lockup = &radeon_ring_test_lockup, 1606 .is_lockup = &radeon_ring_test_lockup,
1607 .get_rptr = &uvd_v1_0_get_rptr, 1607 .get_rptr = &uvd_v1_0_get_rptr,
1608 .get_wptr = &uvd_v1_0_get_wptr, 1608 .get_wptr = &uvd_v1_0_get_wptr,
1609 .set_wptr = &uvd_v1_0_set_wptr, 1609 .set_wptr = &uvd_v1_0_set_wptr,
1610 }; 1610 };
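/*
 * These ring tables are shared across generations: trinity_asic below
 * reuses cayman_gfx_ring and cayman_dma_ring unchanged, and
 * cayman_uvd_ring also backs the UVD slot of si_asic, ci_asic and
 * kv_asic.
 */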
1611 1611
1612 static struct radeon_asic cayman_asic = { 1612 static struct radeon_asic cayman_asic = {
1613 .init = &cayman_init, 1613 .init = &cayman_init,
1614 .fini = &cayman_fini, 1614 .fini = &cayman_fini,
1615 .suspend = &cayman_suspend, 1615 .suspend = &cayman_suspend,
1616 .resume = &cayman_resume, 1616 .resume = &cayman_resume,
1617 .asic_reset = &cayman_asic_reset, 1617 .asic_reset = &cayman_asic_reset,
1618 .vga_set_state = &r600_vga_set_state, 1618 .vga_set_state = &r600_vga_set_state,
1619 .ioctl_wait_idle = r600_ioctl_wait_idle, 1619 .ioctl_wait_idle = r600_ioctl_wait_idle,
1620 .gui_idle = &r600_gui_idle, 1620 .gui_idle = &r600_gui_idle,
1621 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1621 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1622 .get_xclk = &rv770_get_xclk, 1622 .get_xclk = &rv770_get_xclk,
1623 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1623 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1624 .gart = { 1624 .gart = {
1625 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1625 .tlb_flush = &cayman_pcie_gart_tlb_flush,
1626 .set_page = &rs600_gart_set_page, 1626 .set_page = &rs600_gart_set_page,
1627 }, 1627 },
1628 .vm = { 1628 .vm = {
1629 .init = &cayman_vm_init, 1629 .init = &cayman_vm_init,
1630 .fini = &cayman_vm_fini, 1630 .fini = &cayman_vm_fini,
1631 .set_page = &cayman_dma_vm_set_page, 1631 .set_page = &cayman_dma_vm_set_page,
1632 }, 1632 },
1633 .ring = { 1633 .ring = {
1634 [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring, 1634 [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
1635 [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring, 1635 [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
1636 [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring, 1636 [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
1637 [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring, 1637 [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
1638 [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring, 1638 [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
1639 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, 1639 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
1640 }, 1640 },
1641 .irq = { 1641 .irq = {
1642 .set = &evergreen_irq_set, 1642 .set = &evergreen_irq_set,
1643 .process = &evergreen_irq_process, 1643 .process = &evergreen_irq_process,
1644 }, 1644 },
1645 .display = { 1645 .display = {
1646 .bandwidth_update = &evergreen_bandwidth_update, 1646 .bandwidth_update = &evergreen_bandwidth_update,
1647 .get_vblank_counter = &evergreen_get_vblank_counter, 1647 .get_vblank_counter = &evergreen_get_vblank_counter,
1648 .wait_for_vblank = &dce4_wait_for_vblank, 1648 .wait_for_vblank = &dce4_wait_for_vblank,
1649 .set_backlight_level = &atombios_set_backlight_level, 1649 .set_backlight_level = &atombios_set_backlight_level,
1650 .get_backlight_level = &atombios_get_backlight_level, 1650 .get_backlight_level = &atombios_get_backlight_level,
1651 .hdmi_enable = &evergreen_hdmi_enable, 1651 .hdmi_enable = &evergreen_hdmi_enable,
1652 .hdmi_setmode = &evergreen_hdmi_setmode, 1652 .hdmi_setmode = &evergreen_hdmi_setmode,
1653 }, 1653 },
1654 .copy = { 1654 .copy = {
1655 .blit = &r600_copy_cpdma, 1655 .blit = &r600_copy_cpdma,
1656 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1656 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1657 .dma = &evergreen_copy_dma, 1657 .dma = &evergreen_copy_dma,
1658 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1658 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1659 .copy = &evergreen_copy_dma, 1659 .copy = &evergreen_copy_dma,
1660 .copy_ring_index = R600_RING_TYPE_DMA_INDEX, 1660 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1661 }, 1661 },
1662 .surface = { 1662 .surface = {
1663 .set_reg = r600_set_surface_reg, 1663 .set_reg = r600_set_surface_reg,
1664 .clear_reg = r600_clear_surface_reg, 1664 .clear_reg = r600_clear_surface_reg,
1665 }, 1665 },
1666 .hpd = { 1666 .hpd = {
1667 .init = &evergreen_hpd_init, 1667 .init = &evergreen_hpd_init,
1668 .fini = &evergreen_hpd_fini, 1668 .fini = &evergreen_hpd_fini,
1669 .sense = &evergreen_hpd_sense, 1669 .sense = &evergreen_hpd_sense,
1670 .set_polarity = &evergreen_hpd_set_polarity, 1670 .set_polarity = &evergreen_hpd_set_polarity,
1671 }, 1671 },
1672 .pm = { 1672 .pm = {
1673 .misc = &evergreen_pm_misc, 1673 .misc = &evergreen_pm_misc,
1674 .prepare = &evergreen_pm_prepare, 1674 .prepare = &evergreen_pm_prepare,
1675 .finish = &evergreen_pm_finish, 1675 .finish = &evergreen_pm_finish,
1676 .init_profile = &btc_pm_init_profile, 1676 .init_profile = &btc_pm_init_profile,
1677 .get_dynpm_state = &r600_pm_get_dynpm_state, 1677 .get_dynpm_state = &r600_pm_get_dynpm_state,
1678 .get_engine_clock = &radeon_atom_get_engine_clock, 1678 .get_engine_clock = &radeon_atom_get_engine_clock,
1679 .set_engine_clock = &radeon_atom_set_engine_clock, 1679 .set_engine_clock = &radeon_atom_set_engine_clock,
1680 .get_memory_clock = &radeon_atom_get_memory_clock, 1680 .get_memory_clock = &radeon_atom_get_memory_clock,
1681 .set_memory_clock = &radeon_atom_set_memory_clock, 1681 .set_memory_clock = &radeon_atom_set_memory_clock,
1682 .get_pcie_lanes = &r600_get_pcie_lanes, 1682 .get_pcie_lanes = &r600_get_pcie_lanes,
1683 .set_pcie_lanes = &r600_set_pcie_lanes, 1683 .set_pcie_lanes = &r600_set_pcie_lanes,
1684 .set_clock_gating = NULL, 1684 .set_clock_gating = NULL,
1685 .set_uvd_clocks = &evergreen_set_uvd_clocks, 1685 .set_uvd_clocks = &evergreen_set_uvd_clocks,
1686 .get_temperature = &evergreen_get_temp, 1686 .get_temperature = &evergreen_get_temp,
1687 }, 1687 },
1688 .dpm = { 1688 .dpm = {
1689 .init = &ni_dpm_init, 1689 .init = &ni_dpm_init,
1690 .setup_asic = &ni_dpm_setup_asic, 1690 .setup_asic = &ni_dpm_setup_asic,
1691 .enable = &ni_dpm_enable, 1691 .enable = &ni_dpm_enable,
1692 .late_enable = &rv770_dpm_late_enable, 1692 .late_enable = &rv770_dpm_late_enable,
1693 .disable = &ni_dpm_disable, 1693 .disable = &ni_dpm_disable,
1694 .pre_set_power_state = &ni_dpm_pre_set_power_state, 1694 .pre_set_power_state = &ni_dpm_pre_set_power_state,
1695 .set_power_state = &ni_dpm_set_power_state, 1695 .set_power_state = &ni_dpm_set_power_state,
1696 .post_set_power_state = &ni_dpm_post_set_power_state, 1696 .post_set_power_state = &ni_dpm_post_set_power_state,
1697 .display_configuration_changed = &cypress_dpm_display_configuration_changed, 1697 .display_configuration_changed = &cypress_dpm_display_configuration_changed,
1698 .fini = &ni_dpm_fini, 1698 .fini = &ni_dpm_fini,
1699 .get_sclk = &ni_dpm_get_sclk, 1699 .get_sclk = &ni_dpm_get_sclk,
1700 .get_mclk = &ni_dpm_get_mclk, 1700 .get_mclk = &ni_dpm_get_mclk,
1701 .print_power_state = &ni_dpm_print_power_state, 1701 .print_power_state = &ni_dpm_print_power_state,
1702 .debugfs_print_current_performance_level = &ni_dpm_debugfs_print_current_performance_level, 1702 .debugfs_print_current_performance_level = &ni_dpm_debugfs_print_current_performance_level,
1703 .force_performance_level = &ni_dpm_force_performance_level, 1703 .force_performance_level = &ni_dpm_force_performance_level,
1704 .vblank_too_short = &ni_dpm_vblank_too_short, 1704 .vblank_too_short = &ni_dpm_vblank_too_short,
1705 }, 1705 },
1706 .pflip = { 1706 .pflip = {
1707 .pre_page_flip = &evergreen_pre_page_flip, 1707 .pre_page_flip = &evergreen_pre_page_flip,
1708 .page_flip = &evergreen_page_flip, 1708 .page_flip = &evergreen_page_flip,
1709 .post_page_flip = &evergreen_post_page_flip, 1709 .post_page_flip = &evergreen_post_page_flip,
1710 }, 1710 },
1711 }; 1711 };
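/*
 * cayman_asic is the first of these tables to carry a .vm block and to
 * populate the extra CP1/CP2 and DMA1 ring slots; the evergreen-class
 * tables above expose only a single GFX ring, one DMA ring and UVD.
 */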
1712 1712
1713 static struct radeon_asic trinity_asic = { 1713 static struct radeon_asic trinity_asic = {
1714 .init = &cayman_init, 1714 .init = &cayman_init,
1715 .fini = &cayman_fini, 1715 .fini = &cayman_fini,
1716 .suspend = &cayman_suspend, 1716 .suspend = &cayman_suspend,
1717 .resume = &cayman_resume, 1717 .resume = &cayman_resume,
1718 .asic_reset = &cayman_asic_reset, 1718 .asic_reset = &cayman_asic_reset,
1719 .vga_set_state = &r600_vga_set_state, 1719 .vga_set_state = &r600_vga_set_state,
1720 .ioctl_wait_idle = r600_ioctl_wait_idle, 1720 .ioctl_wait_idle = r600_ioctl_wait_idle,
1721 .gui_idle = &r600_gui_idle, 1721 .gui_idle = &r600_gui_idle,
1722 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1722 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1723 .get_xclk = &r600_get_xclk, 1723 .get_xclk = &r600_get_xclk,
1724 .get_gpu_clock_counter = &r600_get_gpu_clock_counter, 1724 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1725 .gart = { 1725 .gart = {
1726 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1726 .tlb_flush = &cayman_pcie_gart_tlb_flush,
1727 .set_page = &rs600_gart_set_page, 1727 .set_page = &rs600_gart_set_page,
1728 }, 1728 },
1729 .vm = { 1729 .vm = {
1730 .init = &cayman_vm_init, 1730 .init = &cayman_vm_init,
1731 .fini = &cayman_vm_fini, 1731 .fini = &cayman_vm_fini,
1732 .set_page = &cayman_dma_vm_set_page, 1732 .set_page = &cayman_dma_vm_set_page,
1733 }, 1733 },
1734 .ring = { 1734 .ring = {
1735 [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring, 1735 [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
1736 [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring, 1736 [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
1737 [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring, 1737 [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
1738 [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring, 1738 [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
1739 [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring, 1739 [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
1740 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, 1740 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
1741 }, 1741 },
1742 .irq = { 1742 .irq = {
1743 .set = &evergreen_irq_set, 1743 .set = &evergreen_irq_set,
1744 .process = &evergreen_irq_process, 1744 .process = &evergreen_irq_process,
1745 }, 1745 },
1746 .display = { 1746 .display = {
1747 .bandwidth_update = &dce6_bandwidth_update, 1747 .bandwidth_update = &dce6_bandwidth_update,
1748 .get_vblank_counter = &evergreen_get_vblank_counter, 1748 .get_vblank_counter = &evergreen_get_vblank_counter,
1749 .wait_for_vblank = &dce4_wait_for_vblank, 1749 .wait_for_vblank = &dce4_wait_for_vblank,
1750 .set_backlight_level = &atombios_set_backlight_level, 1750 .set_backlight_level = &atombios_set_backlight_level,
1751 .get_backlight_level = &atombios_get_backlight_level, 1751 .get_backlight_level = &atombios_get_backlight_level,
1752 .hdmi_enable = &evergreen_hdmi_enable, 1752 .hdmi_enable = &evergreen_hdmi_enable,
1753 .hdmi_setmode = &evergreen_hdmi_setmode, 1753 .hdmi_setmode = &evergreen_hdmi_setmode,
1754 }, 1754 },
1755 .copy = { 1755 .copy = {
1756 .blit = &r600_copy_cpdma, 1756 .blit = &r600_copy_cpdma,
1757 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1757 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1758 .dma = &evergreen_copy_dma, 1758 .dma = &evergreen_copy_dma,
1759 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1759 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1760 .copy = &evergreen_copy_dma, 1760 .copy = &evergreen_copy_dma,
1761 .copy_ring_index = R600_RING_TYPE_DMA_INDEX, 1761 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1762 }, 1762 },
1763 .surface = { 1763 .surface = {
1764 .set_reg = r600_set_surface_reg, 1764 .set_reg = r600_set_surface_reg,
1765 .clear_reg = r600_clear_surface_reg, 1765 .clear_reg = r600_clear_surface_reg,
1766 }, 1766 },
1767 .hpd = { 1767 .hpd = {
1768 .init = &evergreen_hpd_init, 1768 .init = &evergreen_hpd_init,
1769 .fini = &evergreen_hpd_fini, 1769 .fini = &evergreen_hpd_fini,
1770 .sense = &evergreen_hpd_sense, 1770 .sense = &evergreen_hpd_sense,
1771 .set_polarity = &evergreen_hpd_set_polarity, 1771 .set_polarity = &evergreen_hpd_set_polarity,
1772 }, 1772 },
1773 .pm = { 1773 .pm = {
1774 .misc = &evergreen_pm_misc, 1774 .misc = &evergreen_pm_misc,
1775 .prepare = &evergreen_pm_prepare, 1775 .prepare = &evergreen_pm_prepare,
1776 .finish = &evergreen_pm_finish, 1776 .finish = &evergreen_pm_finish,
1777 .init_profile = &sumo_pm_init_profile, 1777 .init_profile = &sumo_pm_init_profile,
1778 .get_dynpm_state = &r600_pm_get_dynpm_state, 1778 .get_dynpm_state = &r600_pm_get_dynpm_state,
1779 .get_engine_clock = &radeon_atom_get_engine_clock, 1779 .get_engine_clock = &radeon_atom_get_engine_clock,
1780 .set_engine_clock = &radeon_atom_set_engine_clock, 1780 .set_engine_clock = &radeon_atom_set_engine_clock,
1781 .get_memory_clock = NULL, 1781 .get_memory_clock = NULL,
1782 .set_memory_clock = NULL, 1782 .set_memory_clock = NULL,
1783 .get_pcie_lanes = NULL, 1783 .get_pcie_lanes = NULL,
1784 .set_pcie_lanes = NULL, 1784 .set_pcie_lanes = NULL,
1785 .set_clock_gating = NULL, 1785 .set_clock_gating = NULL,
1786 .set_uvd_clocks = &sumo_set_uvd_clocks, 1786 .set_uvd_clocks = &sumo_set_uvd_clocks,
1787 .get_temperature = &tn_get_temp, 1787 .get_temperature = &tn_get_temp,
1788 }, 1788 },
1789 .dpm = { 1789 .dpm = {
1790 .init = &trinity_dpm_init, 1790 .init = &trinity_dpm_init,
1791 .setup_asic = &trinity_dpm_setup_asic, 1791 .setup_asic = &trinity_dpm_setup_asic,
1792 .enable = &trinity_dpm_enable, 1792 .enable = &trinity_dpm_enable,
1793 .late_enable = &trinity_dpm_late_enable, 1793 .late_enable = &trinity_dpm_late_enable,
1794 .disable = &trinity_dpm_disable, 1794 .disable = &trinity_dpm_disable,
1795 .pre_set_power_state = &trinity_dpm_pre_set_power_state, 1795 .pre_set_power_state = &trinity_dpm_pre_set_power_state,
1796 .set_power_state = &trinity_dpm_set_power_state, 1796 .set_power_state = &trinity_dpm_set_power_state,
1797 .post_set_power_state = &trinity_dpm_post_set_power_state, 1797 .post_set_power_state = &trinity_dpm_post_set_power_state,
1798 .display_configuration_changed = &trinity_dpm_display_configuration_changed, 1798 .display_configuration_changed = &trinity_dpm_display_configuration_changed,
1799 .fini = &trinity_dpm_fini, 1799 .fini = &trinity_dpm_fini,
1800 .get_sclk = &trinity_dpm_get_sclk, 1800 .get_sclk = &trinity_dpm_get_sclk,
1801 .get_mclk = &trinity_dpm_get_mclk, 1801 .get_mclk = &trinity_dpm_get_mclk,
1802 .print_power_state = &trinity_dpm_print_power_state, 1802 .print_power_state = &trinity_dpm_print_power_state,
1803 .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level, 1803 .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
1804 .force_performance_level = &trinity_dpm_force_performance_level, 1804 .force_performance_level = &trinity_dpm_force_performance_level,
1805 .enable_bapm = &trinity_dpm_enable_bapm, 1805 .enable_bapm = &trinity_dpm_enable_bapm,
1806 }, 1806 },
1807 .pflip = { 1807 .pflip = {
1808 .pre_page_flip = &evergreen_pre_page_flip, 1808 .pre_page_flip = &evergreen_pre_page_flip,
1809 .page_flip = &evergreen_page_flip, 1809 .page_flip = &evergreen_page_flip,
1810 .post_page_flip = &evergreen_post_page_flip, 1810 .post_page_flip = &evergreen_post_page_flip,
1811 }, 1811 },
1812 }; 1812 };
1813 1813
1814 static struct radeon_asic_ring si_gfx_ring = { 1814 static struct radeon_asic_ring si_gfx_ring = {
1815 .ib_execute = &si_ring_ib_execute, 1815 .ib_execute = &si_ring_ib_execute,
1816 .ib_parse = &si_ib_parse, 1816 .ib_parse = &si_ib_parse,
1817 .emit_fence = &si_fence_ring_emit, 1817 .emit_fence = &si_fence_ring_emit,
1818 .emit_semaphore = &r600_semaphore_ring_emit, 1818 .emit_semaphore = &r600_semaphore_ring_emit,
1819 .cs_parse = NULL, 1819 .cs_parse = NULL,
1820 .ring_test = &r600_ring_test, 1820 .ring_test = &r600_ring_test,
1821 .ib_test = &r600_ib_test, 1821 .ib_test = &r600_ib_test,
1822 .is_lockup = &si_gfx_is_lockup, 1822 .is_lockup = &si_gfx_is_lockup,
1823 .vm_flush = &si_vm_flush, 1823 .vm_flush = &si_vm_flush,
1824 .get_rptr = &cayman_gfx_get_rptr, 1824 .get_rptr = &cayman_gfx_get_rptr,
1825 .get_wptr = &cayman_gfx_get_wptr, 1825 .get_wptr = &cayman_gfx_get_wptr,
1826 .set_wptr = &cayman_gfx_set_wptr, 1826 .set_wptr = &cayman_gfx_set_wptr,
1827 }; 1827 };
1828 1828
1829 static struct radeon_asic_ring si_dma_ring = { 1829 static struct radeon_asic_ring si_dma_ring = {
1830 .ib_execute = &cayman_dma_ring_ib_execute, 1830 .ib_execute = &cayman_dma_ring_ib_execute,
1831 .ib_parse = &evergreen_dma_ib_parse, 1831 .ib_parse = &evergreen_dma_ib_parse,
1832 .emit_fence = &evergreen_dma_fence_ring_emit, 1832 .emit_fence = &evergreen_dma_fence_ring_emit,
1833 .emit_semaphore = &r600_dma_semaphore_ring_emit, 1833 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1834 .cs_parse = NULL, 1834 .cs_parse = NULL,
1835 .ring_test = &r600_dma_ring_test, 1835 .ring_test = &r600_dma_ring_test,
1836 .ib_test = &r600_dma_ib_test, 1836 .ib_test = &r600_dma_ib_test,
1837 .is_lockup = &si_dma_is_lockup, 1837 .is_lockup = &si_dma_is_lockup,
1838 .vm_flush = &si_dma_vm_flush, 1838 .vm_flush = &si_dma_vm_flush,
1839 .get_rptr = &cayman_dma_get_rptr, 1839 .get_rptr = &cayman_dma_get_rptr,
1840 .get_wptr = &cayman_dma_get_wptr, 1840 .get_wptr = &cayman_dma_get_wptr,
1841 .set_wptr = &cayman_dma_set_wptr, 1841 .set_wptr = &cayman_dma_set_wptr,
1842 }; 1842 };
1843 1843
1844 static struct radeon_asic si_asic = { 1844 static struct radeon_asic si_asic = {
1845 .init = &si_init, 1845 .init = &si_init,
1846 .fini = &si_fini, 1846 .fini = &si_fini,
1847 .suspend = &si_suspend, 1847 .suspend = &si_suspend,
1848 .resume = &si_resume, 1848 .resume = &si_resume,
1849 .asic_reset = &si_asic_reset, 1849 .asic_reset = &si_asic_reset,
1850 .vga_set_state = &r600_vga_set_state, 1850 .vga_set_state = &r600_vga_set_state,
1851 .ioctl_wait_idle = r600_ioctl_wait_idle, 1851 .ioctl_wait_idle = r600_ioctl_wait_idle,
1852 .gui_idle = &r600_gui_idle, 1852 .gui_idle = &r600_gui_idle,
1853 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1853 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1854 .get_xclk = &si_get_xclk, 1854 .get_xclk = &si_get_xclk,
1855 .get_gpu_clock_counter = &si_get_gpu_clock_counter, 1855 .get_gpu_clock_counter = &si_get_gpu_clock_counter,
1856 .gart = { 1856 .gart = {
1857 .tlb_flush = &si_pcie_gart_tlb_flush, 1857 .tlb_flush = &si_pcie_gart_tlb_flush,
1858 .set_page = &rs600_gart_set_page, 1858 .set_page = &rs600_gart_set_page,
1859 }, 1859 },
1860 .vm = { 1860 .vm = {
1861 .init = &si_vm_init, 1861 .init = &si_vm_init,
1862 .fini = &si_vm_fini, 1862 .fini = &si_vm_fini,
1863 .set_page = &si_dma_vm_set_page, 1863 .set_page = &si_dma_vm_set_page,
1864 }, 1864 },
1865 .ring = { 1865 .ring = {
1866 [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring, 1866 [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
1867 [CAYMAN_RING_TYPE_CP1_INDEX] = &si_gfx_ring, 1867 [CAYMAN_RING_TYPE_CP1_INDEX] = &si_gfx_ring,
1868 [CAYMAN_RING_TYPE_CP2_INDEX] = &si_gfx_ring, 1868 [CAYMAN_RING_TYPE_CP2_INDEX] = &si_gfx_ring,
1869 [R600_RING_TYPE_DMA_INDEX] = &si_dma_ring, 1869 [R600_RING_TYPE_DMA_INDEX] = &si_dma_ring,
1870 [CAYMAN_RING_TYPE_DMA1_INDEX] = &si_dma_ring, 1870 [CAYMAN_RING_TYPE_DMA1_INDEX] = &si_dma_ring,
1871 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, 1871 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
1872 }, 1872 },
1873 .irq = { 1873 .irq = {
1874 .set = &si_irq_set, 1874 .set = &si_irq_set,
1875 .process = &si_irq_process, 1875 .process = &si_irq_process,
1876 }, 1876 },
1877 .display = { 1877 .display = {
1878 .bandwidth_update = &dce6_bandwidth_update, 1878 .bandwidth_update = &dce6_bandwidth_update,
1879 .get_vblank_counter = &evergreen_get_vblank_counter, 1879 .get_vblank_counter = &evergreen_get_vblank_counter,
1880 .wait_for_vblank = &dce4_wait_for_vblank, 1880 .wait_for_vblank = &dce4_wait_for_vblank,
1881 .set_backlight_level = &atombios_set_backlight_level, 1881 .set_backlight_level = &atombios_set_backlight_level,
1882 .get_backlight_level = &atombios_get_backlight_level, 1882 .get_backlight_level = &atombios_get_backlight_level,
1883 .hdmi_enable = &evergreen_hdmi_enable, 1883 .hdmi_enable = &evergreen_hdmi_enable,
1884 .hdmi_setmode = &evergreen_hdmi_setmode, 1884 .hdmi_setmode = &evergreen_hdmi_setmode,
1885 }, 1885 },
1886 .copy = { 1886 .copy = {
1887 .blit = &r600_copy_cpdma, 1887 .blit = &r600_copy_cpdma,
1888 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1888 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1889 .dma = &si_copy_dma, 1889 .dma = &si_copy_dma,
1890 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1890 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1891 .copy = &si_copy_dma, 1891 .copy = &si_copy_dma,
1892 .copy_ring_index = R600_RING_TYPE_DMA_INDEX, 1892 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1893 }, 1893 },
1894 .surface = { 1894 .surface = {
1895 .set_reg = r600_set_surface_reg, 1895 .set_reg = r600_set_surface_reg,
1896 .clear_reg = r600_clear_surface_reg, 1896 .clear_reg = r600_clear_surface_reg,
1897 }, 1897 },
1898 .hpd = { 1898 .hpd = {
1899 .init = &evergreen_hpd_init, 1899 .init = &evergreen_hpd_init,
1900 .fini = &evergreen_hpd_fini, 1900 .fini = &evergreen_hpd_fini,
1901 .sense = &evergreen_hpd_sense, 1901 .sense = &evergreen_hpd_sense,
1902 .set_polarity = &evergreen_hpd_set_polarity, 1902 .set_polarity = &evergreen_hpd_set_polarity,
1903 }, 1903 },
1904 .pm = { 1904 .pm = {
1905 .misc = &evergreen_pm_misc, 1905 .misc = &evergreen_pm_misc,
1906 .prepare = &evergreen_pm_prepare, 1906 .prepare = &evergreen_pm_prepare,
1907 .finish = &evergreen_pm_finish, 1907 .finish = &evergreen_pm_finish,
1908 .init_profile = &sumo_pm_init_profile, 1908 .init_profile = &sumo_pm_init_profile,
1909 .get_dynpm_state = &r600_pm_get_dynpm_state, 1909 .get_dynpm_state = &r600_pm_get_dynpm_state,
1910 .get_engine_clock = &radeon_atom_get_engine_clock, 1910 .get_engine_clock = &radeon_atom_get_engine_clock,
1911 .set_engine_clock = &radeon_atom_set_engine_clock, 1911 .set_engine_clock = &radeon_atom_set_engine_clock,
1912 .get_memory_clock = &radeon_atom_get_memory_clock, 1912 .get_memory_clock = &radeon_atom_get_memory_clock,
1913 .set_memory_clock = &radeon_atom_set_memory_clock, 1913 .set_memory_clock = &radeon_atom_set_memory_clock,
1914 .get_pcie_lanes = &r600_get_pcie_lanes, 1914 .get_pcie_lanes = &r600_get_pcie_lanes,
1915 .set_pcie_lanes = &r600_set_pcie_lanes, 1915 .set_pcie_lanes = &r600_set_pcie_lanes,
1916 .set_clock_gating = NULL, 1916 .set_clock_gating = NULL,
1917 .set_uvd_clocks = &si_set_uvd_clocks, 1917 .set_uvd_clocks = &si_set_uvd_clocks,
1918 .get_temperature = &si_get_temp, 1918 .get_temperature = &si_get_temp,
1919 }, 1919 },
1920 .dpm = { 1920 .dpm = {
1921 .init = &si_dpm_init, 1921 .init = &si_dpm_init,
1922 .setup_asic = &si_dpm_setup_asic, 1922 .setup_asic = &si_dpm_setup_asic,
1923 .enable = &si_dpm_enable, 1923 .enable = &si_dpm_enable,
1924 .late_enable = &si_dpm_late_enable, 1924 .late_enable = &si_dpm_late_enable,
1925 .disable = &si_dpm_disable, 1925 .disable = &si_dpm_disable,
1926 .pre_set_power_state = &si_dpm_pre_set_power_state, 1926 .pre_set_power_state = &si_dpm_pre_set_power_state,
1927 .set_power_state = &si_dpm_set_power_state, 1927 .set_power_state = &si_dpm_set_power_state,
1928 .post_set_power_state = &si_dpm_post_set_power_state, 1928 .post_set_power_state = &si_dpm_post_set_power_state,
1929 .display_configuration_changed = &si_dpm_display_configuration_changed, 1929 .display_configuration_changed = &si_dpm_display_configuration_changed,
1930 .fini = &si_dpm_fini, 1930 .fini = &si_dpm_fini,
1931 .get_sclk = &ni_dpm_get_sclk, 1931 .get_sclk = &ni_dpm_get_sclk,
1932 .get_mclk = &ni_dpm_get_mclk, 1932 .get_mclk = &ni_dpm_get_mclk,
1933 .print_power_state = &ni_dpm_print_power_state, 1933 .print_power_state = &ni_dpm_print_power_state,
1934 .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level, 1934 .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
1935 .force_performance_level = &si_dpm_force_performance_level, 1935 .force_performance_level = &si_dpm_force_performance_level,
1936 .vblank_too_short = &ni_dpm_vblank_too_short, 1936 .vblank_too_short = &ni_dpm_vblank_too_short,
1937 }, 1937 },
1938 .pflip = { 1938 .pflip = {
1939 .pre_page_flip = &evergreen_pre_page_flip, 1939 .pre_page_flip = &evergreen_pre_page_flip,
1940 .page_flip = &evergreen_page_flip, 1940 .page_flip = &evergreen_page_flip,
1941 .post_page_flip = &evergreen_post_page_flip, 1941 .post_page_flip = &evergreen_post_page_flip,
1942 }, 1942 },
1943 }; 1943 };
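/*
 * Note .cs_parse = NULL on the SI rings above (and on the CIK rings that
 * follow): the kernel no longer parses and patches these command streams;
 * submissions on these ASICs go through per-client GPU virtual address
 * spaces instead (see the .vm callbacks).
 */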
1944 1944
1945 static struct radeon_asic_ring ci_gfx_ring = { 1945 static struct radeon_asic_ring ci_gfx_ring = {
1946 .ib_execute = &cik_ring_ib_execute, 1946 .ib_execute = &cik_ring_ib_execute,
1947 .ib_parse = &cik_ib_parse, 1947 .ib_parse = &cik_ib_parse,
1948 .emit_fence = &cik_fence_gfx_ring_emit, 1948 .emit_fence = &cik_fence_gfx_ring_emit,
1949 .emit_semaphore = &cik_semaphore_ring_emit, 1949 .emit_semaphore = &cik_semaphore_ring_emit,
1950 .cs_parse = NULL, 1950 .cs_parse = NULL,
1951 .ring_test = &cik_ring_test, 1951 .ring_test = &cik_ring_test,
1952 .ib_test = &cik_ib_test, 1952 .ib_test = &cik_ib_test,
1953 .is_lockup = &cik_gfx_is_lockup, 1953 .is_lockup = &cik_gfx_is_lockup,
1954 .vm_flush = &cik_vm_flush, 1954 .vm_flush = &cik_vm_flush,
1955 .get_rptr = &cik_gfx_get_rptr, 1955 .get_rptr = &cik_gfx_get_rptr,
1956 .get_wptr = &cik_gfx_get_wptr, 1956 .get_wptr = &cik_gfx_get_wptr,
1957 .set_wptr = &cik_gfx_set_wptr, 1957 .set_wptr = &cik_gfx_set_wptr,
1958 }; 1958 };
1959 1959
1960 static struct radeon_asic_ring ci_cp_ring = { 1960 static struct radeon_asic_ring ci_cp_ring = {
1961 .ib_execute = &cik_ring_ib_execute, 1961 .ib_execute = &cik_ring_ib_execute,
1962 .ib_parse = &cik_ib_parse, 1962 .ib_parse = &cik_ib_parse,
1963 .emit_fence = &cik_fence_compute_ring_emit, 1963 .emit_fence = &cik_fence_compute_ring_emit,
1964 .emit_semaphore = &cik_semaphore_ring_emit, 1964 .emit_semaphore = &cik_semaphore_ring_emit,
1965 .cs_parse = NULL, 1965 .cs_parse = NULL,
1966 .ring_test = &cik_ring_test, 1966 .ring_test = &cik_ring_test,
1967 .ib_test = &cik_ib_test, 1967 .ib_test = &cik_ib_test,
1968 .is_lockup = &cik_gfx_is_lockup, 1968 .is_lockup = &cik_gfx_is_lockup,
1969 .vm_flush = &cik_vm_flush, 1969 .vm_flush = &cik_vm_flush,
1970 .get_rptr = &cik_compute_get_rptr, 1970 .get_rptr = &cik_compute_get_rptr,
1971 .get_wptr = &cik_compute_get_wptr, 1971 .get_wptr = &cik_compute_get_wptr,
1972 .set_wptr = &cik_compute_set_wptr, 1972 .set_wptr = &cik_compute_set_wptr,
1973 }; 1973 };
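/*
 * ci_cp_ring differs from ci_gfx_ring only where compute queues actually
 * diverge: fences are emitted with cik_fence_compute_ring_emit, and the
 * ring pointers go through the cik_compute_* accessors, which drive the
 * compute queues via their doorbells rather than the GFX ring registers.
 */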
1974 1974
1975 static struct radeon_asic_ring ci_dma_ring = { 1975 static struct radeon_asic_ring ci_dma_ring = {
1976 .ib_execute = &cik_sdma_ring_ib_execute, 1976 .ib_execute = &cik_sdma_ring_ib_execute,
1977 .ib_parse = &cik_ib_parse, 1977 .ib_parse = &cik_ib_parse,
1978 .emit_fence = &cik_sdma_fence_ring_emit, 1978 .emit_fence = &cik_sdma_fence_ring_emit,
1979 .emit_semaphore = &cik_sdma_semaphore_ring_emit, 1979 .emit_semaphore = &cik_sdma_semaphore_ring_emit,
1980 .cs_parse = NULL, 1980 .cs_parse = NULL,
1981 .ring_test = &cik_sdma_ring_test, 1981 .ring_test = &cik_sdma_ring_test,
1982 .ib_test = &cik_sdma_ib_test, 1982 .ib_test = &cik_sdma_ib_test,
1983 .is_lockup = &cik_sdma_is_lockup, 1983 .is_lockup = &cik_sdma_is_lockup,
1984 .vm_flush = &cik_dma_vm_flush, 1984 .vm_flush = &cik_dma_vm_flush,
1985 .get_rptr = &cik_sdma_get_rptr, 1985 .get_rptr = &cik_sdma_get_rptr,
1986 .get_wptr = &cik_sdma_get_wptr, 1986 .get_wptr = &cik_sdma_get_wptr,
1987 .set_wptr = &cik_sdma_set_wptr, 1987 .set_wptr = &cik_sdma_set_wptr,
1988 }; 1988 };
1989 1989
1990 static struct radeon_asic_ring ci_vce_ring = { 1990 static struct radeon_asic_ring ci_vce_ring = {
1991 .ib_execute = &radeon_vce_ib_execute, 1991 .ib_execute = &radeon_vce_ib_execute,
1992 .emit_fence = &radeon_vce_fence_emit, 1992 .emit_fence = &radeon_vce_fence_emit,
1993 .emit_semaphore = &radeon_vce_semaphore_emit, 1993 .emit_semaphore = &radeon_vce_semaphore_emit,
1994 .cs_parse = &radeon_vce_cs_parse, 1994 .cs_parse = &radeon_vce_cs_parse,
1995 .ring_test = &radeon_vce_ring_test, 1995 .ring_test = &radeon_vce_ring_test,
1996 .ib_test = &radeon_vce_ib_test, 1996 .ib_test = &radeon_vce_ib_test,
1997 .is_lockup = &radeon_ring_test_lockup, 1997 .is_lockup = &radeon_ring_test_lockup,
1998 .get_rptr = &vce_v1_0_get_rptr, 1998 .get_rptr = &vce_v1_0_get_rptr,
1999 .get_wptr = &vce_v1_0_get_wptr, 1999 .get_wptr = &vce_v1_0_get_wptr,
2000 .set_wptr = &vce_v1_0_set_wptr, 2000 .set_wptr = &vce_v1_0_set_wptr,
2001 }; 2001 };
2002 2002
2003 static struct radeon_asic ci_asic = { 2003 static struct radeon_asic ci_asic = {
2004 .init = &cik_init, 2004 .init = &cik_init,
2005 .fini = &cik_fini, 2005 .fini = &cik_fini,
2006 .suspend = &cik_suspend, 2006 .suspend = &cik_suspend,
2007 .resume = &cik_resume, 2007 .resume = &cik_resume,
2008 .asic_reset = &cik_asic_reset, 2008 .asic_reset = &cik_asic_reset,
2009 .vga_set_state = &r600_vga_set_state, 2009 .vga_set_state = &r600_vga_set_state,
2010 .ioctl_wait_idle = NULL, 2010 .ioctl_wait_idle = NULL,
2011 .gui_idle = &r600_gui_idle, 2011 .gui_idle = &r600_gui_idle,
2012 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 2012 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
2013 .get_xclk = &cik_get_xclk, 2013 .get_xclk = &cik_get_xclk,
2014 .get_gpu_clock_counter = &cik_get_gpu_clock_counter, 2014 .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
2015 .gart = { 2015 .gart = {
2016 .tlb_flush = &cik_pcie_gart_tlb_flush, 2016 .tlb_flush = &cik_pcie_gart_tlb_flush,
2017 .set_page = &rs600_gart_set_page, 2017 .set_page = &rs600_gart_set_page,
2018 }, 2018 },
2019 .vm = { 2019 .vm = {
2020 .init = &cik_vm_init, 2020 .init = &cik_vm_init,
2021 .fini = &cik_vm_fini, 2021 .fini = &cik_vm_fini,
2022 .set_page = &cik_sdma_vm_set_page, 2022 .set_page = &cik_sdma_vm_set_page,
2023 }, 2023 },
2024 .ring = { 2024 .ring = {
2025 [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring, 2025 [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
2026 [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring, 2026 [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
2027 [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring, 2027 [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
2028 [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring, 2028 [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
2029 [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring, 2029 [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
2030 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, 2030 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
2031 [TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring, 2031 [TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring,
2032 [TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring, 2032 [TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring,
2033 }, 2033 },
2034 .irq = { 2034 .irq = {
2035 .set = &cik_irq_set, 2035 .set = &cik_irq_set,
2036 .process = &cik_irq_process, 2036 .process = &cik_irq_process,
2037 }, 2037 },
2038 .display = { 2038 .display = {
2039 .bandwidth_update = &dce8_bandwidth_update, 2039 .bandwidth_update = &dce8_bandwidth_update,
2040 .get_vblank_counter = &evergreen_get_vblank_counter, 2040 .get_vblank_counter = &evergreen_get_vblank_counter,
2041 .wait_for_vblank = &dce4_wait_for_vblank, 2041 .wait_for_vblank = &dce4_wait_for_vblank,
2042 .set_backlight_level = &atombios_set_backlight_level, 2042 .set_backlight_level = &atombios_set_backlight_level,
2043 .get_backlight_level = &atombios_get_backlight_level, 2043 .get_backlight_level = &atombios_get_backlight_level,
2044 .hdmi_enable = &evergreen_hdmi_enable, 2044 .hdmi_enable = &evergreen_hdmi_enable,
2045 .hdmi_setmode = &evergreen_hdmi_setmode, 2045 .hdmi_setmode = &evergreen_hdmi_setmode,
2046 }, 2046 },
2047 .copy = { 2047 .copy = {
2048 .blit = &cik_copy_cpdma, 2048 .blit = &cik_copy_cpdma,
2049 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 2049 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2050 .dma = &cik_copy_dma, 2050 .dma = &cik_copy_dma,
2051 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 2051 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
2052 .copy = &cik_copy_dma, 2052 .copy = &cik_copy_cpdma,
2053 .copy_ring_index = R600_RING_TYPE_DMA_INDEX, 2053 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2054 }, 2054 },
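/*
 * The hunk above is the only functional change in this table: the default
 * .copy callback moves from cik_copy_dma on the SDMA ring
 * (R600_RING_TYPE_DMA_INDEX) to cik_copy_cpdma on the GFX ring
 * (RADEON_RING_TYPE_GFX_INDEX), while .dma still exposes the SDMA path
 * explicitly.
 */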
2055 .surface = { 2055 .surface = {
2056 .set_reg = r600_set_surface_reg, 2056 .set_reg = r600_set_surface_reg,
2057 .clear_reg = r600_clear_surface_reg, 2057 .clear_reg = r600_clear_surface_reg,
2058 }, 2058 },
2059 .hpd = { 2059 .hpd = {
2060 .init = &evergreen_hpd_init, 2060 .init = &evergreen_hpd_init,
2061 .fini = &evergreen_hpd_fini, 2061 .fini = &evergreen_hpd_fini,
2062 .sense = &evergreen_hpd_sense, 2062 .sense = &evergreen_hpd_sense,
2063 .set_polarity = &evergreen_hpd_set_polarity, 2063 .set_polarity = &evergreen_hpd_set_polarity,
2064 }, 2064 },
2065 .pm = { 2065 .pm = {
2066 .misc = &evergreen_pm_misc, 2066 .misc = &evergreen_pm_misc,
2067 .prepare = &evergreen_pm_prepare, 2067 .prepare = &evergreen_pm_prepare,
2068 .finish = &evergreen_pm_finish, 2068 .finish = &evergreen_pm_finish,
2069 .init_profile = &sumo_pm_init_profile, 2069 .init_profile = &sumo_pm_init_profile,
2070 .get_dynpm_state = &r600_pm_get_dynpm_state, 2070 .get_dynpm_state = &r600_pm_get_dynpm_state,
2071 .get_engine_clock = &radeon_atom_get_engine_clock, 2071 .get_engine_clock = &radeon_atom_get_engine_clock,
2072 .set_engine_clock = &radeon_atom_set_engine_clock, 2072 .set_engine_clock = &radeon_atom_set_engine_clock,
2073 .get_memory_clock = &radeon_atom_get_memory_clock, 2073 .get_memory_clock = &radeon_atom_get_memory_clock,
2074 .set_memory_clock = &radeon_atom_set_memory_clock, 2074 .set_memory_clock = &radeon_atom_set_memory_clock,
2075 .get_pcie_lanes = NULL, 2075 .get_pcie_lanes = NULL,
2076 .set_pcie_lanes = NULL, 2076 .set_pcie_lanes = NULL,
2077 .set_clock_gating = NULL, 2077 .set_clock_gating = NULL,
2078 .set_uvd_clocks = &cik_set_uvd_clocks, 2078 .set_uvd_clocks = &cik_set_uvd_clocks,
2079 .set_vce_clocks = &cik_set_vce_clocks, 2079 .set_vce_clocks = &cik_set_vce_clocks,
2080 .get_temperature = &ci_get_temp, 2080 .get_temperature = &ci_get_temp,
2081 }, 2081 },
2082 .dpm = { 2082 .dpm = {
2083 .init = &ci_dpm_init, 2083 .init = &ci_dpm_init,
2084 .setup_asic = &ci_dpm_setup_asic, 2084 .setup_asic = &ci_dpm_setup_asic,
2085 .enable = &ci_dpm_enable, 2085 .enable = &ci_dpm_enable,
2086 .late_enable = &ci_dpm_late_enable, 2086 .late_enable = &ci_dpm_late_enable,
2087 .disable = &ci_dpm_disable, 2087 .disable = &ci_dpm_disable,
2088 .pre_set_power_state = &ci_dpm_pre_set_power_state, 2088 .pre_set_power_state = &ci_dpm_pre_set_power_state,
2089 .set_power_state = &ci_dpm_set_power_state, 2089 .set_power_state = &ci_dpm_set_power_state,
2090 .post_set_power_state = &ci_dpm_post_set_power_state, 2090 .post_set_power_state = &ci_dpm_post_set_power_state,
2091 .display_configuration_changed = &ci_dpm_display_configuration_changed, 2091 .display_configuration_changed = &ci_dpm_display_configuration_changed,
2092 .fini = &ci_dpm_fini, 2092 .fini = &ci_dpm_fini,
2093 .get_sclk = &ci_dpm_get_sclk, 2093 .get_sclk = &ci_dpm_get_sclk,
2094 .get_mclk = &ci_dpm_get_mclk, 2094 .get_mclk = &ci_dpm_get_mclk,
2095 .print_power_state = &ci_dpm_print_power_state, 2095 .print_power_state = &ci_dpm_print_power_state,
2096 .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level, 2096 .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
2097 .force_performance_level = &ci_dpm_force_performance_level, 2097 .force_performance_level = &ci_dpm_force_performance_level,
2098 .vblank_too_short = &ci_dpm_vblank_too_short, 2098 .vblank_too_short = &ci_dpm_vblank_too_short,
2099 .powergate_uvd = &ci_dpm_powergate_uvd, 2099 .powergate_uvd = &ci_dpm_powergate_uvd,
2100 }, 2100 },
2101 .pflip = { 2101 .pflip = {
2102 .pre_page_flip = &evergreen_pre_page_flip, 2102 .pre_page_flip = &evergreen_pre_page_flip,
2103 .page_flip = &evergreen_page_flip, 2103 .page_flip = &evergreen_page_flip,
2104 .post_page_flip = &evergreen_post_page_flip, 2104 .post_page_flip = &evergreen_post_page_flip,
2105 }, 2105 },
2106 }; 2106 };
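/*
 * A minimal sketch of how core code consumes a per-ASIC ops table like
 * ci_asic.  All names below (demo_*) are hypothetical, not the kernel's
 * actual definitions: the point is only that the driver binds one table
 * at probe time and every later call is an indirect call through it, so
 * retargeting .copy from cik_copy_dma to cik_copy_cpdma above changes the
 * engine used for default copies without touching any caller.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_device;

/* Mirrors the .copy sub-struct of the tables above (simplified). */
struct demo_copy_ops {
	int (*blit)(struct demo_device *dev, uint64_t src, uint64_t dst,
		    unsigned int pages);
	int (*dma)(struct demo_device *dev, uint64_t src, uint64_t dst,
		   unsigned int pages);
	int (*copy)(struct demo_device *dev, uint64_t src, uint64_t dst,
		    unsigned int pages);
};

struct demo_device {
	const struct demo_copy_ops *copy;
};

/* Stand-ins for the two copy engines. */
static int demo_copy_cpdma(struct demo_device *dev, uint64_t src,
			   uint64_t dst, unsigned int pages)
{
	(void)dev; (void)src; (void)dst;
	printf("CP DMA copy of %u pages\n", pages);
	return 0;
}

static int demo_copy_sdma(struct demo_device *dev, uint64_t src,
			  uint64_t dst, unsigned int pages)
{
	(void)dev; (void)src; (void)dst;
	printf("SDMA copy of %u pages\n", pages);
	return 0;
}

/* A CIK-style table: default .copy routed to the CP, .dma kept on SDMA. */
static const struct demo_copy_ops demo_ci_copy_ops = {
	.blit = demo_copy_cpdma,
	.dma  = demo_copy_sdma,
	.copy = demo_copy_cpdma,
};

/* Core code never names an engine; it only dereferences the bound table. */
static int demo_buffer_move(struct demo_device *dev, uint64_t src,
			    uint64_t dst, unsigned int pages)
{
	return dev->copy->copy(dev, src, dst, pages);
}

int main(void)
{
	struct demo_device dev = { .copy = &demo_ci_copy_ops };

	return demo_buffer_move(&dev, 0x1000, 0x2000, 4);
}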
2107 2107
2108 static struct radeon_asic kv_asic = { 2108 static struct radeon_asic kv_asic = {
2109 .init = &cik_init, 2109 .init = &cik_init,
2110 .fini = &cik_fini, 2110 .fini = &cik_fini,
2111 .suspend = &cik_suspend, 2111 .suspend = &cik_suspend,
2112 .resume = &cik_resume, 2112 .resume = &cik_resume,
2113 .asic_reset = &cik_asic_reset, 2113 .asic_reset = &cik_asic_reset,
2114 .vga_set_state = &r600_vga_set_state, 2114 .vga_set_state = &r600_vga_set_state,
2115 .ioctl_wait_idle = NULL, 2115 .ioctl_wait_idle = NULL,
2116 .gui_idle = &r600_gui_idle, 2116 .gui_idle = &r600_gui_idle,
2117 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 2117 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
2118 .get_xclk = &cik_get_xclk, 2118 .get_xclk = &cik_get_xclk,
2119 .get_gpu_clock_counter = &cik_get_gpu_clock_counter, 2119 .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
2120 .gart = { 2120 .gart = {
2121 .tlb_flush = &cik_pcie_gart_tlb_flush, 2121 .tlb_flush = &cik_pcie_gart_tlb_flush,
2122 .set_page = &rs600_gart_set_page, 2122 .set_page = &rs600_gart_set_page,
2123 }, 2123 },
2124 .vm = { 2124 .vm = {
2125 .init = &cik_vm_init, 2125 .init = &cik_vm_init,
2126 .fini = &cik_vm_fini, 2126 .fini = &cik_vm_fini,
2127 .set_page = &cik_sdma_vm_set_page, 2127 .set_page = &cik_sdma_vm_set_page,
2128 }, 2128 },
2129 .ring = { 2129 .ring = {
2130 [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring, 2130 [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
2131 [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring, 2131 [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
2132 [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring, 2132 [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
2133 [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring, 2133 [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
2134 [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring, 2134 [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
2135 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, 2135 [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
2136 [TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring, 2136 [TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring,
2137 [TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring, 2137 [TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring,
2138 }, 2138 },
2139 .irq = { 2139 .irq = {
2140 .set = &cik_irq_set, 2140 .set = &cik_irq_set,
2141 .process = &cik_irq_process, 2141 .process = &cik_irq_process,
2142 }, 2142 },
2143 .display = { 2143 .display = {
2144 .bandwidth_update = &dce8_bandwidth_update, 2144 .bandwidth_update = &dce8_bandwidth_update,
2145 .get_vblank_counter = &evergreen_get_vblank_counter, 2145 .get_vblank_counter = &evergreen_get_vblank_counter,
2146 .wait_for_vblank = &dce4_wait_for_vblank, 2146 .wait_for_vblank = &dce4_wait_for_vblank,
2147 .set_backlight_level = &atombios_set_backlight_level, 2147 .set_backlight_level = &atombios_set_backlight_level,
2148 .get_backlight_level = &atombios_get_backlight_level, 2148 .get_backlight_level = &atombios_get_backlight_level,
2149 .hdmi_enable = &evergreen_hdmi_enable, 2149 .hdmi_enable = &evergreen_hdmi_enable,
2150 .hdmi_setmode = &evergreen_hdmi_setmode, 2150 .hdmi_setmode = &evergreen_hdmi_setmode,
2151 }, 2151 },
2152 .copy = { 2152 .copy = {
2153 .blit = &cik_copy_cpdma, 2153 .blit = &cik_copy_cpdma,
2154 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 2154 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2155 .dma = &cik_copy_dma, 2155 .dma = &cik_copy_dma,
2156 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 2156 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
2157 .copy = &cik_copy_dma, 2157 .copy = &cik_copy_dma,
2158 .copy_ring_index = R600_RING_TYPE_DMA_INDEX, 2158 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
2159 }, 2159 },
2160 .surface = { 2160 .surface = {
2161 .set_reg = r600_set_surface_reg, 2161 .set_reg = r600_set_surface_reg,
2162 .clear_reg = r600_clear_surface_reg, 2162 .clear_reg = r600_clear_surface_reg,
2163 }, 2163 },
2164 .hpd = { 2164 .hpd = {
2165 .init = &evergreen_hpd_init, 2165 .init = &evergreen_hpd_init,
2166 .fini = &evergreen_hpd_fini, 2166 .fini = &evergreen_hpd_fini,
2167 .sense = &evergreen_hpd_sense, 2167 .sense = &evergreen_hpd_sense,
2168 .set_polarity = &evergreen_hpd_set_polarity, 2168 .set_polarity = &evergreen_hpd_set_polarity,
2169 }, 2169 },
2170 .pm = { 2170 .pm = {
2171 .misc = &evergreen_pm_misc, 2171 .misc = &evergreen_pm_misc,
2172 .prepare = &evergreen_pm_prepare, 2172 .prepare = &evergreen_pm_prepare,
2173 .finish = &evergreen_pm_finish, 2173 .finish = &evergreen_pm_finish,
2174 .init_profile = &sumo_pm_init_profile, 2174 .init_profile = &sumo_pm_init_profile,
2175 .get_dynpm_state = &r600_pm_get_dynpm_state, 2175 .get_dynpm_state = &r600_pm_get_dynpm_state,
2176 .get_engine_clock = &radeon_atom_get_engine_clock, 2176 .get_engine_clock = &radeon_atom_get_engine_clock,
2177 .set_engine_clock = &radeon_atom_set_engine_clock, 2177 .set_engine_clock = &radeon_atom_set_engine_clock,
2178 .get_memory_clock = &radeon_atom_get_memory_clock, 2178 .get_memory_clock = &radeon_atom_get_memory_clock,
2179 .set_memory_clock = &radeon_atom_set_memory_clock, 2179 .set_memory_clock = &radeon_atom_set_memory_clock,
2180 .get_pcie_lanes = NULL, 2180 .get_pcie_lanes = NULL,
2181 .set_pcie_lanes = NULL, 2181 .set_pcie_lanes = NULL,
2182 .set_clock_gating = NULL, 2182 .set_clock_gating = NULL,
2183 .set_uvd_clocks = &cik_set_uvd_clocks, 2183 .set_uvd_clocks = &cik_set_uvd_clocks,
2184 .set_vce_clocks = &cik_set_vce_clocks, 2184 .set_vce_clocks = &cik_set_vce_clocks,
2185 .get_temperature = &kv_get_temp, 2185 .get_temperature = &kv_get_temp,
2186 }, 2186 },
2187 .dpm = { 2187 .dpm = {
2188 .init = &kv_dpm_init, 2188 .init = &kv_dpm_init,
2189 .setup_asic = &kv_dpm_setup_asic, 2189 .setup_asic = &kv_dpm_setup_asic,
2190 .enable = &kv_dpm_enable, 2190 .enable = &kv_dpm_enable,
2191 .late_enable = &kv_dpm_late_enable, 2191 .late_enable = &kv_dpm_late_enable,
2192 .disable = &kv_dpm_disable, 2192 .disable = &kv_dpm_disable,
2193 .pre_set_power_state = &kv_dpm_pre_set_power_state, 2193 .pre_set_power_state = &kv_dpm_pre_set_power_state,
2194 .set_power_state = &kv_dpm_set_power_state, 2194 .set_power_state = &kv_dpm_set_power_state,
2195 .post_set_power_state = &kv_dpm_post_set_power_state, 2195 .post_set_power_state = &kv_dpm_post_set_power_state,
2196 .display_configuration_changed = &kv_dpm_display_configuration_changed, 2196 .display_configuration_changed = &kv_dpm_display_configuration_changed,
2197 .fini = &kv_dpm_fini, 2197 .fini = &kv_dpm_fini,
2198 .get_sclk = &kv_dpm_get_sclk, 2198 .get_sclk = &kv_dpm_get_sclk,
2199 .get_mclk = &kv_dpm_get_mclk, 2199 .get_mclk = &kv_dpm_get_mclk,
2200 .print_power_state = &kv_dpm_print_power_state, 2200 .print_power_state = &kv_dpm_print_power_state,
2201 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, 2201 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
2202 .force_performance_level = &kv_dpm_force_performance_level, 2202 .force_performance_level = &kv_dpm_force_performance_level,
2203 .powergate_uvd = &kv_dpm_powergate_uvd, 2203 .powergate_uvd = &kv_dpm_powergate_uvd,
2204 .enable_bapm = &kv_dpm_enable_bapm, 2204 .enable_bapm = &kv_dpm_enable_bapm,
2205 }, 2205 },
2206 .pflip = { 2206 .pflip = {
2207 .pre_page_flip = &evergreen_pre_page_flip, 2207 .pre_page_flip = &evergreen_pre_page_flip,
2208 .page_flip = &evergreen_page_flip, 2208 .page_flip = &evergreen_page_flip,
2209 .post_page_flip = &evergreen_post_page_flip, 2209 .post_page_flip = &evergreen_post_page_flip,
2210 }, 2210 },
2211 }; 2211 };
2212 2212
2213 /** 2213 /**
2214 * radeon_asic_init - register asic specific callbacks 2214 * radeon_asic_init - register asic specific callbacks
2215 * 2215 *
2216 * @rdev: radeon device pointer 2216 * @rdev: radeon device pointer
2217 * 2217 *
2218 * Registers the appropriate asic specific callbacks for each 2218 * Registers the appropriate asic specific callbacks for each
2219 * chip family. Also sets other asic-specific info like the number 2219 * chip family. Also sets other asic-specific info like the number
2220 * of crtcs and the register aperture accessors (all asics). 2220 * of crtcs and the register aperture accessors (all asics).
2221 * Returns 0 for success. 2221 * Returns 0 for success.
2222 */ 2222 */
2223 int radeon_asic_init(struct radeon_device *rdev) 2223 int radeon_asic_init(struct radeon_device *rdev)
2224 { 2224 {
2225 radeon_register_accessor_init(rdev); 2225 radeon_register_accessor_init(rdev);
2226 2226
2227 /* set the number of crtcs */ 2227 /* set the number of crtcs */
2228 if (rdev->flags & RADEON_SINGLE_CRTC) 2228 if (rdev->flags & RADEON_SINGLE_CRTC)
2229 rdev->num_crtc = 1; 2229 rdev->num_crtc = 1;
2230 else 2230 else
2231 rdev->num_crtc = 2; 2231 rdev->num_crtc = 2;
2232 2232
2233 rdev->has_uvd = false; 2233 rdev->has_uvd = false;
2234 2234
2235 switch (rdev->family) { 2235 switch (rdev->family) {
2236 case CHIP_R100: 2236 case CHIP_R100:
2237 case CHIP_RV100: 2237 case CHIP_RV100:
2238 case CHIP_RS100: 2238 case CHIP_RS100:
2239 case CHIP_RV200: 2239 case CHIP_RV200:
2240 case CHIP_RS200: 2240 case CHIP_RS200:
2241 rdev->asic = &r100_asic; 2241 rdev->asic = &r100_asic;
2242 break; 2242 break;
2243 case CHIP_R200: 2243 case CHIP_R200:
2244 case CHIP_RV250: 2244 case CHIP_RV250:
2245 case CHIP_RS300: 2245 case CHIP_RS300:
2246 case CHIP_RV280: 2246 case CHIP_RV280:
2247 rdev->asic = &r200_asic; 2247 rdev->asic = &r200_asic;
2248 break; 2248 break;
2249 case CHIP_R300: 2249 case CHIP_R300:
2250 case CHIP_R350: 2250 case CHIP_R350:
2251 case CHIP_RV350: 2251 case CHIP_RV350:
2252 case CHIP_RV380: 2252 case CHIP_RV380:
2253 if (rdev->flags & RADEON_IS_PCIE) 2253 if (rdev->flags & RADEON_IS_PCIE)
2254 rdev->asic = &r300_asic_pcie; 2254 rdev->asic = &r300_asic_pcie;
2255 else 2255 else
2256 rdev->asic = &r300_asic; 2256 rdev->asic = &r300_asic;
2257 break; 2257 break;
2258 case CHIP_R420: 2258 case CHIP_R420:
2259 case CHIP_R423: 2259 case CHIP_R423:
2260 case CHIP_RV410: 2260 case CHIP_RV410:
2261 rdev->asic = &r420_asic; 2261 rdev->asic = &r420_asic;
2262 /* handle macs */ 2262 /* handle macs */
2263 if (rdev->bios == NULL) { 2263 if (rdev->bios == NULL) {
2264 rdev->asic->pm.get_engine_clock = &radeon_legacy_get_engine_clock; 2264 rdev->asic->pm.get_engine_clock = &radeon_legacy_get_engine_clock;
2265 rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock; 2265 rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
2266 rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock; 2266 rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
2267 rdev->asic->pm.set_memory_clock = NULL; 2267 rdev->asic->pm.set_memory_clock = NULL;
2268 rdev->asic->display.set_backlight_level = &radeon_legacy_set_backlight_level; 2268 rdev->asic->display.set_backlight_level = &radeon_legacy_set_backlight_level;
2269 } 2269 }
2270 break; 2270 break;
2271 case CHIP_RS400: 2271 case CHIP_RS400:
2272 case CHIP_RS480: 2272 case CHIP_RS480:
2273 rdev->asic = &rs400_asic; 2273 rdev->asic = &rs400_asic;
2274 break; 2274 break;
2275 case CHIP_RS600: 2275 case CHIP_RS600:
2276 rdev->asic = &rs600_asic; 2276 rdev->asic = &rs600_asic;
2277 break; 2277 break;
2278 case CHIP_RS690: 2278 case CHIP_RS690:
2279 case CHIP_RS740: 2279 case CHIP_RS740:
2280 rdev->asic = &rs690_asic; 2280 rdev->asic = &rs690_asic;
2281 break; 2281 break;
2282 case CHIP_RV515: 2282 case CHIP_RV515:
2283 rdev->asic = &rv515_asic; 2283 rdev->asic = &rv515_asic;
2284 break; 2284 break;
2285 case CHIP_R520: 2285 case CHIP_R520:
2286 case CHIP_RV530: 2286 case CHIP_RV530:
2287 case CHIP_RV560: 2287 case CHIP_RV560:
2288 case CHIP_RV570: 2288 case CHIP_RV570:
2289 case CHIP_R580: 2289 case CHIP_R580:
2290 rdev->asic = &r520_asic; 2290 rdev->asic = &r520_asic;
2291 break; 2291 break;
2292 case CHIP_R600: 2292 case CHIP_R600:
2293 rdev->asic = &r600_asic; 2293 rdev->asic = &r600_asic;
2294 break; 2294 break;
2295 case CHIP_RV610: 2295 case CHIP_RV610:
2296 case CHIP_RV630: 2296 case CHIP_RV630:
2297 case CHIP_RV620: 2297 case CHIP_RV620:
2298 case CHIP_RV635: 2298 case CHIP_RV635:
2299 case CHIP_RV670: 2299 case CHIP_RV670:
2300 rdev->asic = &rv6xx_asic; 2300 rdev->asic = &rv6xx_asic;
2301 rdev->has_uvd = true; 2301 rdev->has_uvd = true;
2302 break; 2302 break;
2303 case CHIP_RS780: 2303 case CHIP_RS780:
2304 case CHIP_RS880: 2304 case CHIP_RS880:
2305 rdev->asic = &rs780_asic; 2305 rdev->asic = &rs780_asic;
2306 rdev->has_uvd = true; 2306 rdev->has_uvd = true;
2307 break; 2307 break;
2308 case CHIP_RV770: 2308 case CHIP_RV770:
2309 case CHIP_RV730: 2309 case CHIP_RV730:
2310 case CHIP_RV710: 2310 case CHIP_RV710:
2311 case CHIP_RV740: 2311 case CHIP_RV740:
2312 rdev->asic = &rv770_asic; 2312 rdev->asic = &rv770_asic;
2313 rdev->has_uvd = true; 2313 rdev->has_uvd = true;
2314 break; 2314 break;
2315 case CHIP_CEDAR: 2315 case CHIP_CEDAR:
2316 case CHIP_REDWOOD: 2316 case CHIP_REDWOOD:
2317 case CHIP_JUNIPER: 2317 case CHIP_JUNIPER:
2318 case CHIP_CYPRESS: 2318 case CHIP_CYPRESS:
2319 case CHIP_HEMLOCK: 2319 case CHIP_HEMLOCK:
2320 /* set num crtcs */ 2320 /* set num crtcs */
2321 if (rdev->family == CHIP_CEDAR) 2321 if (rdev->family == CHIP_CEDAR)
2322 rdev->num_crtc = 4; 2322 rdev->num_crtc = 4;
2323 else 2323 else
2324 rdev->num_crtc = 6; 2324 rdev->num_crtc = 6;
2325 rdev->asic = &evergreen_asic; 2325 rdev->asic = &evergreen_asic;
2326 rdev->has_uvd = true; 2326 rdev->has_uvd = true;
2327 break; 2327 break;
2328 case CHIP_PALM: 2328 case CHIP_PALM:
2329 case CHIP_SUMO: 2329 case CHIP_SUMO:
2330 case CHIP_SUMO2: 2330 case CHIP_SUMO2:
2331 rdev->asic = &sumo_asic; 2331 rdev->asic = &sumo_asic;
2332 rdev->has_uvd = true; 2332 rdev->has_uvd = true;
2333 break; 2333 break;
2334 case CHIP_BARTS: 2334 case CHIP_BARTS:
2335 case CHIP_TURKS: 2335 case CHIP_TURKS:
2336 case CHIP_CAICOS: 2336 case CHIP_CAICOS:
2337 /* set num crtcs */ 2337 /* set num crtcs */
2338 if (rdev->family == CHIP_CAICOS) 2338 if (rdev->family == CHIP_CAICOS)
2339 rdev->num_crtc = 4; 2339 rdev->num_crtc = 4;
2340 else 2340 else
2341 rdev->num_crtc = 6; 2341 rdev->num_crtc = 6;
2342 rdev->asic = &btc_asic; 2342 rdev->asic = &btc_asic;
2343 rdev->has_uvd = true; 2343 rdev->has_uvd = true;
2344 break; 2344 break;
2345 case CHIP_CAYMAN: 2345 case CHIP_CAYMAN:
2346 rdev->asic = &cayman_asic; 2346 rdev->asic = &cayman_asic;
2347 /* set num crtcs */ 2347 /* set num crtcs */
2348 rdev->num_crtc = 6; 2348 rdev->num_crtc = 6;
2349 rdev->has_uvd = true; 2349 rdev->has_uvd = true;
2350 break; 2350 break;
2351 case CHIP_ARUBA: 2351 case CHIP_ARUBA:
2352 rdev->asic = &trinity_asic; 2352 rdev->asic = &trinity_asic;
2353 /* set num crtcs */ 2353 /* set num crtcs */
2354 rdev->num_crtc = 4; 2354 rdev->num_crtc = 4;
2355 rdev->has_uvd = true; 2355 rdev->has_uvd = true;
2356 break; 2356 break;
2357 case CHIP_TAHITI: 2357 case CHIP_TAHITI:
2358 case CHIP_PITCAIRN: 2358 case CHIP_PITCAIRN:
2359 case CHIP_VERDE: 2359 case CHIP_VERDE:
2360 case CHIP_OLAND: 2360 case CHIP_OLAND:
2361 case CHIP_HAINAN: 2361 case CHIP_HAINAN:
2362 rdev->asic = &si_asic; 2362 rdev->asic = &si_asic;
2363 /* set num crtcs */ 2363 /* set num crtcs */
2364 if (rdev->family == CHIP_HAINAN) 2364 if (rdev->family == CHIP_HAINAN)
2365 rdev->num_crtc = 0; 2365 rdev->num_crtc = 0;
2366 else if (rdev->family == CHIP_OLAND) 2366 else if (rdev->family == CHIP_OLAND)
2367 rdev->num_crtc = 2; 2367 rdev->num_crtc = 2;
2368 else 2368 else
2369 rdev->num_crtc = 6; 2369 rdev->num_crtc = 6;
2370 if (rdev->family == CHIP_HAINAN) 2370 if (rdev->family == CHIP_HAINAN)
2371 rdev->has_uvd = false; 2371 rdev->has_uvd = false;
2372 else 2372 else
2373 rdev->has_uvd = true; 2373 rdev->has_uvd = true;
2374 switch (rdev->family) { 2374 switch (rdev->family) {
2375 case CHIP_TAHITI: 2375 case CHIP_TAHITI:
2376 rdev->cg_flags = 2376 rdev->cg_flags =
2377 RADEON_CG_SUPPORT_GFX_MGCG | 2377 RADEON_CG_SUPPORT_GFX_MGCG |
2378 RADEON_CG_SUPPORT_GFX_MGLS | 2378 RADEON_CG_SUPPORT_GFX_MGLS |
2379 /*RADEON_CG_SUPPORT_GFX_CGCG |*/ 2379 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2380 RADEON_CG_SUPPORT_GFX_CGLS | 2380 RADEON_CG_SUPPORT_GFX_CGLS |
2381 RADEON_CG_SUPPORT_GFX_CGTS | 2381 RADEON_CG_SUPPORT_GFX_CGTS |
2382 RADEON_CG_SUPPORT_GFX_CP_LS | 2382 RADEON_CG_SUPPORT_GFX_CP_LS |
2383 RADEON_CG_SUPPORT_MC_MGCG | 2383 RADEON_CG_SUPPORT_MC_MGCG |
2384 RADEON_CG_SUPPORT_SDMA_MGCG | 2384 RADEON_CG_SUPPORT_SDMA_MGCG |
2385 RADEON_CG_SUPPORT_BIF_LS | 2385 RADEON_CG_SUPPORT_BIF_LS |
2386 RADEON_CG_SUPPORT_VCE_MGCG | 2386 RADEON_CG_SUPPORT_VCE_MGCG |
2387 RADEON_CG_SUPPORT_UVD_MGCG | 2387 RADEON_CG_SUPPORT_UVD_MGCG |
2388 RADEON_CG_SUPPORT_HDP_LS | 2388 RADEON_CG_SUPPORT_HDP_LS |
2389 RADEON_CG_SUPPORT_HDP_MGCG; 2389 RADEON_CG_SUPPORT_HDP_MGCG;
2390 rdev->pg_flags = 0; 2390 rdev->pg_flags = 0;
2391 break; 2391 break;
2392 case CHIP_PITCAIRN: 2392 case CHIP_PITCAIRN:
2393 rdev->cg_flags = 2393 rdev->cg_flags =
2394 RADEON_CG_SUPPORT_GFX_MGCG | 2394 RADEON_CG_SUPPORT_GFX_MGCG |
2395 RADEON_CG_SUPPORT_GFX_MGLS | 2395 RADEON_CG_SUPPORT_GFX_MGLS |
2396 /*RADEON_CG_SUPPORT_GFX_CGCG |*/ 2396 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2397 RADEON_CG_SUPPORT_GFX_CGLS | 2397 RADEON_CG_SUPPORT_GFX_CGLS |
2398 RADEON_CG_SUPPORT_GFX_CGTS | 2398 RADEON_CG_SUPPORT_GFX_CGTS |
2399 RADEON_CG_SUPPORT_GFX_CP_LS | 2399 RADEON_CG_SUPPORT_GFX_CP_LS |
2400 RADEON_CG_SUPPORT_GFX_RLC_LS | 2400 RADEON_CG_SUPPORT_GFX_RLC_LS |
2401 RADEON_CG_SUPPORT_MC_LS | 2401 RADEON_CG_SUPPORT_MC_LS |
2402 RADEON_CG_SUPPORT_MC_MGCG | 2402 RADEON_CG_SUPPORT_MC_MGCG |
2403 RADEON_CG_SUPPORT_SDMA_MGCG | 2403 RADEON_CG_SUPPORT_SDMA_MGCG |
2404 RADEON_CG_SUPPORT_BIF_LS | 2404 RADEON_CG_SUPPORT_BIF_LS |
2405 RADEON_CG_SUPPORT_VCE_MGCG | 2405 RADEON_CG_SUPPORT_VCE_MGCG |
2406 RADEON_CG_SUPPORT_UVD_MGCG | 2406 RADEON_CG_SUPPORT_UVD_MGCG |
2407 RADEON_CG_SUPPORT_HDP_LS | 2407 RADEON_CG_SUPPORT_HDP_LS |
2408 RADEON_CG_SUPPORT_HDP_MGCG; 2408 RADEON_CG_SUPPORT_HDP_MGCG;
2409 rdev->pg_flags = 0; 2409 rdev->pg_flags = 0;
2410 break; 2410 break;
2411 case CHIP_VERDE: 2411 case CHIP_VERDE:
2412 rdev->cg_flags = 2412 rdev->cg_flags =
2413 RADEON_CG_SUPPORT_GFX_MGCG | 2413 RADEON_CG_SUPPORT_GFX_MGCG |
2414 RADEON_CG_SUPPORT_GFX_MGLS | 2414 RADEON_CG_SUPPORT_GFX_MGLS |
2415 /*RADEON_CG_SUPPORT_GFX_CGCG |*/ 2415 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2416 RADEON_CG_SUPPORT_GFX_CGLS | 2416 RADEON_CG_SUPPORT_GFX_CGLS |
2417 RADEON_CG_SUPPORT_GFX_CGTS | 2417 RADEON_CG_SUPPORT_GFX_CGTS |
2418 RADEON_CG_SUPPORT_GFX_CP_LS | 2418 RADEON_CG_SUPPORT_GFX_CP_LS |
2419 RADEON_CG_SUPPORT_GFX_RLC_LS | 2419 RADEON_CG_SUPPORT_GFX_RLC_LS |
2420 RADEON_CG_SUPPORT_MC_LS | 2420 RADEON_CG_SUPPORT_MC_LS |
2421 RADEON_CG_SUPPORT_MC_MGCG | 2421 RADEON_CG_SUPPORT_MC_MGCG |
2422 RADEON_CG_SUPPORT_SDMA_MGCG | 2422 RADEON_CG_SUPPORT_SDMA_MGCG |
2423 RADEON_CG_SUPPORT_BIF_LS | 2423 RADEON_CG_SUPPORT_BIF_LS |
2424 RADEON_CG_SUPPORT_VCE_MGCG | 2424 RADEON_CG_SUPPORT_VCE_MGCG |
2425 RADEON_CG_SUPPORT_UVD_MGCG | 2425 RADEON_CG_SUPPORT_UVD_MGCG |
2426 RADEON_CG_SUPPORT_HDP_LS | 2426 RADEON_CG_SUPPORT_HDP_LS |
2427 RADEON_CG_SUPPORT_HDP_MGCG; 2427 RADEON_CG_SUPPORT_HDP_MGCG;
2428 rdev->pg_flags = 0 | 2428 rdev->pg_flags = 0 |
2429 /*RADEON_PG_SUPPORT_GFX_PG | */ 2429 /*RADEON_PG_SUPPORT_GFX_PG | */
2430 RADEON_PG_SUPPORT_SDMA; 2430 RADEON_PG_SUPPORT_SDMA;
2431 break; 2431 break;
2432 case CHIP_OLAND: 2432 case CHIP_OLAND:
2433 rdev->cg_flags = 2433 rdev->cg_flags =
2434 RADEON_CG_SUPPORT_GFX_MGCG | 2434 RADEON_CG_SUPPORT_GFX_MGCG |
2435 RADEON_CG_SUPPORT_GFX_MGLS | 2435 RADEON_CG_SUPPORT_GFX_MGLS |
2436 /*RADEON_CG_SUPPORT_GFX_CGCG |*/ 2436 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2437 RADEON_CG_SUPPORT_GFX_CGLS | 2437 RADEON_CG_SUPPORT_GFX_CGLS |
2438 RADEON_CG_SUPPORT_GFX_CGTS | 2438 RADEON_CG_SUPPORT_GFX_CGTS |
2439 RADEON_CG_SUPPORT_GFX_CP_LS | 2439 RADEON_CG_SUPPORT_GFX_CP_LS |
2440 RADEON_CG_SUPPORT_GFX_RLC_LS | 2440 RADEON_CG_SUPPORT_GFX_RLC_LS |
2441 RADEON_CG_SUPPORT_MC_LS | 2441 RADEON_CG_SUPPORT_MC_LS |
2442 RADEON_CG_SUPPORT_MC_MGCG | 2442 RADEON_CG_SUPPORT_MC_MGCG |
2443 RADEON_CG_SUPPORT_SDMA_MGCG | 2443 RADEON_CG_SUPPORT_SDMA_MGCG |
2444 RADEON_CG_SUPPORT_BIF_LS | 2444 RADEON_CG_SUPPORT_BIF_LS |
2445 RADEON_CG_SUPPORT_UVD_MGCG | 2445 RADEON_CG_SUPPORT_UVD_MGCG |
2446 RADEON_CG_SUPPORT_HDP_LS | 2446 RADEON_CG_SUPPORT_HDP_LS |
2447 RADEON_CG_SUPPORT_HDP_MGCG; 2447 RADEON_CG_SUPPORT_HDP_MGCG;
2448 rdev->pg_flags = 0; 2448 rdev->pg_flags = 0;
2449 break; 2449 break;
2450 case CHIP_HAINAN: 2450 case CHIP_HAINAN:
2451 rdev->cg_flags = 2451 rdev->cg_flags =
2452 RADEON_CG_SUPPORT_GFX_MGCG | 2452 RADEON_CG_SUPPORT_GFX_MGCG |
2453 RADEON_CG_SUPPORT_GFX_MGLS | 2453 RADEON_CG_SUPPORT_GFX_MGLS |
2454 /*RADEON_CG_SUPPORT_GFX_CGCG |*/ 2454 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2455 RADEON_CG_SUPPORT_GFX_CGLS | 2455 RADEON_CG_SUPPORT_GFX_CGLS |
2456 RADEON_CG_SUPPORT_GFX_CGTS | 2456 RADEON_CG_SUPPORT_GFX_CGTS |
2457 RADEON_CG_SUPPORT_GFX_CP_LS | 2457 RADEON_CG_SUPPORT_GFX_CP_LS |
2458 RADEON_CG_SUPPORT_GFX_RLC_LS | 2458 RADEON_CG_SUPPORT_GFX_RLC_LS |
2459 RADEON_CG_SUPPORT_MC_LS | 2459 RADEON_CG_SUPPORT_MC_LS |
2460 RADEON_CG_SUPPORT_MC_MGCG | 2460 RADEON_CG_SUPPORT_MC_MGCG |
2461 RADEON_CG_SUPPORT_SDMA_MGCG | 2461 RADEON_CG_SUPPORT_SDMA_MGCG |
2462 RADEON_CG_SUPPORT_BIF_LS | 2462 RADEON_CG_SUPPORT_BIF_LS |
2463 RADEON_CG_SUPPORT_HDP_LS | 2463 RADEON_CG_SUPPORT_HDP_LS |
2464 RADEON_CG_SUPPORT_HDP_MGCG; 2464 RADEON_CG_SUPPORT_HDP_MGCG;
2465 rdev->pg_flags = 0; 2465 rdev->pg_flags = 0;
2466 break; 2466 break;
2467 default: 2467 default:
2468 rdev->cg_flags = 0; 2468 rdev->cg_flags = 0;
2469 rdev->pg_flags = 0; 2469 rdev->pg_flags = 0;
2470 break; 2470 break;
2471 } 2471 }
2472 break; 2472 break;
2473 case CHIP_BONAIRE: 2473 case CHIP_BONAIRE:
2474 case CHIP_HAWAII: 2474 case CHIP_HAWAII:
2475 rdev->asic = &ci_asic; 2475 rdev->asic = &ci_asic;
2476 rdev->num_crtc = 6; 2476 rdev->num_crtc = 6;
2477 rdev->has_uvd = true; 2477 rdev->has_uvd = true;
2478 if (rdev->family == CHIP_BONAIRE) { 2478 if (rdev->family == CHIP_BONAIRE) {
2479 rdev->cg_flags = 2479 rdev->cg_flags =
2480 RADEON_CG_SUPPORT_GFX_MGCG | 2480 RADEON_CG_SUPPORT_GFX_MGCG |
2481 RADEON_CG_SUPPORT_GFX_MGLS | 2481 RADEON_CG_SUPPORT_GFX_MGLS |
2482 RADEON_CG_SUPPORT_GFX_CGCG | 2482 RADEON_CG_SUPPORT_GFX_CGCG |
2483 RADEON_CG_SUPPORT_GFX_CGLS | 2483 RADEON_CG_SUPPORT_GFX_CGLS |
2484 RADEON_CG_SUPPORT_GFX_CGTS | 2484 RADEON_CG_SUPPORT_GFX_CGTS |
2485 RADEON_CG_SUPPORT_GFX_CGTS_LS | 2485 RADEON_CG_SUPPORT_GFX_CGTS_LS |
2486 RADEON_CG_SUPPORT_GFX_CP_LS | 2486 RADEON_CG_SUPPORT_GFX_CP_LS |
2487 RADEON_CG_SUPPORT_MC_LS | 2487 RADEON_CG_SUPPORT_MC_LS |
2488 RADEON_CG_SUPPORT_MC_MGCG | 2488 RADEON_CG_SUPPORT_MC_MGCG |
2489 RADEON_CG_SUPPORT_SDMA_MGCG | 2489 RADEON_CG_SUPPORT_SDMA_MGCG |
2490 RADEON_CG_SUPPORT_SDMA_LS | 2490 RADEON_CG_SUPPORT_SDMA_LS |
2491 RADEON_CG_SUPPORT_BIF_LS | 2491 RADEON_CG_SUPPORT_BIF_LS |
2492 RADEON_CG_SUPPORT_VCE_MGCG | 2492 RADEON_CG_SUPPORT_VCE_MGCG |
2493 RADEON_CG_SUPPORT_UVD_MGCG | 2493 RADEON_CG_SUPPORT_UVD_MGCG |
2494 RADEON_CG_SUPPORT_HDP_LS | 2494 RADEON_CG_SUPPORT_HDP_LS |
2495 RADEON_CG_SUPPORT_HDP_MGCG; 2495 RADEON_CG_SUPPORT_HDP_MGCG;
2496 rdev->pg_flags = 0; 2496 rdev->pg_flags = 0;
2497 } else { 2497 } else {
2498 rdev->cg_flags = 2498 rdev->cg_flags =
2499 RADEON_CG_SUPPORT_GFX_MGCG | 2499 RADEON_CG_SUPPORT_GFX_MGCG |
2500 RADEON_CG_SUPPORT_GFX_MGLS | 2500 RADEON_CG_SUPPORT_GFX_MGLS |
2501 RADEON_CG_SUPPORT_GFX_CGCG | 2501 RADEON_CG_SUPPORT_GFX_CGCG |
2502 RADEON_CG_SUPPORT_GFX_CGLS | 2502 RADEON_CG_SUPPORT_GFX_CGLS |
2503 RADEON_CG_SUPPORT_GFX_CGTS | 2503 RADEON_CG_SUPPORT_GFX_CGTS |
2504 RADEON_CG_SUPPORT_GFX_CP_LS | 2504 RADEON_CG_SUPPORT_GFX_CP_LS |
2505 RADEON_CG_SUPPORT_MC_LS | 2505 RADEON_CG_SUPPORT_MC_LS |
2506 RADEON_CG_SUPPORT_MC_MGCG | 2506 RADEON_CG_SUPPORT_MC_MGCG |
2507 RADEON_CG_SUPPORT_SDMA_MGCG | 2507 RADEON_CG_SUPPORT_SDMA_MGCG |
2508 RADEON_CG_SUPPORT_SDMA_LS | 2508 RADEON_CG_SUPPORT_SDMA_LS |
2509 RADEON_CG_SUPPORT_BIF_LS | 2509 RADEON_CG_SUPPORT_BIF_LS |
2510 RADEON_CG_SUPPORT_VCE_MGCG | 2510 RADEON_CG_SUPPORT_VCE_MGCG |
2511 RADEON_CG_SUPPORT_UVD_MGCG | 2511 RADEON_CG_SUPPORT_UVD_MGCG |
2512 RADEON_CG_SUPPORT_HDP_LS | 2512 RADEON_CG_SUPPORT_HDP_LS |
2513 RADEON_CG_SUPPORT_HDP_MGCG; 2513 RADEON_CG_SUPPORT_HDP_MGCG;
2514 rdev->pg_flags = 0; 2514 rdev->pg_flags = 0;
2515 } 2515 }
2516 break; 2516 break;
2517 case CHIP_KAVERI: 2517 case CHIP_KAVERI:
2518 case CHIP_KABINI: 2518 case CHIP_KABINI:
2519 case CHIP_MULLINS: 2519 case CHIP_MULLINS:
2520 rdev->asic = &kv_asic; 2520 rdev->asic = &kv_asic;
2521 /* set num crtcs */ 2521 /* set num crtcs */
2522 if (rdev->family == CHIP_KAVERI) { 2522 if (rdev->family == CHIP_KAVERI) {
2523 rdev->num_crtc = 4; 2523 rdev->num_crtc = 4;
2524 rdev->cg_flags = 2524 rdev->cg_flags =
2525 RADEON_CG_SUPPORT_GFX_MGCG | 2525 RADEON_CG_SUPPORT_GFX_MGCG |
2526 RADEON_CG_SUPPORT_GFX_MGLS | 2526 RADEON_CG_SUPPORT_GFX_MGLS |
2527 RADEON_CG_SUPPORT_GFX_CGCG | 2527 RADEON_CG_SUPPORT_GFX_CGCG |
2528 RADEON_CG_SUPPORT_GFX_CGLS | 2528 RADEON_CG_SUPPORT_GFX_CGLS |
2529 RADEON_CG_SUPPORT_GFX_CGTS | 2529 RADEON_CG_SUPPORT_GFX_CGTS |
2530 RADEON_CG_SUPPORT_GFX_CGTS_LS | 2530 RADEON_CG_SUPPORT_GFX_CGTS_LS |
2531 RADEON_CG_SUPPORT_GFX_CP_LS | 2531 RADEON_CG_SUPPORT_GFX_CP_LS |
2532 RADEON_CG_SUPPORT_SDMA_MGCG | 2532 RADEON_CG_SUPPORT_SDMA_MGCG |
2533 RADEON_CG_SUPPORT_SDMA_LS | 2533 RADEON_CG_SUPPORT_SDMA_LS |
2534 RADEON_CG_SUPPORT_BIF_LS | 2534 RADEON_CG_SUPPORT_BIF_LS |
2535 RADEON_CG_SUPPORT_VCE_MGCG | 2535 RADEON_CG_SUPPORT_VCE_MGCG |
2536 RADEON_CG_SUPPORT_UVD_MGCG | 2536 RADEON_CG_SUPPORT_UVD_MGCG |
2537 RADEON_CG_SUPPORT_HDP_LS | 2537 RADEON_CG_SUPPORT_HDP_LS |
2538 RADEON_CG_SUPPORT_HDP_MGCG; 2538 RADEON_CG_SUPPORT_HDP_MGCG;
2539 rdev->pg_flags = 0; 2539 rdev->pg_flags = 0;
2540 /*RADEON_PG_SUPPORT_GFX_PG | 2540 /*RADEON_PG_SUPPORT_GFX_PG |
2541 RADEON_PG_SUPPORT_GFX_SMG | 2541 RADEON_PG_SUPPORT_GFX_SMG |
2542 RADEON_PG_SUPPORT_GFX_DMG | 2542 RADEON_PG_SUPPORT_GFX_DMG |
2543 RADEON_PG_SUPPORT_UVD | 2543 RADEON_PG_SUPPORT_UVD |
2544 RADEON_PG_SUPPORT_VCE | 2544 RADEON_PG_SUPPORT_VCE |
2545 RADEON_PG_SUPPORT_CP | 2545 RADEON_PG_SUPPORT_CP |
2546 RADEON_PG_SUPPORT_GDS | 2546 RADEON_PG_SUPPORT_GDS |
2547 RADEON_PG_SUPPORT_RLC_SMU_HS | 2547 RADEON_PG_SUPPORT_RLC_SMU_HS |
2548 RADEON_PG_SUPPORT_ACP | 2548 RADEON_PG_SUPPORT_ACP |
2549 RADEON_PG_SUPPORT_SAMU;*/ 2549 RADEON_PG_SUPPORT_SAMU;*/
2550 } else { 2550 } else {
2551 rdev->num_crtc = 2; 2551 rdev->num_crtc = 2;
2552 rdev->cg_flags = 2552 rdev->cg_flags =
2553 RADEON_CG_SUPPORT_GFX_MGCG | 2553 RADEON_CG_SUPPORT_GFX_MGCG |
2554 RADEON_CG_SUPPORT_GFX_MGLS | 2554 RADEON_CG_SUPPORT_GFX_MGLS |
2555 RADEON_CG_SUPPORT_GFX_CGCG | 2555 RADEON_CG_SUPPORT_GFX_CGCG |
2556 RADEON_CG_SUPPORT_GFX_CGLS | 2556 RADEON_CG_SUPPORT_GFX_CGLS |
2557 RADEON_CG_SUPPORT_GFX_CGTS | 2557 RADEON_CG_SUPPORT_GFX_CGTS |
2558 RADEON_CG_SUPPORT_GFX_CGTS_LS | 2558 RADEON_CG_SUPPORT_GFX_CGTS_LS |
2559 RADEON_CG_SUPPORT_GFX_CP_LS | 2559 RADEON_CG_SUPPORT_GFX_CP_LS |
2560 RADEON_CG_SUPPORT_SDMA_MGCG | 2560 RADEON_CG_SUPPORT_SDMA_MGCG |
2561 RADEON_CG_SUPPORT_SDMA_LS | 2561 RADEON_CG_SUPPORT_SDMA_LS |
2562 RADEON_CG_SUPPORT_BIF_LS | 2562 RADEON_CG_SUPPORT_BIF_LS |
2563 RADEON_CG_SUPPORT_VCE_MGCG | 2563 RADEON_CG_SUPPORT_VCE_MGCG |
2564 RADEON_CG_SUPPORT_UVD_MGCG | 2564 RADEON_CG_SUPPORT_UVD_MGCG |
2565 RADEON_CG_SUPPORT_HDP_LS | 2565 RADEON_CG_SUPPORT_HDP_LS |
2566 RADEON_CG_SUPPORT_HDP_MGCG; 2566 RADEON_CG_SUPPORT_HDP_MGCG;
2567 rdev->pg_flags = 0; 2567 rdev->pg_flags = 0;
2568 /*RADEON_PG_SUPPORT_GFX_PG | 2568 /*RADEON_PG_SUPPORT_GFX_PG |
2569 RADEON_PG_SUPPORT_GFX_SMG | 2569 RADEON_PG_SUPPORT_GFX_SMG |
2570 RADEON_PG_SUPPORT_UVD | 2570 RADEON_PG_SUPPORT_UVD |
2571 RADEON_PG_SUPPORT_VCE | 2571 RADEON_PG_SUPPORT_VCE |
2572 RADEON_PG_SUPPORT_CP | 2572 RADEON_PG_SUPPORT_CP |
2573 RADEON_PG_SUPPORT_GDS | 2573 RADEON_PG_SUPPORT_GDS |
2574 RADEON_PG_SUPPORT_RLC_SMU_HS | 2574 RADEON_PG_SUPPORT_RLC_SMU_HS |
2575 RADEON_PG_SUPPORT_SAMU;*/ 2575 RADEON_PG_SUPPORT_SAMU;*/
2576 } 2576 }
2577 rdev->has_uvd = true; 2577 rdev->has_uvd = true;
2578 break; 2578 break;
2579 default: 2579 default:
2580 /* FIXME: not supported yet */ 2580 /* FIXME: not supported yet */
2581 return -EINVAL; 2581 return -EINVAL;
2582 } 2582 }
2583 2583
2584 if (rdev->flags & RADEON_IS_IGP) { 2584 if (rdev->flags & RADEON_IS_IGP) {
2585 rdev->asic->pm.get_memory_clock = NULL; 2585 rdev->asic->pm.get_memory_clock = NULL;
2586 rdev->asic->pm.set_memory_clock = NULL; 2586 rdev->asic->pm.set_memory_clock = NULL;
2587 } 2587 }
2588 2588
2589 return 0; 2589 return 0;
2590 } 2590 }
2591 2591
2592 2592
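radeon_asic_init() is called once during device bring-up; after it returns, chip-specific behavior is reached exclusively through the rdev->asic vtable, so the family switch above never has to be repeated. A hypothetical probe-time fragment of a caller (error handling abbreviated; the direct vtable call is conceptual, as real callers dispatch through wrapper macros):

	r = radeon_asic_init(rdev);
	if (r)
		return r;	/* unknown or not-yet-supported family */

	/* later code dispatches through the per-family callbacks, e.g.: */
	rdev->asic->gart.tlb_flush(rdev);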
drivers/gpu/drm/radeon/radeon_device.c
1 /* 1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc. 2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc. 3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse. 4 * Copyright 2009 Jerome Glisse.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation 8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the 10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions: 11 * Software is furnished to do so, subject to the following conditions:
12 * 12 *
13 * The above copyright notice and this permission notice shall be included in 13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software. 14 * all copies or substantial portions of the Software.
15 * 15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE. 22 * OTHER DEALINGS IN THE SOFTWARE.
23 * 23 *
24 * Authors: Dave Airlie 24 * Authors: Dave Airlie
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28 #include <linux/console.h> 28 #include <linux/console.h>
29 #include <linux/slab.h> 29 #include <linux/slab.h>
30 #include <drm/drmP.h> 30 #include <drm/drmP.h>
31 #include <drm/drm_crtc_helper.h> 31 #include <drm/drm_crtc_helper.h>
32 #include <drm/radeon_drm.h> 32 #include <drm/radeon_drm.h>
33 #include <linux/vgaarb.h> 33 #include <linux/vgaarb.h>
34 #include <linux/vga_switcheroo.h> 34 #include <linux/vga_switcheroo.h>
35 #include <linux/efi.h> 35 #include <linux/efi.h>
36 #include "radeon_reg.h" 36 #include "radeon_reg.h"
37 #include "radeon.h" 37 #include "radeon.h"
38 #include "atom.h" 38 #include "atom.h"
39 39
40 static const char radeon_family_name[][16] = { 40 static const char radeon_family_name[][16] = {
41 "R100", 41 "R100",
42 "RV100", 42 "RV100",
43 "RS100", 43 "RS100",
44 "RV200", 44 "RV200",
45 "RS200", 45 "RS200",
46 "R200", 46 "R200",
47 "RV250", 47 "RV250",
48 "RS300", 48 "RS300",
49 "RV280", 49 "RV280",
50 "R300", 50 "R300",
51 "R350", 51 "R350",
52 "RV350", 52 "RV350",
53 "RV380", 53 "RV380",
54 "R420", 54 "R420",
55 "R423", 55 "R423",
56 "RV410", 56 "RV410",
57 "RS400", 57 "RS400",
58 "RS480", 58 "RS480",
59 "RS600", 59 "RS600",
60 "RS690", 60 "RS690",
61 "RS740", 61 "RS740",
62 "RV515", 62 "RV515",
63 "R520", 63 "R520",
64 "RV530", 64 "RV530",
65 "RV560", 65 "RV560",
66 "RV570", 66 "RV570",
67 "R580", 67 "R580",
68 "R600", 68 "R600",
69 "RV610", 69 "RV610",
70 "RV630", 70 "RV630",
71 "RV670", 71 "RV670",
72 "RV620", 72 "RV620",
73 "RV635", 73 "RV635",
74 "RS780", 74 "RS780",
75 "RS880", 75 "RS880",
76 "RV770", 76 "RV770",
77 "RV730", 77 "RV730",
78 "RV710", 78 "RV710",
79 "RV740", 79 "RV740",
80 "CEDAR", 80 "CEDAR",
81 "REDWOOD", 81 "REDWOOD",
82 "JUNIPER", 82 "JUNIPER",
83 "CYPRESS", 83 "CYPRESS",
84 "HEMLOCK", 84 "HEMLOCK",
85 "PALM", 85 "PALM",
86 "SUMO", 86 "SUMO",
87 "SUMO2", 87 "SUMO2",
88 "BARTS", 88 "BARTS",
89 "TURKS", 89 "TURKS",
90 "CAICOS", 90 "CAICOS",
91 "CAYMAN", 91 "CAYMAN",
92 "ARUBA", 92 "ARUBA",
93 "TAHITI", 93 "TAHITI",
94 "PITCAIRN", 94 "PITCAIRN",
95 "VERDE", 95 "VERDE",
96 "OLAND", 96 "OLAND",
97 "HAINAN", 97 "HAINAN",
98 "BONAIRE", 98 "BONAIRE",
99 "KAVERI", 99 "KAVERI",
100 "KABINI", 100 "KABINI",
101 "HAWAII", 101 "HAWAII",
102 "MULLINS", 102 "MULLINS",
103 "LAST", 103 "LAST",
104 }; 104 };
105 105
106 bool radeon_is_px(struct drm_device *dev) 106 bool radeon_is_px(struct drm_device *dev)
107 { 107 {
108 struct radeon_device *rdev = dev->dev_private; 108 struct radeon_device *rdev = dev->dev_private;
109 109
110 if (rdev->flags & RADEON_IS_PX) 110 if (rdev->flags & RADEON_IS_PX)
111 return true; 111 return true;
112 return false; 112 return false;
113 } 113 }
114 114
115 /** 115 /**
116 * radeon_program_register_sequence - program an array of registers. 116 * radeon_program_register_sequence - program an array of registers.
117 * 117 *
118 * @rdev: radeon_device pointer 118 * @rdev: radeon_device pointer
119 * @registers: pointer to the register array 119 * @registers: pointer to the register array
120 * @array_size: size of the register array 120 * @array_size: size of the register array
121 * 121 *
122 * Programs an array of registers with AND and OR masks. 122 * Programs an array of registers with AND and OR masks.
123 * This is a helper for setting golden registers. 123 * This is a helper for setting golden registers.
124 */ 124 */
125 void radeon_program_register_sequence(struct radeon_device *rdev, 125 void radeon_program_register_sequence(struct radeon_device *rdev,
126 const u32 *registers, 126 const u32 *registers,
127 const u32 array_size) 127 const u32 array_size)
128 { 128 {
129 u32 tmp, reg, and_mask, or_mask; 129 u32 tmp, reg, and_mask, or_mask;
130 int i; 130 int i;
131 131
132 if (array_size % 3) 132 if (array_size % 3)
133 return; 133 return;
134 134
135 for (i = 0; i < array_size; i += 3) { 135 for (i = 0; i < array_size; i += 3) {
136 reg = registers[i + 0]; 136 reg = registers[i + 0];
137 and_mask = registers[i + 1]; 137 and_mask = registers[i + 1];
138 or_mask = registers[i + 2]; 138 or_mask = registers[i + 2];
139 139
140 if (and_mask == 0xffffffff) { 140 if (and_mask == 0xffffffff) {
141 tmp = or_mask; 141 tmp = or_mask;
142 } else { 142 } else {
143 tmp = RREG32(reg); 143 tmp = RREG32(reg);
144 tmp &= ~and_mask; 144 tmp &= ~and_mask;
145 tmp |= or_mask; 145 tmp |= or_mask;
146 } 146 }
147 WREG32(reg, tmp); 147 WREG32(reg, tmp);
148 } 148 }
149 } 149 }
150 150
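The array consumed by radeon_program_register_sequence() is a flat list of {reg, and_mask, or_mask} triplets, which is why array_size must be a multiple of 3; an and_mask of 0xffffffff is the fast path that writes or_mask directly and skips the register read. A minimal sketch of a caller, with made-up register offsets and values (not taken from any real golden-settings table):

	static const u32 example_golden_registers[] = {
		/* reg      and_mask     or_mask */
		0x31e8,     0xffffffff,  0x00000012, /* full write: no read-modify-write */
		0x240c,     0x000007ff,  0x00000380, /* RMW: clear low 11 bits, OR in value */
	};

	/* array_size counts u32 entries, i.e. 3 * the number of triplets */
	radeon_program_register_sequence(rdev, example_golden_registers,
					 (u32)ARRAY_SIZE(example_golden_registers));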
151 void radeon_pci_config_reset(struct radeon_device *rdev) 151 void radeon_pci_config_reset(struct radeon_device *rdev)
152 { 152 {
153 pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA); 153 pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
154 } 154 }
155 155
156 /** 156 /**
157 * radeon_surface_init - Clear GPU surface registers. 157 * radeon_surface_init - Clear GPU surface registers.
158 * 158 *
159 * @rdev: radeon_device pointer 159 * @rdev: radeon_device pointer
160 * 160 *
161 * Clear GPU surface registers (r1xx-r5xx). 161 * Clear GPU surface registers (r1xx-r5xx).
162 */ 162 */
163 void radeon_surface_init(struct radeon_device *rdev) 163 void radeon_surface_init(struct radeon_device *rdev)
164 { 164 {
165 /* FIXME: check this out */ 165 /* FIXME: check this out */
166 if (rdev->family < CHIP_R600) { 166 if (rdev->family < CHIP_R600) {
167 int i; 167 int i;
168 168
169 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { 169 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
170 if (rdev->surface_regs[i].bo) 170 if (rdev->surface_regs[i].bo)
171 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo); 171 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
172 else 172 else
173 radeon_clear_surface_reg(rdev, i); 173 radeon_clear_surface_reg(rdev, i);
174 } 174 }
175 /* enable surfaces */ 175 /* enable surfaces */
176 WREG32(RADEON_SURFACE_CNTL, 0); 176 WREG32(RADEON_SURFACE_CNTL, 0);
177 } 177 }
178 } 178 }
179 179
180 /* 180 /*
181 * GPU scratch register helper functions. 181 * GPU scratch register helper functions.
182 */ 182 */
183 /** 183 /**
184 * radeon_scratch_init - Init scratch register driver information. 184 * radeon_scratch_init - Init scratch register driver information.
185 * 185 *
186 * @rdev: radeon_device pointer 186 * @rdev: radeon_device pointer
187 * 187 *
188 * Init CP scratch register driver information (r1xx-r5xx) 188 * Init CP scratch register driver information (r1xx-r5xx)
189 */ 189 */
190 void radeon_scratch_init(struct radeon_device *rdev) 190 void radeon_scratch_init(struct radeon_device *rdev)
191 { 191 {
192 int i; 192 int i;
193 193
194 /* FIXME: check this out */ 194 /* FIXME: check this out */
195 if (rdev->family < CHIP_R300) { 195 if (rdev->family < CHIP_R300) {
196 rdev->scratch.num_reg = 5; 196 rdev->scratch.num_reg = 5;
197 } else { 197 } else {
198 rdev->scratch.num_reg = 7; 198 rdev->scratch.num_reg = 7;
199 } 199 }
200 rdev->scratch.reg_base = RADEON_SCRATCH_REG0; 200 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
201 for (i = 0; i < rdev->scratch.num_reg; i++) { 201 for (i = 0; i < rdev->scratch.num_reg; i++) {
202 rdev->scratch.free[i] = true; 202 rdev->scratch.free[i] = true;
203 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); 203 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
204 } 204 }
205 } 205 }
206 206
207 /** 207 /**
208 * radeon_scratch_get - Allocate a scratch register 208 * radeon_scratch_get - Allocate a scratch register
209 * 209 *
210 * @rdev: radeon_device pointer 210 * @rdev: radeon_device pointer
211 * @reg: scratch register mmio offset 211 * @reg: scratch register mmio offset
212 * 212 *
213 * Allocate a CP scratch register for use by the driver (all asics). 213 * Allocate a CP scratch register for use by the driver (all asics).
214 * Returns 0 on success or -EINVAL on failure. 214 * Returns 0 on success or -EINVAL on failure.
215 */ 215 */
216 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg) 216 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
217 { 217 {
218 int i; 218 int i;
219 219
220 for (i = 0; i < rdev->scratch.num_reg; i++) { 220 for (i = 0; i < rdev->scratch.num_reg; i++) {
221 if (rdev->scratch.free[i]) { 221 if (rdev->scratch.free[i]) {
222 rdev->scratch.free[i] = false; 222 rdev->scratch.free[i] = false;
223 *reg = rdev->scratch.reg[i]; 223 *reg = rdev->scratch.reg[i];
224 return 0; 224 return 0;
225 } 225 }
226 } 226 }
227 return -EINVAL; 227 return -EINVAL;
228 } 228 }
229 229
230 /** 230 /**
231 * radeon_scratch_free - Free a scratch register 231 * radeon_scratch_free - Free a scratch register
232 * 232 *
233 * @rdev: radeon_device pointer 233 * @rdev: radeon_device pointer
234 * @reg: scratch register mmio offset 234 * @reg: scratch register mmio offset
235 * 235 *
236 * Free a CP scratch register allocated for use by the driver (all asics) 236 * Free a CP scratch register allocated for use by the driver (all asics)
237 */ 237 */
238 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) 238 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
239 { 239 {
240 int i; 240 int i;
241 241
242 for (i = 0; i < rdev->scratch.num_reg; i++) { 242 for (i = 0; i < rdev->scratch.num_reg; i++) {
243 if (rdev->scratch.reg[i] == reg) { 243 if (rdev->scratch.reg[i] == reg) {
244 rdev->scratch.free[i] = true; 244 rdev->scratch.free[i] = true;
245 return; 245 return;
246 } 246 }
247 } 247 }
248 } 248 }
249 249
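Together, radeon_scratch_get() and radeon_scratch_free() form a tiny fixed-size allocator over the CP scratch registers (5 or 7 of them, depending on the family). A hypothetical caller pattern, assuming the usual RREG32/WREG32 accessors:

	uint32_t scratch;
	int r;

	r = radeon_scratch_get(rdev, &scratch);	/* claim a free register */
	if (r)
		return r;			/* every scratch register is in use */

	WREG32(scratch, 0xCAFEDEAD);		/* seed a handshake value */
	/* ... submit work that writes 'scratch', poll it with RREG32(scratch) ... */

	radeon_scratch_free(rdev, scratch);	/* hand the register back */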
250 /* 250 /*
251 * GPU doorbell aperture helper functions. 251 * GPU doorbell aperture helper functions.
252 */ 252 */
253 /** 253 /**
254 * radeon_doorbell_init - Init doorbell driver information. 254 * radeon_doorbell_init - Init doorbell driver information.
255 * 255 *
256 * @rdev: radeon_device pointer 256 * @rdev: radeon_device pointer
257 * 257 *
258 * Init doorbell driver information (CIK) 258 * Init doorbell driver information (CIK)
259 * Returns 0 on success, error on failure. 259 * Returns 0 on success, error on failure.
260 */ 260 */
261 static int radeon_doorbell_init(struct radeon_device *rdev) 261 static int radeon_doorbell_init(struct radeon_device *rdev)
262 { 262 {
263 /* doorbell bar mapping */ 263 /* doorbell bar mapping */
264 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2); 264 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
265 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2); 265 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
266 266
267 rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS); 267 rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
268 if (rdev->doorbell.num_doorbells == 0) 268 if (rdev->doorbell.num_doorbells == 0)
269 return -EINVAL; 269 return -EINVAL;
270 270
271 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32)); 271 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
272 if (rdev->doorbell.ptr == NULL) { 272 if (rdev->doorbell.ptr == NULL) {
273 return -ENOMEM; 273 return -ENOMEM;
274 } 274 }
275 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base); 275 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
276 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size); 276 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
277 277
278 memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used)); 278 memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
279 279
280 return 0; 280 return 0;
281 } 281 }
282 282
283 /** 283 /**
284 * radeon_doorbell_fini - Tear down doorbell driver information. 284 * radeon_doorbell_fini - Tear down doorbell driver information.
285 * 285 *
286 * @rdev: radeon_device pointer 286 * @rdev: radeon_device pointer
287 * 287 *
288 * Tear down doorbell driver information (CIK) 288 * Tear down doorbell driver information (CIK)
289 */ 289 */
290 static void radeon_doorbell_fini(struct radeon_device *rdev) 290 static void radeon_doorbell_fini(struct radeon_device *rdev)
291 { 291 {
292 iounmap(rdev->doorbell.ptr); 292 iounmap(rdev->doorbell.ptr);
293 rdev->doorbell.ptr = NULL; 293 rdev->doorbell.ptr = NULL;
294 } 294 }
295 295
296 /** 296 /**
297 * radeon_doorbell_get - Allocate a doorbell entry 297 * radeon_doorbell_get - Allocate a doorbell entry
298 * 298 *
299 * @rdev: radeon_device pointer 299 * @rdev: radeon_device pointer
300 * @doorbell: doorbell index 300 * @doorbell: doorbell index
301 * 301 *
302 * Allocate a doorbell for use by the driver (all asics). 302 * Allocate a doorbell for use by the driver (all asics).
303 * Returns 0 on success or -EINVAL on failure. 303 * Returns 0 on success or -EINVAL on failure.
304 */ 304 */
305 int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell) 305 int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
306 { 306 {
307 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells); 307 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
308 if (offset < rdev->doorbell.num_doorbells) { 308 if (offset < rdev->doorbell.num_doorbells) {
309 __set_bit(offset, rdev->doorbell.used); 309 __set_bit(offset, rdev->doorbell.used);
310 *doorbell = offset; 310 *doorbell = offset;
311 return 0; 311 return 0;
312 } else { 312 } else {
313 return -EINVAL; 313 return -EINVAL;
314 } 314 }
315 } 315 }
316 316
317 /** 317 /**
318 * radeon_doorbell_free - Free a doorbell entry 318 * radeon_doorbell_free - Free a doorbell entry
319 * 319 *
320 * @rdev: radeon_device pointer 320 * @rdev: radeon_device pointer
321 * @doorbell: doorbell index 321 * @doorbell: doorbell index
322 * 322 *
323 * Free a doorbell allocated for use by the driver (all asics) 323 * Free a doorbell allocated for use by the driver (all asics)
324 */ 324 */
325 void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell) 325 void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
326 { 326 {
327 if (doorbell < rdev->doorbell.num_doorbells) 327 if (doorbell < rdev->doorbell.num_doorbells)
328 __clear_bit(doorbell, rdev->doorbell.used); 328 __clear_bit(doorbell, rdev->doorbell.used);
329 } 329 }
330 330
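The doorbell helpers mirror the scratch-register pair, but allocate from a bitmap (rdev->doorbell.used) because CIK parts expose far more doorbells than there are scratch registers. A conceptual usage sketch (the wptr write is illustrative of what a ring would do, not a copy of the actual ring code):

	u32 db_index;

	if (radeon_doorbell_get(rdev, &db_index) == 0) {
		/* a ring would remember db_index and kick the GPU through it,
		 * conceptually: writel(wptr, rdev->doorbell.ptr + db_index); */
		radeon_doorbell_free(rdev, db_index);
	}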
331 /* 331 /*
332 * radeon_wb_*() 332 * radeon_wb_*()
333 * Writeback is the method by which the GPU updates special pages 333 * Writeback is the method by which the GPU updates special pages
334 * in memory with the status of certain GPU events (fences, ring pointers, 334 * in memory with the status of certain GPU events (fences, ring pointers,
335 * etc.). 335 * etc.).
336 */ 336 */
337 337
338 /** 338 /**
339 * radeon_wb_disable - Disable Writeback 339 * radeon_wb_disable - Disable Writeback
340 * 340 *
341 * @rdev: radeon_device pointer 341 * @rdev: radeon_device pointer
342 * 342 *
343 * Disables Writeback (all asics). Used for suspend. 343 * Disables Writeback (all asics). Used for suspend.
344 */ 344 */
345 void radeon_wb_disable(struct radeon_device *rdev) 345 void radeon_wb_disable(struct radeon_device *rdev)
346 { 346 {
347 rdev->wb.enabled = false; 347 rdev->wb.enabled = false;
348 } 348 }
349 349
350 /** 350 /**
351 * radeon_wb_fini - Disable Writeback and free memory 351 * radeon_wb_fini - Disable Writeback and free memory
352 * 352 *
353 * @rdev: radeon_device pointer 353 * @rdev: radeon_device pointer
354 * 354 *
355 * Disables Writeback and frees the Writeback memory (all asics). 355 * Disables Writeback and frees the Writeback memory (all asics).
356 * Used at driver shutdown. 356 * Used at driver shutdown.
357 */ 357 */
358 void radeon_wb_fini(struct radeon_device *rdev) 358 void radeon_wb_fini(struct radeon_device *rdev)
359 { 359 {
360 radeon_wb_disable(rdev); 360 radeon_wb_disable(rdev);
361 if (rdev->wb.wb_obj) { 361 if (rdev->wb.wb_obj) {
362 if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) { 362 if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
363 radeon_bo_kunmap(rdev->wb.wb_obj); 363 radeon_bo_kunmap(rdev->wb.wb_obj);
364 radeon_bo_unpin(rdev->wb.wb_obj); 364 radeon_bo_unpin(rdev->wb.wb_obj);
365 radeon_bo_unreserve(rdev->wb.wb_obj); 365 radeon_bo_unreserve(rdev->wb.wb_obj);
366 } 366 }
367 radeon_bo_unref(&rdev->wb.wb_obj); 367 radeon_bo_unref(&rdev->wb.wb_obj);
368 rdev->wb.wb = NULL; 368 rdev->wb.wb = NULL;
369 rdev->wb.wb_obj = NULL; 369 rdev->wb.wb_obj = NULL;
370 } 370 }
371 } 371 }
372 372
373 /** 373 /**
374 * radeon_wb_init - Init Writeback driver info and allocate memory 374 * radeon_wb_init - Init Writeback driver info and allocate memory
375 * 375 *
376 * @rdev: radeon_device pointer 376 * @rdev: radeon_device pointer
377 * 377 *
378 * Initializes Writeback and allocates the Writeback memory (all asics). 378 * Initializes Writeback and allocates the Writeback memory (all asics).
379 * Used at driver startup. 379 * Used at driver startup.
380 * Returns 0 on success or a negative error code on failure. 380 * Returns 0 on success or a negative error code on failure.
381 */ 381 */
382 int radeon_wb_init(struct radeon_device *rdev) 382 int radeon_wb_init(struct radeon_device *rdev)
383 { 383 {
384 int r; 384 int r;
385 385
386 if (rdev->wb.wb_obj == NULL) { 386 if (rdev->wb.wb_obj == NULL) {
387 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, 387 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
388 RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj); 388 RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
389 if (r) { 389 if (r) {
390 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); 390 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
391 return r; 391 return r;
392 } 392 }
393 r = radeon_bo_reserve(rdev->wb.wb_obj, false); 393 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
394 if (unlikely(r != 0)) { 394 if (unlikely(r != 0)) {
395 radeon_wb_fini(rdev); 395 radeon_wb_fini(rdev);
396 return r; 396 return r;
397 } 397 }
398 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, 398 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
399 &rdev->wb.gpu_addr); 399 &rdev->wb.gpu_addr);
400 if (r) { 400 if (r) {
401 radeon_bo_unreserve(rdev->wb.wb_obj); 401 radeon_bo_unreserve(rdev->wb.wb_obj);
402 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); 402 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
403 radeon_wb_fini(rdev); 403 radeon_wb_fini(rdev);
404 return r; 404 return r;
405 } 405 }
406 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 406 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
407 radeon_bo_unreserve(rdev->wb.wb_obj); 407 radeon_bo_unreserve(rdev->wb.wb_obj);
408 if (r) { 408 if (r) {
409 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); 409 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
410 radeon_wb_fini(rdev); 410 radeon_wb_fini(rdev);
411 return r; 411 return r;
412 } 412 }
413 } 413 }
414 414
415 /* clear wb memory */ 415 /* clear wb memory */
416 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE); 416 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
417 /* disable event_write fences */ 417 /* disable event_write fences */
418 rdev->wb.use_event = false; 418 rdev->wb.use_event = false;
419 /* disabled via module param */ 419 /* disabled via module param */
420 if (radeon_no_wb == 1) { 420 if (radeon_no_wb == 1) {
421 rdev->wb.enabled = false; 421 rdev->wb.enabled = false;
422 } else { 422 } else {
423 if (rdev->flags & RADEON_IS_AGP) { 423 if (rdev->flags & RADEON_IS_AGP) {
424 /* often unreliable on AGP */ 424 /* often unreliable on AGP */
425 rdev->wb.enabled = false; 425 rdev->wb.enabled = false;
426 } else if (rdev->family < CHIP_R300) { 426 } else if (rdev->family < CHIP_R300) {
427 /* often unreliable on pre-r300 */ 427 /* often unreliable on pre-r300 */
428 rdev->wb.enabled = false; 428 rdev->wb.enabled = false;
429 } else { 429 } else {
430 rdev->wb.enabled = true; 430 rdev->wb.enabled = true;
431 /* event_write fences are only available on r600+ */ 431 /* event_write fences are only available on r600+ */
432 if (rdev->family >= CHIP_R600) { 432 if (rdev->family >= CHIP_R600) {
433 rdev->wb.use_event = true; 433 rdev->wb.use_event = true;
434 } 434 }
435 } 435 }
436 } 436 }
437 /* always use writeback/events on NI, APUs */ 437 /* always use writeback/events on NI, APUs */
438 if (rdev->family >= CHIP_PALM) { 438 if (rdev->family >= CHIP_PALM) {
439 rdev->wb.enabled = true; 439 rdev->wb.enabled = true;
440 rdev->wb.use_event = true; 440 rdev->wb.use_event = true;
441 } 441 }
442 442
443 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis"); 443 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
444 444
445 return 0; 445 return 0;
446 } 446 }
447 447
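Once writeback is enabled, consumers read GPU status out of the writeback page instead of doing an expensive MMIO register read. A rough sketch of the consumer side (field names like rptr_offs/rptr_reg are illustrative of the pattern, assuming a byte offset into the 4 KB page):

	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs / 4]); /* plain memory read */
	else
		rptr = RREG32(ring->rptr_reg);                        /* MMIO fallback */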
448 /** 448 /**
449 * radeon_vram_location - try to find VRAM location 449 * radeon_vram_location - try to find VRAM location
450 * @rdev: radeon device structure holding all necessary information 450 * @rdev: radeon device structure holding all necessary information
451 * @mc: memory controller structure holding memory information 451 * @mc: memory controller structure holding memory information
452 * @base: base address at which to put VRAM 452 * @base: base address at which to put VRAM
453 * 453 *
454 * Function will try to place VRAM at the base address provided 454 * Function will try to place VRAM at the base address provided
455 * as a parameter (which is so far either the PCI aperture address 455 * as a parameter (which is so far either the PCI aperture address
456 * or, for IGP, the TOM base address). 456 * or, for IGP, the TOM base address).
457 * 457 *
458 * If there is not enough space to fit the invisible VRAM in the 32-bit 458 * If there is not enough space to fit the invisible VRAM in the 32-bit
459 * address space then we limit the VRAM size to the aperture. 459 * address space then we limit the VRAM size to the aperture.
460 * 460 *
461 * If we are using AGP and if the AGP aperture doesn't allow us to have 461 * If we are using AGP and if the AGP aperture doesn't allow us to have
462 * room for all the VRAM then we restrict the VRAM to the PCI aperture 462 * room for all the VRAM then we restrict the VRAM to the PCI aperture
463 * size and print a warning. 463 * size and print a warning.
464 * 464 *
465 * This function never fails; the worst case is limiting the VRAM. 465 * This function never fails; the worst case is limiting the VRAM.
466 * 466 *
467 * Note: GTT start, end, size should be initialized before calling this 467 * Note: GTT start, end, size should be initialized before calling this
468 * function on AGP platforms. 468 * function on AGP platforms.
469 * 469 *
470 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size, 470 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
471 * this shouldn't be a problem as we are using the PCI aperture as a reference. 471 * this shouldn't be a problem as we are using the PCI aperture as a reference.
472 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but 472 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
473 * not IGP. 473 * not IGP.
474 * 474 *
475 * Note: we use mc_vram_size as on some boards we need to program the MC to 475 * Note: we use mc_vram_size as on some boards we need to program the MC to
476 * cover the whole aperture even if the VRAM size is smaller than the aperture 476 * cover the whole aperture even if the VRAM size is smaller than the aperture
477 * size (Novell bug 204882, along with lots of Ubuntu ones). 477 * size (Novell bug 204882, along with lots of Ubuntu ones).
478 * 478 *
479 * Note: when limiting vram it's safe to overwrite real_vram_size because 479 * Note: when limiting vram it's safe to overwrite real_vram_size because
480 * we are not in the case where real_vram_size is smaller than mc_vram_size 480 * we are not in the case where real_vram_size is smaller than mc_vram_size
481 * (i.e. not affected by the bogus hw of Novell bug 204882, along with lots 481 * (i.e. not affected by the bogus hw of Novell bug 204882, along with lots
482 * of Ubuntu ones). 482 * of Ubuntu ones).
483 * 483 *
484 * Note: IGP TOM addr should be the same as the aperture addr, we don't 484 * Note: IGP TOM addr should be the same as the aperture addr, we don't
485 * explicitly check for that though. 485 * explicitly check for that though.
486 * 486 *
487 * FIXME: when reducing VRAM size align new size on power of 2. 487 * FIXME: when reducing VRAM size align new size on power of 2.
488 */ 488 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

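/*
 * A minimal standalone sketch (not driver code; names and values are
 * hypothetical) of the clamping rule above, showing why
 * radeon_vram_location() cannot fail: an oversized VRAM request is simply
 * reduced to the PCI aperture size.
 */
#if 0
#include <stdint.h>

struct mc_sketch {
	uint64_t vram_start, vram_end;
	uint64_t mc_vram_size, real_vram_size, aper_size;
};

static void place_vram_sketch(struct mc_sketch *mc, uint64_t mc_mask, uint64_t base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > mc_mask - base + 1) {
		/* request exceeds the GPU address space: clamp to aperture */
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
}

/*
 * E.g. with a 32-bit MC (mc_mask = 0xffffffff) and base = 0xc0000000 only
 * 1GB of address space remains, so a 2GB mc_vram_size collapses to
 * aper_size.
 */
#endif
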
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function never fails.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

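/*
 * A small sketch of the alignment arithmetic used above (hypothetical,
 * userspace-style; not driver code). gtt_base_align is stored as a mask,
 * i.e. (alignment - 1), so rounding down is a plain AND and rounding up
 * adds the mask first.
 */
#if 0
#include <stdint.h>

static uint64_t align_down(uint64_t v, uint64_t mask)
{
	return v & ~mask;		/* e.g. 0x12345678 -> 0x12300000 for mask 0xfffff */
}

static uint64_t align_up(uint64_t v, uint64_t mask)
{
	return (v + mask) & ~mask;	/* e.g. 0x12345678 -> 0x12400000 for mask 0xfffff */
}

/*
 * GTT placed after VRAM starts at align_up(vram_end + 1, mask); GTT
 * placed before VRAM ends at align_down(vram_start, mask) - 1, which is
 * exactly the size_af/size_bf computation in radeon_gtt_location().
 */
#endif
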
/*
 * GPU helper functions.
 */
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

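/*
 * The detection idea above, as a compact sketch (hypothetical register
 * values and bit position; not driver code): OR the per-CRTC control
 * registers together so a single masked test tells whether the vbios
 * left any CRTC enabled.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

#define CRTC_MASTER_EN (1u << 0)	/* assumed bit, for illustration only */

static bool any_crtc_enabled(const uint32_t *crtc_cntl, int n)
{
	uint32_t reg = 0;
	int i;

	for (i = 0; i < n; i++)
		reg |= crtc_cntl[i];	/* accumulate enable bits */
	return (reg & CRTC_MASTER_EN) != 0;
}
#endif
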
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * Params are used to calculate display watermarks (all asics).
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in MHz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(MHz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

/**
 * radeon_boot_test_post_card - check and possibly initialize the hw
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic is initialized and if not, attempt to initialize
 * it (all asics).
 * Returns true if initialized or false if not.
 */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}

/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

/**
 * radeon_atombios_fini - free the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
	}
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
	kfree(rdev->mode_info.atom_card_info);
	rdev->mode_info.atom_card_info = NULL;
}

/* COMBIOS */
/*
 * COMBIOS is the bios format prior to ATOM. It provides
 * command tables similar to ATOM, but doesn't have a unified
 * parser. See radeon_combios.c
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
/**
 * radeon_vga_set_decode - enable/disable vga decode
 *
 * @cookie: radeon_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

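/*
 * Why (arg & (arg - 1)) == 0 works: subtracting 1 clears the lowest set
 * bit and sets every bit below it, so the AND is zero exactly when at
 * most one bit was set. Note that 0 passes too, which is convenient here
 * since radeon_vram_limit == 0 means "no limit" (see the limit test in
 * radeon_vram_location()). A standalone sketch:
 */
#if 0
#include <stdbool.h>

static bool is_pot_or_zero(int arg)
{
	return (arg & (arg - 1)) == 0;
}

/* is_pot_or_zero(512) == true   (0x200 & 0x1ff == 0)
 * is_pot_or_zero(768) == false  (0x300 & 0x2ff == 0x200)
 * is_pot_or_zero(0)   == true */
#endif
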
/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	if (radeon_gart_size == -1) {
		/* default to a larger gart size on newer asics */
		if (rdev->family >= CHIP_RV770)
			radeon_gart_size = 1024;
		else
			radeon_gart_size = 512;
	}
	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
				radeon_gart_size);
		if (rdev->family >= CHIP_RV770)
			radeon_gart_size = 1024;
		else
			radeon_gart_size = 512;
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		if (rdev->family >= CHIP_RV770)
			radeon_gart_size = 1024;
		else
			radeon_gart_size = 512;
	}
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}

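/*
 * A tiny sketch of the unit conversion above (not driver code): the
 * module parameters are in megabytes, so the driver shifts left by 20
 * bits to turn them into a byte count.
 */
#if 0
#include <stdint.h>

static uint64_t mb_to_bytes(uint32_t mb)
{
	return (uint64_t)mb << 20;	/* 1 MB == 2^20 bytes */
}

/* mb_to_bytes(512) == 0x20000000, i.e. the default 512MB gtt_size */
#endif
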
/**
 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
 * needed for waking up.
 *
 * @pdev: pci dev pointer
 */
static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
{

	/* 6600m in a macbook pro */
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
	    pdev->subsystem_device == 0x00e2) {
		printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
		return true;
	}

	return false;
}

/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * radeon_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};

/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initializations are all done here so we
	 * can recall functions without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	rdev->vm_manager.max_pfn = 1 << 20;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart.
	 * However, some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask.
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have more than one VGA card, disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration is not working on the AGP card; try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}

	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;
}

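/*
 * A sketch of the MC address-mask selection used in radeon_device_init()
 * above (hypothetical helper, not driver code): the mask is simply
 * (1 << address_bits) - 1.
 */
#if 0
#include <stdint.h>

static uint64_t mc_mask_for_bits(unsigned int bits)
{
	return bits >= 64 ? ~0ULL : (1ULL << bits) - 1;
}

/* mc_mask_for_bits(40) == 0xffffffffffULL  (CAYMAN and newer)
 * mc_mask_for_bits(36) == 0xfffffffffULL   (CEDAR and newer)
 * mc_mask_for_bits(32) == 0xffffffffULL    (older asics) */
#endif
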
static void radeon_debugfs_remove_files(struct radeon_device *rdev);

/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
	radeon_debugfs_remove_files(rdev);
}


/*
 * Suspend & resume.
 */
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: whether to put the hw into a low power (D3hot) state
 * @fbcon: whether to suspend the fbdev console
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			force_completion = true;
		}
	}
	if (force_completion) {
		radeon_fence_driver_force_completion(rdev);
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}

1484 /** 1484 /**
1485 * radeon_resume_kms - initiate device resume 1485 * radeon_resume_kms - initiate device resume
1486 * 1486 *
1487 * @dev: drm dev pointer 1487 * @dev: drm dev pointer
1487 * @resume: true to bring the hw out of a low power state 1487 * @resume: true to bring the hw out of a low power state
1487 * @fbcon: true to resume the fbdev console as well 1487 * @fbcon: true to resume the fbdev console as well
1488 * 1488 *
1489 * Brings the hw back to an operating state (all asics). 1489 * Brings the hw back to an operating state (all asics).
1490 * Returns 0 for success or an error on failure. 1490 * Returns 0 for success or an error on failure.
1491 * Called at driver resume. 1491 * Called at driver resume.
1492 */ 1492 */
1493 int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) 1493 int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1494 { 1494 {
1495 struct drm_connector *connector; 1495 struct drm_connector *connector;
1496 struct radeon_device *rdev = dev->dev_private; 1496 struct radeon_device *rdev = dev->dev_private;
1497 int r; 1497 int r;
1498 1498
1499 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1499 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1500 return 0; 1500 return 0;
1501 1501
1502 if (fbcon) { 1502 if (fbcon) {
1503 console_lock(); 1503 console_lock();
1504 } 1504 }
1505 if (resume) { 1505 if (resume) {
1506 pci_set_power_state(dev->pdev, PCI_D0); 1506 pci_set_power_state(dev->pdev, PCI_D0);
1507 pci_restore_state(dev->pdev); 1507 pci_restore_state(dev->pdev);
1508 if (pci_enable_device(dev->pdev)) { 1508 if (pci_enable_device(dev->pdev)) {
1509 if (fbcon) 1509 if (fbcon)
1510 console_unlock(); 1510 console_unlock();
1511 return -1; 1511 return -1;
1512 } 1512 }
1513 } 1513 }
1514 /* resume AGP if in use */ 1514 /* resume AGP if in use */
1515 radeon_agp_resume(rdev); 1515 radeon_agp_resume(rdev);
1516 radeon_resume(rdev); 1516 radeon_resume(rdev);
1517 1517
1518 r = radeon_ib_ring_tests(rdev); 1518 r = radeon_ib_ring_tests(rdev);
1519 if (r) 1519 if (r)
1520 DRM_ERROR("ib ring test failed (%d).\n", r); 1520 DRM_ERROR("ib ring test failed (%d).\n", r);
1521 1521
1522 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 1522 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
1523 /* do dpm late init */ 1523 /* do dpm late init */
1524 r = radeon_pm_late_init(rdev); 1524 r = radeon_pm_late_init(rdev);
1525 if (r) { 1525 if (r) {
1526 rdev->pm.dpm_enabled = false; 1526 rdev->pm.dpm_enabled = false;
1527 DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); 1527 DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1528 } 1528 }
1529 } else { 1529 } else {
1530 /* resume old pm late */ 1530 /* resume old pm late */
1531 radeon_pm_resume(rdev); 1531 radeon_pm_resume(rdev);
1532 } 1532 }
1533 1533
1534 radeon_restore_bios_scratch_regs(rdev); 1534 radeon_restore_bios_scratch_regs(rdev);
1535 1535
1536 /* init dig PHYs, disp eng pll */ 1536 /* init dig PHYs, disp eng pll */
1537 if (rdev->is_atom_bios) { 1537 if (rdev->is_atom_bios) {
1538 radeon_atom_encoder_init(rdev); 1538 radeon_atom_encoder_init(rdev);
1539 radeon_atom_disp_eng_pll_init(rdev); 1539 radeon_atom_disp_eng_pll_init(rdev);
1540 /* turn on the BL */ 1540 /* turn on the BL */
1541 if (rdev->mode_info.bl_encoder) { 1541 if (rdev->mode_info.bl_encoder) {
1542 u8 bl_level = radeon_get_backlight_level(rdev, 1542 u8 bl_level = radeon_get_backlight_level(rdev,
1543 rdev->mode_info.bl_encoder); 1543 rdev->mode_info.bl_encoder);
1544 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder, 1544 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1545 bl_level); 1545 bl_level);
1546 } 1546 }
1547 } 1547 }
1548 /* reset hpd state */ 1548 /* reset hpd state */
1549 radeon_hpd_init(rdev); 1549 radeon_hpd_init(rdev);
1550 /* blat the mode back in */ 1550 /* blat the mode back in */
1551 if (fbcon) { 1551 if (fbcon) {
1552 drm_helper_resume_force_mode(dev); 1552 drm_helper_resume_force_mode(dev);
1553 /* turn on display hw */ 1553 /* turn on display hw */
1554 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1554 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1555 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 1555 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1556 } 1556 }
1557 } 1557 }
1558 1558
1559 drm_kms_helper_poll_enable(dev); 1559 drm_kms_helper_poll_enable(dev);
1560 1560
1561 /* set the power state here in case we are a PX system or headless */
1562 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
1563 radeon_pm_compute_clocks(rdev);
1564
1561 if (fbcon) { 1565 if (fbcon) {
1562 radeon_fbdev_set_suspend(rdev, 0); 1566 radeon_fbdev_set_suspend(rdev, 0);
1563 console_unlock(); 1567 console_unlock();
1564 } 1568 }
1565 1569
1566 return 0; 1570 return 0;
1567 } 1571 }
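
For context, a minimal sketch of how the two KMS entry points above are wired into the driver's PM callbacks. The helper names and bodies below follow the usual pattern in radeon_drv.c but are reproduced from memory, so treat them as an assumption rather than a quote of that file. Note also the hunk added on the resume path above (radeon_pm_compute_clocks for PX or headless systems), which is the "resume fixes for some systems" change this merge pulls in.

/* Sketch (assumption): dev_pm_ops callbacks forwarding to the entry points above. */
static int radeon_pmops_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        /* suspend=true: drop the PCI device to D3hot; fbcon=true: park the console */
        return radeon_suspend_kms(drm_dev, true, true);
}

static int radeon_pmops_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        /* resume=true: re-enable the PCI device before touching the hw */
        return radeon_resume_kms(drm_dev, true, true);
}
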
1568 1572
1569 /** 1573 /**
1570 * radeon_gpu_reset - reset the asic 1574 * radeon_gpu_reset - reset the asic
1571 * 1575 *
1572 * @rdev: radeon device pointer 1576 * @rdev: radeon device pointer
1573 * 1577 *
1574 * Attempt to reset the GPU if it has hung (all asics). 1578 * Attempt to reset the GPU if it has hung (all asics).
1575 * Returns 0 for success or an error on failure. 1579 * Returns 0 for success or an error on failure.
1576 */ 1580 */
1577 int radeon_gpu_reset(struct radeon_device *rdev) 1581 int radeon_gpu_reset(struct radeon_device *rdev)
1578 { 1582 {
1579 unsigned ring_sizes[RADEON_NUM_RINGS]; 1583 unsigned ring_sizes[RADEON_NUM_RINGS];
1580 uint32_t *ring_data[RADEON_NUM_RINGS]; 1584 uint32_t *ring_data[RADEON_NUM_RINGS];
1581 1585
1582 bool saved = false; 1586 bool saved = false;
1583 1587
1584 int i, r; 1588 int i, r;
1585 int resched; 1589 int resched;
1586 1590
1587 down_write(&rdev->exclusive_lock); 1591 down_write(&rdev->exclusive_lock);
1588 1592
1589 if (!rdev->needs_reset) { 1593 if (!rdev->needs_reset) {
1590 up_write(&rdev->exclusive_lock); 1594 up_write(&rdev->exclusive_lock);
1591 return 0; 1595 return 0;
1592 } 1596 }
1593 1597
1594 rdev->needs_reset = false; 1598 rdev->needs_reset = false;
1595 1599
1596 radeon_save_bios_scratch_regs(rdev); 1600 radeon_save_bios_scratch_regs(rdev);
1597 /* block TTM */ 1601 /* block TTM */
1598 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 1602 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1599 radeon_pm_suspend(rdev); 1603 radeon_pm_suspend(rdev);
1600 radeon_suspend(rdev); 1604 radeon_suspend(rdev);
1601 1605
1602 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1606 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1603 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i], 1607 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1604 &ring_data[i]); 1608 &ring_data[i]);
1605 if (ring_sizes[i]) { 1609 if (ring_sizes[i]) {
1606 saved = true; 1610 saved = true;
1607 dev_info(rdev->dev, "Saved %d dwords of commands " 1611 dev_info(rdev->dev, "Saved %d dwords of commands "
1608 "on ring %d.\n", ring_sizes[i], i); 1612 "on ring %d.\n", ring_sizes[i], i);
1609 } 1613 }
1610 } 1614 }
1611 1615
1612 retry: 1616 retry:
1613 r = radeon_asic_reset(rdev); 1617 r = radeon_asic_reset(rdev);
1614 if (!r) { 1618 if (!r) {
1615 dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n"); 1619 dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
1616 radeon_resume(rdev); 1620 radeon_resume(rdev);
1617 } 1621 }
1618 1622
1619 radeon_restore_bios_scratch_regs(rdev); 1623 radeon_restore_bios_scratch_regs(rdev);
1620 1624
1621 if (!r) { 1625 if (!r) {
1622 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1626 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1623 radeon_ring_restore(rdev, &rdev->ring[i], 1627 radeon_ring_restore(rdev, &rdev->ring[i],
1624 ring_sizes[i], ring_data[i]); 1628 ring_sizes[i], ring_data[i]);
1625 ring_sizes[i] = 0; 1629 ring_sizes[i] = 0;
1626 ring_data[i] = NULL; 1630 ring_data[i] = NULL;
1627 } 1631 }
1628 1632
1629 r = radeon_ib_ring_tests(rdev); 1633 r = radeon_ib_ring_tests(rdev);
1630 if (r) { 1634 if (r) {
1631 dev_err(rdev->dev, "ib ring test failed (%d).\n", r); 1635 dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
1632 if (saved) { 1636 if (saved) {
1633 saved = false; 1637 saved = false;
1634 radeon_suspend(rdev); 1638 radeon_suspend(rdev);
1635 goto retry; 1639 goto retry;
1636 } 1640 }
1637 } 1641 }
1638 } else { 1642 } else {
1639 radeon_fence_driver_force_completion(rdev); 1643 radeon_fence_driver_force_completion(rdev);
1640 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1644 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1641 kfree(ring_data[i]); 1645 kfree(ring_data[i]);
1642 } 1646 }
1643 } 1647 }
1644 1648
1645 radeon_pm_resume(rdev); 1649 radeon_pm_resume(rdev);
1646 drm_helper_resume_force_mode(rdev->ddev); 1650 drm_helper_resume_force_mode(rdev->ddev);
1647 1651
1648 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 1652 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1649 if (r) { 1653 if (r) {
1650 /* bad news, how do we tell userspace? */ 1654 /* bad news, how do we tell userspace? */
1651 dev_info(rdev->dev, "GPU reset failed\n"); 1655 dev_info(rdev->dev, "GPU reset failed\n");
1652 } 1656 }
1653 1657
1654 up_write(&rdev->exclusive_lock); 1658 up_write(&rdev->exclusive_lock);
1655 return r; 1659 return r;
1656 } 1660 }
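
The reset path above backs up unprocessed ring contents, resets the ASIC, restores the rings, and, if the IB tests then fail, discards the saved commands and retries one clean reset. A self-contained control-flow sketch with stubbed helpers (every name here is hypothetical, not driver API):

/* Control-flow sketch of the retry logic above; the helpers are stubs. */
#include <stdbool.h>

static bool backup_rings(void)  { return true; } /* did we save anything? */
static int  asic_reset(void)    { return 0; }
static void suspend_hw(void)    { }
static void restore_rings(void) { }
static void drop_backups(void)  { }
static int  ring_tests(void)    { return 0; }

static int gpu_reset_flow(void)
{
        bool saved = backup_rings();
        int r;

retry:
        r = asic_reset();
        if (!r) {
                if (saved)
                        restore_rings();
                r = ring_tests();
                if (r && saved) {
                        /* the restored commands may be what hung the GPU:
                         * discard them, quiesce, and try one clean reset */
                        saved = false;
                        suspend_hw();
                        goto retry;
                }
        } else {
                drop_backups();
        }
        return r;
}

int main(void) { return gpu_reset_flow(); }
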
1657 1661
1658 1662
1659 /* 1663 /*
1660 * Debugfs 1664 * Debugfs
1661 */ 1665 */
1662 int radeon_debugfs_add_files(struct radeon_device *rdev, 1666 int radeon_debugfs_add_files(struct radeon_device *rdev,
1663 struct drm_info_list *files, 1667 struct drm_info_list *files,
1664 unsigned nfiles) 1668 unsigned nfiles)
1665 { 1669 {
1666 unsigned i; 1670 unsigned i;
1667 1671
1668 for (i = 0; i < rdev->debugfs_count; i++) { 1672 for (i = 0; i < rdev->debugfs_count; i++) {
1669 if (rdev->debugfs[i].files == files) { 1673 if (rdev->debugfs[i].files == files) {
1670 /* Already registered */ 1674 /* Already registered */
1671 return 0; 1675 return 0;
1672 } 1676 }
1673 } 1677 }
1674 1678
1675 i = rdev->debugfs_count + 1; 1679 i = rdev->debugfs_count + 1;
1676 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) { 1680 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1677 DRM_ERROR("Reached maximum number of debugfs components.\n"); 1681 DRM_ERROR("Reached maximum number of debugfs components.\n");
1678 DRM_ERROR("Report so we increase " 1682 DRM_ERROR("Report so we increase "
1679 "RADEON_DEBUGFS_MAX_COMPONENTS.\n"); 1683 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
1680 return -EINVAL; 1684 return -EINVAL;
1681 } 1685 }
1682 rdev->debugfs[rdev->debugfs_count].files = files; 1686 rdev->debugfs[rdev->debugfs_count].files = files;
1683 rdev->debugfs[rdev->debugfs_count].num_files = nfiles; 1687 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1684 rdev->debugfs_count = i; 1688 rdev->debugfs_count = i;
1685 #if defined(CONFIG_DEBUG_FS) 1689 #if defined(CONFIG_DEBUG_FS)
1686 drm_debugfs_create_files(files, nfiles, 1690 drm_debugfs_create_files(files, nfiles,
1687 rdev->ddev->control->debugfs_root, 1691 rdev->ddev->control->debugfs_root,
1688 rdev->ddev->control); 1692 rdev->ddev->control);
1689 drm_debugfs_create_files(files, nfiles, 1693 drm_debugfs_create_files(files, nfiles,
1690 rdev->ddev->primary->debugfs_root, 1694 rdev->ddev->primary->debugfs_root,
1691 rdev->ddev->primary); 1695 rdev->ddev->primary);
1692 #endif 1696 #endif
1693 return 0; 1697 return 0;
1694 } 1698 }
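
A hedged usage sketch for the helper above: a component declares a static drm_info_list table and registers it once; a second registration is a no-op thanks to the duplicate check. Kernel-side code, so it assumes the drm headers; the "foo" names are hypothetical:

/* Hypothetical component registering one debugfs file via the helper above. */
static int radeon_debugfs_foo_info(struct seq_file *m, void *data)
{
        seq_printf(m, "example state dump\n");
        return 0;
}

static struct drm_info_list radeon_foo_list[] = {
        { "radeon_foo_info", radeon_debugfs_foo_info, 0, NULL },
};

/* in the component's init path (r is an int):
 *     r = radeon_debugfs_add_files(rdev, radeon_foo_list,
 *                                  ARRAY_SIZE(radeon_foo_list));
 */
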
1695 1699
1696 static void radeon_debugfs_remove_files(struct radeon_device *rdev) 1700 static void radeon_debugfs_remove_files(struct radeon_device *rdev)
1697 { 1701 {
1698 #if defined(CONFIG_DEBUG_FS) 1702 #if defined(CONFIG_DEBUG_FS)
1699 unsigned i; 1703 unsigned i;
1700 1704
1701 for (i = 0; i < rdev->debugfs_count; i++) { 1705 for (i = 0; i < rdev->debugfs_count; i++) {
1702 drm_debugfs_remove_files(rdev->debugfs[i].files, 1706 drm_debugfs_remove_files(rdev->debugfs[i].files,
1703 rdev->debugfs[i].num_files, 1707 rdev->debugfs[i].num_files,
1704 rdev->ddev->control); 1708 rdev->ddev->control);
1705 drm_debugfs_remove_files(rdev->debugfs[i].files, 1709 drm_debugfs_remove_files(rdev->debugfs[i].files,
1706 rdev->debugfs[i].num_files, 1710 rdev->debugfs[i].num_files,
1707 rdev->ddev->primary); 1711 rdev->ddev->primary);
1708 } 1712 }
1709 #endif 1713 #endif
1710 } 1714 }
1711 1715
1712 #if defined(CONFIG_DEBUG_FS) 1716 #if defined(CONFIG_DEBUG_FS)
1713 int radeon_debugfs_init(struct drm_minor *minor) 1717 int radeon_debugfs_init(struct drm_minor *minor)
1714 { 1718 {
1715 return 0; 1719 return 0;
1716 } 1720 }
1717 1721
1718 void radeon_debugfs_cleanup(struct drm_minor *minor) 1722 void radeon_debugfs_cleanup(struct drm_minor *minor)
1719 { 1723 {
1720 } 1724 }
1721 #endif 1725 #endif
1722 1726
drivers/gpu/drm/radeon/radeon_pm.c
1 /* 1 /*
2 * Permission is hereby granted, free of charge, to any person obtaining a 2 * Permission is hereby granted, free of charge, to any person obtaining a
3 * copy of this software and associated documentation files (the "Software"), 3 * copy of this software and associated documentation files (the "Software"),
4 * to deal in the Software without restriction, including without limitation 4 * to deal in the Software without restriction, including without limitation
5 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 5 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6 * and/or sell copies of the Software, and to permit persons to whom the 6 * and/or sell copies of the Software, and to permit persons to whom the
7 * Software is furnished to do so, subject to the following conditions: 7 * Software is furnished to do so, subject to the following conditions:
8 * 8 *
9 * The above copyright notice and this permission notice shall be included in 9 * The above copyright notice and this permission notice shall be included in
10 * all copies or substantial portions of the Software. 10 * all copies or substantial portions of the Software.
11 * 11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 15 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 16 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 17 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18 * OTHER DEALINGS IN THE SOFTWARE. 18 * OTHER DEALINGS IN THE SOFTWARE.
19 * 19 *
20 * Authors: Rafał Miłecki <zajec5@gmail.com> 20 * Authors: Rafał Miłecki <zajec5@gmail.com>
21 * Alex Deucher <alexdeucher@gmail.com> 21 * Alex Deucher <alexdeucher@gmail.com>
22 */ 22 */
23 #include <drm/drmP.h> 23 #include <drm/drmP.h>
24 #include "radeon.h" 24 #include "radeon.h"
25 #include "avivod.h" 25 #include "avivod.h"
26 #include "atom.h" 26 #include "atom.h"
27 #include <linux/power_supply.h> 27 #include <linux/power_supply.h>
28 #include <linux/hwmon.h> 28 #include <linux/hwmon.h>
29 #include <linux/hwmon-sysfs.h> 29 #include <linux/hwmon-sysfs.h>
30 30
31 #define RADEON_IDLE_LOOP_MS 100 31 #define RADEON_IDLE_LOOP_MS 100
32 #define RADEON_RECLOCK_DELAY_MS 200 32 #define RADEON_RECLOCK_DELAY_MS 200
33 #define RADEON_WAIT_VBLANK_TIMEOUT 200 33 #define RADEON_WAIT_VBLANK_TIMEOUT 200
34 34
35 static const char *radeon_pm_state_type_name[5] = { 35 static const char *radeon_pm_state_type_name[5] = {
36 "", 36 "",
37 "Powersave", 37 "Powersave",
38 "Battery", 38 "Battery",
39 "Balanced", 39 "Balanced",
40 "Performance", 40 "Performance",
41 }; 41 };
42 42
43 static void radeon_dynpm_idle_work_handler(struct work_struct *work); 43 static void radeon_dynpm_idle_work_handler(struct work_struct *work);
44 static int radeon_debugfs_pm_init(struct radeon_device *rdev); 44 static int radeon_debugfs_pm_init(struct radeon_device *rdev);
45 static bool radeon_pm_in_vbl(struct radeon_device *rdev); 45 static bool radeon_pm_in_vbl(struct radeon_device *rdev);
46 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); 46 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
47 static void radeon_pm_update_profile(struct radeon_device *rdev); 47 static void radeon_pm_update_profile(struct radeon_device *rdev);
48 static void radeon_pm_set_clocks(struct radeon_device *rdev); 48 static void radeon_pm_set_clocks(struct radeon_device *rdev);
49 49
50 int radeon_pm_get_type_index(struct radeon_device *rdev, 50 int radeon_pm_get_type_index(struct radeon_device *rdev,
51 enum radeon_pm_state_type ps_type, 51 enum radeon_pm_state_type ps_type,
52 int instance) 52 int instance)
53 { 53 {
54 int i; 54 int i;
55 int found_instance = -1; 55 int found_instance = -1;
56 56
57 for (i = 0; i < rdev->pm.num_power_states; i++) { 57 for (i = 0; i < rdev->pm.num_power_states; i++) {
58 if (rdev->pm.power_state[i].type == ps_type) { 58 if (rdev->pm.power_state[i].type == ps_type) {
59 found_instance++; 59 found_instance++;
60 if (found_instance == instance) 60 if (found_instance == instance)
61 return i; 61 return i;
62 } 62 }
63 } 63 }
64 /* return default if no match */ 64 /* return default if no match */
65 return rdev->pm.default_power_state_index; 65 return rdev->pm.default_power_state_index;
66 } 66 }
67 67
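Usage sketch for the lookup above (hypothetical caller): ask for the first battery state; when no state of that type exists, the default power state index comes back, so the result is always safe to dereference.

/* Sketch: pick the first battery state, falling back to the default index. */
int idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
struct radeon_power_state *ps = &rdev->pm.power_state[idx];
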
68 void radeon_pm_acpi_event_handler(struct radeon_device *rdev) 68 void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
69 { 69 {
70 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 70 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
71 mutex_lock(&rdev->pm.mutex); 71 mutex_lock(&rdev->pm.mutex);
72 if (power_supply_is_system_supplied() > 0) 72 if (power_supply_is_system_supplied() > 0)
73 rdev->pm.dpm.ac_power = true; 73 rdev->pm.dpm.ac_power = true;
74 else 74 else
75 rdev->pm.dpm.ac_power = false; 75 rdev->pm.dpm.ac_power = false;
76 if (rdev->asic->dpm.enable_bapm) 76 if (rdev->asic->dpm.enable_bapm)
77 radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); 77 radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
78 mutex_unlock(&rdev->pm.mutex); 78 mutex_unlock(&rdev->pm.mutex);
79 } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 79 } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
80 if (rdev->pm.profile == PM_PROFILE_AUTO) { 80 if (rdev->pm.profile == PM_PROFILE_AUTO) {
81 mutex_lock(&rdev->pm.mutex); 81 mutex_lock(&rdev->pm.mutex);
82 radeon_pm_update_profile(rdev); 82 radeon_pm_update_profile(rdev);
83 radeon_pm_set_clocks(rdev); 83 radeon_pm_set_clocks(rdev);
84 mutex_unlock(&rdev->pm.mutex); 84 mutex_unlock(&rdev->pm.mutex);
85 } 85 }
86 } 86 }
87 } 87 }
88 88
89 static void radeon_pm_update_profile(struct radeon_device *rdev) 89 static void radeon_pm_update_profile(struct radeon_device *rdev)
90 { 90 {
91 switch (rdev->pm.profile) { 91 switch (rdev->pm.profile) {
92 case PM_PROFILE_DEFAULT: 92 case PM_PROFILE_DEFAULT:
93 rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX; 93 rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
94 break; 94 break;
95 case PM_PROFILE_AUTO: 95 case PM_PROFILE_AUTO:
96 if (power_supply_is_system_supplied() > 0) { 96 if (power_supply_is_system_supplied() > 0) {
97 if (rdev->pm.active_crtc_count > 1) 97 if (rdev->pm.active_crtc_count > 1)
98 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; 98 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
99 else 99 else
100 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; 100 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
101 } else { 101 } else {
102 if (rdev->pm.active_crtc_count > 1) 102 if (rdev->pm.active_crtc_count > 1)
103 rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; 103 rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
104 else 104 else
105 rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; 105 rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
106 } 106 }
107 break; 107 break;
108 case PM_PROFILE_LOW: 108 case PM_PROFILE_LOW:
109 if (rdev->pm.active_crtc_count > 1) 109 if (rdev->pm.active_crtc_count > 1)
110 rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX; 110 rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
111 else 111 else
112 rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; 112 rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
113 break; 113 break;
114 case PM_PROFILE_MID: 114 case PM_PROFILE_MID:
115 if (rdev->pm.active_crtc_count > 1) 115 if (rdev->pm.active_crtc_count > 1)
116 rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; 116 rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
117 else 117 else
118 rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; 118 rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
119 break; 119 break;
120 case PM_PROFILE_HIGH: 120 case PM_PROFILE_HIGH:
121 if (rdev->pm.active_crtc_count > 1) 121 if (rdev->pm.active_crtc_count > 1)
122 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; 122 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
123 else 123 else
124 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; 124 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
125 break; 125 break;
126 } 126 }
127 127
128 if (rdev->pm.active_crtc_count == 0) { 128 if (rdev->pm.active_crtc_count == 0) {
129 rdev->pm.requested_power_state_index = 129 rdev->pm.requested_power_state_index =
130 rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx; 130 rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
131 rdev->pm.requested_clock_mode_index = 131 rdev->pm.requested_clock_mode_index =
132 rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx; 132 rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
133 } else { 133 } else {
134 rdev->pm.requested_power_state_index = 134 rdev->pm.requested_power_state_index =
135 rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx; 135 rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
136 rdev->pm.requested_clock_mode_index = 136 rdev->pm.requested_clock_mode_index =
137 rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx; 137 rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
138 } 138 }
139 } 139 }
140 140
141 static void radeon_unmap_vram_bos(struct radeon_device *rdev) 141 static void radeon_unmap_vram_bos(struct radeon_device *rdev)
142 { 142 {
143 struct radeon_bo *bo, *n; 143 struct radeon_bo *bo, *n;
144 144
145 if (list_empty(&rdev->gem.objects)) 145 if (list_empty(&rdev->gem.objects))
146 return; 146 return;
147 147
148 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { 148 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
149 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 149 if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
150 ttm_bo_unmap_virtual(&bo->tbo); 150 ttm_bo_unmap_virtual(&bo->tbo);
151 } 151 }
152 } 152 }
153 153
154 static void radeon_sync_with_vblank(struct radeon_device *rdev) 154 static void radeon_sync_with_vblank(struct radeon_device *rdev)
155 { 155 {
156 if (rdev->pm.active_crtcs) { 156 if (rdev->pm.active_crtcs) {
157 rdev->pm.vblank_sync = false; 157 rdev->pm.vblank_sync = false;
158 wait_event_timeout( 158 wait_event_timeout(
159 rdev->irq.vblank_queue, rdev->pm.vblank_sync, 159 rdev->irq.vblank_queue, rdev->pm.vblank_sync,
160 msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); 160 msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
161 } 161 }
162 } 162 }
163 163
164 static void radeon_set_power_state(struct radeon_device *rdev) 164 static void radeon_set_power_state(struct radeon_device *rdev)
165 { 165 {
166 u32 sclk, mclk; 166 u32 sclk, mclk;
167 bool misc_after = false; 167 bool misc_after = false;
168 168
169 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && 169 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
170 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) 170 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
171 return; 171 return;
172 172
173 if (radeon_gui_idle(rdev)) { 173 if (radeon_gui_idle(rdev)) {
174 sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 174 sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
175 clock_info[rdev->pm.requested_clock_mode_index].sclk; 175 clock_info[rdev->pm.requested_clock_mode_index].sclk;
176 if (sclk > rdev->pm.default_sclk) 176 if (sclk > rdev->pm.default_sclk)
177 sclk = rdev->pm.default_sclk; 177 sclk = rdev->pm.default_sclk;
178 178
179 /* starting with BTC, a single state is used for both 179 /* starting with BTC, a single state is used for both
180 * MH and SH; the difference is that we always use the high 180 * MH and SH; the difference is that we always use the high
181 * clock index for mclk and vddci. 181 * clock index for mclk and vddci.
182 */ 182 */
183 if ((rdev->pm.pm_method == PM_METHOD_PROFILE) && 183 if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
184 (rdev->family >= CHIP_BARTS) && 184 (rdev->family >= CHIP_BARTS) &&
185 rdev->pm.active_crtc_count && 185 rdev->pm.active_crtc_count &&
186 ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) || 186 ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
187 (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX))) 187 (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
188 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 188 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
189 clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk; 189 clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
190 else 190 else
191 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 191 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
192 clock_info[rdev->pm.requested_clock_mode_index].mclk; 192 clock_info[rdev->pm.requested_clock_mode_index].mclk;
193 193
194 if (mclk > rdev->pm.default_mclk) 194 if (mclk > rdev->pm.default_mclk)
195 mclk = rdev->pm.default_mclk; 195 mclk = rdev->pm.default_mclk;
196 196
197 /* upvolt before raising clocks, downvolt after lowering clocks */ 197 /* upvolt before raising clocks, downvolt after lowering clocks */
198 if (sclk < rdev->pm.current_sclk) 198 if (sclk < rdev->pm.current_sclk)
199 misc_after = true; 199 misc_after = true;
200 200
201 radeon_sync_with_vblank(rdev); 201 radeon_sync_with_vblank(rdev);
202 202
203 if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 203 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
204 if (!radeon_pm_in_vbl(rdev)) 204 if (!radeon_pm_in_vbl(rdev))
205 return; 205 return;
206 } 206 }
207 207
208 radeon_pm_prepare(rdev); 208 radeon_pm_prepare(rdev);
209 209
210 if (!misc_after) 210 if (!misc_after)
211 /* voltage, pcie lanes, etc. */ 211 /* voltage, pcie lanes, etc. */
212 radeon_pm_misc(rdev); 212 radeon_pm_misc(rdev);
213 213
214 /* set engine clock */ 214 /* set engine clock */
215 if (sclk != rdev->pm.current_sclk) { 215 if (sclk != rdev->pm.current_sclk) {
216 radeon_pm_debug_check_in_vbl(rdev, false); 216 radeon_pm_debug_check_in_vbl(rdev, false);
217 radeon_set_engine_clock(rdev, sclk); 217 radeon_set_engine_clock(rdev, sclk);
218 radeon_pm_debug_check_in_vbl(rdev, true); 218 radeon_pm_debug_check_in_vbl(rdev, true);
219 rdev->pm.current_sclk = sclk; 219 rdev->pm.current_sclk = sclk;
220 DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk); 220 DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
221 } 221 }
222 222
223 /* set memory clock */ 223 /* set memory clock */
224 if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) { 224 if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
225 radeon_pm_debug_check_in_vbl(rdev, false); 225 radeon_pm_debug_check_in_vbl(rdev, false);
226 radeon_set_memory_clock(rdev, mclk); 226 radeon_set_memory_clock(rdev, mclk);
227 radeon_pm_debug_check_in_vbl(rdev, true); 227 radeon_pm_debug_check_in_vbl(rdev, true);
228 rdev->pm.current_mclk = mclk; 228 rdev->pm.current_mclk = mclk;
229 DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk); 229 DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
230 } 230 }
231 231
232 if (misc_after) 232 if (misc_after)
233 /* voltage, pcie lanes, etc. */ 233 /* voltage, pcie lanes, etc. */
234 radeon_pm_misc(rdev); 234 radeon_pm_misc(rdev);
235 235
236 radeon_pm_finish(rdev); 236 radeon_pm_finish(rdev);
237 237
238 rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; 238 rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
239 rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; 239 rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
240 } else 240 } else
241 DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n"); 241 DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
242 } 242 }
243 243
244 static void radeon_pm_set_clocks(struct radeon_device *rdev) 244 static void radeon_pm_set_clocks(struct radeon_device *rdev)
245 { 245 {
246 int i, r; 246 int i, r;
247 247
248 /* no need to take locks, etc. if nothing's going to change */ 248 /* no need to take locks, etc. if nothing's going to change */
249 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && 249 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
250 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) 250 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
251 return; 251 return;
252 252
253 mutex_lock(&rdev->ddev->struct_mutex); 253 mutex_lock(&rdev->ddev->struct_mutex);
254 down_write(&rdev->pm.mclk_lock); 254 down_write(&rdev->pm.mclk_lock);
255 mutex_lock(&rdev->ring_lock); 255 mutex_lock(&rdev->ring_lock);
256 256
257 /* wait for the rings to drain */ 257 /* wait for the rings to drain */
258 for (i = 0; i < RADEON_NUM_RINGS; i++) { 258 for (i = 0; i < RADEON_NUM_RINGS; i++) {
259 struct radeon_ring *ring = &rdev->ring[i]; 259 struct radeon_ring *ring = &rdev->ring[i];
260 if (!ring->ready) { 260 if (!ring->ready) {
261 continue; 261 continue;
262 } 262 }
263 r = radeon_fence_wait_empty(rdev, i); 263 r = radeon_fence_wait_empty(rdev, i);
264 if (r) { 264 if (r) {
265 /* needs a GPU reset, don't reset here */ 265 /* needs a GPU reset, don't reset here */
266 mutex_unlock(&rdev->ring_lock); 266 mutex_unlock(&rdev->ring_lock);
267 up_write(&rdev->pm.mclk_lock); 267 up_write(&rdev->pm.mclk_lock);
268 mutex_unlock(&rdev->ddev->struct_mutex); 268 mutex_unlock(&rdev->ddev->struct_mutex);
269 return; 269 return;
270 } 270 }
271 } 271 }
272 272
273 radeon_unmap_vram_bos(rdev); 273 radeon_unmap_vram_bos(rdev);
274 274
275 if (rdev->irq.installed) { 275 if (rdev->irq.installed) {
276 for (i = 0; i < rdev->num_crtc; i++) { 276 for (i = 0; i < rdev->num_crtc; i++) {
277 if (rdev->pm.active_crtcs & (1 << i)) { 277 if (rdev->pm.active_crtcs & (1 << i)) {
278 rdev->pm.req_vblank |= (1 << i); 278 rdev->pm.req_vblank |= (1 << i);
279 drm_vblank_get(rdev->ddev, i); 279 drm_vblank_get(rdev->ddev, i);
280 } 280 }
281 } 281 }
282 } 282 }
283 283
284 radeon_set_power_state(rdev); 284 radeon_set_power_state(rdev);
285 285
286 if (rdev->irq.installed) { 286 if (rdev->irq.installed) {
287 for (i = 0; i < rdev->num_crtc; i++) { 287 for (i = 0; i < rdev->num_crtc; i++) {
288 if (rdev->pm.req_vblank & (1 << i)) { 288 if (rdev->pm.req_vblank & (1 << i)) {
289 rdev->pm.req_vblank &= ~(1 << i); 289 rdev->pm.req_vblank &= ~(1 << i);
290 drm_vblank_put(rdev->ddev, i); 290 drm_vblank_put(rdev->ddev, i);
291 } 291 }
292 } 292 }
293 } 293 }
294 294
295 /* update display watermarks based on new power state */ 295 /* update display watermarks based on new power state */
296 radeon_update_bandwidth_info(rdev); 296 radeon_update_bandwidth_info(rdev);
297 if (rdev->pm.active_crtc_count) 297 if (rdev->pm.active_crtc_count)
298 radeon_bandwidth_update(rdev); 298 radeon_bandwidth_update(rdev);
299 299
300 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 300 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
301 301
302 mutex_unlock(&rdev->ring_lock); 302 mutex_unlock(&rdev->ring_lock);
303 up_write(&rdev->pm.mclk_lock); 303 up_write(&rdev->pm.mclk_lock);
304 mutex_unlock(&rdev->ddev->struct_mutex); 304 mutex_unlock(&rdev->ddev->struct_mutex);
305 } 305 }
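
Worth noting from the function above is the lock ordering: the three locks are taken in a fixed order and released in reverse, on both the early-return and normal paths. A comment-style summary, derived directly from the code above:

/*
 * mutex_lock(&rdev->ddev->struct_mutex);
 *   down_write(&rdev->pm.mclk_lock);
 *     mutex_lock(&rdev->ring_lock);
 *     ... drain rings, reclock ...
 *     mutex_unlock(&rdev->ring_lock);
 *   up_write(&rdev->pm.mclk_lock);
 * mutex_unlock(&rdev->ddev->struct_mutex);
 */
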
306 306
307 static void radeon_pm_print_states(struct radeon_device *rdev) 307 static void radeon_pm_print_states(struct radeon_device *rdev)
308 { 308 {
309 int i, j; 309 int i, j;
310 struct radeon_power_state *power_state; 310 struct radeon_power_state *power_state;
311 struct radeon_pm_clock_info *clock_info; 311 struct radeon_pm_clock_info *clock_info;
312 312
313 DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states); 313 DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
314 for (i = 0; i < rdev->pm.num_power_states; i++) { 314 for (i = 0; i < rdev->pm.num_power_states; i++) {
315 power_state = &rdev->pm.power_state[i]; 315 power_state = &rdev->pm.power_state[i];
316 DRM_DEBUG_DRIVER("State %d: %s\n", i, 316 DRM_DEBUG_DRIVER("State %d: %s\n", i,
317 radeon_pm_state_type_name[power_state->type]); 317 radeon_pm_state_type_name[power_state->type]);
318 if (i == rdev->pm.default_power_state_index) 318 if (i == rdev->pm.default_power_state_index)
319 DRM_DEBUG_DRIVER("\tDefault"); 319 DRM_DEBUG_DRIVER("\tDefault");
320 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) 320 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
321 DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes); 321 DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
322 if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 322 if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
323 DRM_DEBUG_DRIVER("\tSingle display only\n"); 323 DRM_DEBUG_DRIVER("\tSingle display only\n");
324 DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes); 324 DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
325 for (j = 0; j < power_state->num_clock_modes; j++) { 325 for (j = 0; j < power_state->num_clock_modes; j++) {
326 clock_info = &(power_state->clock_info[j]); 326 clock_info = &(power_state->clock_info[j]);
327 if (rdev->flags & RADEON_IS_IGP) 327 if (rdev->flags & RADEON_IS_IGP)
328 DRM_DEBUG_DRIVER("\t\t%d e: %d\n", 328 DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
329 j, 329 j,
330 clock_info->sclk * 10); 330 clock_info->sclk * 10);
331 else 331 else
332 DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n", 332 DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
333 j, 333 j,
334 clock_info->sclk * 10, 334 clock_info->sclk * 10,
335 clock_info->mclk * 10, 335 clock_info->mclk * 10,
336 clock_info->voltage.voltage); 336 clock_info->voltage.voltage);
337 } 337 }
338 } 338 }
339 } 339 }
340 340
341 static ssize_t radeon_get_pm_profile(struct device *dev, 341 static ssize_t radeon_get_pm_profile(struct device *dev,
342 struct device_attribute *attr, 342 struct device_attribute *attr,
343 char *buf) 343 char *buf)
344 { 344 {
345 struct drm_device *ddev = dev_get_drvdata(dev); 345 struct drm_device *ddev = dev_get_drvdata(dev);
346 struct radeon_device *rdev = ddev->dev_private; 346 struct radeon_device *rdev = ddev->dev_private;
347 int cp = rdev->pm.profile; 347 int cp = rdev->pm.profile;
348 348
349 return snprintf(buf, PAGE_SIZE, "%s\n", 349 return snprintf(buf, PAGE_SIZE, "%s\n",
350 (cp == PM_PROFILE_AUTO) ? "auto" : 350 (cp == PM_PROFILE_AUTO) ? "auto" :
351 (cp == PM_PROFILE_LOW) ? "low" : 351 (cp == PM_PROFILE_LOW) ? "low" :
352 (cp == PM_PROFILE_MID) ? "mid" : 352 (cp == PM_PROFILE_MID) ? "mid" :
353 (cp == PM_PROFILE_HIGH) ? "high" : "default"); 353 (cp == PM_PROFILE_HIGH) ? "high" : "default");
354 } 354 }
355 355
356 static ssize_t radeon_set_pm_profile(struct device *dev, 356 static ssize_t radeon_set_pm_profile(struct device *dev,
357 struct device_attribute *attr, 357 struct device_attribute *attr,
358 const char *buf, 358 const char *buf,
359 size_t count) 359 size_t count)
360 { 360 {
361 struct drm_device *ddev = dev_get_drvdata(dev); 361 struct drm_device *ddev = dev_get_drvdata(dev);
362 struct radeon_device *rdev = ddev->dev_private; 362 struct radeon_device *rdev = ddev->dev_private;
363 363
364 /* Can't set profile when the card is off */ 364 /* Can't set profile when the card is off */
365 if ((rdev->flags & RADEON_IS_PX) && 365 if ((rdev->flags & RADEON_IS_PX) &&
366 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 366 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
367 return -EINVAL; 367 return -EINVAL;
368 368
369 mutex_lock(&rdev->pm.mutex); 369 mutex_lock(&rdev->pm.mutex);
370 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 370 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
371 if (strncmp("default", buf, strlen("default")) == 0) 371 if (strncmp("default", buf, strlen("default")) == 0)
372 rdev->pm.profile = PM_PROFILE_DEFAULT; 372 rdev->pm.profile = PM_PROFILE_DEFAULT;
373 else if (strncmp("auto", buf, strlen("auto")) == 0) 373 else if (strncmp("auto", buf, strlen("auto")) == 0)
374 rdev->pm.profile = PM_PROFILE_AUTO; 374 rdev->pm.profile = PM_PROFILE_AUTO;
375 else if (strncmp("low", buf, strlen("low")) == 0) 375 else if (strncmp("low", buf, strlen("low")) == 0)
376 rdev->pm.profile = PM_PROFILE_LOW; 376 rdev->pm.profile = PM_PROFILE_LOW;
377 else if (strncmp("mid", buf, strlen("mid")) == 0) 377 else if (strncmp("mid", buf, strlen("mid")) == 0)
378 rdev->pm.profile = PM_PROFILE_MID; 378 rdev->pm.profile = PM_PROFILE_MID;
379 else if (strncmp("high", buf, strlen("high")) == 0) 379 else if (strncmp("high", buf, strlen("high")) == 0)
380 rdev->pm.profile = PM_PROFILE_HIGH; 380 rdev->pm.profile = PM_PROFILE_HIGH;
381 else { 381 else {
382 count = -EINVAL; 382 count = -EINVAL;
383 goto fail; 383 goto fail;
384 } 384 }
385 radeon_pm_update_profile(rdev); 385 radeon_pm_update_profile(rdev);
386 radeon_pm_set_clocks(rdev); 386 radeon_pm_set_clocks(rdev);
387 } else 387 } else
388 count = -EINVAL; 388 count = -EINVAL;
389 389
390 fail: 390 fail:
391 mutex_unlock(&rdev->pm.mutex); 391 mutex_unlock(&rdev->pm.mutex);
392 392
393 return count; 393 return count;
394 } 394 }
395 395
396 static ssize_t radeon_get_pm_method(struct device *dev, 396 static ssize_t radeon_get_pm_method(struct device *dev,
397 struct device_attribute *attr, 397 struct device_attribute *attr,
398 char *buf) 398 char *buf)
399 { 399 {
400 struct drm_device *ddev = dev_get_drvdata(dev); 400 struct drm_device *ddev = dev_get_drvdata(dev);
401 struct radeon_device *rdev = ddev->dev_private; 401 struct radeon_device *rdev = ddev->dev_private;
402 int pm = rdev->pm.pm_method; 402 int pm = rdev->pm.pm_method;
403 403
404 return snprintf(buf, PAGE_SIZE, "%s\n", 404 return snprintf(buf, PAGE_SIZE, "%s\n",
405 (pm == PM_METHOD_DYNPM) ? "dynpm" : 405 (pm == PM_METHOD_DYNPM) ? "dynpm" :
406 (pm == PM_METHOD_PROFILE) ? "profile" : "dpm"); 406 (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
407 } 407 }
408 408
409 static ssize_t radeon_set_pm_method(struct device *dev, 409 static ssize_t radeon_set_pm_method(struct device *dev,
410 struct device_attribute *attr, 410 struct device_attribute *attr,
411 const char *buf, 411 const char *buf,
412 size_t count) 412 size_t count)
413 { 413 {
414 struct drm_device *ddev = dev_get_drvdata(dev); 414 struct drm_device *ddev = dev_get_drvdata(dev);
415 struct radeon_device *rdev = ddev->dev_private; 415 struct radeon_device *rdev = ddev->dev_private;
416 416
417 /* Can't set method when the card is off */ 417 /* Can't set method when the card is off */
418 if ((rdev->flags & RADEON_IS_PX) && 418 if ((rdev->flags & RADEON_IS_PX) &&
419 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { 419 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
420 count = -EINVAL; 420 count = -EINVAL;
421 goto fail; 421 goto fail;
422 } 422 }
423 423
424 /* we don't support the legacy modes with dpm */ 424 /* we don't support the legacy modes with dpm */
425 if (rdev->pm.pm_method == PM_METHOD_DPM) { 425 if (rdev->pm.pm_method == PM_METHOD_DPM) {
426 count = -EINVAL; 426 count = -EINVAL;
427 goto fail; 427 goto fail;
428 } 428 }
429 429
430 if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { 430 if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
431 mutex_lock(&rdev->pm.mutex); 431 mutex_lock(&rdev->pm.mutex);
432 rdev->pm.pm_method = PM_METHOD_DYNPM; 432 rdev->pm.pm_method = PM_METHOD_DYNPM;
433 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; 433 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
434 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 434 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
435 mutex_unlock(&rdev->pm.mutex); 435 mutex_unlock(&rdev->pm.mutex);
436 } else if (strncmp("profile", buf, strlen("profile")) == 0) { 436 } else if (strncmp("profile", buf, strlen("profile")) == 0) {
437 mutex_lock(&rdev->pm.mutex); 437 mutex_lock(&rdev->pm.mutex);
438 /* disable dynpm */ 438 /* disable dynpm */
439 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 439 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
440 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 440 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
441 rdev->pm.pm_method = PM_METHOD_PROFILE; 441 rdev->pm.pm_method = PM_METHOD_PROFILE;
442 mutex_unlock(&rdev->pm.mutex); 442 mutex_unlock(&rdev->pm.mutex);
443 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 443 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
444 } else { 444 } else {
445 count = -EINVAL; 445 count = -EINVAL;
446 goto fail; 446 goto fail;
447 } 447 }
448 radeon_pm_compute_clocks(rdev); 448 radeon_pm_compute_clocks(rdev);
449 fail: 449 fail:
450 return count; 450 return count;
451 } 451 }
452 452
453 static ssize_t radeon_get_dpm_state(struct device *dev, 453 static ssize_t radeon_get_dpm_state(struct device *dev,
454 struct device_attribute *attr, 454 struct device_attribute *attr,
455 char *buf) 455 char *buf)
456 { 456 {
457 struct drm_device *ddev = dev_get_drvdata(dev); 457 struct drm_device *ddev = dev_get_drvdata(dev);
458 struct radeon_device *rdev = ddev->dev_private; 458 struct radeon_device *rdev = ddev->dev_private;
459 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 459 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
460 460
461 if ((rdev->flags & RADEON_IS_PX) && 461 if ((rdev->flags & RADEON_IS_PX) &&
462 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 462 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
463 return snprintf(buf, PAGE_SIZE, "off\n"); 463 return snprintf(buf, PAGE_SIZE, "off\n");
464 464
465 return snprintf(buf, PAGE_SIZE, "%s\n", 465 return snprintf(buf, PAGE_SIZE, "%s\n",
466 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 466 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
467 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 467 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
468 } 468 }
469 469
470 static ssize_t radeon_set_dpm_state(struct device *dev, 470 static ssize_t radeon_set_dpm_state(struct device *dev,
471 struct device_attribute *attr, 471 struct device_attribute *attr,
472 const char *buf, 472 const char *buf,
473 size_t count) 473 size_t count)
474 { 474 {
475 struct drm_device *ddev = dev_get_drvdata(dev); 475 struct drm_device *ddev = dev_get_drvdata(dev);
476 struct radeon_device *rdev = ddev->dev_private; 476 struct radeon_device *rdev = ddev->dev_private;
477 477
478 /* Can't set dpm state when the card is off */ 478 /* Can't set dpm state when the card is off */
479 if ((rdev->flags & RADEON_IS_PX) && 479 if ((rdev->flags & RADEON_IS_PX) &&
480 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 480 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
481 return -EINVAL; 481 return -EINVAL;
482 482
483 mutex_lock(&rdev->pm.mutex); 483 mutex_lock(&rdev->pm.mutex);
484 if (strncmp("battery", buf, strlen("battery")) == 0) 484 if (strncmp("battery", buf, strlen("battery")) == 0)
485 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; 485 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
486 else if (strncmp("balanced", buf, strlen("balanced")) == 0) 486 else if (strncmp("balanced", buf, strlen("balanced")) == 0)
487 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 487 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
488 else if (strncmp("performance", buf, strlen("performance")) == 0) 488 else if (strncmp("performance", buf, strlen("performance")) == 0)
489 rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE; 489 rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
490 else { 490 else {
491 mutex_unlock(&rdev->pm.mutex); 491 mutex_unlock(&rdev->pm.mutex);
492 count = -EINVAL; 492 count = -EINVAL;
493 goto fail; 493 goto fail;
494 } 494 }
495 mutex_unlock(&rdev->pm.mutex); 495 mutex_unlock(&rdev->pm.mutex);
496 radeon_pm_compute_clocks(rdev); 496 radeon_pm_compute_clocks(rdev);
497 fail: 497 fail:
498 return count; 498 return count;
499 } 499 }
500 500
501 static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev, 501 static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
502 struct device_attribute *attr, 502 struct device_attribute *attr,
503 char *buf) 503 char *buf)
504 { 504 {
505 struct drm_device *ddev = dev_get_drvdata(dev); 505 struct drm_device *ddev = dev_get_drvdata(dev);
506 struct radeon_device *rdev = ddev->dev_private; 506 struct radeon_device *rdev = ddev->dev_private;
507 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 507 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
508 508
509 if ((rdev->flags & RADEON_IS_PX) && 509 if ((rdev->flags & RADEON_IS_PX) &&
510 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 510 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
511 return snprintf(buf, PAGE_SIZE, "off\n"); 511 return snprintf(buf, PAGE_SIZE, "off\n");
512 512
513 return snprintf(buf, PAGE_SIZE, "%s\n", 513 return snprintf(buf, PAGE_SIZE, "%s\n",
514 (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : 514 (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
515 (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); 515 (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
516 } 516 }
517 517
518 static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev, 518 static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
519 struct device_attribute *attr, 519 struct device_attribute *attr,
520 const char *buf, 520 const char *buf,
521 size_t count) 521 size_t count)
522 { 522 {
523 struct drm_device *ddev = dev_get_drvdata(dev); 523 struct drm_device *ddev = dev_get_drvdata(dev);
524 struct radeon_device *rdev = ddev->dev_private; 524 struct radeon_device *rdev = ddev->dev_private;
525 enum radeon_dpm_forced_level level; 525 enum radeon_dpm_forced_level level;
526 int ret = 0; 526 int ret = 0;
527 527
528 /* Can't force performance level when the card is off */ 528 /* Can't force performance level when the card is off */
529 if ((rdev->flags & RADEON_IS_PX) && 529 if ((rdev->flags & RADEON_IS_PX) &&
530 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 530 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
531 return -EINVAL; 531 return -EINVAL;
532 532
533 mutex_lock(&rdev->pm.mutex); 533 mutex_lock(&rdev->pm.mutex);
534 if (strncmp("low", buf, strlen("low")) == 0) { 534 if (strncmp("low", buf, strlen("low")) == 0) {
535 level = RADEON_DPM_FORCED_LEVEL_LOW; 535 level = RADEON_DPM_FORCED_LEVEL_LOW;
536 } else if (strncmp("high", buf, strlen("high")) == 0) { 536 } else if (strncmp("high", buf, strlen("high")) == 0) {
537 level = RADEON_DPM_FORCED_LEVEL_HIGH; 537 level = RADEON_DPM_FORCED_LEVEL_HIGH;
538 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 538 } else if (strncmp("auto", buf, strlen("auto")) == 0) {
539 level = RADEON_DPM_FORCED_LEVEL_AUTO; 539 level = RADEON_DPM_FORCED_LEVEL_AUTO;
540 } else { 540 } else {
541 count = -EINVAL; 541 count = -EINVAL;
542 goto fail; 542 goto fail;
543 } 543 }
544 if (rdev->asic->dpm.force_performance_level) { 544 if (rdev->asic->dpm.force_performance_level) {
545 if (rdev->pm.dpm.thermal_active) { 545 if (rdev->pm.dpm.thermal_active) {
546 count = -EINVAL; 546 count = -EINVAL;
547 goto fail; 547 goto fail;
548 } 548 }
549 ret = radeon_dpm_force_performance_level(rdev, level); 549 ret = radeon_dpm_force_performance_level(rdev, level);
550 if (ret) 550 if (ret)
551 count = -EINVAL; 551 count = -EINVAL;
552 } 552 }
553 fail: 553 fail:
554 mutex_unlock(&rdev->pm.mutex); 554 mutex_unlock(&rdev->pm.mutex);
555 555
556 return count; 556 return count;
557 } 557 }
558 558
559 static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); 559 static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
560 static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); 560 static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
561 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state); 561 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
562 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, 562 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
563 radeon_get_dpm_forced_performance_level, 563 radeon_get_dpm_forced_performance_level,
564 radeon_set_dpm_forced_performance_level); 564 radeon_set_dpm_forced_performance_level);
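
These attributes appear in sysfs under the drm device's directory. A minimal userspace sketch follows; the card0 path is an assumption (the index depends on the system), and the write needs sufficient privileges:

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/class/drm/card0/device/power_profile";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        fputs("low\n", f); /* accepted values: default, auto, low, mid, high */
        fclose(f);
        return 0;
}
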
565 565
566 static ssize_t radeon_hwmon_show_temp(struct device *dev, 566 static ssize_t radeon_hwmon_show_temp(struct device *dev,
567 struct device_attribute *attr, 567 struct device_attribute *attr,
568 char *buf) 568 char *buf)
569 { 569 {
570 struct radeon_device *rdev = dev_get_drvdata(dev); 570 struct radeon_device *rdev = dev_get_drvdata(dev);
571 struct drm_device *ddev = rdev->ddev; 571 struct drm_device *ddev = rdev->ddev;
572 int temp; 572 int temp;
573 573
574 /* Can't get temperature when the card is off */ 574 /* Can't get temperature when the card is off */
575 if ((rdev->flags & RADEON_IS_PX) && 575 if ((rdev->flags & RADEON_IS_PX) &&
576 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 576 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
577 return -EINVAL; 577 return -EINVAL;
578 578
579 if (rdev->asic->pm.get_temperature) 579 if (rdev->asic->pm.get_temperature)
580 temp = radeon_get_temperature(rdev); 580 temp = radeon_get_temperature(rdev);
581 else 581 else
582 temp = 0; 582 temp = 0;
583 583
584 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 584 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
585 } 585 }
586 586
587 static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev, 587 static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
588 struct device_attribute *attr, 588 struct device_attribute *attr,
589 char *buf) 589 char *buf)
590 { 590 {
591 struct radeon_device *rdev = dev_get_drvdata(dev); 591 struct radeon_device *rdev = dev_get_drvdata(dev);
592 int hyst = to_sensor_dev_attr(attr)->index; 592 int hyst = to_sensor_dev_attr(attr)->index;
593 int temp; 593 int temp;
594 594
595 if (hyst) 595 if (hyst)
596 temp = rdev->pm.dpm.thermal.min_temp; 596 temp = rdev->pm.dpm.thermal.min_temp;
597 else 597 else
598 temp = rdev->pm.dpm.thermal.max_temp; 598 temp = rdev->pm.dpm.thermal.max_temp;
599 599
600 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 600 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
601 } 601 }
602 602
603 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 603 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
604 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); 604 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
605 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); 605 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
606 606
607 static struct attribute *hwmon_attributes[] = { 607 static struct attribute *hwmon_attributes[] = {
608 &sensor_dev_attr_temp1_input.dev_attr.attr, 608 &sensor_dev_attr_temp1_input.dev_attr.attr,
609 &sensor_dev_attr_temp1_crit.dev_attr.attr, 609 &sensor_dev_attr_temp1_crit.dev_attr.attr,
610 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 610 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
611 NULL 611 NULL
612 }; 612 };
613 613
614 static umode_t hwmon_attributes_visible(struct kobject *kobj, 614 static umode_t hwmon_attributes_visible(struct kobject *kobj,
615 struct attribute *attr, int index) 615 struct attribute *attr, int index)
616 { 616 {
617 struct device *dev = container_of(kobj, struct device, kobj); 617 struct device *dev = container_of(kobj, struct device, kobj);
618 struct radeon_device *rdev = dev_get_drvdata(dev); 618 struct radeon_device *rdev = dev_get_drvdata(dev);
619 619
620 /* Skip limit attributes if DPM is not enabled */ 620 /* Skip limit attributes if DPM is not enabled */
621 if (rdev->pm.pm_method != PM_METHOD_DPM && 621 if (rdev->pm.pm_method != PM_METHOD_DPM &&
622 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 622 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
623 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) 623 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
624 return 0; 624 return 0;
625 625
626 return attr->mode; 626 return attr->mode;
627 } 627 }
628 628
629 static const struct attribute_group hwmon_attrgroup = { 629 static const struct attribute_group hwmon_attrgroup = {
630 .attrs = hwmon_attributes, 630 .attrs = hwmon_attributes,
631 .is_visible = hwmon_attributes_visible, 631 .is_visible = hwmon_attributes_visible,
632 }; 632 };
633 633
634 static const struct attribute_group *hwmon_groups[] = { 634 static const struct attribute_group *hwmon_groups[] = {
635 &hwmon_attrgroup, 635 &hwmon_attrgroup,
636 NULL 636 NULL
637 }; 637 };
638 638
639 static int radeon_hwmon_init(struct radeon_device *rdev) 639 static int radeon_hwmon_init(struct radeon_device *rdev)
640 { 640 {
641 int err = 0; 641 int err = 0;
642 642
643 switch (rdev->pm.int_thermal_type) { 643 switch (rdev->pm.int_thermal_type) {
644 case THERMAL_TYPE_RV6XX: 644 case THERMAL_TYPE_RV6XX:
645 case THERMAL_TYPE_RV770: 645 case THERMAL_TYPE_RV770:
646 case THERMAL_TYPE_EVERGREEN: 646 case THERMAL_TYPE_EVERGREEN:
647 case THERMAL_TYPE_NI: 647 case THERMAL_TYPE_NI:
648 case THERMAL_TYPE_SUMO: 648 case THERMAL_TYPE_SUMO:
649 case THERMAL_TYPE_SI: 649 case THERMAL_TYPE_SI:
650 case THERMAL_TYPE_CI: 650 case THERMAL_TYPE_CI:
651 case THERMAL_TYPE_KV: 651 case THERMAL_TYPE_KV:
652 if (rdev->asic->pm.get_temperature == NULL) 652 if (rdev->asic->pm.get_temperature == NULL)
653 return err; 653 return err;
654 rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev, 654 rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
655 "radeon", rdev, 655 "radeon", rdev,
656 hwmon_groups); 656 hwmon_groups);
657 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 657 if (IS_ERR(rdev->pm.int_hwmon_dev)) {
658 err = PTR_ERR(rdev->pm.int_hwmon_dev); 658 err = PTR_ERR(rdev->pm.int_hwmon_dev);
659 dev_err(rdev->dev, 659 dev_err(rdev->dev,
660 "Unable to register hwmon device: %d\n", err); 660 "Unable to register hwmon device: %d\n", err);
661 } 661 }
662 break; 662 break;
663 default: 663 default:
664 break; 664 break;
665 } 665 }
666 666
667 return err; 667 return err;
668 } 668 }
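
Editorial note: hwmon_device_register_with_groups() reports failure by returning an ERR_PTR-encoded pointer rather than NULL, which is why the code above tests IS_ERR() and recovers the errno with PTR_ERR(). A hedged userspace sketch of that encode/decode convention (the real macros live in <linux/err.h>; these are simplified stand-ins):

/* Simplified model of the kernel's ERR_PTR/IS_ERR/PTR_ERR convention
 * used by radeon_hwmon_init() above; not the real <linux/err.h>. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	/* errors are encoded as the top 4095 addresses */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *register_device(int fail)
{
	static int dummy;
	return fail ? ERR_PTR(-ENOMEM) : &dummy;
}

int main(void)
{
	void *dev = register_device(1);
	if (IS_ERR(dev))
		printf("Unable to register hwmon device: %ld\n", PTR_ERR(dev));
	return 0;
}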
669 669
670 static void radeon_hwmon_fini(struct radeon_device *rdev) 670 static void radeon_hwmon_fini(struct radeon_device *rdev)
671 { 671 {
672 if (rdev->pm.int_hwmon_dev) 672 if (rdev->pm.int_hwmon_dev)
673 hwmon_device_unregister(rdev->pm.int_hwmon_dev); 673 hwmon_device_unregister(rdev->pm.int_hwmon_dev);
674 } 674 }
675 675
676 static void radeon_dpm_thermal_work_handler(struct work_struct *work) 676 static void radeon_dpm_thermal_work_handler(struct work_struct *work)
677 { 677 {
678 struct radeon_device *rdev = 678 struct radeon_device *rdev =
679 container_of(work, struct radeon_device, 679 container_of(work, struct radeon_device,
680 pm.dpm.thermal.work); 680 pm.dpm.thermal.work);
681 /* switch to the thermal state */ 681 /* switch to the thermal state */
682 enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 682 enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
683 683
684 if (!rdev->pm.dpm_enabled) 684 if (!rdev->pm.dpm_enabled)
685 return; 685 return;
686 686
687 if (rdev->asic->pm.get_temperature) { 687 if (rdev->asic->pm.get_temperature) {
688 int temp = radeon_get_temperature(rdev); 688 int temp = radeon_get_temperature(rdev);
689 689
690 if (temp < rdev->pm.dpm.thermal.min_temp) 690 if (temp < rdev->pm.dpm.thermal.min_temp)
691 /* switch back to the user state */ 691 /* switch back to the user state */
692 dpm_state = rdev->pm.dpm.user_state; 692 dpm_state = rdev->pm.dpm.user_state;
693 } else { 693 } else {
694 if (rdev->pm.dpm.thermal.high_to_low) 694 if (rdev->pm.dpm.thermal.high_to_low)
695 /* switch back to the user state */ 695 /* switch back to the user state */
696 dpm_state = rdev->pm.dpm.user_state; 696 dpm_state = rdev->pm.dpm.user_state;
697 } 697 }
698 mutex_lock(&rdev->pm.mutex); 698 mutex_lock(&rdev->pm.mutex);
699 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) 699 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
700 rdev->pm.dpm.thermal_active = true; 700 rdev->pm.dpm.thermal_active = true;
701 else 701 else
702 rdev->pm.dpm.thermal_active = false; 702 rdev->pm.dpm.thermal_active = false;
703 rdev->pm.dpm.state = dpm_state; 703 rdev->pm.dpm.state = dpm_state;
704 mutex_unlock(&rdev->pm.mutex); 704 mutex_unlock(&rdev->pm.mutex);
705 705
706 radeon_pm_compute_clocks(rdev); 706 radeon_pm_compute_clocks(rdev);
707 } 707 }
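
Editorial note: the work handler implements simple hysteresis. It stays in the internal thermal state until either the sensor reports the temperature has dropped below min_temp or, on parts without a readable sensor, the high-to-low threshold interrupt fires; only then does it fall back to the user-selected state. A compact model of that decision (hypothetical names):

/* Standalone model of the thermal-state decision in the work handler
 * above. Names and values are illustrative only. */
#include <stdio.h>

enum state { STATE_THERMAL, STATE_USER };

static enum state next_state(int have_sensor, int temp, int min_temp,
			     int high_to_low_irq)
{
	if (have_sensor)
		/* with a sensor: leave the thermal state once we cool
		 * below the low watermark */
		return temp < min_temp ? STATE_USER : STATE_THERMAL;
	/* without one: trust the high->low threshold interrupt */
	return high_to_low_irq ? STATE_USER : STATE_THERMAL;
}

int main(void)
{
	printf("%d\n", next_state(1, 85000, 90000, 0)); /* cooled: USER */
	printf("%d\n", next_state(1, 95000, 90000, 0)); /* hot: THERMAL */
	printf("%d\n", next_state(0, 0, 0, 1));         /* irq fired: USER */
	return 0;
}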
708 708
709 static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, 709 static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
710 enum radeon_pm_state_type dpm_state) 710 enum radeon_pm_state_type dpm_state)
711 { 711 {
712 int i; 712 int i;
713 struct radeon_ps *ps; 713 struct radeon_ps *ps;
714 u32 ui_class; 714 u32 ui_class;
715 bool single_display = 715 bool single_display =
716 (rdev->pm.dpm.new_active_crtc_count < 2); 716 (rdev->pm.dpm.new_active_crtc_count < 2);
717 717
718 /* check if the vblank period is too short to adjust the mclk */ 718 /* check if the vblank period is too short to adjust the mclk */
719 if (single_display && rdev->asic->dpm.vblank_too_short) { 719 if (single_display && rdev->asic->dpm.vblank_too_short) {
720 if (radeon_dpm_vblank_too_short(rdev)) 720 if (radeon_dpm_vblank_too_short(rdev))
721 single_display = false; 721 single_display = false;
722 } 722 }
723 723
724 /* certain older asics have a separate 3D performance state, 724 /* certain older asics have a separate 3D performance state,
725 * so try that first if the user selected performance 725 * so try that first if the user selected performance
726 */ 726 */
727 if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) 727 if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
728 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; 728 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
729 /* balanced states don't exist at the moment */ 729 /* balanced states don't exist at the moment */
730 if (dpm_state == POWER_STATE_TYPE_BALANCED) 730 if (dpm_state == POWER_STATE_TYPE_BALANCED)
731 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 731 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
732 732
733 restart_search: 733 restart_search:
734 /* Pick the best power state based on current conditions */ 734 /* Pick the best power state based on current conditions */
735 for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 735 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
736 ps = &rdev->pm.dpm.ps[i]; 736 ps = &rdev->pm.dpm.ps[i];
737 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; 737 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
738 switch (dpm_state) { 738 switch (dpm_state) {
739 /* user states */ 739 /* user states */
740 case POWER_STATE_TYPE_BATTERY: 740 case POWER_STATE_TYPE_BATTERY:
741 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { 741 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
742 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 742 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
743 if (single_display) 743 if (single_display)
744 return ps; 744 return ps;
745 } else 745 } else
746 return ps; 746 return ps;
747 } 747 }
748 break; 748 break;
749 case POWER_STATE_TYPE_BALANCED: 749 case POWER_STATE_TYPE_BALANCED:
750 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { 750 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
751 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 751 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
752 if (single_display) 752 if (single_display)
753 return ps; 753 return ps;
754 } else 754 } else
755 return ps; 755 return ps;
756 } 756 }
757 break; 757 break;
758 case POWER_STATE_TYPE_PERFORMANCE: 758 case POWER_STATE_TYPE_PERFORMANCE:
759 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { 759 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
760 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 760 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
761 if (single_display) 761 if (single_display)
762 return ps; 762 return ps;
763 } else 763 } else
764 return ps; 764 return ps;
765 } 765 }
766 break; 766 break;
767 /* internal states */ 767 /* internal states */
768 case POWER_STATE_TYPE_INTERNAL_UVD: 768 case POWER_STATE_TYPE_INTERNAL_UVD:
769 if (rdev->pm.dpm.uvd_ps) 769 if (rdev->pm.dpm.uvd_ps)
770 return rdev->pm.dpm.uvd_ps; 770 return rdev->pm.dpm.uvd_ps;
771 else 771 else
772 break; 772 break;
773 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 773 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
774 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) 774 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
775 return ps; 775 return ps;
776 break; 776 break;
777 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 777 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
778 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) 778 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
779 return ps; 779 return ps;
780 break; 780 break;
781 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 781 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
782 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) 782 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
783 return ps; 783 return ps;
784 break; 784 break;
785 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 785 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
786 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) 786 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
787 return ps; 787 return ps;
788 break; 788 break;
789 case POWER_STATE_TYPE_INTERNAL_BOOT: 789 case POWER_STATE_TYPE_INTERNAL_BOOT:
790 return rdev->pm.dpm.boot_ps; 790 return rdev->pm.dpm.boot_ps;
791 case POWER_STATE_TYPE_INTERNAL_THERMAL: 791 case POWER_STATE_TYPE_INTERNAL_THERMAL:
792 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) 792 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
793 return ps; 793 return ps;
794 break; 794 break;
795 case POWER_STATE_TYPE_INTERNAL_ACPI: 795 case POWER_STATE_TYPE_INTERNAL_ACPI:
796 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) 796 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
797 return ps; 797 return ps;
798 break; 798 break;
799 case POWER_STATE_TYPE_INTERNAL_ULV: 799 case POWER_STATE_TYPE_INTERNAL_ULV:
800 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) 800 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
801 return ps; 801 return ps;
802 break; 802 break;
803 case POWER_STATE_TYPE_INTERNAL_3DPERF: 803 case POWER_STATE_TYPE_INTERNAL_3DPERF:
804 if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) 804 if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
805 return ps; 805 return ps;
806 break; 806 break;
807 default: 807 default:
808 break; 808 break;
809 } 809 }
810 } 810 }
811 /* use a fallback state if we didn't match */ 811 /* use a fallback state if we didn't match */
812 switch (dpm_state) { 812 switch (dpm_state) {
813 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 813 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
814 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 814 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
815 goto restart_search; 815 goto restart_search;
816 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 816 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
817 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 817 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
818 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 818 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
819 if (rdev->pm.dpm.uvd_ps) { 819 if (rdev->pm.dpm.uvd_ps) {
820 return rdev->pm.dpm.uvd_ps; 820 return rdev->pm.dpm.uvd_ps;
821 } else { 821 } else {
822 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 822 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
823 goto restart_search; 823 goto restart_search;
824 } 824 }
825 case POWER_STATE_TYPE_INTERNAL_THERMAL: 825 case POWER_STATE_TYPE_INTERNAL_THERMAL:
826 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; 826 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
827 goto restart_search; 827 goto restart_search;
828 case POWER_STATE_TYPE_INTERNAL_ACPI: 828 case POWER_STATE_TYPE_INTERNAL_ACPI:
829 dpm_state = POWER_STATE_TYPE_BATTERY; 829 dpm_state = POWER_STATE_TYPE_BATTERY;
830 goto restart_search; 830 goto restart_search;
831 case POWER_STATE_TYPE_BATTERY: 831 case POWER_STATE_TYPE_BATTERY:
832 case POWER_STATE_TYPE_BALANCED: 832 case POWER_STATE_TYPE_BALANCED:
833 case POWER_STATE_TYPE_INTERNAL_3DPERF: 833 case POWER_STATE_TYPE_INTERNAL_3DPERF:
834 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 834 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
835 goto restart_search; 835 goto restart_search;
836 default: 836 default:
837 break; 837 break;
838 } 838 }
839 839
840 return NULL; 840 return NULL;
841 } 841 }
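
Editorial note: the picker above is a search with a fallback chain. If no power state matches the requested class, dpm_state is demoted (for example UVD_SD falls back to UVD_HD, thermal to ACPI to battery, and most things ultimately to performance) and the table is rescanned via the restart_search label. The same pattern, written as a loop over a subset of the states with a toy table and match rule:

/* Model of the restart_search fallback chain above, written as a loop.
 * State names mirror the driver; the table and match rule are toy ones. */
#include <stdio.h>

enum dpm_state { S_BATTERY, S_BALANCED, S_PERF, S_THERMAL, S_ACPI, S_NONE };

static enum dpm_state fallback(enum dpm_state s)
{
	switch (s) {
	case S_THERMAL:  return S_ACPI;     /* no thermal state: try ACPI */
	case S_ACPI:     return S_BATTERY;  /* no ACPI state: try battery */
	case S_BATTERY:
	case S_BALANCED: return S_PERF;     /* last resort: performance */
	default:         return S_NONE;     /* give up, caller gets NULL */
	}
}

static int find_state(const enum dpm_state *table, int n, enum dpm_state want)
{
	while (want != S_NONE) {
		for (int i = 0; i < n; i++)
			if (table[i] == want)
				return i;	/* first match wins */
		want = fallback(want);		/* demote and rescan */
	}
	return -1;
}

int main(void)
{
	enum dpm_state table[] = { S_BATTERY, S_PERF };
	/* thermal falls through ACPI to battery: prints 0 */
	printf("%d\n", find_state(table, 2, S_THERMAL));
	return 0;
}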
842 842
843 static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) 843 static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
844 { 844 {
845 int i; 845 int i;
846 struct radeon_ps *ps; 846 struct radeon_ps *ps;
847 enum radeon_pm_state_type dpm_state; 847 enum radeon_pm_state_type dpm_state;
848 int ret; 848 int ret;
849 849
850 /* if dpm init failed */ 850 /* if dpm init failed */
851 if (!rdev->pm.dpm_enabled) 851 if (!rdev->pm.dpm_enabled)
852 return; 852 return;
853 853
854 if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) { 854 if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
855 /* add other state override checks here */ 855 /* add other state override checks here */
856 if ((!rdev->pm.dpm.thermal_active) && 856 if ((!rdev->pm.dpm.thermal_active) &&
857 (!rdev->pm.dpm.uvd_active)) 857 (!rdev->pm.dpm.uvd_active))
858 rdev->pm.dpm.state = rdev->pm.dpm.user_state; 858 rdev->pm.dpm.state = rdev->pm.dpm.user_state;
859 } 859 }
860 dpm_state = rdev->pm.dpm.state; 860 dpm_state = rdev->pm.dpm.state;
861 861
862 ps = radeon_dpm_pick_power_state(rdev, dpm_state); 862 ps = radeon_dpm_pick_power_state(rdev, dpm_state);
863 if (ps) 863 if (ps)
864 rdev->pm.dpm.requested_ps = ps; 864 rdev->pm.dpm.requested_ps = ps;
865 else 865 else
866 return; 866 return;
867 867
868 /* no need to reprogram if nothing changed unless we are on BTC+ */ 868 /* no need to reprogram if nothing changed unless we are on BTC+ */
869 if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) { 869 if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
870 /* vce just modifies an existing state so force a change */ 870 /* vce just modifies an existing state so force a change */
871 if (ps->vce_active != rdev->pm.dpm.vce_active) 871 if (ps->vce_active != rdev->pm.dpm.vce_active)
872 goto force; 872 goto force;
873 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { 873 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
874 /* for pre-BTC and APUs if the num crtcs changed but state is the same, 874 /* for pre-BTC and APUs if the num crtcs changed but state is the same,
875 * all we need to do is update the display configuration. 875 * all we need to do is update the display configuration.
876 */ 876 */
877 if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) { 877 if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
878 /* update display watermarks based on new power state */ 878 /* update display watermarks based on new power state */
879 radeon_bandwidth_update(rdev); 879 radeon_bandwidth_update(rdev);
880 /* update displays */ 880 /* update displays */
881 radeon_dpm_display_configuration_changed(rdev); 881 radeon_dpm_display_configuration_changed(rdev);
882 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 882 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
883 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 883 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
884 } 884 }
885 return; 885 return;
886 } else { 886 } else {
887 /* for BTC+ if the num crtcs hasn't changed and state is the same, 887 /* for BTC+ if the num crtcs hasn't changed and state is the same,
888 * nothing to do; if the num crtcs is > 1 and state is the same, 888 * nothing to do; if the num crtcs is > 1 and state is the same,
889 * update display configuration. 889 * update display configuration.
890 */ 890 */
891 if (rdev->pm.dpm.new_active_crtcs == 891 if (rdev->pm.dpm.new_active_crtcs ==
892 rdev->pm.dpm.current_active_crtcs) { 892 rdev->pm.dpm.current_active_crtcs) {
893 return; 893 return;
894 } else { 894 } else {
895 if ((rdev->pm.dpm.current_active_crtc_count > 1) && 895 if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
896 (rdev->pm.dpm.new_active_crtc_count > 1)) { 896 (rdev->pm.dpm.new_active_crtc_count > 1)) {
897 /* update display watermarks based on new power state */ 897 /* update display watermarks based on new power state */
898 radeon_bandwidth_update(rdev); 898 radeon_bandwidth_update(rdev);
899 /* update displays */ 899 /* update displays */
900 radeon_dpm_display_configuration_changed(rdev); 900 radeon_dpm_display_configuration_changed(rdev);
901 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 901 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
902 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 902 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
903 return; 903 return;
904 } 904 }
905 } 905 }
906 } 906 }
907 } 907 }
908 908
909 force: 909 force:
910 if (radeon_dpm == 1) { 910 if (radeon_dpm == 1) {
911 printk("switching from power state:\n"); 911 printk("switching from power state:\n");
912 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps); 912 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
913 printk("switching to power state:\n"); 913 printk("switching to power state:\n");
914 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps); 914 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
915 } 915 }
916 916
917 mutex_lock(&rdev->ddev->struct_mutex); 917 mutex_lock(&rdev->ddev->struct_mutex);
918 down_write(&rdev->pm.mclk_lock); 918 down_write(&rdev->pm.mclk_lock);
919 mutex_lock(&rdev->ring_lock); 919 mutex_lock(&rdev->ring_lock);
920 920
921 /* update whether vce is active */ 921 /* update whether vce is active */
922 ps->vce_active = rdev->pm.dpm.vce_active; 922 ps->vce_active = rdev->pm.dpm.vce_active;
923 923
924 ret = radeon_dpm_pre_set_power_state(rdev); 924 ret = radeon_dpm_pre_set_power_state(rdev);
925 if (ret) 925 if (ret)
926 goto done; 926 goto done;
927 927
928 /* update display watermarks based on new power state */ 928 /* update display watermarks based on new power state */
929 radeon_bandwidth_update(rdev); 929 radeon_bandwidth_update(rdev);
930 /* update displays */ 930 /* update displays */
931 radeon_dpm_display_configuration_changed(rdev); 931 radeon_dpm_display_configuration_changed(rdev);
932 932
933 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 933 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
934 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 934 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
935 935
936 /* wait for the rings to drain */ 936 /* wait for the rings to drain */
937 for (i = 0; i < RADEON_NUM_RINGS; i++) { 937 for (i = 0; i < RADEON_NUM_RINGS; i++) {
938 struct radeon_ring *ring = &rdev->ring[i]; 938 struct radeon_ring *ring = &rdev->ring[i];
939 if (ring->ready) 939 if (ring->ready)
940 radeon_fence_wait_empty(rdev, i); 940 radeon_fence_wait_empty(rdev, i);
941 } 941 }
942 942
943 /* program the new power state */ 943 /* program the new power state */
944 radeon_dpm_set_power_state(rdev); 944 radeon_dpm_set_power_state(rdev);
945 945
946 /* update current power state */ 946 /* update current power state */
947 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps; 947 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;
948 948
949 radeon_dpm_post_set_power_state(rdev); 949 radeon_dpm_post_set_power_state(rdev);
950 950
951 if (rdev->asic->dpm.force_performance_level) { 951 if (rdev->asic->dpm.force_performance_level) {
952 if (rdev->pm.dpm.thermal_active) { 952 if (rdev->pm.dpm.thermal_active) {
953 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 953 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
954 /* force low perf level for thermal */ 954 /* force low perf level for thermal */
955 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW); 955 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
956 /* save the user's level */ 956 /* save the user's level */
957 rdev->pm.dpm.forced_level = level; 957 rdev->pm.dpm.forced_level = level;
958 } else { 958 } else {
959 /* otherwise, user selected level */ 959 /* otherwise, user selected level */
960 radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level); 960 radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
961 } 961 }
962 } 962 }
963 963
964 done: 964 done:
965 mutex_unlock(&rdev->ring_lock); 965 mutex_unlock(&rdev->ring_lock);
966 up_write(&rdev->pm.mclk_lock); 966 up_write(&rdev->pm.mclk_lock);
967 mutex_unlock(&rdev->ddev->struct_mutex); 967 mutex_unlock(&rdev->ddev->struct_mutex);
968 } 968 }
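
Editorial note: before taking the heavyweight reprogramming path, the function compares the requested and current power states and the active-CRTC masks. Pre-BTC parts and APUs only need a display-configuration update when the CRTC set changed, while BTC+ additionally skips all work when the CRTC set is unchanged, or does a display-only update when more than one head stays active on both sides. A boiled-down model of that decision tree (the result labels are illustrative, not driver constants):

/* Decision-tree model of the "no need to reprogram" fast path above. */
#include <stdio.h>

enum action { FULL_SWITCH, DISPLAY_UPDATE_ONLY, NOTHING };

static enum action decide(int same_ps, int vce_changed, int pre_btc_or_apu,
			  unsigned cur_crtcs, unsigned new_crtcs,
			  int cur_count, int new_count)
{
	if (!same_ps)
		return FULL_SWITCH;		/* different power state */
	if (vce_changed)
		return FULL_SWITCH;		/* vce modifies state in place */
	if (pre_btc_or_apu)
		return cur_crtcs != new_crtcs ? DISPLAY_UPDATE_ONLY : NOTHING;
	if (new_crtcs == cur_crtcs)
		return NOTHING;			/* BTC+: same displays, done */
	if (cur_count > 1 && new_count > 1)
		return DISPLAY_UPDATE_ONLY;	/* BTC+: multi-head stays multi-head */
	return FULL_SWITCH;			/* crossing the 1-display boundary */
}

int main(void)
{
	/* same state, BTC+, dropping from two heads to one: full switch (0) */
	printf("%d\n", decide(1, 0, 0, 0x3, 0x1, 2, 1));
	return 0;
}

Note also the fixed lock order in the full path: struct_mutex, then mclk_lock, then ring_lock, released in reverse at the done label.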
969 969
970 void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) 970 void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
971 { 971 {
972 enum radeon_pm_state_type dpm_state; 972 enum radeon_pm_state_type dpm_state;
973 973
974 if (rdev->asic->dpm.powergate_uvd) { 974 if (rdev->asic->dpm.powergate_uvd) {
975 mutex_lock(&rdev->pm.mutex); 975 mutex_lock(&rdev->pm.mutex);
976 /* don't powergate anything if we 976 /* don't powergate anything if we
977 have active but paused streams */ 977 have active but paused streams */
978 enable |= rdev->pm.dpm.sd > 0; 978 enable |= rdev->pm.dpm.sd > 0;
979 enable |= rdev->pm.dpm.hd > 0; 979 enable |= rdev->pm.dpm.hd > 0;
980 /* enable/disable UVD */ 980 /* enable/disable UVD */
981 radeon_dpm_powergate_uvd(rdev, !enable); 981 radeon_dpm_powergate_uvd(rdev, !enable);
982 mutex_unlock(&rdev->pm.mutex); 982 mutex_unlock(&rdev->pm.mutex);
983 } else { 983 } else {
984 if (enable) { 984 if (enable) {
985 mutex_lock(&rdev->pm.mutex); 985 mutex_lock(&rdev->pm.mutex);
986 rdev->pm.dpm.uvd_active = true; 986 rdev->pm.dpm.uvd_active = true;
987 if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) 987 if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
988 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; 988 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
989 else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) 989 else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
990 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 990 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
991 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1)) 991 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
992 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 992 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
993 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) 993 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
994 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; 994 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
995 else 995 else
996 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; 996 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
997 rdev->pm.dpm.state = dpm_state; 997 rdev->pm.dpm.state = dpm_state;
998 mutex_unlock(&rdev->pm.mutex); 998 mutex_unlock(&rdev->pm.mutex);
999 } else { 999 } else {
1000 mutex_lock(&rdev->pm.mutex); 1000 mutex_lock(&rdev->pm.mutex);
1001 rdev->pm.dpm.uvd_active = false; 1001 rdev->pm.dpm.uvd_active = false;
1002 mutex_unlock(&rdev->pm.mutex); 1002 mutex_unlock(&rdev->pm.mutex);
1003 } 1003 }
1004 1004
1005 radeon_pm_compute_clocks(rdev); 1005 radeon_pm_compute_clocks(rdev);
1006 } 1006 }
1007 } 1007 }
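
Editorial note: the sd/hd counters track how many SD and HD UVD streams are open, and the if/else ladder above maps them to progressively faster internal states: one SD stream gets UVD_SD, two SD streams or one HD stream get UVD_HD, two HD streams get UVD_HD2, and anything heavier or mixed falls back to the generic UVD state. The same mapping in function form (the lookup helper is illustrative; the state names mirror the driver's):

/* Table form of the (sd, hd) -> UVD power-state mapping above. */
#include <stdio.h>

enum uvd_state { UVD_SD, UVD_HD, UVD_HD2, UVD_ANY };

static enum uvd_state uvd_state_for(int sd, int hd)
{
	if (sd == 1 && hd == 0) return UVD_SD;
	if (sd == 2 && hd == 0) return UVD_HD;	/* two SD streams need HD clocks */
	if (sd == 0 && hd == 1) return UVD_HD;
	if (sd == 0 && hd == 2) return UVD_HD2;
	return UVD_ANY;				/* mixed/heavier loads */
}

int main(void)
{
	printf("%d %d %d\n", uvd_state_for(1, 0), uvd_state_for(0, 2),
	       uvd_state_for(3, 1));
	return 0;
}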
1008 1008
1009 void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable) 1009 void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
1010 { 1010 {
1011 if (enable) { 1011 if (enable) {
1012 mutex_lock(&rdev->pm.mutex); 1012 mutex_lock(&rdev->pm.mutex);
1013 rdev->pm.dpm.vce_active = true; 1013 rdev->pm.dpm.vce_active = true;
1014 /* XXX select vce level based on ring/task */ 1014 /* XXX select vce level based on ring/task */
1015 rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL; 1015 rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
1016 mutex_unlock(&rdev->pm.mutex); 1016 mutex_unlock(&rdev->pm.mutex);
1017 } else { 1017 } else {
1018 mutex_lock(&rdev->pm.mutex); 1018 mutex_lock(&rdev->pm.mutex);
1019 rdev->pm.dpm.vce_active = false; 1019 rdev->pm.dpm.vce_active = false;
1020 mutex_unlock(&rdev->pm.mutex); 1020 mutex_unlock(&rdev->pm.mutex);
1021 } 1021 }
1022 1022
1023 radeon_pm_compute_clocks(rdev); 1023 radeon_pm_compute_clocks(rdev);
1024 } 1024 }
1025 1025
1026 static void radeon_pm_suspend_old(struct radeon_device *rdev) 1026 static void radeon_pm_suspend_old(struct radeon_device *rdev)
1027 { 1027 {
1028 mutex_lock(&rdev->pm.mutex); 1028 mutex_lock(&rdev->pm.mutex);
1029 if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 1029 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1030 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) 1030 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
1031 rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; 1031 rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
1032 } 1032 }
1033 mutex_unlock(&rdev->pm.mutex); 1033 mutex_unlock(&rdev->pm.mutex);
1034 1034
1035 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 1035 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1036 } 1036 }
1037 1037
1038 static void radeon_pm_suspend_dpm(struct radeon_device *rdev) 1038 static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
1039 { 1039 {
1040 mutex_lock(&rdev->pm.mutex); 1040 mutex_lock(&rdev->pm.mutex);
1041 /* disable dpm */ 1041 /* disable dpm */
1042 radeon_dpm_disable(rdev); 1042 radeon_dpm_disable(rdev);
1043 /* reset the power state */ 1043 /* reset the power state */
1044 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 1044 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1045 rdev->pm.dpm_enabled = false; 1045 rdev->pm.dpm_enabled = false;
1046 mutex_unlock(&rdev->pm.mutex); 1046 mutex_unlock(&rdev->pm.mutex);
1047 } 1047 }
1048 1048
1049 void radeon_pm_suspend(struct radeon_device *rdev) 1049 void radeon_pm_suspend(struct radeon_device *rdev)
1050 { 1050 {
1051 if (rdev->pm.pm_method == PM_METHOD_DPM) 1051 if (rdev->pm.pm_method == PM_METHOD_DPM)
1052 radeon_pm_suspend_dpm(rdev); 1052 radeon_pm_suspend_dpm(rdev);
1053 else 1053 else
1054 radeon_pm_suspend_old(rdev); 1054 radeon_pm_suspend_old(rdev);
1055 } 1055 }
1056 1056
1057 static void radeon_pm_resume_old(struct radeon_device *rdev) 1057 static void radeon_pm_resume_old(struct radeon_device *rdev)
1058 { 1058 {
1059 /* set up the default clocks if the MC ucode is loaded */ 1059 /* set up the default clocks if the MC ucode is loaded */
1060 if ((rdev->family >= CHIP_BARTS) && 1060 if ((rdev->family >= CHIP_BARTS) &&
1061 (rdev->family <= CHIP_CAYMAN) && 1061 (rdev->family <= CHIP_CAYMAN) &&
1062 rdev->mc_fw) { 1062 rdev->mc_fw) {
1063 if (rdev->pm.default_vddc) 1063 if (rdev->pm.default_vddc)
1064 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1064 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1065 SET_VOLTAGE_TYPE_ASIC_VDDC); 1065 SET_VOLTAGE_TYPE_ASIC_VDDC);
1066 if (rdev->pm.default_vddci) 1066 if (rdev->pm.default_vddci)
1067 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1067 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1068 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1068 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1069 if (rdev->pm.default_sclk) 1069 if (rdev->pm.default_sclk)
1070 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1070 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1071 if (rdev->pm.default_mclk) 1071 if (rdev->pm.default_mclk)
1072 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1072 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1073 } 1073 }
1074 /* asic init will reset the default power state */ 1074 /* asic init will reset the default power state */
1075 mutex_lock(&rdev->pm.mutex); 1075 mutex_lock(&rdev->pm.mutex);
1076 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; 1076 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
1077 rdev->pm.current_clock_mode_index = 0; 1077 rdev->pm.current_clock_mode_index = 0;
1078 rdev->pm.current_sclk = rdev->pm.default_sclk; 1078 rdev->pm.current_sclk = rdev->pm.default_sclk;
1079 rdev->pm.current_mclk = rdev->pm.default_mclk; 1079 rdev->pm.current_mclk = rdev->pm.default_mclk;
1080 if (rdev->pm.power_state) { 1080 if (rdev->pm.power_state) {
1081 rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; 1081 rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
1082 rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; 1082 rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
1083 } 1083 }
1084 if (rdev->pm.pm_method == PM_METHOD_DYNPM 1084 if (rdev->pm.pm_method == PM_METHOD_DYNPM
1085 && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { 1085 && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
1086 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 1086 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1087 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1087 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1088 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1088 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1089 } 1089 }
1090 mutex_unlock(&rdev->pm.mutex); 1090 mutex_unlock(&rdev->pm.mutex);
1091 radeon_pm_compute_clocks(rdev); 1091 radeon_pm_compute_clocks(rdev);
1092 } 1092 }
1093 1093
1094 static void radeon_pm_resume_dpm(struct radeon_device *rdev) 1094 static void radeon_pm_resume_dpm(struct radeon_device *rdev)
1095 { 1095 {
1096 int ret; 1096 int ret;
1097 1097
1098 /* asic init will reset to the boot state */ 1098 /* asic init will reset to the boot state */
1099 mutex_lock(&rdev->pm.mutex); 1099 mutex_lock(&rdev->pm.mutex);
1100 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 1100 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1101 radeon_dpm_setup_asic(rdev); 1101 radeon_dpm_setup_asic(rdev);
1102 ret = radeon_dpm_enable(rdev); 1102 ret = radeon_dpm_enable(rdev);
1103 mutex_unlock(&rdev->pm.mutex); 1103 mutex_unlock(&rdev->pm.mutex);
1104 if (ret) 1104 if (ret)
1105 goto dpm_resume_fail; 1105 goto dpm_resume_fail;
1106 rdev->pm.dpm_enabled = true; 1106 rdev->pm.dpm_enabled = true;
1107 radeon_pm_compute_clocks(rdev);
1108 return; 1107 return;
1109 1108
1110 dpm_resume_fail: 1109 dpm_resume_fail:
1111 DRM_ERROR("radeon: dpm resume failed\n"); 1110 DRM_ERROR("radeon: dpm resume failed\n");
1112 if ((rdev->family >= CHIP_BARTS) && 1111 if ((rdev->family >= CHIP_BARTS) &&
1113 (rdev->family <= CHIP_CAYMAN) && 1112 (rdev->family <= CHIP_CAYMAN) &&
1114 rdev->mc_fw) { 1113 rdev->mc_fw) {
1115 if (rdev->pm.default_vddc) 1114 if (rdev->pm.default_vddc)
1116 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1115 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1117 SET_VOLTAGE_TYPE_ASIC_VDDC); 1116 SET_VOLTAGE_TYPE_ASIC_VDDC);
1118 if (rdev->pm.default_vddci) 1117 if (rdev->pm.default_vddci)
1119 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1118 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1120 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1119 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1121 if (rdev->pm.default_sclk) 1120 if (rdev->pm.default_sclk)
1122 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1121 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1123 if (rdev->pm.default_mclk) 1122 if (rdev->pm.default_mclk)
1124 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1123 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1125 } 1124 }
1126 } 1125 }
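
Editorial note: the restore-defaults block in dpm_resume_fail is the same BARTS..CAYMAN vddc/vddci/sclk/mclk sequence that appears in radeon_pm_resume_old() above and twice more below (in radeon_pm_init_old() and in radeon_pm_init_dpm()'s dpm_failed path). If this code is touched again, factoring the four copies into one helper would keep them from drifting. A hedged sketch; the helper name is made up and does not exist in the tree, but every call inside it is taken from the surrounding code:

/* Hypothetical helper collecting the four duplicated fallback blocks.
 * Sketch only, not part of the driver. */
static void radeon_pm_restore_default_clocks(struct radeon_device *rdev)
{
	/* only meaningful on BARTS..CAYMAN parts with MC ucode loaded */
	if ((rdev->family < CHIP_BARTS) ||
	    (rdev->family > CHIP_CAYMAN) ||
	    !rdev->mc_fw)
		return;

	if (rdev->pm.default_vddc)
		radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
					SET_VOLTAGE_TYPE_ASIC_VDDC);
	if (rdev->pm.default_vddci)
		radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
					SET_VOLTAGE_TYPE_ASIC_VDDCI);
	if (rdev->pm.default_sclk)
		radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
	if (rdev->pm.default_mclk)
		radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
}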
1127 1126
1128 void radeon_pm_resume(struct radeon_device *rdev) 1127 void radeon_pm_resume(struct radeon_device *rdev)
1129 { 1128 {
1130 if (rdev->pm.pm_method == PM_METHOD_DPM) 1129 if (rdev->pm.pm_method == PM_METHOD_DPM)
1131 radeon_pm_resume_dpm(rdev); 1130 radeon_pm_resume_dpm(rdev);
1132 else 1131 else
1133 radeon_pm_resume_old(rdev); 1132 radeon_pm_resume_old(rdev);
1134 } 1133 }
1135 1134
1136 static int radeon_pm_init_old(struct radeon_device *rdev) 1135 static int radeon_pm_init_old(struct radeon_device *rdev)
1137 { 1136 {
1138 int ret; 1137 int ret;
1139 1138
1140 rdev->pm.profile = PM_PROFILE_DEFAULT; 1139 rdev->pm.profile = PM_PROFILE_DEFAULT;
1141 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 1140 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1142 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1141 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1143 rdev->pm.dynpm_can_upclock = true; 1142 rdev->pm.dynpm_can_upclock = true;
1144 rdev->pm.dynpm_can_downclock = true; 1143 rdev->pm.dynpm_can_downclock = true;
1145 rdev->pm.default_sclk = rdev->clock.default_sclk; 1144 rdev->pm.default_sclk = rdev->clock.default_sclk;
1146 rdev->pm.default_mclk = rdev->clock.default_mclk; 1145 rdev->pm.default_mclk = rdev->clock.default_mclk;
1147 rdev->pm.current_sclk = rdev->clock.default_sclk; 1146 rdev->pm.current_sclk = rdev->clock.default_sclk;
1148 rdev->pm.current_mclk = rdev->clock.default_mclk; 1147 rdev->pm.current_mclk = rdev->clock.default_mclk;
1149 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; 1148 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1150 1149
1151 if (rdev->bios) { 1150 if (rdev->bios) {
1152 if (rdev->is_atom_bios) 1151 if (rdev->is_atom_bios)
1153 radeon_atombios_get_power_modes(rdev); 1152 radeon_atombios_get_power_modes(rdev);
1154 else 1153 else
1155 radeon_combios_get_power_modes(rdev); 1154 radeon_combios_get_power_modes(rdev);
1156 radeon_pm_print_states(rdev); 1155 radeon_pm_print_states(rdev);
1157 radeon_pm_init_profile(rdev); 1156 radeon_pm_init_profile(rdev);
1158 /* set up the default clocks if the MC ucode is loaded */ 1157 /* set up the default clocks if the MC ucode is loaded */
1159 if ((rdev->family >= CHIP_BARTS) && 1158 if ((rdev->family >= CHIP_BARTS) &&
1160 (rdev->family <= CHIP_CAYMAN) && 1159 (rdev->family <= CHIP_CAYMAN) &&
1161 rdev->mc_fw) { 1160 rdev->mc_fw) {
1162 if (rdev->pm.default_vddc) 1161 if (rdev->pm.default_vddc)
1163 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1162 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1164 SET_VOLTAGE_TYPE_ASIC_VDDC); 1163 SET_VOLTAGE_TYPE_ASIC_VDDC);
1165 if (rdev->pm.default_vddci) 1164 if (rdev->pm.default_vddci)
1166 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1165 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1167 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1166 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1168 if (rdev->pm.default_sclk) 1167 if (rdev->pm.default_sclk)
1169 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1168 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1170 if (rdev->pm.default_mclk) 1169 if (rdev->pm.default_mclk)
1171 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1170 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1172 } 1171 }
1173 } 1172 }
1174 1173
1175 /* set up the internal thermal sensor if applicable */ 1174 /* set up the internal thermal sensor if applicable */
1176 ret = radeon_hwmon_init(rdev); 1175 ret = radeon_hwmon_init(rdev);
1177 if (ret) 1176 if (ret)
1178 return ret; 1177 return ret;
1179 1178
1180 INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); 1179 INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
1181 1180
1182 if (rdev->pm.num_power_states > 1) { 1181 if (rdev->pm.num_power_states > 1) {
1183 /* where's the best place to put these? */ 1182 /* where's the best place to put these? */
1184 ret = device_create_file(rdev->dev, &dev_attr_power_profile); 1183 ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1185 if (ret) 1184 if (ret)
1186 DRM_ERROR("failed to create device file for power profile\n"); 1185 DRM_ERROR("failed to create device file for power profile\n");
1187 ret = device_create_file(rdev->dev, &dev_attr_power_method); 1186 ret = device_create_file(rdev->dev, &dev_attr_power_method);
1188 if (ret) 1187 if (ret)
1189 DRM_ERROR("failed to create device file for power method\n"); 1188 DRM_ERROR("failed to create device file for power method\n");
1190 1189
1191 if (radeon_debugfs_pm_init(rdev)) { 1190 if (radeon_debugfs_pm_init(rdev)) {
1192 DRM_ERROR("Failed to register debugfs file for PM!\n"); 1191 DRM_ERROR("Failed to register debugfs file for PM!\n");
1193 } 1192 }
1194 1193
1195 DRM_INFO("radeon: power management initialized\n"); 1194 DRM_INFO("radeon: power management initialized\n");
1196 } 1195 }
1197 1196
1198 return 0; 1197 return 0;
1199 } 1198 }
1200 1199
1201 static void radeon_dpm_print_power_states(struct radeon_device *rdev) 1200 static void radeon_dpm_print_power_states(struct radeon_device *rdev)
1202 { 1201 {
1203 int i; 1202 int i;
1204 1203
1205 for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 1204 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1206 printk("== power state %d ==\n", i); 1205 printk("== power state %d ==\n", i);
1207 radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]); 1206 radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
1208 } 1207 }
1209 } 1208 }
1210 1209
1211 static int radeon_pm_init_dpm(struct radeon_device *rdev) 1210 static int radeon_pm_init_dpm(struct radeon_device *rdev)
1212 { 1211 {
1213 int ret; 1212 int ret;
1214 1213
1215 /* default to balanced state */ 1214 /* default to balanced state */
1216 rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 1215 rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
1217 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 1216 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
1218 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; 1217 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1219 rdev->pm.default_sclk = rdev->clock.default_sclk; 1218 rdev->pm.default_sclk = rdev->clock.default_sclk;
1220 rdev->pm.default_mclk = rdev->clock.default_mclk; 1219 rdev->pm.default_mclk = rdev->clock.default_mclk;
1221 rdev->pm.current_sclk = rdev->clock.default_sclk; 1220 rdev->pm.current_sclk = rdev->clock.default_sclk;
1222 rdev->pm.current_mclk = rdev->clock.default_mclk; 1221 rdev->pm.current_mclk = rdev->clock.default_mclk;
1223 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; 1222 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1224 1223
1225 if (rdev->bios && rdev->is_atom_bios) 1224 if (rdev->bios && rdev->is_atom_bios)
1226 radeon_atombios_get_power_modes(rdev); 1225 radeon_atombios_get_power_modes(rdev);
1227 else 1226 else
1228 return -EINVAL; 1227 return -EINVAL;
1229 1228
1230 /* set up the internal thermal sensor if applicable */ 1229 /* set up the internal thermal sensor if applicable */
1231 ret = radeon_hwmon_init(rdev); 1230 ret = radeon_hwmon_init(rdev);
1232 if (ret) 1231 if (ret)
1233 return ret; 1232 return ret;
1234 1233
1235 INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler); 1234 INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
1236 mutex_lock(&rdev->pm.mutex); 1235 mutex_lock(&rdev->pm.mutex);
1237 radeon_dpm_init(rdev); 1236 radeon_dpm_init(rdev);
1238 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 1237 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1239 if (radeon_dpm == 1) 1238 if (radeon_dpm == 1)
1240 radeon_dpm_print_power_states(rdev); 1239 radeon_dpm_print_power_states(rdev);
1241 radeon_dpm_setup_asic(rdev); 1240 radeon_dpm_setup_asic(rdev);
1242 ret = radeon_dpm_enable(rdev); 1241 ret = radeon_dpm_enable(rdev);
1243 mutex_unlock(&rdev->pm.mutex); 1242 mutex_unlock(&rdev->pm.mutex);
1244 if (ret) 1243 if (ret)
1245 goto dpm_failed; 1244 goto dpm_failed;
1246 rdev->pm.dpm_enabled = true; 1245 rdev->pm.dpm_enabled = true;
1247 1246
1248 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); 1247 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
1249 if (ret) 1248 if (ret)
1250 DRM_ERROR("failed to create device file for dpm state\n"); 1249 DRM_ERROR("failed to create device file for dpm state\n");
1251 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); 1250 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1252 if (ret) 1251 if (ret)
1253 DRM_ERROR("failed to create device file for dpm state\n"); 1252 DRM_ERROR("failed to create device file for dpm state\n");
1254 /* XXX: these are noops for dpm but are here for backwards compat */ 1253 /* XXX: these are noops for dpm but are here for backwards compat */
1255 ret = device_create_file(rdev->dev, &dev_attr_power_profile); 1254 ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1256 if (ret) 1255 if (ret)
1257 DRM_ERROR("failed to create device file for power profile\n"); 1256 DRM_ERROR("failed to create device file for power profile\n");
1258 ret = device_create_file(rdev->dev, &dev_attr_power_method); 1257 ret = device_create_file(rdev->dev, &dev_attr_power_method);
1259 if (ret) 1258 if (ret)
1260 DRM_ERROR("failed to create device file for power method\n"); 1259 DRM_ERROR("failed to create device file for power method\n");
1261 1260
1262 if (radeon_debugfs_pm_init(rdev)) { 1261 if (radeon_debugfs_pm_init(rdev)) {
1263 DRM_ERROR("Failed to register debugfs file for dpm!\n"); 1262 DRM_ERROR("Failed to register debugfs file for dpm!\n");
1264 } 1263 }
1265 1264
1266 DRM_INFO("radeon: dpm initialized\n"); 1265 DRM_INFO("radeon: dpm initialized\n");
1267 1266
1268 return 0; 1267 return 0;
1269 1268
1270 dpm_failed: 1269 dpm_failed:
1271 rdev->pm.dpm_enabled = false; 1270 rdev->pm.dpm_enabled = false;
1272 if ((rdev->family >= CHIP_BARTS) && 1271 if ((rdev->family >= CHIP_BARTS) &&
1273 (rdev->family <= CHIP_CAYMAN) && 1272 (rdev->family <= CHIP_CAYMAN) &&
1274 rdev->mc_fw) { 1273 rdev->mc_fw) {
1275 if (rdev->pm.default_vddc) 1274 if (rdev->pm.default_vddc)
1276 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1275 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1277 SET_VOLTAGE_TYPE_ASIC_VDDC); 1276 SET_VOLTAGE_TYPE_ASIC_VDDC);
1278 if (rdev->pm.default_vddci) 1277 if (rdev->pm.default_vddci)
1279 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1278 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1280 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1279 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1281 if (rdev->pm.default_sclk) 1280 if (rdev->pm.default_sclk)
1282 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1281 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1283 if (rdev->pm.default_mclk) 1282 if (rdev->pm.default_mclk)
1284 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1283 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1285 } 1284 }
1286 DRM_ERROR("radeon: dpm initialization failed\n"); 1285 DRM_ERROR("radeon: dpm initialization failed\n");
1287 return ret; 1286 return ret;
1288 } 1287 }
1289 1288
1290 int radeon_pm_init(struct radeon_device *rdev) 1289 int radeon_pm_init(struct radeon_device *rdev)
1291 { 1290 {
1292 /* enable dpm on rv6xx+ */ 1291 /* enable dpm on rv6xx+ */
1293 switch (rdev->family) { 1292 switch (rdev->family) {
1294 case CHIP_RV610: 1293 case CHIP_RV610:
1295 case CHIP_RV630: 1294 case CHIP_RV630:
1296 case CHIP_RV620: 1295 case CHIP_RV620:
1297 case CHIP_RV635: 1296 case CHIP_RV635:
1298 case CHIP_RV670: 1297 case CHIP_RV670:
1299 case CHIP_RS780: 1298 case CHIP_RS780:
1300 case CHIP_RS880: 1299 case CHIP_RS880:
1301 case CHIP_RV770: 1300 case CHIP_RV770:
1302 case CHIP_BARTS: 1301 case CHIP_BARTS:
1303 case CHIP_TURKS: 1302 case CHIP_TURKS:
1304 case CHIP_CAICOS: 1303 case CHIP_CAICOS:
1305 case CHIP_CAYMAN: 1304 case CHIP_CAYMAN:
1306 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1305 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1307 if (!rdev->rlc_fw) 1306 if (!rdev->rlc_fw)
1308 rdev->pm.pm_method = PM_METHOD_PROFILE; 1307 rdev->pm.pm_method = PM_METHOD_PROFILE;
1309 else if ((rdev->family >= CHIP_RV770) && 1308 else if ((rdev->family >= CHIP_RV770) &&
1310 (!(rdev->flags & RADEON_IS_IGP)) && 1309 (!(rdev->flags & RADEON_IS_IGP)) &&
1311 (!rdev->smc_fw)) 1310 (!rdev->smc_fw))
1312 rdev->pm.pm_method = PM_METHOD_PROFILE; 1311 rdev->pm.pm_method = PM_METHOD_PROFILE;
1313 else if (radeon_dpm == 1) 1312 else if (radeon_dpm == 1)
1314 rdev->pm.pm_method = PM_METHOD_DPM; 1313 rdev->pm.pm_method = PM_METHOD_DPM;
1315 else 1314 else
1316 rdev->pm.pm_method = PM_METHOD_PROFILE; 1315 rdev->pm.pm_method = PM_METHOD_PROFILE;
1317 break; 1316 break;
1318 case CHIP_RV730: 1317 case CHIP_RV730:
1319 case CHIP_RV710: 1318 case CHIP_RV710:
1320 case CHIP_RV740: 1319 case CHIP_RV740:
1321 case CHIP_CEDAR: 1320 case CHIP_CEDAR:
1322 case CHIP_REDWOOD: 1321 case CHIP_REDWOOD:
1323 case CHIP_JUNIPER: 1322 case CHIP_JUNIPER:
1324 case CHIP_CYPRESS: 1323 case CHIP_CYPRESS:
1325 case CHIP_HEMLOCK: 1324 case CHIP_HEMLOCK:
1326 case CHIP_PALM: 1325 case CHIP_PALM:
1327 case CHIP_SUMO: 1326 case CHIP_SUMO:
1328 case CHIP_SUMO2: 1327 case CHIP_SUMO2:
1329 case CHIP_ARUBA: 1328 case CHIP_ARUBA:
1330 case CHIP_TAHITI: 1329 case CHIP_TAHITI:
1331 case CHIP_PITCAIRN: 1330 case CHIP_PITCAIRN:
1332 case CHIP_VERDE: 1331 case CHIP_VERDE:
1333 case CHIP_OLAND: 1332 case CHIP_OLAND:
1334 case CHIP_HAINAN: 1333 case CHIP_HAINAN:
1335 case CHIP_BONAIRE: 1334 case CHIP_BONAIRE:
1336 case CHIP_KABINI: 1335 case CHIP_KABINI:
1337 case CHIP_KAVERI: 1336 case CHIP_KAVERI:
1338 case CHIP_HAWAII: 1337 case CHIP_HAWAII:
1339 case CHIP_MULLINS: 1338 case CHIP_MULLINS:
1340 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1339 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1341 if (!rdev->rlc_fw) 1340 if (!rdev->rlc_fw)
1342 rdev->pm.pm_method = PM_METHOD_PROFILE; 1341 rdev->pm.pm_method = PM_METHOD_PROFILE;
1343 else if ((rdev->family >= CHIP_RV770) && 1342 else if ((rdev->family >= CHIP_RV770) &&
1344 (!(rdev->flags & RADEON_IS_IGP)) && 1343 (!(rdev->flags & RADEON_IS_IGP)) &&
1345 (!rdev->smc_fw)) 1344 (!rdev->smc_fw))
1346 rdev->pm.pm_method = PM_METHOD_PROFILE; 1345 rdev->pm.pm_method = PM_METHOD_PROFILE;
1347 else if (radeon_dpm == 0) 1346 else if (radeon_dpm == 0)
1348 rdev->pm.pm_method = PM_METHOD_PROFILE; 1347 rdev->pm.pm_method = PM_METHOD_PROFILE;
1349 else 1348 else
1350 rdev->pm.pm_method = PM_METHOD_DPM; 1349 rdev->pm.pm_method = PM_METHOD_DPM;
1351 break; 1350 break;
1352 default: 1351 default:
1353 /* default to profile method */ 1352 /* default to profile method */
1354 rdev->pm.pm_method = PM_METHOD_PROFILE; 1353 rdev->pm.pm_method = PM_METHOD_PROFILE;
1355 break; 1354 break;
1356 } 1355 }
1357 1356
1358 if (rdev->pm.pm_method == PM_METHOD_DPM) 1357 if (rdev->pm.pm_method == PM_METHOD_DPM)
1359 return radeon_pm_init_dpm(rdev); 1358 return radeon_pm_init_dpm(rdev);
1360 else 1359 else
1361 return radeon_pm_init_old(rdev); 1360 return radeon_pm_init_old(rdev);
1362 } 1361 }
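
Editorial note: both switch arms above apply the same firmware gates (no RLC firmware, or an RV770+ dGPU without SMC firmware, forces the profile method); they differ only in the default when the radeon_dpm module parameter is left at auto (-1). The first chip group stays on the profile method unless DPM is forced on with radeon_dpm=1; the second defaults to DPM unless it is forced off with radeon_dpm=0. Condensed into one predicate (a hypothetical helper, not driver code):

/* Condensed model of the pm-method choice above. All inputs are plain
 * flags; 'dpm_by_default' distinguishes the two chip groups. */
#include <stdio.h>

static int use_dpm(int has_rlc_fw, int is_rv770_plus_dgpu_without_smc,
		   int radeon_dpm_param, int dpm_by_default)
{
	if (!has_rlc_fw)
		return 0;			/* DPM requires the RLC */
	if (is_rv770_plus_dgpu_without_smc)
		return 0;			/* RV770+ dGPU requires SMC */
	if (dpm_by_default)
		return radeon_dpm_param != 0;	/* opt out with radeon_dpm=0 */
	return radeon_dpm_param == 1;		/* opt in with radeon_dpm=1 */
}

int main(void)
{
	/* auto (-1) on a profile-default chip vs a dpm-default chip */
	printf("%d %d\n", use_dpm(1, 0, -1, 0), use_dpm(1, 0, -1, 1));
	return 0;
}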
1363 1362
1364 int radeon_pm_late_init(struct radeon_device *rdev) 1363 int radeon_pm_late_init(struct radeon_device *rdev)
1365 { 1364 {
1366 int ret = 0; 1365 int ret = 0;
1367 1366
1368 if (rdev->pm.pm_method == PM_METHOD_DPM) { 1367 if (rdev->pm.pm_method == PM_METHOD_DPM) {
1369 mutex_lock(&rdev->pm.mutex); 1368 mutex_lock(&rdev->pm.mutex);
1370 ret = radeon_dpm_late_enable(rdev); 1369 ret = radeon_dpm_late_enable(rdev);
1371 mutex_unlock(&rdev->pm.mutex); 1370 mutex_unlock(&rdev->pm.mutex);
1372 } 1371 }
1373 return ret; 1372 return ret;
1374 } 1373 }
1375 1374
1376 static void radeon_pm_fini_old(struct radeon_device *rdev) 1375 static void radeon_pm_fini_old(struct radeon_device *rdev)
1377 { 1376 {
1378 if (rdev->pm.num_power_states > 1) { 1377 if (rdev->pm.num_power_states > 1) {
1379 mutex_lock(&rdev->pm.mutex); 1378 mutex_lock(&rdev->pm.mutex);
1380 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 1379 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1381 rdev->pm.profile = PM_PROFILE_DEFAULT; 1380 rdev->pm.profile = PM_PROFILE_DEFAULT;
1382 radeon_pm_update_profile(rdev); 1381 radeon_pm_update_profile(rdev);
1383 radeon_pm_set_clocks(rdev); 1382 radeon_pm_set_clocks(rdev);
1384 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 1383 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1385 /* reset default clocks */ 1384 /* reset default clocks */
1386 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 1385 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1387 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 1386 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1388 radeon_pm_set_clocks(rdev); 1387 radeon_pm_set_clocks(rdev);
1389 } 1388 }
1390 mutex_unlock(&rdev->pm.mutex); 1389 mutex_unlock(&rdev->pm.mutex);
1391 1390
1392 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 1391 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1393 1392
1394 device_remove_file(rdev->dev, &dev_attr_power_profile); 1393 device_remove_file(rdev->dev, &dev_attr_power_profile);
1395 device_remove_file(rdev->dev, &dev_attr_power_method); 1394 device_remove_file(rdev->dev, &dev_attr_power_method);
1396 } 1395 }
1397 1396
1398 radeon_hwmon_fini(rdev); 1397 radeon_hwmon_fini(rdev);
1399 1398
1400 if (rdev->pm.power_state) 1399 if (rdev->pm.power_state)
1401 kfree(rdev->pm.power_state); 1400 kfree(rdev->pm.power_state);
1402 } 1401 }
1403 1402
1404 static void radeon_pm_fini_dpm(struct radeon_device *rdev) 1403 static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1405 { 1404 {
1406 if (rdev->pm.num_power_states > 1) { 1405 if (rdev->pm.num_power_states > 1) {
1407 mutex_lock(&rdev->pm.mutex); 1406 mutex_lock(&rdev->pm.mutex);
1408 radeon_dpm_disable(rdev); 1407 radeon_dpm_disable(rdev);
1409 mutex_unlock(&rdev->pm.mutex); 1408 mutex_unlock(&rdev->pm.mutex);
1410 1409
1411 device_remove_file(rdev->dev, &dev_attr_power_dpm_state); 1410 device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
1412 device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); 1411 device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1413 /* XXX backwards compat */ 1412 /* XXX backwards compat */
1414 device_remove_file(rdev->dev, &dev_attr_power_profile); 1413 device_remove_file(rdev->dev, &dev_attr_power_profile);
1415 device_remove_file(rdev->dev, &dev_attr_power_method); 1414 device_remove_file(rdev->dev, &dev_attr_power_method);
1416 } 1415 }
1417 radeon_dpm_fini(rdev); 1416 radeon_dpm_fini(rdev);
1418 1417
1419 radeon_hwmon_fini(rdev); 1418 radeon_hwmon_fini(rdev);
1420 1419
1421 if (rdev->pm.power_state) 1420 if (rdev->pm.power_state)
1422 kfree(rdev->pm.power_state); 1421 kfree(rdev->pm.power_state);
1423 } 1422 }
1424 1423
1425 void radeon_pm_fini(struct radeon_device *rdev) 1424 void radeon_pm_fini(struct radeon_device *rdev)
1426 { 1425 {
1427 if (rdev->pm.pm_method == PM_METHOD_DPM) 1426 if (rdev->pm.pm_method == PM_METHOD_DPM)
1428 radeon_pm_fini_dpm(rdev); 1427 radeon_pm_fini_dpm(rdev);
1429 else 1428 else
1430 radeon_pm_fini_old(rdev); 1429 radeon_pm_fini_old(rdev);
1431 } 1430 }
1432 1431
1433 static void radeon_pm_compute_clocks_old(struct radeon_device *rdev) 1432 static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
1434 { 1433 {
1435 struct drm_device *ddev = rdev->ddev; 1434 struct drm_device *ddev = rdev->ddev;
1436 struct drm_crtc *crtc; 1435 struct drm_crtc *crtc;
1437 struct radeon_crtc *radeon_crtc; 1436 struct radeon_crtc *radeon_crtc;
1438 1437
1439 if (rdev->pm.num_power_states < 2) 1438 if (rdev->pm.num_power_states < 2)
1440 return; 1439 return;
1441 1440
1442 mutex_lock(&rdev->pm.mutex); 1441 mutex_lock(&rdev->pm.mutex);
1443 1442
1444 rdev->pm.active_crtcs = 0; 1443 rdev->pm.active_crtcs = 0;
1445 rdev->pm.active_crtc_count = 0; 1444 rdev->pm.active_crtc_count = 0;
1446 if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { 1445 if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1447 list_for_each_entry(crtc, 1446 list_for_each_entry(crtc,
1448 &ddev->mode_config.crtc_list, head) { 1447 &ddev->mode_config.crtc_list, head) {
1449 radeon_crtc = to_radeon_crtc(crtc); 1448 radeon_crtc = to_radeon_crtc(crtc);
1450 if (radeon_crtc->enabled) { 1449 if (radeon_crtc->enabled) {
1451 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); 1450 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
1452 rdev->pm.active_crtc_count++; 1451 rdev->pm.active_crtc_count++;
1453 } 1452 }
1454 } 1453 }
1455 } 1454 }
1456 1455
1457 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 1456 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1458 radeon_pm_update_profile(rdev); 1457 radeon_pm_update_profile(rdev);
1459 radeon_pm_set_clocks(rdev); 1458 radeon_pm_set_clocks(rdev);
1460 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 1459 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1461 if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) { 1460 if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
1462 if (rdev->pm.active_crtc_count > 1) { 1461 if (rdev->pm.active_crtc_count > 1) {
1463 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { 1462 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
1464 cancel_delayed_work(&rdev->pm.dynpm_idle_work); 1463 cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1465 1464
1466 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; 1465 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
1467 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 1466 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1468 radeon_pm_get_dynpm_state(rdev); 1467 radeon_pm_get_dynpm_state(rdev);
1469 radeon_pm_set_clocks(rdev); 1468 radeon_pm_set_clocks(rdev);
1470 1469
1471 DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n"); 1470 DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
1472 } 1471 }
1473 } else if (rdev->pm.active_crtc_count == 1) { 1472 } else if (rdev->pm.active_crtc_count == 1) {
1474 /* TODO: Increase clocks if needed for current mode */ 1473 /* TODO: Increase clocks if needed for current mode */
1475 1474
1476 if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) { 1475 if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
1477 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 1476 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1478 rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK; 1477 rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
1479 radeon_pm_get_dynpm_state(rdev); 1478 radeon_pm_get_dynpm_state(rdev);
1480 radeon_pm_set_clocks(rdev); 1479 radeon_pm_set_clocks(rdev);
1481 1480
1482 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1481 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1483 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1482 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1484 } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { 1483 } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
1485 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 1484 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1486 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1485 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1487 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1486 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1488 DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); 1487 DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
1489 } 1488 }
1490 } else { /* count == 0 */ 1489 } else { /* count == 0 */
1491 if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) { 1490 if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
1492 cancel_delayed_work(&rdev->pm.dynpm_idle_work); 1491 cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1493 1492
1494 rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM; 1493 rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
1495 rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM; 1494 rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
1496 radeon_pm_get_dynpm_state(rdev); 1495 radeon_pm_get_dynpm_state(rdev);
1497 radeon_pm_set_clocks(rdev); 1496 radeon_pm_set_clocks(rdev);
1498 } 1497 }
1499 } 1498 }
1500 } 1499 }
1501 } 1500 }
1502 1501
1503 mutex_unlock(&rdev->pm.mutex); 1502 mutex_unlock(&rdev->pm.mutex);
1504 } 1503 }
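
Editorial note: the non-DPM path above is a small state machine keyed on the active CRTC count: more than one head pauses dynamic reclocking at default clocks, exactly one head arms (or re-arms) the idle worker, and zero heads drop to minimum clocks. The transitions, as a sketch (state names mirror the driver's DYNPM_STATE_* values; the transition function itself is an illustration):

/* State-transition model of radeon_pm_compute_clocks_old()'s dynpm
 * handling. */
#include <stdio.h>

enum dynpm { DISABLED, MINIMUM, PAUSED, ACTIVE };

static enum dynpm transition(enum dynpm s, int active_crtcs)
{
	if (s == DISABLED)
		return DISABLED;		/* user turned dynpm off */
	if (active_crtcs > 1)
		return s == ACTIVE ? PAUSED : s;  /* multi-head: park at default */
	if (active_crtcs == 1)
		return (s == MINIMUM || s == PAUSED) ? ACTIVE : s; /* arm idle work */
	return MINIMUM;				/* no heads: minimum clocks */
}

int main(void)
{
	enum dynpm s = ACTIVE;
	s = transition(s, 2);	/* -> PAUSED */
	s = transition(s, 1);	/* -> ACTIVE again */
	s = transition(s, 0);	/* -> MINIMUM */
	printf("%d\n", s);
	return 0;
}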
1505 1504
1506 static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev) 1505 static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1507 { 1506 {
1508 struct drm_device *ddev = rdev->ddev; 1507 struct drm_device *ddev = rdev->ddev;
1509 struct drm_crtc *crtc; 1508 struct drm_crtc *crtc;
1510 struct radeon_crtc *radeon_crtc; 1509 struct radeon_crtc *radeon_crtc;
1511 1510
1512 if (!rdev->pm.dpm_enabled) 1511 if (!rdev->pm.dpm_enabled)
1513 return; 1512 return;
1514 1513
1515 mutex_lock(&rdev->pm.mutex); 1514 mutex_lock(&rdev->pm.mutex);
1516 1515
1517 /* update active crtc counts */ 1516 /* update active crtc counts */
1518 rdev->pm.dpm.new_active_crtcs = 0; 1517 rdev->pm.dpm.new_active_crtcs = 0;
1519 rdev->pm.dpm.new_active_crtc_count = 0; 1518 rdev->pm.dpm.new_active_crtc_count = 0;
1520 if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { 1519 if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1521 list_for_each_entry(crtc, 1520 list_for_each_entry(crtc,
1522 &ddev->mode_config.crtc_list, head) { 1521 &ddev->mode_config.crtc_list, head) {
1523 radeon_crtc = to_radeon_crtc(crtc); 1522 radeon_crtc = to_radeon_crtc(crtc);
1524 if (crtc->enabled) { 1523 if (crtc->enabled) {
1525 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id); 1524 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
1526 rdev->pm.dpm.new_active_crtc_count++; 1525 rdev->pm.dpm.new_active_crtc_count++;
1527 } 1526 }
1528 } 1527 }
1529 } 1528 }
1530 1529
1531 /* update battery/ac status */ 1530 /* update battery/ac status */
1532 if (power_supply_is_system_supplied() > 0) 1531 if (power_supply_is_system_supplied() > 0)
1533 rdev->pm.dpm.ac_power = true; 1532 rdev->pm.dpm.ac_power = true;
1534 else 1533 else
1535 rdev->pm.dpm.ac_power = false; 1534 rdev->pm.dpm.ac_power = false;
1536 1535
1537 radeon_dpm_change_power_state_locked(rdev); 1536 radeon_dpm_change_power_state_locked(rdev);
1538 1537
1539 mutex_unlock(&rdev->pm.mutex); 1538 mutex_unlock(&rdev->pm.mutex);
1540 1539
1541 } 1540 }
1542 1541
1543 void radeon_pm_compute_clocks(struct radeon_device *rdev) 1542 void radeon_pm_compute_clocks(struct radeon_device *rdev)
1544 { 1543 {
1545 if (rdev->pm.pm_method == PM_METHOD_DPM) 1544 if (rdev->pm.pm_method == PM_METHOD_DPM)
1546 radeon_pm_compute_clocks_dpm(rdev); 1545 radeon_pm_compute_clocks_dpm(rdev);
1547 else 1546 else
1548 radeon_pm_compute_clocks_old(rdev); 1547 radeon_pm_compute_clocks_old(rdev);
1549 } 1548 }
1550 1549
1551 static bool radeon_pm_in_vbl(struct radeon_device *rdev) 1550 static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1552 { 1551 {
1553 int crtc, vpos, hpos, vbl_status; 1552 int crtc, vpos, hpos, vbl_status;
1554 bool in_vbl = true; 1553 bool in_vbl = true;
1555 1554
1556 /* Iterate over all active crtcs. All crtcs must be in vblank, 1555 /* Iterate over all active crtcs. All crtcs must be in vblank,
1557 * otherwise return in_vbl == false. 1556 * otherwise return in_vbl == false.
1558 */ 1557 */
1559 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { 1558 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1560 if (rdev->pm.active_crtcs & (1 << crtc)) { 1559 if (rdev->pm.active_crtcs & (1 << crtc)) {
1561 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL); 1560 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
1562 if ((vbl_status & DRM_SCANOUTPOS_VALID) && 1561 if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
1563 !(vbl_status & DRM_SCANOUTPOS_INVBL)) 1562 !(vbl_status & DRM_SCANOUTPOS_INVBL))
1564 in_vbl = false; 1563 in_vbl = false;
1565 } 1564 }
1566 } 1565 }
1567 1566
1568 return in_vbl; 1567 return in_vbl;
1569 } 1568 }
1570 1569
1571 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) 1570 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
1572 { 1571 {
1573 u32 stat_crtc = 0; 1572 u32 stat_crtc = 0;
1574 bool in_vbl = radeon_pm_in_vbl(rdev); 1573 bool in_vbl = radeon_pm_in_vbl(rdev);
1575 1574
1576 if (in_vbl == false) 1575 if (in_vbl == false)
1577 DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc, 1576 DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
1578 finish ? "exit" : "entry"); 1577 finish ? "exit" : "entry");
1579 return in_vbl; 1578 return in_vbl;
1580 } 1579 }
1581 1580
1582 static void radeon_dynpm_idle_work_handler(struct work_struct *work) 1581 static void radeon_dynpm_idle_work_handler(struct work_struct *work)
1583 { 1582 {
1584 struct radeon_device *rdev; 1583 struct radeon_device *rdev;
1585 int resched; 1584 int resched;
1586 rdev = container_of(work, struct radeon_device, 1585 rdev = container_of(work, struct radeon_device,
1587 pm.dynpm_idle_work.work); 1586 pm.dynpm_idle_work.work);
1588 1587
1589 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 1588 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1590 mutex_lock(&rdev->pm.mutex); 1589 mutex_lock(&rdev->pm.mutex);
1591 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { 1590 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
1592 int not_processed = 0; 1591 int not_processed = 0;
1593 int i; 1592 int i;
1594 1593
1595 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1594 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1596 struct radeon_ring *ring = &rdev->ring[i]; 1595 struct radeon_ring *ring = &rdev->ring[i];
1597 1596
1598 if (ring->ready) { 1597 if (ring->ready) {
1599 not_processed += radeon_fence_count_emitted(rdev, i); 1598 not_processed += radeon_fence_count_emitted(rdev, i);
1600 if (not_processed >= 3) 1599 if (not_processed >= 3)
1601 break; 1600 break;
1602 } 1601 }
1603 } 1602 }
1604 1603
1605 if (not_processed >= 3) { /* should upclock */ 1604 if (not_processed >= 3) { /* should upclock */
1606 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { 1605 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
1607 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1606 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1608 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 1607 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
1609 rdev->pm.dynpm_can_upclock) { 1608 rdev->pm.dynpm_can_upclock) {
1610 rdev->pm.dynpm_planned_action = 1609 rdev->pm.dynpm_planned_action =
1611 DYNPM_ACTION_UPCLOCK; 1610 DYNPM_ACTION_UPCLOCK;
1612 rdev->pm.dynpm_action_timeout = jiffies + 1611 rdev->pm.dynpm_action_timeout = jiffies +
1613 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 1612 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
1614 } 1613 }
1615 } else if (not_processed == 0) { /* should downclock */ 1614 } else if (not_processed == 0) { /* should downclock */
1616 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) { 1615 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
1617 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1616 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1618 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 1617 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
1619 rdev->pm.dynpm_can_downclock) { 1618 rdev->pm.dynpm_can_downclock) {
1620 rdev->pm.dynpm_planned_action = 1619 rdev->pm.dynpm_planned_action =
1621 DYNPM_ACTION_DOWNCLOCK; 1620 DYNPM_ACTION_DOWNCLOCK;
1622 rdev->pm.dynpm_action_timeout = jiffies + 1621 rdev->pm.dynpm_action_timeout = jiffies +
1623 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 1622 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
1624 } 1623 }
1625 } 1624 }
1626 1625
1627 /* Note, radeon_pm_set_clocks is called with static_switch set 1626 /* Note, radeon_pm_set_clocks is called with static_switch set
1628 * to false since we want to wait for vbl to avoid flicker. 1627 * to false since we want to wait for vbl to avoid flicker.
1629 */ 1628 */
1630 if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE && 1629 if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
1631 jiffies > rdev->pm.dynpm_action_timeout) { 1630 jiffies > rdev->pm.dynpm_action_timeout) {
1632 radeon_pm_get_dynpm_state(rdev); 1631 radeon_pm_get_dynpm_state(rdev);
1633 radeon_pm_set_clocks(rdev); 1632 radeon_pm_set_clocks(rdev);
1634 } 1633 }
1635 1634
1636 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1635 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1637 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1636 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1638 } 1637 }
1639 mutex_unlock(&rdev->pm.mutex); 1638 mutex_unlock(&rdev->pm.mutex);
1640 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 1639 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1641 } 1640 }
1642 1641
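The idle handler's reclock heuristic reduces to: three or more unprocessed fences request an upclock, zero requests a downclock, and a pending opposite action is cancelled first (the can_upclock/can_downclock guards and the jiffies delay are omitted here). A compact sketch of just that decision; the enum and values are illustrative stand-ins, not driver definitions:

    #include <stdio.h>

    enum action { NONE, UPCLOCK, DOWNCLOCK };

    static enum action decide(int not_processed, enum action planned)
    {
        if (not_processed >= 3) {
            if (planned == DOWNCLOCK)
                return NONE;               /* cancel the pending downclock */
            return UPCLOCK;
        }
        if (not_processed == 0) {
            if (planned == UPCLOCK)
                return NONE;               /* cancel the pending upclock */
            return DOWNCLOCK;
        }
        return planned;                    /* 1-2 pending: keep current plan */
    }

    int main(void)
    {
        printf("%d\n", decide(4, NONE));       /* 1 -> UPCLOCK */
        printf("%d\n", decide(0, UPCLOCK));    /* 0 -> NONE */
        printf("%d\n", decide(0, NONE));       /* 2 -> DOWNCLOCK */
        return 0;
    }
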
1643 /* 1642 /*
1644 * Debugfs info 1643 * Debugfs info
1645 */ 1644 */
1646 #if defined(CONFIG_DEBUG_FS) 1645 #if defined(CONFIG_DEBUG_FS)
1647 1646
1648 static int radeon_debugfs_pm_info(struct seq_file *m, void *data) 1647 static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
1649 { 1648 {
1650 struct drm_info_node *node = (struct drm_info_node *) m->private; 1649 struct drm_info_node *node = (struct drm_info_node *) m->private;
1651 struct drm_device *dev = node->minor->dev; 1650 struct drm_device *dev = node->minor->dev;
1652 struct radeon_device *rdev = dev->dev_private; 1651 struct radeon_device *rdev = dev->dev_private;
1653 struct drm_device *ddev = rdev->ddev; 1652 struct drm_device *ddev = rdev->ddev;
1654 1653
1655 if ((rdev->flags & RADEON_IS_PX) && 1654 if ((rdev->flags & RADEON_IS_PX) &&
1656 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { 1655 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
1657 seq_printf(m, "PX asic powered off\n"); 1656 seq_printf(m, "PX asic powered off\n");
1658 } else if (rdev->pm.dpm_enabled) { 1657 } else if (rdev->pm.dpm_enabled) {
1659 mutex_lock(&rdev->pm.mutex); 1658 mutex_lock(&rdev->pm.mutex);
1660 if (rdev->asic->dpm.debugfs_print_current_performance_level) 1659 if (rdev->asic->dpm.debugfs_print_current_performance_level)
1661 radeon_dpm_debugfs_print_current_performance_level(rdev, m); 1660 radeon_dpm_debugfs_print_current_performance_level(rdev, m);
1662 else 1661 else
1663 seq_printf(m, "Debugfs support not implemented for this asic\n"); 1662 seq_printf(m, "Debugfs support not implemented for this asic\n");
1664 mutex_unlock(&rdev->pm.mutex); 1663 mutex_unlock(&rdev->pm.mutex);
1665 } else { 1664 } else {
1666 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); 1665 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
1667 /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */ 1666 /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
1668 if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP)) 1667 if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
1669 seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk); 1668 seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
1670 else 1669 else
1671 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 1670 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
1672 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); 1671 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
1673 if (rdev->asic->pm.get_memory_clock) 1672 if (rdev->asic->pm.get_memory_clock)
1674 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 1673 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
1675 if (rdev->pm.current_vddc) 1674 if (rdev->pm.current_vddc)
1676 seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); 1675 seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
1677 if (rdev->asic->pm.get_pcie_lanes) 1676 if (rdev->asic->pm.get_pcie_lanes)
1678 seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); 1677 seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
1679 } 1678 }
1680 1679
1681 return 0; 1680 return 0;
1682 } 1681 }
1683 1682
1684 static struct drm_info_list radeon_pm_info_list[] = { 1683 static struct drm_info_list radeon_pm_info_list[] = {
1685 {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL}, 1684 {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
1686 }; 1685 };
1687 #endif 1686 #endif
1688 1687
1689 static int radeon_debugfs_pm_init(struct radeon_device *rdev) 1688 static int radeon_debugfs_pm_init(struct radeon_device *rdev)
1690 { 1689 {
1691 #if defined(CONFIG_DEBUG_FS) 1690 #if defined(CONFIG_DEBUG_FS)
1692 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list)); 1691 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
1693 #else 1692 #else
1694 return 0; 1693 return 0;
1695 #endif 1694 #endif
1696 } 1695 }
1697 1696
drivers/gpu/drm/radeon/radeon_vm.c
1 /* 1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc. 2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc. 3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse. 4 * Copyright 2009 Jerome Glisse.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation 8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the 10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions: 11 * Software is furnished to do so, subject to the following conditions:
12 * 12 *
13 * The above copyright notice and this permission notice shall be included in 13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software. 14 * all copies or substantial portions of the Software.
15 * 15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE. 22 * OTHER DEALINGS IN THE SOFTWARE.
23 * 23 *
24 * Authors: Dave Airlie 24 * Authors: Dave Airlie
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28 #include <drm/drmP.h> 28 #include <drm/drmP.h>
29 #include <drm/radeon_drm.h> 29 #include <drm/radeon_drm.h>
30 #include "radeon.h" 30 #include "radeon.h"
31 #include "radeon_trace.h" 31 #include "radeon_trace.h"
32 32
33 /* 33 /*
34 * GPUVM 34 * GPUVM
35 * GPUVM is similar to the legacy gart on older asics; however, 35 * GPUVM is similar to the legacy gart on older asics; however,
36 * rather than there being a single global gart table 36 * rather than there being a single global gart table
37 * for the entire GPU, there are multiple VM page tables active 37 * for the entire GPU, there are multiple VM page tables active
38 * at any given time. The VM page tables can contain a mix of 38 * at any given time. The VM page tables can contain a mix of
39 * vram pages and system memory pages, and system memory pages 39 * vram pages and system memory pages, and system memory pages
40 * can be mapped as snooped (cached system pages) or unsnooped 40 * can be mapped as snooped (cached system pages) or unsnooped
41 * (uncached system pages). 41 * (uncached system pages).
42 * Each VM has an ID associated with it and there is a page table 42 * Each VM has an ID associated with it and there is a page table
43 * associated with each VMID. When executing a command buffer, 43 * associated with each VMID. When executing a command buffer,
44 * the kernel tells the ring what VMID to use for that command 44 * the kernel tells the ring what VMID to use for that command
45 * buffer. VMIDs are allocated dynamically as commands are submitted. 45 * buffer. VMIDs are allocated dynamically as commands are submitted.
46 * The userspace drivers maintain their own address space and the kernel 46 * The userspace drivers maintain their own address space and the kernel
47 * sets up their page tables accordingly when they submit their 47 * sets up their page tables accordingly when they submit their
48 * command buffers and a VMID is assigned. 48 * command buffers and a VMID is assigned.
49 * Cayman/Trinity support up to 8 active VMs at any given time; 49 * Cayman/Trinity support up to 8 active VMs at any given time;
50 * SI supports 16. 50 * SI supports 16.
51 */ 51 */
52 52
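The two-level layout described above splits a GPU page frame number into a page-directory index and an index into the selected page table. A minimal user-space sketch of that split; this is not driver code, and the 4 KiB GPU page and block size of 9 are assumptions mirroring the driver's defaults:

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SHIFT 12                    /* 4 KiB GPU pages (assumed) */
    #define VM_BLOCK_SIZE  9                     /* pfns per page table = 1 << 9 */
    #define VM_PTE_COUNT   (1u << VM_BLOCK_SIZE)

    int main(void)
    {
        uint64_t va  = 0x12345000;               /* example GPU virtual address */
        uint64_t pfn = va >> GPU_PAGE_SHIFT;     /* GPU page frame number */
        uint64_t pde = pfn >> VM_BLOCK_SIZE;     /* index into the page directory */
        uint64_t pte = pfn & (VM_PTE_COUNT - 1); /* index into that page table */

        printf("va 0x%llx -> pde %llu, pte %llu\n",
               (unsigned long long)va,
               (unsigned long long)pde,
               (unsigned long long)pte);
        return 0;
    }
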
53 /** 53 /**
54 * radeon_vm_num_pdes - return the number of page directory entries 54 * radeon_vm_num_pdes - return the number of page directory entries
55 * 55 *
56 * @rdev: radeon_device pointer 56 * @rdev: radeon_device pointer
57 * 57 *
58 * Calculate the number of page directory entries (cayman+). 58 * Calculate the number of page directory entries (cayman+).
59 */ 59 */
60 static unsigned radeon_vm_num_pdes(struct radeon_device *rdev) 60 static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
61 { 61 {
62 return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE; 62 return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
63 } 63 }
64 64
65 /** 65 /**
66 * radeon_vm_directory_size - returns the size of the page directory in bytes 66 * radeon_vm_directory_size - returns the size of the page directory in bytes
67 * 67 *
68 * @rdev: radeon_device pointer 68 * @rdev: radeon_device pointer
69 * 69 *
70 * Calculate the size of the page directory in bytes (cayman+). 70 * Calculate the size of the page directory in bytes (cayman+).
71 */ 71 */
72 static unsigned radeon_vm_directory_size(struct radeon_device *rdev) 72 static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
73 { 73 {
74 return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8); 74 return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
75 } 75 }
76 76
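For a feel of the sizes these two helpers produce, a small sketch under the same assumptions; max_pfn here is a hypothetical value, not taken from any particular asic:

    #include <stdio.h>

    int main(void)
    {
        unsigned max_pfn   = 1u << 20;      /* 4 GiB of VM space at 4 KiB pages */
        unsigned num_pdes  = max_pfn >> 9;  /* one PDE per 512 pages -> 2048 */
        unsigned dir_bytes = num_pdes * 8;  /* 8-byte entries -> 16 KiB, which
                                               RADEON_GPU_PAGE_ALIGN leaves as-is */

        printf("%u PDEs, %u byte page directory\n", num_pdes, dir_bytes);
        return 0;
    }
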
77 /** 77 /**
78 * radeon_vm_manager_init - init the vm manager 78 * radeon_vm_manager_init - init the vm manager
79 * 79 *
80 * @rdev: radeon_device pointer 80 * @rdev: radeon_device pointer
81 * 81 *
82 * Init the vm manager (cayman+). 82 * Init the vm manager (cayman+).
83 * Returns 0 for success, error for failure. 83 * Returns 0 for success, error for failure.
84 */ 84 */
85 int radeon_vm_manager_init(struct radeon_device *rdev) 85 int radeon_vm_manager_init(struct radeon_device *rdev)
86 { 86 {
87 int r; 87 int r;
88 88
89 if (!rdev->vm_manager.enabled) { 89 if (!rdev->vm_manager.enabled) {
90 r = radeon_asic_vm_init(rdev); 90 r = radeon_asic_vm_init(rdev);
91 if (r) 91 if (r)
92 return r; 92 return r;
93 93
94 rdev->vm_manager.enabled = true; 94 rdev->vm_manager.enabled = true;
95 } 95 }
96 return 0; 96 return 0;
97 } 97 }
98 98
99 /** 99 /**
100 * radeon_vm_manager_fini - tear down the vm manager 100 * radeon_vm_manager_fini - tear down the vm manager
101 * 101 *
102 * @rdev: radeon_device pointer 102 * @rdev: radeon_device pointer
103 * 103 *
104 * Tear down the VM manager (cayman+). 104 * Tear down the VM manager (cayman+).
105 */ 105 */
106 void radeon_vm_manager_fini(struct radeon_device *rdev) 106 void radeon_vm_manager_fini(struct radeon_device *rdev)
107 { 107 {
108 int i; 108 int i;
109 109
110 if (!rdev->vm_manager.enabled) 110 if (!rdev->vm_manager.enabled)
111 return; 111 return;
112 112
113 for (i = 0; i < RADEON_NUM_VM; ++i) 113 for (i = 0; i < RADEON_NUM_VM; ++i)
114 radeon_fence_unref(&rdev->vm_manager.active[i]); 114 radeon_fence_unref(&rdev->vm_manager.active[i]);
115 radeon_asic_vm_fini(rdev); 115 radeon_asic_vm_fini(rdev);
116 rdev->vm_manager.enabled = false; 116 rdev->vm_manager.enabled = false;
117 } 117 }
118 118
119 /** 119 /**
120 * radeon_vm_get_bos - add the vm BOs to a validation list 120 * radeon_vm_get_bos - add the vm BOs to a validation list
121 * 121 *
122 * @vm: vm providing the BOs 122 * @vm: vm providing the BOs
123 * @head: head of validation list 123 * @head: head of validation list
124 * 124 *
125 * Add the page directory to the list of BOs to 125 * Add the page directory to the list of BOs to
126 * validate for command submission (cayman+). 126 * validate for command submission (cayman+).
127 */ 127 */
128 struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, 128 struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
129 struct radeon_vm *vm, 129 struct radeon_vm *vm,
130 struct list_head *head) 130 struct list_head *head)
131 { 131 {
132 struct radeon_cs_reloc *list; 132 struct radeon_cs_reloc *list;
133 unsigned i, idx; 133 unsigned i, idx;
134 134
135 list = kmalloc_array(vm->max_pde_used + 1, 135 list = kmalloc_array(vm->max_pde_used + 2,
136 sizeof(struct radeon_cs_reloc), GFP_KERNEL); 136 sizeof(struct radeon_cs_reloc), GFP_KERNEL);
137 if (!list) 137 if (!list)
138 return NULL; 138 return NULL;
139 139
140 /* add the vm page table to the list */ 140 /* add the vm page table to the list */
141 list[0].gobj = NULL; 141 list[0].gobj = NULL;
142 list[0].robj = vm->page_directory; 142 list[0].robj = vm->page_directory;
143 list[0].domain = RADEON_GEM_DOMAIN_VRAM; 143 list[0].domain = RADEON_GEM_DOMAIN_VRAM;
144 list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM; 144 list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM;
145 list[0].tv.bo = &vm->page_directory->tbo; 145 list[0].tv.bo = &vm->page_directory->tbo;
146 list[0].tiling_flags = 0; 146 list[0].tiling_flags = 0;
147 list[0].handle = 0; 147 list[0].handle = 0;
148 list_add(&list[0].tv.head, head); 148 list_add(&list[0].tv.head, head);
149 149
150 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { 150 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
151 if (!vm->page_tables[i].bo) 151 if (!vm->page_tables[i].bo)
152 continue; 152 continue;
153 153
154 list[idx].gobj = NULL; 154 list[idx].gobj = NULL;
155 list[idx].robj = vm->page_tables[i].bo; 155 list[idx].robj = vm->page_tables[i].bo;
156 list[idx].domain = RADEON_GEM_DOMAIN_VRAM; 156 list[idx].domain = RADEON_GEM_DOMAIN_VRAM;
157 list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM; 157 list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM;
158 list[idx].tv.bo = &list[idx].robj->tbo; 158 list[idx].tv.bo = &list[idx].robj->tbo;
159 list[idx].tiling_flags = 0; 159 list[idx].tiling_flags = 0;
160 list[idx].handle = 0; 160 list[idx].handle = 0;
161 list_add(&list[idx++].tv.head, head); 161 list_add(&list[idx++].tv.head, head);
162 } 162 }
163 163
164 return list; 164 return list;
165 } 165 }
166 166
167 /** 167 /**
168 * radeon_vm_grab_id - allocate the next free VMID 168 * radeon_vm_grab_id - allocate the next free VMID
169 * 169 *
170 * @rdev: radeon_device pointer 170 * @rdev: radeon_device pointer
171 * @vm: vm to allocate id for 171 * @vm: vm to allocate id for
172 * @ring: ring we want to submit job to 172 * @ring: ring we want to submit job to
173 * 173 *
174 * Allocate an id for the vm (cayman+). 174 * Allocate an id for the vm (cayman+).
175 * Returns the fence we need to sync to (if any). 175 * Returns the fence we need to sync to (if any).
176 * 176 *
177 * Global and local mutex must be locked! 177 * Global and local mutex must be locked!
178 */ 178 */
179 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, 179 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
180 struct radeon_vm *vm, int ring) 180 struct radeon_vm *vm, int ring)
181 { 181 {
182 struct radeon_fence *best[RADEON_NUM_RINGS] = {}; 182 struct radeon_fence *best[RADEON_NUM_RINGS] = {};
183 unsigned choices[2] = {}; 183 unsigned choices[2] = {};
184 unsigned i; 184 unsigned i;
185 185
186 /* check if the id is still valid */ 186 /* check if the id is still valid */
187 if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id]) 187 if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
188 return NULL; 188 return NULL;
189 189
190 /* we definitely need to flush */ 190 /* we definitely need to flush */
191 radeon_fence_unref(&vm->last_flush); 191 radeon_fence_unref(&vm->last_flush);
192 192
193 /* skip over VMID 0, since it is the system VM */ 193 /* skip over VMID 0, since it is the system VM */
194 for (i = 1; i < rdev->vm_manager.nvm; ++i) { 194 for (i = 1; i < rdev->vm_manager.nvm; ++i) {
195 struct radeon_fence *fence = rdev->vm_manager.active[i]; 195 struct radeon_fence *fence = rdev->vm_manager.active[i];
196 196
197 if (fence == NULL) { 197 if (fence == NULL) {
198 /* found a free one */ 198 /* found a free one */
199 vm->id = i; 199 vm->id = i;
200 trace_radeon_vm_grab_id(vm->id, ring); 200 trace_radeon_vm_grab_id(vm->id, ring);
201 return NULL; 201 return NULL;
202 } 202 }
203 203
204 if (radeon_fence_is_earlier(fence, best[fence->ring])) { 204 if (radeon_fence_is_earlier(fence, best[fence->ring])) {
205 best[fence->ring] = fence; 205 best[fence->ring] = fence;
206 choices[fence->ring == ring ? 0 : 1] = i; 206 choices[fence->ring == ring ? 0 : 1] = i;
207 } 207 }
208 } 208 }
209 209
210 for (i = 0; i < 2; ++i) { 210 for (i = 0; i < 2; ++i) {
211 if (choices[i]) { 211 if (choices[i]) {
212 vm->id = choices[i]; 212 vm->id = choices[i];
213 trace_radeon_vm_grab_id(vm->id, ring); 213 trace_radeon_vm_grab_id(vm->id, ring);
214 return rdev->vm_manager.active[choices[i]]; 214 return rdev->vm_manager.active[choices[i]];
215 } 215 }
216 } 216 }
217 217
218 /* should never happen */ 218 /* should never happen */
219 BUG(); 219 BUG();
220 return NULL; 220 return NULL;
221 } 221 }
222 222
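The selection policy in radeon_vm_grab_id is essentially least-recently-used: take a free VMID if one exists, otherwise reuse the VMID whose last-use fence is oldest, preferring one last used on the submitting ring so the required sync is cheap. A simplified sketch with sequence numbers standing in for fences; the ring preference is omitted for brevity and the values are illustrative:

    #include <stdio.h>

    #define NUM_IDS 8

    int main(void)
    {
        /* last_use[i] == 0 means id i is free; larger means more recent */
        unsigned long last_use[NUM_IDS] = { 99, 5, 0, 7, 3, 12, 9, 4 };
        int pick = -1;

        for (int i = 1; i < NUM_IDS; ++i) {  /* skip id 0, the system VM */
            if (last_use[i] == 0) {
                pick = i;                    /* found a free one */
                break;
            }
            if (pick < 0 || last_use[i] < last_use[pick])
                pick = i;                    /* track the oldest user */
        }
        printf("grabbed VMID %d\n", pick);   /* -> 2 (free) */
        return 0;
    }
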
223 /** 223 /**
224 * radeon_vm_flush - hardware flush the vm 224 * radeon_vm_flush - hardware flush the vm
225 * 225 *
226 * @rdev: radeon_device pointer 226 * @rdev: radeon_device pointer
227 * @vm: vm we want to flush 227 * @vm: vm we want to flush
228 * @ring: ring to use for flush 228 * @ring: ring to use for flush
229 * 229 *
230 * Flush the vm (cayman+). 230 * Flush the vm (cayman+).
231 * 231 *
232 * Global and local mutex must be locked! 232 * Global and local mutex must be locked!
233 */ 233 */
234 void radeon_vm_flush(struct radeon_device *rdev, 234 void radeon_vm_flush(struct radeon_device *rdev,
235 struct radeon_vm *vm, 235 struct radeon_vm *vm,
236 int ring) 236 int ring)
237 { 237 {
238 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); 238 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
239 239
240 /* if we can't remember our last VM flush then flush now! */ 240 /* if we can't remember our last VM flush then flush now! */
241 /* XXX figure out why we have to flush all the time */ 241 /* XXX figure out why we have to flush all the time */
242 if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) { 242 if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
243 vm->pd_gpu_addr = pd_addr; 243 vm->pd_gpu_addr = pd_addr;
244 radeon_ring_vm_flush(rdev, ring, vm); 244 radeon_ring_vm_flush(rdev, ring, vm);
245 } 245 }
246 } 246 }
247 247
248 /** 248 /**
249 * radeon_vm_fence - remember fence for vm 249 * radeon_vm_fence - remember fence for vm
250 * 250 *
251 * @rdev: radeon_device pointer 251 * @rdev: radeon_device pointer
252 * @vm: vm we want to fence 252 * @vm: vm we want to fence
253 * @fence: fence to remember 253 * @fence: fence to remember
254 * 254 *
255 * Fence the vm (cayman+). 255 * Fence the vm (cayman+).
256 * Set the fence used to protect page table and id. 256 * Set the fence used to protect page table and id.
257 * 257 *
258 * Global and local mutex must be locked! 258 * Global and local mutex must be locked!
259 */ 259 */
260 void radeon_vm_fence(struct radeon_device *rdev, 260 void radeon_vm_fence(struct radeon_device *rdev,
261 struct radeon_vm *vm, 261 struct radeon_vm *vm,
262 struct radeon_fence *fence) 262 struct radeon_fence *fence)
263 { 263 {
264 radeon_fence_unref(&vm->fence); 264 radeon_fence_unref(&vm->fence);
265 vm->fence = radeon_fence_ref(fence); 265 vm->fence = radeon_fence_ref(fence);
266 266
267 radeon_fence_unref(&rdev->vm_manager.active[vm->id]); 267 radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
268 rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence); 268 rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
269 269
270 radeon_fence_unref(&vm->last_id_use); 270 radeon_fence_unref(&vm->last_id_use);
271 vm->last_id_use = radeon_fence_ref(fence); 271 vm->last_id_use = radeon_fence_ref(fence);
272 272
273 /* we just flushed the VM, remember that */ 273 /* we just flushed the VM, remember that */
274 if (!vm->last_flush) 274 if (!vm->last_flush)
275 vm->last_flush = radeon_fence_ref(fence); 275 vm->last_flush = radeon_fence_ref(fence);
276 } 276 }
277 277
278 /** 278 /**
279 * radeon_vm_bo_find - find the bo_va for a specific vm & bo 279 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
280 * 280 *
281 * @vm: requested vm 281 * @vm: requested vm
282 * @bo: requested buffer object 282 * @bo: requested buffer object
283 * 283 *
284 * Find @bo inside the requested vm (cayman+). 284 * Find @bo inside the requested vm (cayman+).
285 * Search inside the @bo's vm list for the requested vm 285 * Search inside the @bo's vm list for the requested vm
286 * Returns the found bo_va or NULL if none is found 286 * Returns the found bo_va or NULL if none is found
287 * 287 *
288 * Object has to be reserved! 288 * Object has to be reserved!
289 */ 289 */
290 struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, 290 struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
291 struct radeon_bo *bo) 291 struct radeon_bo *bo)
292 { 292 {
293 struct radeon_bo_va *bo_va; 293 struct radeon_bo_va *bo_va;
294 294
295 list_for_each_entry(bo_va, &bo->va, bo_list) { 295 list_for_each_entry(bo_va, &bo->va, bo_list) {
296 if (bo_va->vm == vm) { 296 if (bo_va->vm == vm) {
297 return bo_va; 297 return bo_va;
298 } 298 }
299 } 299 }
300 return NULL; 300 return NULL;
301 } 301 }
302 302
303 /** 303 /**
304 * radeon_vm_bo_add - add a bo to a specific vm 304 * radeon_vm_bo_add - add a bo to a specific vm
305 * 305 *
306 * @rdev: radeon_device pointer 306 * @rdev: radeon_device pointer
307 * @vm: requested vm 307 * @vm: requested vm
308 * @bo: radeon buffer object 308 * @bo: radeon buffer object
309 * 309 *
310 * Add @bo into the requested vm (cayman+). 310 * Add @bo into the requested vm (cayman+).
311 * Add @bo to the list of bos associated with the vm 311 * Add @bo to the list of bos associated with the vm
312 * Returns newly added bo_va or NULL for failure 312 * Returns newly added bo_va or NULL for failure
313 * 313 *
314 * Object has to be reserved! 314 * Object has to be reserved!
315 */ 315 */
316 struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev, 316 struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
317 struct radeon_vm *vm, 317 struct radeon_vm *vm,
318 struct radeon_bo *bo) 318 struct radeon_bo *bo)
319 { 319 {
320 struct radeon_bo_va *bo_va; 320 struct radeon_bo_va *bo_va;
321 321
322 bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); 322 bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
323 if (bo_va == NULL) { 323 if (bo_va == NULL) {
324 return NULL; 324 return NULL;
325 } 325 }
326 bo_va->vm = vm; 326 bo_va->vm = vm;
327 bo_va->bo = bo; 327 bo_va->bo = bo;
328 bo_va->soffset = 0; 328 bo_va->soffset = 0;
329 bo_va->eoffset = 0; 329 bo_va->eoffset = 0;
330 bo_va->flags = 0; 330 bo_va->flags = 0;
331 bo_va->valid = false; 331 bo_va->valid = false;
332 bo_va->ref_count = 1; 332 bo_va->ref_count = 1;
333 INIT_LIST_HEAD(&bo_va->bo_list); 333 INIT_LIST_HEAD(&bo_va->bo_list);
334 INIT_LIST_HEAD(&bo_va->vm_list); 334 INIT_LIST_HEAD(&bo_va->vm_list);
335 335
336 mutex_lock(&vm->mutex); 336 mutex_lock(&vm->mutex);
337 list_add(&bo_va->vm_list, &vm->va); 337 list_add(&bo_va->vm_list, &vm->va);
338 list_add_tail(&bo_va->bo_list, &bo->va); 338 list_add_tail(&bo_va->bo_list, &bo->va);
339 mutex_unlock(&vm->mutex); 339 mutex_unlock(&vm->mutex);
340 340
341 return bo_va; 341 return bo_va;
342 } 342 }
343 343
344 /** 344 /**
345 * radeon_vm_clear_bo - initially clear the page dir/table 345 * radeon_vm_clear_bo - initially clear the page dir/table
346 * 346 *
347 * @rdev: radeon_device pointer 347 * @rdev: radeon_device pointer
348 * @bo: bo to clear 348 * @bo: bo to clear
349 */ 349 */
350 static int radeon_vm_clear_bo(struct radeon_device *rdev, 350 static int radeon_vm_clear_bo(struct radeon_device *rdev,
351 struct radeon_bo *bo) 351 struct radeon_bo *bo)
352 { 352 {
353 struct ttm_validate_buffer tv; 353 struct ttm_validate_buffer tv;
354 struct ww_acquire_ctx ticket; 354 struct ww_acquire_ctx ticket;
355 struct list_head head; 355 struct list_head head;
356 struct radeon_ib ib; 356 struct radeon_ib ib;
357 unsigned entries; 357 unsigned entries;
358 uint64_t addr; 358 uint64_t addr;
359 int r; 359 int r;
360 360
361 memset(&tv, 0, sizeof(tv)); 361 memset(&tv, 0, sizeof(tv));
362 tv.bo = &bo->tbo; 362 tv.bo = &bo->tbo;
363 363
364 INIT_LIST_HEAD(&head); 364 INIT_LIST_HEAD(&head);
365 list_add(&tv.head, &head); 365 list_add(&tv.head, &head);
366 366
367 r = ttm_eu_reserve_buffers(&ticket, &head); 367 r = ttm_eu_reserve_buffers(&ticket, &head);
368 if (r) 368 if (r)
369 return r; 369 return r;
370 370
371 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 371 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
372 if (r) 372 if (r)
373 goto error; 373 goto error;
374 374
375 addr = radeon_bo_gpu_offset(bo); 375 addr = radeon_bo_gpu_offset(bo);
376 entries = radeon_bo_size(bo) / 8; 376 entries = radeon_bo_size(bo) / 8;
377 377
378 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, 378 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
379 NULL, entries * 2 + 64); 379 NULL, entries * 2 + 64);
380 if (r) 380 if (r)
381 goto error; 381 goto error;
382 382
383 ib.length_dw = 0; 383 ib.length_dw = 0;
384 384
385 radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0); 385 radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
386 386
387 r = radeon_ib_schedule(rdev, &ib, NULL); 387 r = radeon_ib_schedule(rdev, &ib, NULL);
388 if (r) 388 if (r)
389 goto error; 389 goto error;
390 390
391 ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); 391 ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
392 radeon_ib_free(rdev, &ib); 392 radeon_ib_free(rdev, &ib);
393 393
394 return 0; 394 return 0;
395 395
396 error: 396 error:
397 ttm_eu_backoff_reservation(&ticket, &head); 397 ttm_eu_backoff_reservation(&ticket, &head);
398 return r; 398 return r;
399 } 399 }
400 400
401 /** 401 /**
402 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm 402 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
403 * 403 *
404 * @rdev: radeon_device pointer 404 * @rdev: radeon_device pointer
405 * @bo_va: bo_va to store the address 405 * @bo_va: bo_va to store the address
406 * @soffset: requested offset of the buffer in the VM address space 406 * @soffset: requested offset of the buffer in the VM address space
407 * @flags: attributes of pages (read/write/valid/etc.) 407 * @flags: attributes of pages (read/write/valid/etc.)
408 * 408 *
409 * Set offset of @bo_va (cayman+). 409 * Set offset of @bo_va (cayman+).
410 * Validate and set the offset requested within the vm address space. 410 * Validate and set the offset requested within the vm address space.
411 * Returns 0 for success, error for failure. 411 * Returns 0 for success, error for failure.
412 * 412 *
413 * Object has to be reserved! 413 * Object has to be reserved!
414 */ 414 */
415 int radeon_vm_bo_set_addr(struct radeon_device *rdev, 415 int radeon_vm_bo_set_addr(struct radeon_device *rdev,
416 struct radeon_bo_va *bo_va, 416 struct radeon_bo_va *bo_va,
417 uint64_t soffset, 417 uint64_t soffset,
418 uint32_t flags) 418 uint32_t flags)
419 { 419 {
420 uint64_t size = radeon_bo_size(bo_va->bo); 420 uint64_t size = radeon_bo_size(bo_va->bo);
421 uint64_t eoffset, last_offset = 0; 421 uint64_t eoffset, last_offset = 0;
422 struct radeon_vm *vm = bo_va->vm; 422 struct radeon_vm *vm = bo_va->vm;
423 struct radeon_bo_va *tmp; 423 struct radeon_bo_va *tmp;
424 struct list_head *head; 424 struct list_head *head;
425 unsigned last_pfn, pt_idx; 425 unsigned last_pfn, pt_idx;
426 int r; 426 int r;
427 427
428 if (soffset) { 428 if (soffset) {
429 /* make sure object fits at this offset */ 429 /* make sure object fits at this offset */
430 eoffset = soffset + size; 430 eoffset = soffset + size;
431 if (soffset >= eoffset) { 431 if (soffset >= eoffset) {
432 return -EINVAL; 432 return -EINVAL;
433 } 433 }
434 434
435 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; 435 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
436 if (last_pfn > rdev->vm_manager.max_pfn) { 436 if (last_pfn > rdev->vm_manager.max_pfn) {
437 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", 437 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
438 last_pfn, rdev->vm_manager.max_pfn); 438 last_pfn, rdev->vm_manager.max_pfn);
439 return -EINVAL; 439 return -EINVAL;
440 } 440 }
441 441
442 } else { 442 } else {
443 eoffset = last_pfn = 0; 443 eoffset = last_pfn = 0;
444 } 444 }
445 445
446 mutex_lock(&vm->mutex); 446 mutex_lock(&vm->mutex);
447 head = &vm->va; 447 head = &vm->va;
448 last_offset = 0; 448 last_offset = 0;
449 list_for_each_entry(tmp, &vm->va, vm_list) { 449 list_for_each_entry(tmp, &vm->va, vm_list) {
450 if (bo_va == tmp) { 450 if (bo_va == tmp) {
451 /* skip over currently modified bo */ 451 /* skip over currently modified bo */
452 continue; 452 continue;
453 } 453 }
454 454
455 if (soffset >= last_offset && eoffset <= tmp->soffset) { 455 if (soffset >= last_offset && eoffset <= tmp->soffset) {
456 /* bo can be added before this one */ 456 /* bo can be added before this one */
457 break; 457 break;
458 } 458 }
459 if (eoffset > tmp->soffset && soffset < tmp->eoffset) { 459 if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
460 /* bo and tmp overlap, invalid offset */ 460 /* bo and tmp overlap, invalid offset */
461 dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n", 461 dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
462 bo_va->bo, (unsigned)bo_va->soffset, tmp->bo, 462 bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
463 (unsigned)tmp->soffset, (unsigned)tmp->eoffset); 463 (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
464 mutex_unlock(&vm->mutex); 464 mutex_unlock(&vm->mutex);
465 return -EINVAL; 465 return -EINVAL;
466 } 466 }
467 last_offset = tmp->eoffset; 467 last_offset = tmp->eoffset;
468 head = &tmp->vm_list; 468 head = &tmp->vm_list;
469 } 469 }
470 470
471 bo_va->soffset = soffset; 471 bo_va->soffset = soffset;
472 bo_va->eoffset = eoffset; 472 bo_va->eoffset = eoffset;
473 bo_va->flags = flags; 473 bo_va->flags = flags;
474 bo_va->valid = false; 474 bo_va->valid = false;
475 list_move(&bo_va->vm_list, head); 475 list_move(&bo_va->vm_list, head);
476 476
477 soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; 477 soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
478 eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; 478 eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
479 479
480 if (eoffset > vm->max_pde_used) 480 if (eoffset > vm->max_pde_used)
481 vm->max_pde_used = eoffset; 481 vm->max_pde_used = eoffset;
482 482
483 radeon_bo_unreserve(bo_va->bo); 483 radeon_bo_unreserve(bo_va->bo);
484 484
485 /* walk over the address space and allocate the page tables */ 485 /* walk over the address space and allocate the page tables */
486 for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) { 486 for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
487 struct radeon_bo *pt; 487 struct radeon_bo *pt;
488 488
489 if (vm->page_tables[pt_idx].bo) 489 if (vm->page_tables[pt_idx].bo)
490 continue; 490 continue;
491 491
492 /* drop mutex to allocate and clear page table */ 492 /* drop mutex to allocate and clear page table */
493 mutex_unlock(&vm->mutex); 493 mutex_unlock(&vm->mutex);
494 494
495 r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8, 495 r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
496 RADEON_GPU_PAGE_SIZE, false, 496 RADEON_GPU_PAGE_SIZE, false,
497 RADEON_GEM_DOMAIN_VRAM, NULL, &pt); 497 RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
498 if (r) 498 if (r)
499 return r; 499 return r;
500 500
501 r = radeon_vm_clear_bo(rdev, pt); 501 r = radeon_vm_clear_bo(rdev, pt);
502 if (r) { 502 if (r) {
503 radeon_bo_unref(&pt); 503 radeon_bo_unref(&pt);
504 radeon_bo_reserve(bo_va->bo, false); 504 radeon_bo_reserve(bo_va->bo, false);
505 return r; 505 return r;
506 } 506 }
507 507
508 /* acquire mutex again */ 508 /* acquire mutex again */
509 mutex_lock(&vm->mutex); 509 mutex_lock(&vm->mutex);
510 if (vm->page_tables[pt_idx].bo) { 510 if (vm->page_tables[pt_idx].bo) {
511 /* someone else allocated the pt in the meantime */ 511 /* someone else allocated the pt in the meantime */
512 mutex_unlock(&vm->mutex); 512 mutex_unlock(&vm->mutex);
513 radeon_bo_unref(&pt); 513 radeon_bo_unref(&pt);
514 mutex_lock(&vm->mutex); 514 mutex_lock(&vm->mutex);
515 continue; 515 continue;
516 } 516 }
517 517
518 vm->page_tables[pt_idx].addr = 0; 518 vm->page_tables[pt_idx].addr = 0;
519 vm->page_tables[pt_idx].bo = pt; 519 vm->page_tables[pt_idx].bo = pt;
520 } 520 }
521 521
522 mutex_unlock(&vm->mutex); 522 mutex_unlock(&vm->mutex);
523 return radeon_bo_reserve(bo_va->bo, false); 523 return radeon_bo_reserve(bo_va->bo, false);
524 } 524 }
525 525
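The va-list walk above keeps the mappings sorted and rejects a new range when it intersects an existing one. The core test is the usual half-open interval overlap check; a minimal sketch with made-up offsets:

    #include <stdio.h>

    /* ranges [s1, e1) and [s2, e2) conflict iff each starts before
     * the other ends */
    static int overlaps(unsigned long s1, unsigned long e1,
                        unsigned long s2, unsigned long e2)
    {
        return e1 > s2 && s1 < e2;
    }

    int main(void)
    {
        printf("%d\n", overlaps(0x1000, 0x3000, 0x2000, 0x4000)); /* 1 */
        printf("%d\n", overlaps(0x1000, 0x2000, 0x2000, 0x4000)); /* 0 */
        return 0;
    }
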
526 /** 526 /**
527 * radeon_vm_map_gart - get the physical address of a gart page 527 * radeon_vm_map_gart - get the physical address of a gart page
528 * 528 *
529 * @rdev: radeon_device pointer 529 * @rdev: radeon_device pointer
530 * @addr: the unmapped addr 530 * @addr: the unmapped addr
531 * 531 *
532 * Look up the physical address of the page that the pte resolves 532 * Look up the physical address of the page that the pte resolves
533 * to (cayman+). 533 * to (cayman+).
534 * Returns the physical address of the page. 534 * Returns the physical address of the page.
535 */ 535 */
536 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) 536 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
537 { 537 {
538 uint64_t result; 538 uint64_t result;
539 539
540 /* page table offset */ 540 /* page table offset */
541 result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; 541 result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
542 542
543 /* in case cpu page size != gpu page size */ 543 /* in case cpu page size != gpu page size */
544 result |= addr & (~PAGE_MASK); 544 result |= addr & (~PAGE_MASK);
545 545
546 return result; 546 return result;
547 } 547 }
548 548
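radeon_vm_map_gart merges the DMA address of the backing CPU page with the offset of the GPU page inside it, which matters when GPU pages are smaller than CPU pages. A sketch with hypothetical addresses, assuming PAGE_SHIFT = 12:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((uint64_t)(1 << PAGE_SHIFT) - 1))

    int main(void)
    {
        uint64_t pages_addr[1] = { 0xabcd0000 };  /* hypothetical page DMA address */
        uint64_t addr = 0x00000a40;               /* gart offset within page 0 */

        /* page table offset, plus the sub-page offset kept from addr */
        uint64_t result = pages_addr[addr >> PAGE_SHIFT] | (addr & ~PAGE_MASK);

        printf("0x%llx\n", (unsigned long long)result); /* 0xabcd0a40 */
        return 0;
    }
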
549 /** 549 /**
550 * radeon_vm_page_flags - translate page flags to what the hw uses 550 * radeon_vm_page_flags - translate page flags to what the hw uses
551 * 551 *
552 * @flags: flags coming from userspace 552 * @flags: flags coming from userspace
553 * 553 *
554 * Translate the flags the userspace ABI uses to hw flags. 554 * Translate the flags the userspace ABI uses to hw flags.
555 */ 555 */
556 static uint32_t radeon_vm_page_flags(uint32_t flags) 556 static uint32_t radeon_vm_page_flags(uint32_t flags)
557 { 557 {
558 uint32_t hw_flags = 0; 558 uint32_t hw_flags = 0;
559 hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0; 559 hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
560 hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0; 560 hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
561 hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0; 561 hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
562 if (flags & RADEON_VM_PAGE_SYSTEM) { 562 if (flags & RADEON_VM_PAGE_SYSTEM) {
563 hw_flags |= R600_PTE_SYSTEM; 563 hw_flags |= R600_PTE_SYSTEM;
564 hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0; 564 hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
565 } 565 }
566 return hw_flags; 566 return hw_flags;
567 } 567 }
568 568
569 /** 569 /**
570 * radeon_vm_update_page_directory - make sure that page directory is valid 570 * radeon_vm_update_page_directory - make sure that page directory is valid
571 * 571 *
572 * @rdev: radeon_device pointer 572 * @rdev: radeon_device pointer
573 * @vm: requested vm 573 * @vm: requested vm
576 * 576 *
577 * Updates the page directory (cayman+); the page tables themselves 577 * Updates the page directory (cayman+); the page tables themselves
578 * are allocated in radeon_vm_bo_set_addr. 578 * are allocated in radeon_vm_bo_set_addr.
579 * Returns 0 for success, error for failure. 579 * Returns 0 for success, error for failure.
580 * 580 *
581 * Global and local mutex must be locked! 581 * Global and local mutex must be locked!
582 */ 582 */
583 int radeon_vm_update_page_directory(struct radeon_device *rdev, 583 int radeon_vm_update_page_directory(struct radeon_device *rdev,
584 struct radeon_vm *vm) 584 struct radeon_vm *vm)
585 { 585 {
586 static const uint32_t incr = RADEON_VM_PTE_COUNT * 8; 586 static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
587 587
588 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); 588 struct radeon_bo *pd = vm->page_directory;
589 uint64_t pd_addr = radeon_bo_gpu_offset(pd);
589 uint64_t last_pde = ~0, last_pt = ~0; 590 uint64_t last_pde = ~0, last_pt = ~0;
590 unsigned count = 0, pt_idx, ndw; 591 unsigned count = 0, pt_idx, ndw;
591 struct radeon_ib ib; 592 struct radeon_ib ib;
592 int r; 593 int r;
593 594
594 /* padding, etc. */ 595 /* padding, etc. */
595 ndw = 64; 596 ndw = 64;
596 597
597 /* assume the worst case */ 598 /* assume the worst case */
598 ndw += vm->max_pde_used * 16; 599 ndw += vm->max_pde_used * 16;
599 600
600 /* update too big for an IB */ 601 /* update too big for an IB */
601 if (ndw > 0xfffff) 602 if (ndw > 0xfffff)
602 return -ENOMEM; 603 return -ENOMEM;
603 604
604 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4); 605 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
605 if (r) 606 if (r)
606 return r; 607 return r;
607 ib.length_dw = 0; 608 ib.length_dw = 0;
608 609
609 /* walk over the address space and update the page directory */ 610 /* walk over the address space and update the page directory */
610 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { 611 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
611 struct radeon_bo *bo = vm->page_tables[pt_idx].bo; 612 struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
612 uint64_t pde, pt; 613 uint64_t pde, pt;
613 614
614 if (bo == NULL) 615 if (bo == NULL)
615 continue; 616 continue;
616 617
617 pt = radeon_bo_gpu_offset(bo); 618 pt = radeon_bo_gpu_offset(bo);
618 if (vm->page_tables[pt_idx].addr == pt) 619 if (vm->page_tables[pt_idx].addr == pt)
619 continue; 620 continue;
620 vm->page_tables[pt_idx].addr = pt; 621 vm->page_tables[pt_idx].addr = pt;
621 622
622 pde = pd_addr + pt_idx * 8; 623 pde = pd_addr + pt_idx * 8;
623 if (((last_pde + 8 * count) != pde) || 624 if (((last_pde + 8 * count) != pde) ||
624 ((last_pt + incr * count) != pt)) { 625 ((last_pt + incr * count) != pt)) {
625 626
626 if (count) { 627 if (count) {
627 radeon_asic_vm_set_page(rdev, &ib, last_pde, 628 radeon_asic_vm_set_page(rdev, &ib, last_pde,
628 last_pt, count, incr, 629 last_pt, count, incr,
629 R600_PTE_VALID); 630 R600_PTE_VALID);
630 } 631 }
631 632
632 count = 1; 633 count = 1;
633 last_pde = pde; 634 last_pde = pde;
634 last_pt = pt; 635 last_pt = pt;
635 } else { 636 } else {
636 ++count; 637 ++count;
637 } 638 }
638 } 639 }
639 640
640 if (count) 641 if (count)
641 radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count, 642 radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
642 incr, R600_PTE_VALID); 643 incr, R600_PTE_VALID);
643 644
644 if (ib.length_dw != 0) { 645 if (ib.length_dw != 0) {
646 radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
645 radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); 647 radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
646 r = radeon_ib_schedule(rdev, &ib, NULL); 648 r = radeon_ib_schedule(rdev, &ib, NULL);
647 if (r) { 649 if (r) {
648 radeon_ib_free(rdev, &ib); 650 radeon_ib_free(rdev, &ib);
649 return r; 651 return r;
650 } 652 }
651 radeon_fence_unref(&vm->fence); 653 radeon_fence_unref(&vm->fence);
652 vm->fence = radeon_fence_ref(ib.fence); 654 vm->fence = radeon_fence_ref(ib.fence);
653 radeon_fence_unref(&vm->last_flush); 655 radeon_fence_unref(&vm->last_flush);
654 } 656 }
655 radeon_ib_free(rdev, &ib); 657 radeon_ib_free(rdev, &ib);
656 658
657 return 0; 659 return 0;
658 } 660 }
659 661
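Both the directory walk above and the PTE walk below batch contiguous updates: as long as consecutive destination entries and source addresses advance by fixed strides, they fold into a single set_page call. A standalone sketch of that run-coalescing; emit() stands in for radeon_asic_vm_set_page and all addresses are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    static void emit(uint64_t pe, uint64_t src, unsigned count)
    {
        printf("set_page(pe=0x%llx, src=0x%llx, count=%u)\n",
               (unsigned long long)pe, (unsigned long long)src, count);
    }

    int main(void)
    {
        /* hypothetical page-table GPU addresses, 4 KiB apart except one gap */
        uint64_t pt[5] = { 0x1000, 0x2000, 0x3000, 0x9000, 0xa000 };
        uint64_t last_pde = ~0ull, last_pt = ~0ull;
        unsigned count = 0;

        for (unsigned i = 0; i < 5; ++i) {
            uint64_t pde = 0x100000 + i * 8;       /* directory entry address */

            if (last_pde + 8 * count != pde ||
                last_pt + 0x1000 * count != pt[i]) {
                if (count)
                    emit(last_pde, last_pt, count); /* flush the broken run */
                count = 1;
                last_pde = pde;
                last_pt = pt[i];
            } else {
                ++count;                            /* extend the current run */
            }
        }
        if (count)
            emit(last_pde, last_pt, count);         /* two batched calls total */
        return 0;
    }
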
660 /** 662 /**
661 * radeon_vm_update_ptes - make sure that page tables are valid 663 * radeon_vm_update_ptes - make sure that page tables are valid
662 * 664 *
663 * @rdev: radeon_device pointer 665 * @rdev: radeon_device pointer
664 * @vm: requested vm 666 * @vm: requested vm
665 * @start: start of GPU address range 667 * @start: start of GPU address range
666 * @end: end of GPU address range 668 * @end: end of GPU address range
667 * @dst: destination address to map to 669 * @dst: destination address to map to
668 * @flags: mapping flags 670 * @flags: mapping flags
669 * 671 *
670 * Update the page tables in the range @start - @end (cayman+). 672 * Update the page tables in the range @start - @end (cayman+).
671 * 673 *
672 * Global and local mutex must be locked! 674 * Global and local mutex must be locked!
673 */ 675 */
674 static void radeon_vm_update_ptes(struct radeon_device *rdev, 676 static void radeon_vm_update_ptes(struct radeon_device *rdev,
675 struct radeon_vm *vm, 677 struct radeon_vm *vm,
676 struct radeon_ib *ib, 678 struct radeon_ib *ib,
677 uint64_t start, uint64_t end, 679 uint64_t start, uint64_t end,
678 uint64_t dst, uint32_t flags) 680 uint64_t dst, uint32_t flags)
679 { 681 {
680 static const uint64_t mask = RADEON_VM_PTE_COUNT - 1; 682 static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
681 683
682 uint64_t last_pte = ~0, last_dst = ~0; 684 uint64_t last_pte = ~0, last_dst = ~0;
683 unsigned count = 0; 685 unsigned count = 0;
684 uint64_t addr; 686 uint64_t addr;
685 687
686 start = start / RADEON_GPU_PAGE_SIZE; 688 start = start / RADEON_GPU_PAGE_SIZE;
687 end = end / RADEON_GPU_PAGE_SIZE; 689 end = end / RADEON_GPU_PAGE_SIZE;
688 690
689 /* walk over the address space and update the page tables */ 691 /* walk over the address space and update the page tables */
690 for (addr = start; addr < end; ) { 692 for (addr = start; addr < end; ) {
691 uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE; 693 uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
694 struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
692 unsigned nptes; 695 unsigned nptes;
693 uint64_t pte; 696 uint64_t pte;
694 697
698 radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);
699
695 if ((addr & ~mask) == (end & ~mask)) 700 if ((addr & ~mask) == (end & ~mask))
696 nptes = end - addr; 701 nptes = end - addr;
697 else 702 else
698 nptes = RADEON_VM_PTE_COUNT - (addr & mask); 703 nptes = RADEON_VM_PTE_COUNT - (addr & mask);
699 704
700 pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo); 705 pte = radeon_bo_gpu_offset(pt);
701 pte += (addr & mask) * 8; 706 pte += (addr & mask) * 8;
702 707
703 if ((last_pte + 8 * count) != pte) { 708 if ((last_pte + 8 * count) != pte) {
704 709
705 if (count) { 710 if (count) {
706 radeon_asic_vm_set_page(rdev, ib, last_pte, 711 radeon_asic_vm_set_page(rdev, ib, last_pte,
707 last_dst, count, 712 last_dst, count,
708 RADEON_GPU_PAGE_SIZE, 713 RADEON_GPU_PAGE_SIZE,
709 flags); 714 flags);
710 } 715 }
711 716
712 count = nptes; 717 count = nptes;
713 last_pte = pte; 718 last_pte = pte;
714 last_dst = dst; 719 last_dst = dst;
715 } else { 720 } else {
716 count += nptes; 721 count += nptes;
717 } 722 }
718 723
719 addr += nptes; 724 addr += nptes;
720 dst += nptes * RADEON_GPU_PAGE_SIZE; 725 dst += nptes * RADEON_GPU_PAGE_SIZE;
721 } 726 }
722 727
723 if (count) { 728 if (count) {
724 radeon_asic_vm_set_page(rdev, ib, last_pte, 729 radeon_asic_vm_set_page(rdev, ib, last_pte,
725 last_dst, count, 730 last_dst, count,
726 RADEON_GPU_PAGE_SIZE, flags); 731 RADEON_GPU_PAGE_SIZE, flags);
727 } 732 }
728 } 733 }
729 734
730 /** 735 /**
731 * radeon_vm_bo_update - map a bo into the vm page table 736 * radeon_vm_bo_update - map a bo into the vm page table
732 * 737 *
733 * @rdev: radeon_device pointer 738 * @rdev: radeon_device pointer
734 * @vm: requested vm 739 * @vm: requested vm
735 * @bo: radeon buffer object 740 * @bo: radeon buffer object
736 * @mem: ttm mem 741 * @mem: ttm mem
737 * 742 *
738 * Fill in the page table entries for @bo (cayman+). 743 * Fill in the page table entries for @bo (cayman+).
739 * Returns 0 for success, -EINVAL for failure. 744 * Returns 0 for success, -EINVAL for failure.
740 * 745 *
741 * Object has to be reserved and mutex must be locked! 746 * Object has to be reserved and mutex must be locked!
742 */ 747 */
743 int radeon_vm_bo_update(struct radeon_device *rdev, 748 int radeon_vm_bo_update(struct radeon_device *rdev,
744 struct radeon_vm *vm, 749 struct radeon_vm *vm,
745 struct radeon_bo *bo, 750 struct radeon_bo *bo,
746 struct ttm_mem_reg *mem) 751 struct ttm_mem_reg *mem)
747 { 752 {
748 struct radeon_ib ib; 753 struct radeon_ib ib;
749 struct radeon_bo_va *bo_va; 754 struct radeon_bo_va *bo_va;
750 unsigned nptes, ndw; 755 unsigned nptes, ndw;
751 uint64_t addr; 756 uint64_t addr;
752 int r; 757 int r;
753 758
754 bo_va = radeon_vm_bo_find(vm, bo); 759 bo_va = radeon_vm_bo_find(vm, bo);
755 if (bo_va == NULL) { 760 if (bo_va == NULL) {
756 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); 761 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
757 return -EINVAL; 762 return -EINVAL;
758 } 763 }
759 764
760 if (!bo_va->soffset) { 765 if (!bo_va->soffset) {
761 dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n", 766 dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
762 bo, vm); 767 bo, vm);
763 return -EINVAL; 768 return -EINVAL;
764 } 769 }
765 770
766 if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL)) 771 if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
767 return 0; 772 return 0;
768 773
769 bo_va->flags &= ~RADEON_VM_PAGE_VALID; 774 bo_va->flags &= ~RADEON_VM_PAGE_VALID;
770 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; 775 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
771 if (mem) { 776 if (mem) {
772 addr = mem->start << PAGE_SHIFT; 777 addr = mem->start << PAGE_SHIFT;
773 if (mem->mem_type != TTM_PL_SYSTEM) { 778 if (mem->mem_type != TTM_PL_SYSTEM) {
774 bo_va->flags |= RADEON_VM_PAGE_VALID; 779 bo_va->flags |= RADEON_VM_PAGE_VALID;
775 bo_va->valid = true; 780 bo_va->valid = true;
776 } 781 }
777 if (mem->mem_type == TTM_PL_TT) { 782 if (mem->mem_type == TTM_PL_TT) {
778 bo_va->flags |= RADEON_VM_PAGE_SYSTEM; 783 bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
779 } else { 784 } else {
780 addr += rdev->vm_manager.vram_base_offset; 785 addr += rdev->vm_manager.vram_base_offset;
781 } 786 }
782 } else { 787 } else {
783 addr = 0; 788 addr = 0;
784 bo_va->valid = false; 789 bo_va->valid = false;
785 } 790 }
786 791
787 trace_radeon_vm_bo_update(bo_va); 792 trace_radeon_vm_bo_update(bo_va);
788 793
789 nptes = radeon_bo_ngpu_pages(bo); 794 nptes = radeon_bo_ngpu_pages(bo);
790 795
791 /* padding, etc. */ 796 /* padding, etc. */
792 ndw = 64; 797 ndw = 64;
793 798
794 if (RADEON_VM_BLOCK_SIZE > 11) 799 if (RADEON_VM_BLOCK_SIZE > 11)
795 /* reserve space for one header for every 2k dwords */ 800 /* reserve space for one header for every 2k dwords */
796 ndw += (nptes >> 11) * 4; 801 ndw += (nptes >> 11) * 4;
797 else 802 else
798 /* reserve space for one header for 803 /* reserve space for one header for
799 every (1 << BLOCK_SIZE) entries */ 804 every (1 << BLOCK_SIZE) entries */
800 ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4; 805 ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
801 806
802 /* reserve space for pte addresses */ 807 /* reserve space for pte addresses */
803 ndw += nptes * 2; 808 ndw += nptes * 2;
804 809
805 /* update too big for an IB */ 810 /* update too big for an IB */
806 if (ndw > 0xfffff) 811 if (ndw > 0xfffff)
807 return -ENOMEM; 812 return -ENOMEM;
808 813
809 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4); 814 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
810 if (r) 815 if (r)
811 return r; 816 return r;
812 ib.length_dw = 0; 817 ib.length_dw = 0;
813 818
814 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, 819 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
815 addr, radeon_vm_page_flags(bo_va->flags)); 820 addr, radeon_vm_page_flags(bo_va->flags));
816 821
817 radeon_semaphore_sync_to(ib.semaphore, vm->fence); 822 radeon_semaphore_sync_to(ib.semaphore, vm->fence);
818 r = radeon_ib_schedule(rdev, &ib, NULL); 823 r = radeon_ib_schedule(rdev, &ib, NULL);
819 if (r) { 824 if (r) {
820 radeon_ib_free(rdev, &ib); 825 radeon_ib_free(rdev, &ib);
821 return r; 826 return r;
822 } 827 }
823 radeon_fence_unref(&vm->fence); 828 radeon_fence_unref(&vm->fence);
824 vm->fence = radeon_fence_ref(ib.fence); 829 vm->fence = radeon_fence_ref(ib.fence);
825 radeon_ib_free(rdev, &ib); 830 radeon_ib_free(rdev, &ib);
826 radeon_fence_unref(&vm->last_flush); 831 radeon_fence_unref(&vm->last_flush);
827 832
828 return 0; 833 return 0;
829 } 834 }
830 835
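The dword budget computed in radeon_vm_bo_update is worth a worked example. With assumed values of a 1 MiB BO (256 GPU pages of 4 KiB) and RADEON_VM_BLOCK_SIZE = 9, so the block size <= 11 branch applies:

    #include <stdio.h>

    int main(void)
    {
        unsigned nptes = 256;            /* pages to map (assumed) */
        unsigned block = 9;              /* RADEON_VM_BLOCK_SIZE (assumed) */
        unsigned ndw   = 64;             /* padding, etc. */

        ndw += (nptes >> block) * 4;     /* one header per page table touched */
        ndw += nptes * 2;                /* two dwords per PTE address */

        printf("ndw = %u (%u bytes)\n", ndw, ndw * 4); /* 576 dwords, 2304 bytes */
        return 0;
    }
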
831 /** 836 /**
832 * radeon_vm_bo_rmv - remove a bo from a specific vm 837 * radeon_vm_bo_rmv - remove a bo from a specific vm
833 * 838 *
834 * @rdev: radeon_device pointer 839 * @rdev: radeon_device pointer
835 * @bo_va: requested bo_va 840 * @bo_va: requested bo_va
836 * 841 *
837 * Remove @bo_va->bo from the requested vm (cayman+). 842 * Remove @bo_va->bo from the requested vm (cayman+).
838 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and 843 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
839 * remove the ptes for @bo_va in the page table. 844 * remove the ptes for @bo_va in the page table.
840 * Returns 0 for success. 845 * Returns 0 for success.
841 * 846 *
842 * Object have to be reserved! 847 * Object have to be reserved!
843 */ 848 */
844 int radeon_vm_bo_rmv(struct radeon_device *rdev, 849 int radeon_vm_bo_rmv(struct radeon_device *rdev,
845 struct radeon_bo_va *bo_va) 850 struct radeon_bo_va *bo_va)
846 { 851 {
847 int r = 0; 852 int r = 0;
848 853
849 mutex_lock(&bo_va->vm->mutex); 854 mutex_lock(&bo_va->vm->mutex);
850 if (bo_va->soffset) 855 if (bo_va->soffset)
851 r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL); 856 r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
852 857
853 list_del(&bo_va->vm_list); 858 list_del(&bo_va->vm_list);
854 mutex_unlock(&bo_va->vm->mutex); 859 mutex_unlock(&bo_va->vm->mutex);
855 list_del(&bo_va->bo_list); 860 list_del(&bo_va->bo_list);
856 861
857 kfree(bo_va); 862 kfree(bo_va);
858 return r; 863 return r;
859 } 864 }
860 865
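A minimal caller sketch for the function above, hypothetical but using only calls that appear elsewhere in this file, shows the reservation rule in practice:

	/* Hypothetical caller; bo and bo_va are assumed to be valid. */
	int r = radeon_bo_reserve(bo, false);   /* "Object has to be reserved!" */
	if (!r) {
		r = radeon_vm_bo_rmv(rdev, bo_va);  /* unmaps and kfree()s bo_va */
		radeon_bo_unreserve(bo);
	}

Note that bo_va is freed inside radeon_vm_bo_rmv(), while the underlying bo stays valid and must still be unreserved by the caller.
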
/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		bo_va->valid = false;
	}
}

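Invalidation is deliberately cheap: it only clears bo_va->valid on every mapping of the BO, so the next radeon_vm_bo_update() call rewrites the stale PTEs. A hypothetical helper (the name and call site are assumptions, not driver code) would just be:

	/* After a BO changes placement, drop the cached validity so the
	 * next radeon_vm_bo_update() rewrites its PTEs. */
	static void example_bo_moved(struct radeon_device *rdev,
				     struct radeon_bo *bo)
	{
		radeon_vm_bo_invalidate(rdev, bo);
	}
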
/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	unsigned pd_size, pd_entries, pts_size;
	int r;

	vm->id = 0;
	vm->fence = NULL;
	vm->last_flush = NULL;
	vm->last_id_use = NULL;
	mutex_init(&vm->mutex);
	INIT_LIST_HEAD(&vm->va);

	pd_size = radeon_vm_directory_size(rdev);
	pd_entries = radeon_vm_num_pdes(rdev);

	/* allocate page table array */
	pts_size = pd_entries * sizeof(struct radeon_vm_pt);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false,
			     RADEON_GEM_DOMAIN_VRAM, NULL,
			     &vm->page_directory);
	if (r)
		return r;

	r = radeon_vm_clear_bo(rdev, vm->page_directory);
	if (r) {
		radeon_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

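Taken together with radeon_vm_fini() below, the lifecycle is symmetric. A minimal sketch, assuming only the two entry points shown here:

	/* Hypothetical VM lifecycle; error handling trimmed to the essentials. */
	struct radeon_vm vm;
	int r;

	r = radeon_vm_init(rdev, &vm);      /* page-table array + cleared directory */
	if (r)
		return r;                   /* kzalloc()/radeon_bo_create() failed */
	/* ... add mappings with radeon_vm_bo_update(), submit work ... */
	radeon_vm_fini(rdev, &vm);          /* frees tables, directory and fences */
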
/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int i, r;

	if (!list_empty(&vm->va)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
		list_del_init(&bo_va->vm_list);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
			kfree(bo_va);
		}
	}

	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
		radeon_bo_unref(&vm->page_tables[i].bo);
	kfree(vm->page_tables);

	radeon_bo_unref(&vm->page_directory);

	radeon_fence_unref(&vm->fence);
	radeon_fence_unref(&vm->last_flush);
	radeon_fence_unref(&vm->last_id_use);

	mutex_destroy(&vm->mutex);
}