Commit bbb0aef5cfe95fe9b51a7eeba4a440b69037b01f

Authored by Joe Perches
Committed by Dave Airlie
1 parent 5ad3d8831f

drm: Verify debug message arguments

Add __attribute__((format (printf, 4, 5))) to drm_ut_debug_printk
and fix fallout.

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
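
As background: __attribute__((format (printf, m, n))) tells gcc that argument m of a function is a printf-style format string and that the variadic arguments to be checked against it start at position n, so mismatched conversion specifiers become compile-time -Wformat warnings instead of silently wrong output at run time. A minimal sketch of the kind of annotation this commit adds, assuming the four fixed parameters drm_ut_debug_printk took at the time (parameter names illustrative):

	/* Format string is the 4th argument; the varargs begin at the 5th. */
	extern void drm_ut_debug_printk(unsigned int request_level,
					const char *prefix,
					const char *function_name,
					const char *format, ...)
		__attribute__((format (printf, 4, 5)));

The "fallout" is then exactly the kind of fix visible in the hunk below: struct timeval fields such as tv_sec are long, so printing them with %d now triggers a warning, and the call site gains %ld specifiers and explicit casts.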

Showing 5 changed files with 17 additions and 14 deletions

drivers/gpu/drm/drm_irq.c
1 /** 1 /**
2 * \file drm_irq.c 2 * \file drm_irq.c
3 * IRQ support 3 * IRQ support
4 * 4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com> 5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com> 6 * \author Gareth Hughes <gareth@valinux.com>
7 */ 7 */
8 8
9 /* 9 /*
10 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com 10 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
11 * 11 *
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. 12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved. 14 * All Rights Reserved.
15 * 15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a 16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"), 17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation 18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the 20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions: 21 * Software is furnished to do so, subject to the following conditions:
22 * 22 *
23 * The above copyright notice and this permission notice (including the next 23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the 24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software. 25 * Software.
26 * 26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE. 33 * OTHER DEALINGS IN THE SOFTWARE.
34 */ 34 */
35 35
36 #include "drmP.h" 36 #include "drmP.h"
37 #include "drm_trace.h" 37 #include "drm_trace.h"
38 38
39 #include <linux/interrupt.h> /* For task queue support */ 39 #include <linux/interrupt.h> /* For task queue support */
40 #include <linux/slab.h> 40 #include <linux/slab.h>
41 41
42 #include <linux/vgaarb.h> 42 #include <linux/vgaarb.h>
43 43
44 /* Access macro for slots in vblank timestamp ringbuffer. */ 44 /* Access macro for slots in vblank timestamp ringbuffer. */
45 #define vblanktimestamp(dev, crtc, count) ( \ 45 #define vblanktimestamp(dev, crtc, count) ( \
46 (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \ 46 (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
47 ((count) % DRM_VBLANKTIME_RBSIZE)]) 47 ((count) % DRM_VBLANKTIME_RBSIZE)])
48 48
49 /* Retry timestamp calculation up to 3 times to satisfy 49 /* Retry timestamp calculation up to 3 times to satisfy
50 * drm_timestamp_precision before giving up. 50 * drm_timestamp_precision before giving up.
51 */ 51 */
52 #define DRM_TIMESTAMP_MAXRETRIES 3 52 #define DRM_TIMESTAMP_MAXRETRIES 3
53 53
54 /* Threshold in nanoseconds for detection of redundant 54 /* Threshold in nanoseconds for detection of redundant
55 * vblank irq in drm_handle_vblank(). 1 msec should be ok. 55 * vblank irq in drm_handle_vblank(). 1 msec should be ok.
56 */ 56 */
57 #define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000 57 #define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
58 58
59 /** 59 /**
60 * Get interrupt from bus id. 60 * Get interrupt from bus id.
61 * 61 *
62 * \param inode device inode. 62 * \param inode device inode.
63 * \param file_priv DRM file private. 63 * \param file_priv DRM file private.
64 * \param cmd command. 64 * \param cmd command.
65 * \param arg user argument, pointing to a drm_irq_busid structure. 65 * \param arg user argument, pointing to a drm_irq_busid structure.
66 * \return zero on success or a negative number on failure. 66 * \return zero on success or a negative number on failure.
67 * 67 *
68 * Finds the PCI device with the specified bus id and gets its IRQ number. 68 * Finds the PCI device with the specified bus id and gets its IRQ number.
69 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal 69 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
70 * to that of the device that this DRM instance attached to. 70 * to that of the device that this DRM instance attached to.
71 */ 71 */
72 int drm_irq_by_busid(struct drm_device *dev, void *data, 72 int drm_irq_by_busid(struct drm_device *dev, void *data,
73 struct drm_file *file_priv) 73 struct drm_file *file_priv)
74 { 74 {
75 struct drm_irq_busid *p = data; 75 struct drm_irq_busid *p = data;
76 76
77 if (!dev->driver->bus->irq_by_busid) 77 if (!dev->driver->bus->irq_by_busid)
78 return -EINVAL; 78 return -EINVAL;
79 79
80 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 80 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
81 return -EINVAL; 81 return -EINVAL;
82 82
83 return dev->driver->bus->irq_by_busid(dev, p); 83 return dev->driver->bus->irq_by_busid(dev, p);
84 } 84 }
85 85
86 /* 86 /*
87 * Clear vblank timestamp buffer for a crtc. 87 * Clear vblank timestamp buffer for a crtc.
88 */ 88 */
89 static void clear_vblank_timestamps(struct drm_device *dev, int crtc) 89 static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
90 { 90 {
91 memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0, 91 memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
92 DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval)); 92 DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
93 } 93 }
94 94
95 /* 95 /*
96 * Disable vblank irq's on crtc, make sure that last vblank count 96 * Disable vblank irq's on crtc, make sure that last vblank count
97 * of hardware and corresponding consistent software vblank counter 97 * of hardware and corresponding consistent software vblank counter
98 * are preserved, even if there are any spurious vblank irq's after 98 * are preserved, even if there are any spurious vblank irq's after
99 * disable. 99 * disable.
100 */ 100 */
101 static void vblank_disable_and_save(struct drm_device *dev, int crtc) 101 static void vblank_disable_and_save(struct drm_device *dev, int crtc)
102 { 102 {
103 unsigned long irqflags; 103 unsigned long irqflags;
104 u32 vblcount; 104 u32 vblcount;
105 s64 diff_ns; 105 s64 diff_ns;
106 int vblrc; 106 int vblrc;
107 struct timeval tvblank; 107 struct timeval tvblank;
108 108
109 /* Prevent vblank irq processing while disabling vblank irqs, 109 /* Prevent vblank irq processing while disabling vblank irqs,
110 * so no updates of timestamps or count can happen after we've 110 * so no updates of timestamps or count can happen after we've
111 * disabled. Needed to prevent races in case of delayed irq's. 111 * disabled. Needed to prevent races in case of delayed irq's.
112 * Disable preemption, so vblank_time_lock is held as short as 112 * Disable preemption, so vblank_time_lock is held as short as
113 * possible, even under a kernel with PREEMPT_RT patches. 113 * possible, even under a kernel with PREEMPT_RT patches.
114 */ 114 */
115 preempt_disable(); 115 preempt_disable();
116 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 116 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
117 117
118 dev->driver->disable_vblank(dev, crtc); 118 dev->driver->disable_vblank(dev, crtc);
119 dev->vblank_enabled[crtc] = 0; 119 dev->vblank_enabled[crtc] = 0;
120 120
121 /* No further vblank irq's will be processed after 121 /* No further vblank irq's will be processed after
122 * this point. Get current hardware vblank count and 122 * this point. Get current hardware vblank count and
123 * vblank timestamp, repeat until they are consistent. 123 * vblank timestamp, repeat until they are consistent.
124 * 124 *
125 * FIXME: There is still a race condition here and in 125 * FIXME: There is still a race condition here and in
126 * drm_update_vblank_count() which can cause off-by-one 126 * drm_update_vblank_count() which can cause off-by-one
127 * reinitialization of software vblank counter. If gpu 127 * reinitialization of software vblank counter. If gpu
128 * vblank counter doesn't increment exactly at the leading 128 * vblank counter doesn't increment exactly at the leading
129 * edge of a vblank interval, then we can lose 1 count if 129 * edge of a vblank interval, then we can lose 1 count if
130 * we happen to execute between start of vblank and the 130 * we happen to execute between start of vblank and the
131 * delayed gpu counter increment. 131 * delayed gpu counter increment.
132 */ 132 */
133 do { 133 do {
134 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); 134 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
135 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); 135 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
136 } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc)); 136 } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
137 137
138 /* Compute time difference to stored timestamp of last vblank 138 /* Compute time difference to stored timestamp of last vblank
139 * as updated by last invocation of drm_handle_vblank() in vblank irq. 139 * as updated by last invocation of drm_handle_vblank() in vblank irq.
140 */ 140 */
141 vblcount = atomic_read(&dev->_vblank_count[crtc]); 141 vblcount = atomic_read(&dev->_vblank_count[crtc]);
142 diff_ns = timeval_to_ns(&tvblank) - 142 diff_ns = timeval_to_ns(&tvblank) -
143 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); 143 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
144 144
145 /* If there is at least 1 msec difference between the last stored 145 /* If there is at least 1 msec difference between the last stored
146 * timestamp and tvblank, then we are currently executing our 146 * timestamp and tvblank, then we are currently executing our
147 * disable inside a new vblank interval, the tvblank timestamp 147 * disable inside a new vblank interval, the tvblank timestamp
148 * corresponds to this new vblank interval and the irq handler 148 * corresponds to this new vblank interval and the irq handler
149 * for this vblank didn't run yet and won't run due to our disable. 149 * for this vblank didn't run yet and won't run due to our disable.
150 * Therefore we need to do the job of drm_handle_vblank() and 150 * Therefore we need to do the job of drm_handle_vblank() and
151 * increment the vblank counter by one to account for this vblank. 151 * increment the vblank counter by one to account for this vblank.
152 * 152 *
153 * Skip this step if there isn't any high precision timestamp 153 * Skip this step if there isn't any high precision timestamp
154 * available. In that case we can't account for this and just 154 * available. In that case we can't account for this and just
155 * hope for the best. 155 * hope for the best.
156 */ 156 */
157 if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { 157 if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
158 atomic_inc(&dev->_vblank_count[crtc]); 158 atomic_inc(&dev->_vblank_count[crtc]);
159 smp_mb__after_atomic_inc(); 159 smp_mb__after_atomic_inc();
160 } 160 }
161 161
162 /* Invalidate all timestamps while vblank irq's are off. */ 162 /* Invalidate all timestamps while vblank irq's are off. */
163 clear_vblank_timestamps(dev, crtc); 163 clear_vblank_timestamps(dev, crtc);
164 164
165 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 165 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
166 preempt_enable(); 166 preempt_enable();
167 } 167 }
168 168
169 static void vblank_disable_fn(unsigned long arg) 169 static void vblank_disable_fn(unsigned long arg)
170 { 170 {
171 struct drm_device *dev = (struct drm_device *)arg; 171 struct drm_device *dev = (struct drm_device *)arg;
172 unsigned long irqflags; 172 unsigned long irqflags;
173 int i; 173 int i;
174 174
175 if (!dev->vblank_disable_allowed) 175 if (!dev->vblank_disable_allowed)
176 return; 176 return;
177 177
178 for (i = 0; i < dev->num_crtcs; i++) { 178 for (i = 0; i < dev->num_crtcs; i++) {
179 spin_lock_irqsave(&dev->vbl_lock, irqflags); 179 spin_lock_irqsave(&dev->vbl_lock, irqflags);
180 if (atomic_read(&dev->vblank_refcount[i]) == 0 && 180 if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
181 dev->vblank_enabled[i]) { 181 dev->vblank_enabled[i]) {
182 DRM_DEBUG("disabling vblank on crtc %d\n", i); 182 DRM_DEBUG("disabling vblank on crtc %d\n", i);
183 vblank_disable_and_save(dev, i); 183 vblank_disable_and_save(dev, i);
184 } 184 }
185 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 185 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
186 } 186 }
187 } 187 }
188 188
189 void drm_vblank_cleanup(struct drm_device *dev) 189 void drm_vblank_cleanup(struct drm_device *dev)
190 { 190 {
191 /* Bail if the driver didn't call drm_vblank_init() */ 191 /* Bail if the driver didn't call drm_vblank_init() */
192 if (dev->num_crtcs == 0) 192 if (dev->num_crtcs == 0)
193 return; 193 return;
194 194
195 del_timer(&dev->vblank_disable_timer); 195 del_timer(&dev->vblank_disable_timer);
196 196
197 vblank_disable_fn((unsigned long)dev); 197 vblank_disable_fn((unsigned long)dev);
198 198
199 kfree(dev->vbl_queue); 199 kfree(dev->vbl_queue);
200 kfree(dev->_vblank_count); 200 kfree(dev->_vblank_count);
201 kfree(dev->vblank_refcount); 201 kfree(dev->vblank_refcount);
202 kfree(dev->vblank_enabled); 202 kfree(dev->vblank_enabled);
203 kfree(dev->last_vblank); 203 kfree(dev->last_vblank);
204 kfree(dev->last_vblank_wait); 204 kfree(dev->last_vblank_wait);
205 kfree(dev->vblank_inmodeset); 205 kfree(dev->vblank_inmodeset);
206 kfree(dev->_vblank_time); 206 kfree(dev->_vblank_time);
207 207
208 dev->num_crtcs = 0; 208 dev->num_crtcs = 0;
209 } 209 }
210 EXPORT_SYMBOL(drm_vblank_cleanup); 210 EXPORT_SYMBOL(drm_vblank_cleanup);
211 211
212 int drm_vblank_init(struct drm_device *dev, int num_crtcs) 212 int drm_vblank_init(struct drm_device *dev, int num_crtcs)
213 { 213 {
214 int i, ret = -ENOMEM; 214 int i, ret = -ENOMEM;
215 215
216 setup_timer(&dev->vblank_disable_timer, vblank_disable_fn, 216 setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
217 (unsigned long)dev); 217 (unsigned long)dev);
218 spin_lock_init(&dev->vbl_lock); 218 spin_lock_init(&dev->vbl_lock);
219 spin_lock_init(&dev->vblank_time_lock); 219 spin_lock_init(&dev->vblank_time_lock);
220 220
221 dev->num_crtcs = num_crtcs; 221 dev->num_crtcs = num_crtcs;
222 222
223 dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs, 223 dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
224 GFP_KERNEL); 224 GFP_KERNEL);
225 if (!dev->vbl_queue) 225 if (!dev->vbl_queue)
226 goto err; 226 goto err;
227 227
228 dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL); 228 dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
229 if (!dev->_vblank_count) 229 if (!dev->_vblank_count)
230 goto err; 230 goto err;
231 231
232 dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs, 232 dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
233 GFP_KERNEL); 233 GFP_KERNEL);
234 if (!dev->vblank_refcount) 234 if (!dev->vblank_refcount)
235 goto err; 235 goto err;
236 236
237 dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL); 237 dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
238 if (!dev->vblank_enabled) 238 if (!dev->vblank_enabled)
239 goto err; 239 goto err;
240 240
241 dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL); 241 dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
242 if (!dev->last_vblank) 242 if (!dev->last_vblank)
243 goto err; 243 goto err;
244 244
245 dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL); 245 dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
246 if (!dev->last_vblank_wait) 246 if (!dev->last_vblank_wait)
247 goto err; 247 goto err;
248 248
249 dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL); 249 dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
250 if (!dev->vblank_inmodeset) 250 if (!dev->vblank_inmodeset)
251 goto err; 251 goto err;
252 252
253 dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE, 253 dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
254 sizeof(struct timeval), GFP_KERNEL); 254 sizeof(struct timeval), GFP_KERNEL);
255 if (!dev->_vblank_time) 255 if (!dev->_vblank_time)
256 goto err; 256 goto err;
257 257
258 DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n"); 258 DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
259 259
260 /* Driver specific high-precision vblank timestamping supported? */ 260 /* Driver specific high-precision vblank timestamping supported? */
261 if (dev->driver->get_vblank_timestamp) 261 if (dev->driver->get_vblank_timestamp)
262 DRM_INFO("Driver supports precise vblank timestamp query.\n"); 262 DRM_INFO("Driver supports precise vblank timestamp query.\n");
263 else 263 else
264 DRM_INFO("No driver support for vblank timestamp query.\n"); 264 DRM_INFO("No driver support for vblank timestamp query.\n");
265 265
266 /* Zero per-crtc vblank stuff */ 266 /* Zero per-crtc vblank stuff */
267 for (i = 0; i < num_crtcs; i++) { 267 for (i = 0; i < num_crtcs; i++) {
268 init_waitqueue_head(&dev->vbl_queue[i]); 268 init_waitqueue_head(&dev->vbl_queue[i]);
269 atomic_set(&dev->_vblank_count[i], 0); 269 atomic_set(&dev->_vblank_count[i], 0);
270 atomic_set(&dev->vblank_refcount[i], 0); 270 atomic_set(&dev->vblank_refcount[i], 0);
271 } 271 }
272 272
273 dev->vblank_disable_allowed = 0; 273 dev->vblank_disable_allowed = 0;
274 return 0; 274 return 0;
275 275
276 err: 276 err:
277 drm_vblank_cleanup(dev); 277 drm_vblank_cleanup(dev);
278 return ret; 278 return ret;
279 } 279 }
280 EXPORT_SYMBOL(drm_vblank_init); 280 EXPORT_SYMBOL(drm_vblank_init);
281 281
282 static void drm_irq_vgaarb_nokms(void *cookie, bool state) 282 static void drm_irq_vgaarb_nokms(void *cookie, bool state)
283 { 283 {
284 struct drm_device *dev = cookie; 284 struct drm_device *dev = cookie;
285 285
286 if (dev->driver->vgaarb_irq) { 286 if (dev->driver->vgaarb_irq) {
287 dev->driver->vgaarb_irq(dev, state); 287 dev->driver->vgaarb_irq(dev, state);
288 return; 288 return;
289 } 289 }
290 290
291 if (!dev->irq_enabled) 291 if (!dev->irq_enabled)
292 return; 292 return;
293 293
294 if (state) 294 if (state)
295 dev->driver->irq_uninstall(dev); 295 dev->driver->irq_uninstall(dev);
296 else { 296 else {
297 dev->driver->irq_preinstall(dev); 297 dev->driver->irq_preinstall(dev);
298 dev->driver->irq_postinstall(dev); 298 dev->driver->irq_postinstall(dev);
299 } 299 }
300 } 300 }
301 301
302 /** 302 /**
303 * Install IRQ handler. 303 * Install IRQ handler.
304 * 304 *
305 * \param dev DRM device. 305 * \param dev DRM device.
306 * 306 *
307 * Initializes the IRQ related data. Installs the handler, calling the driver 307 * Initializes the IRQ related data. Installs the handler, calling the driver
308 * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions 308 * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
309 * before and after the installation. 309 * before and after the installation.
310 */ 310 */
311 int drm_irq_install(struct drm_device *dev) 311 int drm_irq_install(struct drm_device *dev)
312 { 312 {
313 int ret = 0; 313 int ret = 0;
314 unsigned long sh_flags = 0; 314 unsigned long sh_flags = 0;
315 char *irqname; 315 char *irqname;
316 316
317 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 317 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
318 return -EINVAL; 318 return -EINVAL;
319 319
320 if (drm_dev_to_irq(dev) == 0) 320 if (drm_dev_to_irq(dev) == 0)
321 return -EINVAL; 321 return -EINVAL;
322 322
323 mutex_lock(&dev->struct_mutex); 323 mutex_lock(&dev->struct_mutex);
324 324
325 /* Driver must have been initialized */ 325 /* Driver must have been initialized */
326 if (!dev->dev_private) { 326 if (!dev->dev_private) {
327 mutex_unlock(&dev->struct_mutex); 327 mutex_unlock(&dev->struct_mutex);
328 return -EINVAL; 328 return -EINVAL;
329 } 329 }
330 330
331 if (dev->irq_enabled) { 331 if (dev->irq_enabled) {
332 mutex_unlock(&dev->struct_mutex); 332 mutex_unlock(&dev->struct_mutex);
333 return -EBUSY; 333 return -EBUSY;
334 } 334 }
335 dev->irq_enabled = 1; 335 dev->irq_enabled = 1;
336 mutex_unlock(&dev->struct_mutex); 336 mutex_unlock(&dev->struct_mutex);
337 337
338 DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); 338 DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
339 339
340 /* Before installing handler */ 340 /* Before installing handler */
341 dev->driver->irq_preinstall(dev); 341 dev->driver->irq_preinstall(dev);
342 342
343 /* Install handler */ 343 /* Install handler */
344 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) 344 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
345 sh_flags = IRQF_SHARED; 345 sh_flags = IRQF_SHARED;
346 346
347 if (dev->devname) 347 if (dev->devname)
348 irqname = dev->devname; 348 irqname = dev->devname;
349 else 349 else
350 irqname = dev->driver->name; 350 irqname = dev->driver->name;
351 351
352 ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler, 352 ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
353 sh_flags, irqname, dev); 353 sh_flags, irqname, dev);
354 354
355 if (ret < 0) { 355 if (ret < 0) {
356 mutex_lock(&dev->struct_mutex); 356 mutex_lock(&dev->struct_mutex);
357 dev->irq_enabled = 0; 357 dev->irq_enabled = 0;
358 mutex_unlock(&dev->struct_mutex); 358 mutex_unlock(&dev->struct_mutex);
359 return ret; 359 return ret;
360 } 360 }
361 361
362 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 362 if (!drm_core_check_feature(dev, DRIVER_MODESET))
363 vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL); 363 vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
364 364
365 /* After installing handler */ 365 /* After installing handler */
366 ret = dev->driver->irq_postinstall(dev); 366 ret = dev->driver->irq_postinstall(dev);
367 if (ret < 0) { 367 if (ret < 0) {
368 mutex_lock(&dev->struct_mutex); 368 mutex_lock(&dev->struct_mutex);
369 dev->irq_enabled = 0; 369 dev->irq_enabled = 0;
370 mutex_unlock(&dev->struct_mutex); 370 mutex_unlock(&dev->struct_mutex);
371 } 371 }
372 372
373 return ret; 373 return ret;
374 } 374 }
375 EXPORT_SYMBOL(drm_irq_install); 375 EXPORT_SYMBOL(drm_irq_install);
376 376
377 /** 377 /**
378 * Uninstall the IRQ handler. 378 * Uninstall the IRQ handler.
379 * 379 *
380 * \param dev DRM device. 380 * \param dev DRM device.
381 * 381 *
382 * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq. 382 * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
383 */ 383 */
384 int drm_irq_uninstall(struct drm_device *dev) 384 int drm_irq_uninstall(struct drm_device *dev)
385 { 385 {
386 unsigned long irqflags; 386 unsigned long irqflags;
387 int irq_enabled, i; 387 int irq_enabled, i;
388 388
389 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 389 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
390 return -EINVAL; 390 return -EINVAL;
391 391
392 mutex_lock(&dev->struct_mutex); 392 mutex_lock(&dev->struct_mutex);
393 irq_enabled = dev->irq_enabled; 393 irq_enabled = dev->irq_enabled;
394 dev->irq_enabled = 0; 394 dev->irq_enabled = 0;
395 mutex_unlock(&dev->struct_mutex); 395 mutex_unlock(&dev->struct_mutex);
396 396
397 /* 397 /*
398 * Wake up any waiters so they don't hang. 398 * Wake up any waiters so they don't hang.
399 */ 399 */
400 spin_lock_irqsave(&dev->vbl_lock, irqflags); 400 spin_lock_irqsave(&dev->vbl_lock, irqflags);
401 for (i = 0; i < dev->num_crtcs; i++) { 401 for (i = 0; i < dev->num_crtcs; i++) {
402 DRM_WAKEUP(&dev->vbl_queue[i]); 402 DRM_WAKEUP(&dev->vbl_queue[i]);
403 dev->vblank_enabled[i] = 0; 403 dev->vblank_enabled[i] = 0;
404 dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i); 404 dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
405 } 405 }
406 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 406 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
407 407
408 if (!irq_enabled) 408 if (!irq_enabled)
409 return -EINVAL; 409 return -EINVAL;
410 410
411 DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); 411 DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
412 412
413 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 413 if (!drm_core_check_feature(dev, DRIVER_MODESET))
414 vga_client_register(dev->pdev, NULL, NULL, NULL); 414 vga_client_register(dev->pdev, NULL, NULL, NULL);
415 415
416 dev->driver->irq_uninstall(dev); 416 dev->driver->irq_uninstall(dev);
417 417
418 free_irq(drm_dev_to_irq(dev), dev); 418 free_irq(drm_dev_to_irq(dev), dev);
419 419
420 return 0; 420 return 0;
421 } 421 }
422 EXPORT_SYMBOL(drm_irq_uninstall); 422 EXPORT_SYMBOL(drm_irq_uninstall);
423 423
424 /** 424 /**
425 * IRQ control ioctl. 425 * IRQ control ioctl.
426 * 426 *
427 * \param inode device inode. 427 * \param inode device inode.
428 * \param file_priv DRM file private. 428 * \param file_priv DRM file private.
429 * \param cmd command. 429 * \param cmd command.
430 * \param arg user argument, pointing to a drm_control structure. 430 * \param arg user argument, pointing to a drm_control structure.
431 * \return zero on success or a negative number on failure. 431 * \return zero on success or a negative number on failure.
432 * 432 *
433 * Calls irq_install() or irq_uninstall() according to \p arg. 433 * Calls irq_install() or irq_uninstall() according to \p arg.
434 */ 434 */
435 int drm_control(struct drm_device *dev, void *data, 435 int drm_control(struct drm_device *dev, void *data,
436 struct drm_file *file_priv) 436 struct drm_file *file_priv)
437 { 437 {
438 struct drm_control *ctl = data; 438 struct drm_control *ctl = data;
439 439
440 /* if we haven't irq we fallback for compatibility reasons - 440 /* if we haven't irq we fallback for compatibility reasons -
441 * this used to be a separate function in drm_dma.h 441 * this used to be a separate function in drm_dma.h
442 */ 442 */
443 443
444 444
445 switch (ctl->func) { 445 switch (ctl->func) {
446 case DRM_INST_HANDLER: 446 case DRM_INST_HANDLER:
447 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 447 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
448 return 0; 448 return 0;
449 if (drm_core_check_feature(dev, DRIVER_MODESET)) 449 if (drm_core_check_feature(dev, DRIVER_MODESET))
450 return 0; 450 return 0;
451 if (dev->if_version < DRM_IF_VERSION(1, 2) && 451 if (dev->if_version < DRM_IF_VERSION(1, 2) &&
452 ctl->irq != drm_dev_to_irq(dev)) 452 ctl->irq != drm_dev_to_irq(dev))
453 return -EINVAL; 453 return -EINVAL;
454 return drm_irq_install(dev); 454 return drm_irq_install(dev);
455 case DRM_UNINST_HANDLER: 455 case DRM_UNINST_HANDLER:
456 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 456 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
457 return 0; 457 return 0;
458 if (drm_core_check_feature(dev, DRIVER_MODESET)) 458 if (drm_core_check_feature(dev, DRIVER_MODESET))
459 return 0; 459 return 0;
460 return drm_irq_uninstall(dev); 460 return drm_irq_uninstall(dev);
461 default: 461 default:
462 return -EINVAL; 462 return -EINVAL;
463 } 463 }
464 } 464 }
465 465
466 /** 466 /**
467 * drm_calc_timestamping_constants - Calculate and 467 * drm_calc_timestamping_constants - Calculate and
468 * store various constants which are later needed by 468 * store various constants which are later needed by
469 * vblank and swap-completion timestamping, e.g, by 469 * vblank and swap-completion timestamping, e.g, by
470 * drm_calc_vbltimestamp_from_scanoutpos(). 470 * drm_calc_vbltimestamp_from_scanoutpos().
471 * They are derived from crtc's true scanout timing, 471 * They are derived from crtc's true scanout timing,
472 * so they take things like panel scaling or other 472 * so they take things like panel scaling or other
473 * adjustments into account. 473 * adjustments into account.
474 * 474 *
475 * @crtc drm_crtc whose timestamp constants should be updated. 475 * @crtc drm_crtc whose timestamp constants should be updated.
476 * 476 *
477 */ 477 */
478 void drm_calc_timestamping_constants(struct drm_crtc *crtc) 478 void drm_calc_timestamping_constants(struct drm_crtc *crtc)
479 { 479 {
480 s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0; 480 s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
481 u64 dotclock; 481 u64 dotclock;
482 482
483 /* Dot clock in Hz: */ 483 /* Dot clock in Hz: */
484 dotclock = (u64) crtc->hwmode.clock * 1000; 484 dotclock = (u64) crtc->hwmode.clock * 1000;
485 485
486 /* Fields of interlaced scanout modes are only halve a frame duration. 486 /* Fields of interlaced scanout modes are only halve a frame duration.
487 * Double the dotclock to get halve the frame-/line-/pixelduration. 487 * Double the dotclock to get halve the frame-/line-/pixelduration.
488 */ 488 */
489 if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE) 489 if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
490 dotclock *= 2; 490 dotclock *= 2;
491 491
492 /* Valid dotclock? */ 492 /* Valid dotclock? */
493 if (dotclock > 0) { 493 if (dotclock > 0) {
494 /* Convert scanline length in pixels and video dot clock to 494 /* Convert scanline length in pixels and video dot clock to
495 * line duration, frame duration and pixel duration in 495 * line duration, frame duration and pixel duration in
496 * nanoseconds: 496 * nanoseconds:
497 */ 497 */
498 pixeldur_ns = (s64) div64_u64(1000000000, dotclock); 498 pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
499 linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal * 499 linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
500 1000000000), dotclock); 500 1000000000), dotclock);
501 framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns; 501 framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
502 } else 502 } else
503 DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n", 503 DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
504 crtc->base.id); 504 crtc->base.id);
505 505
506 crtc->pixeldur_ns = pixeldur_ns; 506 crtc->pixeldur_ns = pixeldur_ns;
507 crtc->linedur_ns = linedur_ns; 507 crtc->linedur_ns = linedur_ns;
508 crtc->framedur_ns = framedur_ns; 508 crtc->framedur_ns = framedur_ns;
509 509
510 DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n", 510 DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
511 crtc->base.id, crtc->hwmode.crtc_htotal, 511 crtc->base.id, crtc->hwmode.crtc_htotal,
512 crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay); 512 crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
513 DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n", 513 DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
514 crtc->base.id, (int) dotclock/1000, (int) framedur_ns, 514 crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
515 (int) linedur_ns, (int) pixeldur_ns); 515 (int) linedur_ns, (int) pixeldur_ns);
516 } 516 }
517 EXPORT_SYMBOL(drm_calc_timestamping_constants); 517 EXPORT_SYMBOL(drm_calc_timestamping_constants);
518 518
519 /** 519 /**
520 * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms 520 * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
521 * drivers. Implements calculation of exact vblank timestamps from 521 * drivers. Implements calculation of exact vblank timestamps from
522 * given drm_display_mode timings and current video scanout position 522 * given drm_display_mode timings and current video scanout position
523 * of a crtc. This can be called from within get_vblank_timestamp() 523 * of a crtc. This can be called from within get_vblank_timestamp()
524 * implementation of a kms driver to implement the actual timestamping. 524 * implementation of a kms driver to implement the actual timestamping.
525 * 525 *
526 * Should return timestamps conforming to the OML_sync_control OpenML 526 * Should return timestamps conforming to the OML_sync_control OpenML
527 * extension specification. The timestamp corresponds to the end of 527 * extension specification. The timestamp corresponds to the end of
528 * the vblank interval, aka start of scanout of topmost-leftmost display 528 * the vblank interval, aka start of scanout of topmost-leftmost display
529 * pixel in the following video frame. 529 * pixel in the following video frame.
530 * 530 *
531 * Requires support for optional dev->driver->get_scanout_position() 531 * Requires support for optional dev->driver->get_scanout_position()
532 * in kms driver, plus a bit of setup code to provide a drm_display_mode 532 * in kms driver, plus a bit of setup code to provide a drm_display_mode
533 * that corresponds to the true scanout timing. 533 * that corresponds to the true scanout timing.
534 * 534 *
535 * The current implementation only handles standard video modes. It 535 * The current implementation only handles standard video modes. It
536 * returns as no operation if a doublescan or interlaced video mode is 536 * returns as no operation if a doublescan or interlaced video mode is
537 * active. Higher level code is expected to handle this. 537 * active. Higher level code is expected to handle this.
538 * 538 *
539 * @dev: DRM device. 539 * @dev: DRM device.
540 * @crtc: Which crtc's vblank timestamp to retrieve. 540 * @crtc: Which crtc's vblank timestamp to retrieve.
541 * @max_error: Desired maximum allowable error in timestamps (nanosecs). 541 * @max_error: Desired maximum allowable error in timestamps (nanosecs).
542 * On return contains true maximum error of timestamp. 542 * On return contains true maximum error of timestamp.
543 * @vblank_time: Pointer to struct timeval which should receive the timestamp. 543 * @vblank_time: Pointer to struct timeval which should receive the timestamp.
544 * @flags: Flags to pass to driver: 544 * @flags: Flags to pass to driver:
545 * 0 = Default. 545 * 0 = Default.
546 * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler. 546 * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
547 * @refcrtc: drm_crtc* of crtc which defines scanout timing. 547 * @refcrtc: drm_crtc* of crtc which defines scanout timing.
548 * 548 *
549 * Returns negative value on error, failure or if not supported in current 549 * Returns negative value on error, failure or if not supported in current
550 * video mode: 550 * video mode:
551 * 551 *
552 * -EINVAL - Invalid crtc. 552 * -EINVAL - Invalid crtc.
553 * -EAGAIN - Temporary unavailable, e.g., called before initial modeset. 553 * -EAGAIN - Temporary unavailable, e.g., called before initial modeset.
554 * -ENOTSUPP - Function not supported in current display mode. 554 * -ENOTSUPP - Function not supported in current display mode.
555 * -EIO - Failed, e.g., due to failed scanout position query. 555 * -EIO - Failed, e.g., due to failed scanout position query.
556 * 556 *
557 * Returns or'ed positive status flags on success: 557 * Returns or'ed positive status flags on success:
558 * 558 *
559 * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping. 559 * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
560 * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval. 560 * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
561 * 561 *
562 */ 562 */
563 int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, 563 int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
564 int *max_error, 564 int *max_error,
565 struct timeval *vblank_time, 565 struct timeval *vblank_time,
566 unsigned flags, 566 unsigned flags,
567 struct drm_crtc *refcrtc) 567 struct drm_crtc *refcrtc)
568 { 568 {
569 struct timeval stime, raw_time; 569 struct timeval stime, raw_time;
570 struct drm_display_mode *mode; 570 struct drm_display_mode *mode;
571 int vbl_status, vtotal, vdisplay; 571 int vbl_status, vtotal, vdisplay;
572 int vpos, hpos, i; 572 int vpos, hpos, i;
573 s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; 573 s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
574 bool invbl; 574 bool invbl;
575 575
576 if (crtc < 0 || crtc >= dev->num_crtcs) { 576 if (crtc < 0 || crtc >= dev->num_crtcs) {
577 DRM_ERROR("Invalid crtc %d\n", crtc); 577 DRM_ERROR("Invalid crtc %d\n", crtc);
578 return -EINVAL; 578 return -EINVAL;
579 } 579 }
580 580
581 /* Scanout position query not supported? Should not happen. */ 581 /* Scanout position query not supported? Should not happen. */
582 if (!dev->driver->get_scanout_position) { 582 if (!dev->driver->get_scanout_position) {
583 DRM_ERROR("Called from driver w/o get_scanout_position()!?\n"); 583 DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
584 return -EIO; 584 return -EIO;
585 } 585 }
586 586
587 mode = &refcrtc->hwmode; 587 mode = &refcrtc->hwmode;
588 vtotal = mode->crtc_vtotal; 588 vtotal = mode->crtc_vtotal;
589 vdisplay = mode->crtc_vdisplay; 589 vdisplay = mode->crtc_vdisplay;
590 590
591 /* Durations of frames, lines, pixels in nanoseconds. */ 591 /* Durations of frames, lines, pixels in nanoseconds. */
592 framedur_ns = refcrtc->framedur_ns; 592 framedur_ns = refcrtc->framedur_ns;
593 linedur_ns = refcrtc->linedur_ns; 593 linedur_ns = refcrtc->linedur_ns;
594 pixeldur_ns = refcrtc->pixeldur_ns; 594 pixeldur_ns = refcrtc->pixeldur_ns;
595 595
596 /* If mode timing undefined, just return as no-op: 596 /* If mode timing undefined, just return as no-op:
597 * Happens during initial modesetting of a crtc. 597 * Happens during initial modesetting of a crtc.
598 */ 598 */
599 if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) { 599 if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
600 DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc); 600 DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
601 return -EAGAIN; 601 return -EAGAIN;
602 } 602 }
603 603
604 /* Get current scanout position with system timestamp. 604 /* Get current scanout position with system timestamp.
605 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times 605 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
606 * if single query takes longer than max_error nanoseconds. 606 * if single query takes longer than max_error nanoseconds.
607 * 607 *
608 * This guarantees a tight bound on maximum error if 608 * This guarantees a tight bound on maximum error if
609 * code gets preempted or delayed for some reason. 609 * code gets preempted or delayed for some reason.
610 */ 610 */
611 for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) { 611 for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
612 /* Disable preemption to make it very likely to 612 /* Disable preemption to make it very likely to
613 * succeed in the first iteration even on PREEMPT_RT kernel. 613 * succeed in the first iteration even on PREEMPT_RT kernel.
614 */ 614 */
615 preempt_disable(); 615 preempt_disable();
616 616
617 /* Get system timestamp before query. */ 617 /* Get system timestamp before query. */
618 do_gettimeofday(&stime); 618 do_gettimeofday(&stime);
619 619
620 /* Get vertical and horizontal scanout pos. vpos, hpos. */ 620 /* Get vertical and horizontal scanout pos. vpos, hpos. */
621 vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos); 621 vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
622 622
623 /* Get system timestamp after query. */ 623 /* Get system timestamp after query. */
624 do_gettimeofday(&raw_time); 624 do_gettimeofday(&raw_time);
625 625
626 preempt_enable(); 626 preempt_enable();
627 627
628 /* Return as no-op if scanout query unsupported or failed. */ 628 /* Return as no-op if scanout query unsupported or failed. */
629 if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { 629 if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
630 DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n", 630 DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
631 crtc, vbl_status); 631 crtc, vbl_status);
632 return -EIO; 632 return -EIO;
633 } 633 }
634 634
635 duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime); 635 duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
636 636
637 /* Accept result with < max_error nsecs timing uncertainty. */ 637 /* Accept result with < max_error nsecs timing uncertainty. */
638 if (duration_ns <= (s64) *max_error) 638 if (duration_ns <= (s64) *max_error)
639 break; 639 break;
640 } 640 }
641 641
642 /* Noisy system timing? */ 642 /* Noisy system timing? */
643 if (i == DRM_TIMESTAMP_MAXRETRIES) { 643 if (i == DRM_TIMESTAMP_MAXRETRIES) {
644 DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n", 644 DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
645 crtc, (int) duration_ns/1000, *max_error/1000, i); 645 crtc, (int) duration_ns/1000, *max_error/1000, i);
646 } 646 }
647 647
648 /* Return upper bound of timestamp precision error. */ 648 /* Return upper bound of timestamp precision error. */
649 *max_error = (int) duration_ns; 649 *max_error = (int) duration_ns;
650 650
651 /* Check if in vblank area: 651 /* Check if in vblank area:
652 * vpos is >=0 in video scanout area, but negative 652 * vpos is >=0 in video scanout area, but negative
653 * within vblank area, counting down the number of lines until 653 * within vblank area, counting down the number of lines until
654 * start of scanout. 654 * start of scanout.
655 */ 655 */
656 invbl = vbl_status & DRM_SCANOUTPOS_INVBL; 656 invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
657 657
658 /* Convert scanout position into elapsed time at raw_time query 658 /* Convert scanout position into elapsed time at raw_time query
659 * since start of scanout at first display scanline. delta_ns 659 * since start of scanout at first display scanline. delta_ns
660 * can be negative if start of scanout hasn't happened yet. 660 * can be negative if start of scanout hasn't happened yet.
661 */ 661 */
662 delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns; 662 delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
663 663
664 /* Is vpos outside nominal vblank area, but less than 664 /* Is vpos outside nominal vblank area, but less than
665 * 1/100 of a frame height away from start of vblank? 665 * 1/100 of a frame height away from start of vblank?
666 * If so, assume this isn't a massively delayed vblank 666 * If so, assume this isn't a massively delayed vblank
667 * interrupt, but a vblank interrupt that fired a few 667 * interrupt, but a vblank interrupt that fired a few
668 * microseconds before true start of vblank. Compensate 668 * microseconds before true start of vblank. Compensate
669 * by adding a full frame duration to the final timestamp. 669 * by adding a full frame duration to the final timestamp.
670 * Happens, e.g., on ATI R500, R600. 670 * Happens, e.g., on ATI R500, R600.
671 * 671 *
672 * We only do this if DRM_CALLED_FROM_VBLIRQ. 672 * We only do this if DRM_CALLED_FROM_VBLIRQ.
673 */ 673 */
674 if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl && 674 if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
675 ((vdisplay - vpos) < vtotal / 100)) { 675 ((vdisplay - vpos) < vtotal / 100)) {
676 delta_ns = delta_ns - framedur_ns; 676 delta_ns = delta_ns - framedur_ns;
677 677
678 /* Signal this correction as "applied". */ 678 /* Signal this correction as "applied". */
679 vbl_status |= 0x8; 679 vbl_status |= 0x8;
680 } 680 }
681 681
682 /* Subtract time delta from raw timestamp to get final 682 /* Subtract time delta from raw timestamp to get final
683 * vblank_time timestamp for end of vblank. 683 * vblank_time timestamp for end of vblank.
684 */ 684 */
685 *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns); 685 *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
686 686
687 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %d.%d -> %d.%d [e %d us, %d rep]\n", 687 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
688 crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec, 688 crtc, (int)vbl_status, hpos, vpos,
689 raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec, 689 (long)raw_time.tv_sec, (long)raw_time.tv_usec,
690 (int) duration_ns/1000, i); 690 (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
691 (int)duration_ns/1000, i);
691 692
692 vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD; 693 vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
693 if (invbl) 694 if (invbl)
694 vbl_status |= DRM_VBLANKTIME_INVBL; 695 vbl_status |= DRM_VBLANKTIME_INVBL;
695 696
696 return vbl_status; 697 return vbl_status;
697 } 698 }
698 EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos); 699 EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
699 700
700 /** 701 /**
701 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent 702 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
702 * vblank interval. 703 * vblank interval.
703 * 704 *
704 * @dev: DRM device 705 * @dev: DRM device
705 * @crtc: which crtc's vblank timestamp to retrieve 706 * @crtc: which crtc's vblank timestamp to retrieve
706 * @tvblank: Pointer to target struct timeval which should receive the timestamp 707 * @tvblank: Pointer to target struct timeval which should receive the timestamp
707 * @flags: Flags to pass to driver: 708 * @flags: Flags to pass to driver:
708 * 0 = Default. 709 * 0 = Default.
709 * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler. 710 * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
710 * 711 *
711 * Fetches the system timestamp corresponding to the time of the most recent 712 * Fetches the system timestamp corresponding to the time of the most recent
712 * vblank interval on specified crtc. May call into kms-driver to 713 * vblank interval on specified crtc. May call into kms-driver to
713 * compute the timestamp with a high-precision GPU specific method. 714 * compute the timestamp with a high-precision GPU specific method.
714 * 715 *
715 * Returns zero if timestamp originates from uncorrected do_gettimeofday() 716 * Returns zero if timestamp originates from uncorrected do_gettimeofday()
716 * call, i.e., it isn't very precisely locked to the true vblank. 717 * call, i.e., it isn't very precisely locked to the true vblank.
717 * 718 *
718 * Returns non-zero if timestamp is considered to be very precise. 719 * Returns non-zero if timestamp is considered to be very precise.
719 */ 720 */
720 u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, 721 u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
721 struct timeval *tvblank, unsigned flags) 722 struct timeval *tvblank, unsigned flags)
722 { 723 {
723 int ret = 0; 724 int ret = 0;
724 725
725 /* Define requested maximum error on timestamps (nanoseconds). */ 726 /* Define requested maximum error on timestamps (nanoseconds). */
726 int max_error = (int) drm_timestamp_precision * 1000; 727 int max_error = (int) drm_timestamp_precision * 1000;
727 728
728 /* Query driver if possible and precision timestamping enabled. */ 729 /* Query driver if possible and precision timestamping enabled. */
729 if (dev->driver->get_vblank_timestamp && (max_error > 0)) { 730 if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
730 ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error, 731 ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
731 tvblank, flags); 732 tvblank, flags);
732 if (ret > 0) 733 if (ret > 0)
733 return (u32) ret; 734 return (u32) ret;
734 } 735 }
735 736
736 /* GPU high precision timestamp query unsupported or failed. 737 /* GPU high precision timestamp query unsupported or failed.
737 * Return gettimeofday timestamp as best estimate. 738 * Return gettimeofday timestamp as best estimate.
738 */ 739 */
739 do_gettimeofday(tvblank); 740 do_gettimeofday(tvblank);
740 741
741 return 0; 742 return 0;
742 } 743 }
743 EXPORT_SYMBOL(drm_get_last_vbltimestamp); 744 EXPORT_SYMBOL(drm_get_last_vbltimestamp);
744 745
745 /** 746 /**
746 * drm_vblank_count - retrieve "cooked" vblank counter value 747 * drm_vblank_count - retrieve "cooked" vblank counter value
747 * @dev: DRM device 748 * @dev: DRM device
748 * @crtc: which counter to retrieve 749 * @crtc: which counter to retrieve
749 * 750 *
750 * Fetches the "cooked" vblank count value that represents the number of 751 * Fetches the "cooked" vblank count value that represents the number of
751 * vblank events since the system was booted, including lost events due to 752 * vblank events since the system was booted, including lost events due to
752 * modesetting activity. 753 * modesetting activity.
753 */ 754 */
754 u32 drm_vblank_count(struct drm_device *dev, int crtc) 755 u32 drm_vblank_count(struct drm_device *dev, int crtc)
755 { 756 {
756 return atomic_read(&dev->_vblank_count[crtc]); 757 return atomic_read(&dev->_vblank_count[crtc]);
757 } 758 }
758 EXPORT_SYMBOL(drm_vblank_count); 759 EXPORT_SYMBOL(drm_vblank_count);
759 760
760 /** 761 /**
761 * drm_vblank_count_and_time - retrieve "cooked" vblank counter value 762 * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
762 * and the system timestamp corresponding to that vblank counter value. 763 * and the system timestamp corresponding to that vblank counter value.
763 * 764 *
764 * @dev: DRM device 765 * @dev: DRM device
765 * @crtc: which counter to retrieve 766 * @crtc: which counter to retrieve
766 * @vblanktime: Pointer to struct timeval to receive the vblank timestamp. 767 * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
767 * 768 *
768 * Fetches the "cooked" vblank count value that represents the number of 769 * Fetches the "cooked" vblank count value that represents the number of
769 * vblank events since the system was booted, including lost events due to 770 * vblank events since the system was booted, including lost events due to
770 * modesetting activity. Returns corresponding system timestamp of the time 771 * modesetting activity. Returns corresponding system timestamp of the time
771 * of the vblank interval that corresponds to the current value vblank counter 772 * of the vblank interval that corresponds to the current value vblank counter
772 * value. 773 * value.
773 */ 774 */
774 u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, 775 u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
775 struct timeval *vblanktime) 776 struct timeval *vblanktime)
776 { 777 {
777 u32 cur_vblank; 778 u32 cur_vblank;
778 779
779 /* Read timestamp from slot of _vblank_time ringbuffer 780 /* Read timestamp from slot of _vblank_time ringbuffer
780 * that corresponds to current vblank count. Retry if 781 * that corresponds to current vblank count. Retry if
781 * count has incremented during readout. This works like 782 * count has incremented during readout. This works like
782 * a seqlock. 783 * a seqlock.
783 */ 784 */
784 do { 785 do {
785 cur_vblank = atomic_read(&dev->_vblank_count[crtc]); 786 cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
786 *vblanktime = vblanktimestamp(dev, crtc, cur_vblank); 787 *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
787 smp_rmb(); 788 smp_rmb();
788 } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc])); 789 } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
789 790
790 return cur_vblank; 791 return cur_vblank;
791 } 792 }
792 EXPORT_SYMBOL(drm_vblank_count_and_time); 793 EXPORT_SYMBOL(drm_vblank_count_and_time);
793 794
794 /** 795 /**
795 * drm_update_vblank_count - update the master vblank counter 796 * drm_update_vblank_count - update the master vblank counter
796 * @dev: DRM device 797 * @dev: DRM device
797 * @crtc: counter to update 798 * @crtc: counter to update
798 * 799 *
799 * Call back into the driver to update the appropriate vblank counter 800 * Call back into the driver to update the appropriate vblank counter
800 * (specified by @crtc). Deal with wraparound, if it occurred, and 801 * (specified by @crtc). Deal with wraparound, if it occurred, and
801 * update the last read value so we can deal with wraparound on the next 802 * update the last read value so we can deal with wraparound on the next
802 * call if necessary. 803 * call if necessary.
803 * 804 *
804 * Only necessary when going from off->on, to account for frames we 805 * Only necessary when going from off->on, to account for frames we
805 * didn't get an interrupt for. 806 * didn't get an interrupt for.
806 * 807 *
807 * Note: caller must hold dev->vbl_lock since this reads & writes 808 * Note: caller must hold dev->vbl_lock since this reads & writes
808 * device vblank fields. 809 * device vblank fields.
809 */ 810 */
810 static void drm_update_vblank_count(struct drm_device *dev, int crtc) 811 static void drm_update_vblank_count(struct drm_device *dev, int crtc)
811 { 812 {
812 u32 cur_vblank, diff, tslot, rc; 813 u32 cur_vblank, diff, tslot, rc;
813 struct timeval t_vblank; 814 struct timeval t_vblank;
814 815
815 /* 816 /*
816 * Interrupts were disabled prior to this call, so deal with counter 817 * Interrupts were disabled prior to this call, so deal with counter
817 * wrap if needed. 818 * wrap if needed.
818 * NOTE! It's possible we lost a full dev->max_vblank_count events 819 * NOTE! It's possible we lost a full dev->max_vblank_count events
819 * here if the register is small or we had vblank interrupts off for 820 * here if the register is small or we had vblank interrupts off for
820 * a long time. 821 * a long time.
821 * 822 *
822 * We repeat the hardware vblank counter & timestamp query until 823 * We repeat the hardware vblank counter & timestamp query until
823 * we get consistent results. This to prevent races between gpu 824 * we get consistent results. This to prevent races between gpu
824 * updating its hardware counter while we are retrieving the 825 * updating its hardware counter while we are retrieving the
825 * corresponding vblank timestamp. 826 * corresponding vblank timestamp.
826 */ 827 */
827 do { 828 do {
828 cur_vblank = dev->driver->get_vblank_counter(dev, crtc); 829 cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
829 rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0); 830 rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
830 } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc)); 831 } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
831 832
832 /* Deal with counter wrap */ 833 /* Deal with counter wrap */
833 diff = cur_vblank - dev->last_vblank[crtc]; 834 diff = cur_vblank - dev->last_vblank[crtc];
834 if (cur_vblank < dev->last_vblank[crtc]) { 835 if (cur_vblank < dev->last_vblank[crtc]) {
835 diff += dev->max_vblank_count; 836 diff += dev->max_vblank_count;
836 837
837 DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", 838 DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
838 crtc, dev->last_vblank[crtc], cur_vblank, diff); 839 crtc, dev->last_vblank[crtc], cur_vblank, diff);
839 } 840 }
840 841
841 DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", 842 DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
842 crtc, diff); 843 crtc, diff);
843 844
844 /* Reinitialize corresponding vblank timestamp if high-precision query 845 /* Reinitialize corresponding vblank timestamp if high-precision query
845 * available. Skip this step if the query is unsupported or failed; the 846 * available. Skip this step if the query is unsupported or failed; the
846 * timestamp will then be reinitialized at the next vblank interrupt. 847 * timestamp will then be reinitialized at the next vblank interrupt.
847 */ 848 */
848 if (rc) { 849 if (rc) {
849 tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; 850 tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
850 vblanktimestamp(dev, crtc, tslot) = t_vblank; 851 vblanktimestamp(dev, crtc, tslot) = t_vblank;
851 } 852 }
852 853
853 smp_mb__before_atomic_inc(); 854 smp_mb__before_atomic_inc();
854 atomic_add(diff, &dev->_vblank_count[crtc]); 855 atomic_add(diff, &dev->_vblank_count[crtc]);
855 smp_mb__after_atomic_inc(); 856 smp_mb__after_atomic_inc();
856 } 857 }
857 858
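The counter handling above leans on unsigned 32-bit arithmetic: a plain subtraction is exact when no wrap occurred, and one addition of the counter's modulus repairs a single wrap. A minimal standalone sketch of the idiom (vblank_diff is a hypothetical helper, not part of the driver):

	#include <stdint.h>

	/* Wrap-safe diff for a hardware counter that rolls over at max_count.
	 * Assumes at most one wrap between samples, as the comment above warns.
	 */
	static uint32_t vblank_diff(uint32_t cur, uint32_t last, uint32_t max_count)
	{
		uint32_t diff = cur - last;	/* exact when no wrap occurred */

		if (cur < last)			/* counter rolled over */
			diff += max_count;
		return diff;
	}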
858 /** 859 /**
859 * drm_vblank_get - get a reference count on vblank events 860 * drm_vblank_get - get a reference count on vblank events
860 * @dev: DRM device 861 * @dev: DRM device
861 * @crtc: which CRTC to own 862 * @crtc: which CRTC to own
862 * 863 *
863 * Acquire a reference count on vblank events to avoid having them disabled 864 * Acquire a reference count on vblank events to avoid having them disabled
864 * while in use. 865 * while in use.
865 * 866 *
866 * RETURNS 867 * RETURNS
867 * Zero on success, nonzero on failure. 868 * Zero on success, nonzero on failure.
868 */ 869 */
869 int drm_vblank_get(struct drm_device *dev, int crtc) 870 int drm_vblank_get(struct drm_device *dev, int crtc)
870 { 871 {
871 unsigned long irqflags, irqflags2; 872 unsigned long irqflags, irqflags2;
872 int ret = 0; 873 int ret = 0;
873 874
874 spin_lock_irqsave(&dev->vbl_lock, irqflags); 875 spin_lock_irqsave(&dev->vbl_lock, irqflags);
875 /* Going from 0->1 means we have to enable interrupts again */ 876 /* Going from 0->1 means we have to enable interrupts again */
876 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { 877 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
877 /* Disable preemption while holding vblank_time_lock. Do 878 /* Disable preemption while holding vblank_time_lock. Do
878 * it explicitly to guard against PREEMPT_RT kernel. 879 * it explicitly to guard against PREEMPT_RT kernel.
879 */ 880 */
880 preempt_disable(); 881 preempt_disable();
881 spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); 882 spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
882 if (!dev->vblank_enabled[crtc]) { 883 if (!dev->vblank_enabled[crtc]) {
883 /* Enable vblank irqs under vblank_time_lock protection. 884 /* Enable vblank irqs under vblank_time_lock protection.
884 * All vblank count & timestamp updates are held off 885 * All vblank count & timestamp updates are held off
885 * until we are done reinitializing master counter and 886 * until we are done reinitializing master counter and
886 * timestamps. Filter code in drm_handle_vblank() will 887 * timestamps. Filter code in drm_handle_vblank() will
887 * prevent double-accounting of same vblank interval. 888 * prevent double-accounting of same vblank interval.
888 */ 889 */
889 ret = dev->driver->enable_vblank(dev, crtc); 890 ret = dev->driver->enable_vblank(dev, crtc);
890 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", 891 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
891 crtc, ret); 892 crtc, ret);
892 if (ret) 893 if (ret)
893 atomic_dec(&dev->vblank_refcount[crtc]); 894 atomic_dec(&dev->vblank_refcount[crtc]);
894 else { 895 else {
895 dev->vblank_enabled[crtc] = 1; 896 dev->vblank_enabled[crtc] = 1;
896 drm_update_vblank_count(dev, crtc); 897 drm_update_vblank_count(dev, crtc);
897 } 898 }
898 } 899 }
899 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); 900 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
900 preempt_enable(); 901 preempt_enable();
901 } else { 902 } else {
902 if (!dev->vblank_enabled[crtc]) { 903 if (!dev->vblank_enabled[crtc]) {
903 atomic_dec(&dev->vblank_refcount[crtc]); 904 atomic_dec(&dev->vblank_refcount[crtc]);
904 ret = -EINVAL; 905 ret = -EINVAL;
905 } 906 }
906 } 907 }
907 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 908 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
908 909
909 return ret; 910 return ret;
910 } 911 }
911 EXPORT_SYMBOL(drm_vblank_get); 912 EXPORT_SYMBOL(drm_vblank_get);
912 913
913 /** 914 /**
914 * drm_vblank_put - give up ownership of vblank events 915 * drm_vblank_put - give up ownership of vblank events
915 * @dev: DRM device 916 * @dev: DRM device
916 * @crtc: which counter to give up 917 * @crtc: which counter to give up
917 * 918 *
918 * Release ownership of a given vblank counter, turning off interrupts 919 * Release ownership of a given vblank counter, turning off interrupts
919 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. 920 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
920 */ 921 */
921 void drm_vblank_put(struct drm_device *dev, int crtc) 922 void drm_vblank_put(struct drm_device *dev, int crtc)
922 { 923 {
923 BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0); 924 BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
924 925
925 /* Last user schedules interrupt disable */ 926 /* Last user schedules interrupt disable */
926 if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) && 927 if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
927 (drm_vblank_offdelay > 0)) 928 (drm_vblank_offdelay > 0))
928 mod_timer(&dev->vblank_disable_timer, 929 mod_timer(&dev->vblank_disable_timer,
929 jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000)); 930 jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
930 } 931 }
931 EXPORT_SYMBOL(drm_vblank_put); 932 EXPORT_SYMBOL(drm_vblank_put);
932 933
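Callers bracket any code that relies on a live counter with this get/put pair. A hedged sketch of the pattern (sample_vblank is a hypothetical name; only the drm_vblank_* calls are real):

	/* Keep vblank interrupts enabled while sampling the cooked counter. */
	int sample_vblank(struct drm_device *dev, int crtc, u32 *count)
	{
		int ret = drm_vblank_get(dev, crtc);

		if (ret)
			return ret;	/* driver could not enable the interrupt */
		*count = drm_vblank_count(dev, crtc);
		drm_vblank_put(dev, crtc);	/* schedules the delayed disable */
		return 0;
	}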
933 void drm_vblank_off(struct drm_device *dev, int crtc) 934 void drm_vblank_off(struct drm_device *dev, int crtc)
934 { 935 {
935 unsigned long irqflags; 936 unsigned long irqflags;
936 937
937 spin_lock_irqsave(&dev->vbl_lock, irqflags); 938 spin_lock_irqsave(&dev->vbl_lock, irqflags);
938 vblank_disable_and_save(dev, crtc); 939 vblank_disable_and_save(dev, crtc);
939 DRM_WAKEUP(&dev->vbl_queue[crtc]); 940 DRM_WAKEUP(&dev->vbl_queue[crtc]);
940 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 941 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
941 } 942 }
942 EXPORT_SYMBOL(drm_vblank_off); 943 EXPORT_SYMBOL(drm_vblank_off);
943 944
944 /** 945 /**
945 * drm_vblank_pre_modeset - account for vblanks across mode sets 946 * drm_vblank_pre_modeset - account for vblanks across mode sets
946 * @dev: DRM device 947 * @dev: DRM device
947 * @crtc: CRTC in question 948 * @crtc: CRTC in question
948 * @post: post or pre mode set? 949 * @post: post or pre mode set?
949 * 950 *
950 * Account for vblank events across mode setting events, which will likely 951 * Account for vblank events across mode setting events, which will likely
951 * reset the hardware frame counter. 952 * reset the hardware frame counter.
952 */ 953 */
953 void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) 954 void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
954 { 955 {
955 /* vblank is not initialized (IRQ not installed?) */ 956 /* vblank is not initialized (IRQ not installed?) */
956 if (!dev->num_crtcs) 957 if (!dev->num_crtcs)
957 return; 958 return;
958 /* 959 /*
959 * To avoid all the problems that might happen if interrupts 960 * To avoid all the problems that might happen if interrupts
960 * were enabled/disabled around or between these calls, we just 961 * were enabled/disabled around or between these calls, we just
961 * have the kernel take a reference on the CRTC (just once though 962 * have the kernel take a reference on the CRTC (just once though
962 * to avoid corrupting the count if multiple, mismatched calls occur), 963 * to avoid corrupting the count if multiple, mismatched calls occur),
963 * so that interrupts remain enabled in the interim. 964 * so that interrupts remain enabled in the interim.
964 */ 965 */
965 if (!dev->vblank_inmodeset[crtc]) { 966 if (!dev->vblank_inmodeset[crtc]) {
966 dev->vblank_inmodeset[crtc] = 0x1; 967 dev->vblank_inmodeset[crtc] = 0x1;
967 if (drm_vblank_get(dev, crtc) == 0) 968 if (drm_vblank_get(dev, crtc) == 0)
968 dev->vblank_inmodeset[crtc] |= 0x2; 969 dev->vblank_inmodeset[crtc] |= 0x2;
969 } 970 }
970 } 971 }
971 EXPORT_SYMBOL(drm_vblank_pre_modeset); 972 EXPORT_SYMBOL(drm_vblank_pre_modeset);
972 973
973 void drm_vblank_post_modeset(struct drm_device *dev, int crtc) 974 void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
974 { 975 {
975 unsigned long irqflags; 976 unsigned long irqflags;
976 977
977 if (dev->vblank_inmodeset[crtc]) { 978 if (dev->vblank_inmodeset[crtc]) {
978 spin_lock_irqsave(&dev->vbl_lock, irqflags); 979 spin_lock_irqsave(&dev->vbl_lock, irqflags);
979 dev->vblank_disable_allowed = 1; 980 dev->vblank_disable_allowed = 1;
980 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 981 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
981 982
982 if (dev->vblank_inmodeset[crtc] & 0x2) 983 if (dev->vblank_inmodeset[crtc] & 0x2)
983 drm_vblank_put(dev, crtc); 984 drm_vblank_put(dev, crtc);
984 985
985 dev->vblank_inmodeset[crtc] = 0; 986 dev->vblank_inmodeset[crtc] = 0;
986 } 987 }
987 } 988 }
988 EXPORT_SYMBOL(drm_vblank_post_modeset); 989 EXPORT_SYMBOL(drm_vblank_post_modeset);
989 990
990 /** 991 /**
991 * drm_modeset_ctl - handle vblank event counter changes across mode switch 992 * drm_modeset_ctl - handle vblank event counter changes across mode switch
992 * @DRM_IOCTL_ARGS: standard ioctl arguments 993 * @DRM_IOCTL_ARGS: standard ioctl arguments
993 * 994 *
994 * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET 995 * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
995 * ioctls around modesetting so that any lost vblank events are accounted for. 996 * ioctls around modesetting so that any lost vblank events are accounted for.
996 * 997 *
997 * Generally the counter will reset across mode sets. If interrupts are 998 * Generally the counter will reset across mode sets. If interrupts are
998 * enabled around this call, we don't have to do anything since the counter 999 * enabled around this call, we don't have to do anything since the counter
999 * will have already been incremented. 1000 * will have already been incremented.
1000 */ 1001 */
1001 int drm_modeset_ctl(struct drm_device *dev, void *data, 1002 int drm_modeset_ctl(struct drm_device *dev, void *data,
1002 struct drm_file *file_priv) 1003 struct drm_file *file_priv)
1003 { 1004 {
1004 struct drm_modeset_ctl *modeset = data; 1005 struct drm_modeset_ctl *modeset = data;
1005 int ret = 0; 1006 int ret = 0;
1006 unsigned int crtc; 1007 unsigned int crtc;
1007 1008
1008 /* If drm_vblank_init() hasn't been called yet, just no-op */ 1009 /* If drm_vblank_init() hasn't been called yet, just no-op */
1009 if (!dev->num_crtcs) 1010 if (!dev->num_crtcs)
1010 goto out; 1011 goto out;
1011 1012
1012 crtc = modeset->crtc; 1013 crtc = modeset->crtc;
1013 if (crtc >= dev->num_crtcs) { 1014 if (crtc >= dev->num_crtcs) {
1014 ret = -EINVAL; 1015 ret = -EINVAL;
1015 goto out; 1016 goto out;
1016 } 1017 }
1017 1018
1018 switch (modeset->cmd) { 1019 switch (modeset->cmd) {
1019 case _DRM_PRE_MODESET: 1020 case _DRM_PRE_MODESET:
1020 drm_vblank_pre_modeset(dev, crtc); 1021 drm_vblank_pre_modeset(dev, crtc);
1021 break; 1022 break;
1022 case _DRM_POST_MODESET: 1023 case _DRM_POST_MODESET:
1023 drm_vblank_post_modeset(dev, crtc); 1024 drm_vblank_post_modeset(dev, crtc);
1024 break; 1025 break;
1025 default: 1026 default:
1026 ret = -EINVAL; 1027 ret = -EINVAL;
1027 break; 1028 break;
1028 } 1029 }
1029 1030
1030 out: 1031 out:
1031 return ret; 1032 return ret;
1032 } 1033 }
1033 1034
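Seen from userspace, the pairing the kernel-doc above asks for looks roughly like this; a sketch assuming libdrm's drmIoctl() wrapper and the DRM_IOCTL_MODESET_CTL request (modeset_with_accounting is a hypothetical name, error handling elided):

	#include <xf86drm.h>
	#include <drm.h>

	/* Bracket a mode set so vblanks lost to the counter reset are accounted. */
	static void modeset_with_accounting(int fd, unsigned int crtc)
	{
		struct drm_modeset_ctl ctl = { .crtc = crtc, .cmd = _DRM_PRE_MODESET };

		drmIoctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
		/* ... perform the actual mode set here ... */
		ctl.cmd = _DRM_POST_MODESET;
		drmIoctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
	}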
1034 static int drm_queue_vblank_event(struct drm_device *dev, int pipe, 1035 static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
1035 union drm_wait_vblank *vblwait, 1036 union drm_wait_vblank *vblwait,
1036 struct drm_file *file_priv) 1037 struct drm_file *file_priv)
1037 { 1038 {
1038 struct drm_pending_vblank_event *e; 1039 struct drm_pending_vblank_event *e;
1039 struct timeval now; 1040 struct timeval now;
1040 unsigned long flags; 1041 unsigned long flags;
1041 unsigned int seq; 1042 unsigned int seq;
1042 int ret; 1043 int ret;
1043 1044
1044 e = kzalloc(sizeof *e, GFP_KERNEL); 1045 e = kzalloc(sizeof *e, GFP_KERNEL);
1045 if (e == NULL) { 1046 if (e == NULL) {
1046 ret = -ENOMEM; 1047 ret = -ENOMEM;
1047 goto err_put; 1048 goto err_put;
1048 } 1049 }
1049 1050
1050 e->pipe = pipe; 1051 e->pipe = pipe;
1051 e->base.pid = current->pid; 1052 e->base.pid = current->pid;
1052 e->event.base.type = DRM_EVENT_VBLANK; 1053 e->event.base.type = DRM_EVENT_VBLANK;
1053 e->event.base.length = sizeof e->event; 1054 e->event.base.length = sizeof e->event;
1054 e->event.user_data = vblwait->request.signal; 1055 e->event.user_data = vblwait->request.signal;
1055 e->base.event = &e->event.base; 1056 e->base.event = &e->event.base;
1056 e->base.file_priv = file_priv; 1057 e->base.file_priv = file_priv;
1057 e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; 1058 e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
1058 1059
1059 spin_lock_irqsave(&dev->event_lock, flags); 1060 spin_lock_irqsave(&dev->event_lock, flags);
1060 1061
1061 if (file_priv->event_space < sizeof e->event) { 1062 if (file_priv->event_space < sizeof e->event) {
1062 ret = -EBUSY; 1063 ret = -EBUSY;
1063 goto err_unlock; 1064 goto err_unlock;
1064 } 1065 }
1065 1066
1066 file_priv->event_space -= sizeof e->event; 1067 file_priv->event_space -= sizeof e->event;
1067 seq = drm_vblank_count_and_time(dev, pipe, &now); 1068 seq = drm_vblank_count_and_time(dev, pipe, &now);
1068 1069
1069 if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) && 1070 if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
1070 (seq - vblwait->request.sequence) <= (1 << 23)) { 1071 (seq - vblwait->request.sequence) <= (1 << 23)) {
1071 vblwait->request.sequence = seq + 1; 1072 vblwait->request.sequence = seq + 1;
1072 vblwait->reply.sequence = vblwait->request.sequence; 1073 vblwait->reply.sequence = vblwait->request.sequence;
1073 } 1074 }
1074 1075
1075 DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n", 1076 DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
1076 vblwait->request.sequence, seq, pipe); 1077 vblwait->request.sequence, seq, pipe);
1077 1078
1078 trace_drm_vblank_event_queued(current->pid, pipe, 1079 trace_drm_vblank_event_queued(current->pid, pipe,
1079 vblwait->request.sequence); 1080 vblwait->request.sequence);
1080 1081
1081 e->event.sequence = vblwait->request.sequence; 1082 e->event.sequence = vblwait->request.sequence;
1082 if ((seq - vblwait->request.sequence) <= (1 << 23)) { 1083 if ((seq - vblwait->request.sequence) <= (1 << 23)) {
1083 e->event.sequence = seq; 1084 e->event.sequence = seq;
1084 e->event.tv_sec = now.tv_sec; 1085 e->event.tv_sec = now.tv_sec;
1085 e->event.tv_usec = now.tv_usec; 1086 e->event.tv_usec = now.tv_usec;
1086 drm_vblank_put(dev, pipe); 1087 drm_vblank_put(dev, pipe);
1087 list_add_tail(&e->base.link, &e->base.file_priv->event_list); 1088 list_add_tail(&e->base.link, &e->base.file_priv->event_list);
1088 wake_up_interruptible(&e->base.file_priv->event_wait); 1089 wake_up_interruptible(&e->base.file_priv->event_wait);
1089 vblwait->reply.sequence = seq; 1090 vblwait->reply.sequence = seq;
1090 trace_drm_vblank_event_delivered(current->pid, pipe, 1091 trace_drm_vblank_event_delivered(current->pid, pipe,
1091 vblwait->request.sequence); 1092 vblwait->request.sequence);
1092 } else { 1093 } else {
1093 list_add_tail(&e->base.link, &dev->vblank_event_list); 1094 list_add_tail(&e->base.link, &dev->vblank_event_list);
1094 vblwait->reply.sequence = vblwait->request.sequence; 1095 vblwait->reply.sequence = vblwait->request.sequence;
1095 } 1096 }
1096 1097
1097 spin_unlock_irqrestore(&dev->event_lock, flags); 1098 spin_unlock_irqrestore(&dev->event_lock, flags);
1098 1099
1099 return 0; 1100 return 0;
1100 1101
1101 err_unlock: 1102 err_unlock:
1102 spin_unlock_irqrestore(&dev->event_lock, flags); 1103 spin_unlock_irqrestore(&dev->event_lock, flags);
1103 kfree(e); 1104 kfree(e);
1104 err_put: 1105 err_put:
1105 drm_vblank_put(dev, pipe); 1106 drm_vblank_put(dev, pipe);
1106 return ret; 1107 return ret;
1107 } 1108 }
1108 1109
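The (seq - sequence) <= (1 << 23) comparisons above are a wrap-aware "has this vblank already passed?" test: unsigned subtraction maps sequences in the past to small values and sequences in the future to large ones, with 2^23 as the cutoff. In isolation (vblank_passed is a hypothetical name):

	/* True if 'requested' is at or before 'seq', modulo counter wrap. */
	static bool vblank_passed(u32 seq, u32 requested)
	{
		return (seq - requested) <= (1 << 23);
	}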
1109 /** 1110 /**
1110 * Wait for VBLANK. 1111 * Wait for VBLANK.
1111 * 1112 *
1112 * \param inode device inode. 1113 * \param inode device inode.
1113 * \param file_priv DRM file private. 1114 * \param file_priv DRM file private.
1114 * \param cmd command. 1115 * \param cmd command.
1115 * \param data user argument, pointing to a drm_wait_vblank structure. 1116 * \param data user argument, pointing to a drm_wait_vblank structure.
1116 * \return zero on success or a negative number on failure. 1117 * \return zero on success or a negative number on failure.
1117 * 1118 *
1118 * This function enables the vblank interrupt on the pipe requested, then 1119 * This function enables the vblank interrupt on the pipe requested, then
1119 * sleeps waiting for the requested sequence number to occur, and drops 1120 * sleeps waiting for the requested sequence number to occur, and drops
1120 * the vblank interrupt refcount afterwards. (vblank irq disable follows that 1121 * the vblank interrupt refcount afterwards. (vblank irq disable follows that
1121 * after a timeout with no further vblank waits scheduled). 1122 * after a timeout with no further vblank waits scheduled).
1122 */ 1123 */
1123 int drm_wait_vblank(struct drm_device *dev, void *data, 1124 int drm_wait_vblank(struct drm_device *dev, void *data,
1124 struct drm_file *file_priv) 1125 struct drm_file *file_priv)
1125 { 1126 {
1126 union drm_wait_vblank *vblwait = data; 1127 union drm_wait_vblank *vblwait = data;
1127 int ret = 0; 1128 int ret = 0;
1128 unsigned int flags, seq, crtc, high_crtc; 1129 unsigned int flags, seq, crtc, high_crtc;
1129 1130
1130 if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled)) 1131 if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
1131 return -EINVAL; 1132 return -EINVAL;
1132 1133
1133 if (vblwait->request.type & _DRM_VBLANK_SIGNAL) 1134 if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
1134 return -EINVAL; 1135 return -EINVAL;
1135 1136
1136 if (vblwait->request.type & 1137 if (vblwait->request.type &
1137 ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK | 1138 ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
1138 _DRM_VBLANK_HIGH_CRTC_MASK)) { 1139 _DRM_VBLANK_HIGH_CRTC_MASK)) {
1139 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", 1140 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
1140 vblwait->request.type, 1141 vblwait->request.type,
1141 (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK | 1142 (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
1142 _DRM_VBLANK_HIGH_CRTC_MASK)); 1143 _DRM_VBLANK_HIGH_CRTC_MASK));
1143 return -EINVAL; 1144 return -EINVAL;
1144 } 1145 }
1145 1146
1146 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; 1147 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
1147 high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK); 1148 high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
1148 if (high_crtc) 1149 if (high_crtc)
1149 crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT; 1150 crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
1150 else 1151 else
1151 crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; 1152 crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
1152 if (crtc >= dev->num_crtcs) 1153 if (crtc >= dev->num_crtcs)
1153 return -EINVAL; 1154 return -EINVAL;
1154 1155
1155 ret = drm_vblank_get(dev, crtc); 1156 ret = drm_vblank_get(dev, crtc);
1156 if (ret) { 1157 if (ret) {
1157 DRM_DEBUG("failed to acquire vblank counter, %d\n", ret); 1158 DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
1158 return ret; 1159 return ret;
1159 } 1160 }
1160 seq = drm_vblank_count(dev, crtc); 1161 seq = drm_vblank_count(dev, crtc);
1161 1162
1162 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { 1163 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
1163 case _DRM_VBLANK_RELATIVE: 1164 case _DRM_VBLANK_RELATIVE:
1164 vblwait->request.sequence += seq; 1165 vblwait->request.sequence += seq;
1165 vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; 1166 vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
1166 case _DRM_VBLANK_ABSOLUTE: 1167 case _DRM_VBLANK_ABSOLUTE:
1167 break; 1168 break;
1168 default: 1169 default:
1169 ret = -EINVAL; 1170 ret = -EINVAL;
1170 goto done; 1171 goto done;
1171 } 1172 }
1172 1173
1173 if (flags & _DRM_VBLANK_EVENT) 1174 if (flags & _DRM_VBLANK_EVENT)
1174 return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); 1175 return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
1175 1176
1176 if ((flags & _DRM_VBLANK_NEXTONMISS) && 1177 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
1177 (seq - vblwait->request.sequence) <= (1<<23)) { 1178 (seq - vblwait->request.sequence) <= (1<<23)) {
1178 vblwait->request.sequence = seq + 1; 1179 vblwait->request.sequence = seq + 1;
1179 } 1180 }
1180 1181
1181 DRM_DEBUG("waiting on vblank count %d, crtc %d\n", 1182 DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
1182 vblwait->request.sequence, crtc); 1183 vblwait->request.sequence, crtc);
1183 dev->last_vblank_wait[crtc] = vblwait->request.sequence; 1184 dev->last_vblank_wait[crtc] = vblwait->request.sequence;
1184 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, 1185 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
1185 (((drm_vblank_count(dev, crtc) - 1186 (((drm_vblank_count(dev, crtc) -
1186 vblwait->request.sequence) <= (1 << 23)) || 1187 vblwait->request.sequence) <= (1 << 23)) ||
1187 !dev->irq_enabled)); 1188 !dev->irq_enabled));
1188 1189
1189 if (ret != -EINTR) { 1190 if (ret != -EINTR) {
1190 struct timeval now; 1191 struct timeval now;
1191 1192
1192 vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now); 1193 vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
1193 vblwait->reply.tval_sec = now.tv_sec; 1194 vblwait->reply.tval_sec = now.tv_sec;
1194 vblwait->reply.tval_usec = now.tv_usec; 1195 vblwait->reply.tval_usec = now.tv_usec;
1195 1196
1196 DRM_DEBUG("returning %d to client\n", 1197 DRM_DEBUG("returning %d to client\n",
1197 vblwait->reply.sequence); 1198 vblwait->reply.sequence);
1198 } else { 1199 } else {
1199 DRM_DEBUG("vblank wait interrupted by signal\n"); 1200 DRM_DEBUG("vblank wait interrupted by signal\n");
1200 } 1201 }
1201 1202
1202 done: 1203 done:
1203 drm_vblank_put(dev, crtc); 1204 drm_vblank_put(dev, crtc);
1204 return ret; 1205 return ret;
1205 } 1206 }
1206 1207
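This ioctl is what libdrm's drmWaitVBlank() issues; a hedged userspace sketch that blocks for one more vblank on the primary CRTC (wait_one_vblank is a hypothetical name):

	#include <xf86drm.h>

	static int wait_one_vblank(int fd)
	{
		drmVBlank vbl = {
			.request = {
				.type = DRM_VBLANK_RELATIVE,
				.sequence = 1,	/* one vblank from now */
			},
		};

		/* On return, vbl.reply carries the sequence and timestamp. */
		return drmWaitVBlank(fd, &vbl);
	}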
1207 void drm_handle_vblank_events(struct drm_device *dev, int crtc) 1208 void drm_handle_vblank_events(struct drm_device *dev, int crtc)
1208 { 1209 {
1209 struct drm_pending_vblank_event *e, *t; 1210 struct drm_pending_vblank_event *e, *t;
1210 struct timeval now; 1211 struct timeval now;
1211 unsigned long flags; 1212 unsigned long flags;
1212 unsigned int seq; 1213 unsigned int seq;
1213 1214
1214 seq = drm_vblank_count_and_time(dev, crtc, &now); 1215 seq = drm_vblank_count_and_time(dev, crtc, &now);
1215 1216
1216 spin_lock_irqsave(&dev->event_lock, flags); 1217 spin_lock_irqsave(&dev->event_lock, flags);
1217 1218
1218 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { 1219 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
1219 if (e->pipe != crtc) 1220 if (e->pipe != crtc)
1220 continue; 1221 continue;
1221 if ((seq - e->event.sequence) > (1<<23)) 1222 if ((seq - e->event.sequence) > (1<<23))
1222 continue; 1223 continue;
1223 1224
1224 DRM_DEBUG("vblank event on %d, current %d\n", 1225 DRM_DEBUG("vblank event on %d, current %d\n",
1225 e->event.sequence, seq); 1226 e->event.sequence, seq);
1226 1227
1227 e->event.sequence = seq; 1228 e->event.sequence = seq;
1228 e->event.tv_sec = now.tv_sec; 1229 e->event.tv_sec = now.tv_sec;
1229 e->event.tv_usec = now.tv_usec; 1230 e->event.tv_usec = now.tv_usec;
1230 drm_vblank_put(dev, e->pipe); 1231 drm_vblank_put(dev, e->pipe);
1231 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 1232 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
1232 wake_up_interruptible(&e->base.file_priv->event_wait); 1233 wake_up_interruptible(&e->base.file_priv->event_wait);
1233 trace_drm_vblank_event_delivered(e->base.pid, e->pipe, 1234 trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
1234 e->event.sequence); 1235 e->event.sequence);
1235 } 1236 }
1236 1237
1237 spin_unlock_irqrestore(&dev->event_lock, flags); 1238 spin_unlock_irqrestore(&dev->event_lock, flags);
1238 1239
1239 trace_drm_vblank_event(crtc, seq); 1240 trace_drm_vblank_event(crtc, seq);
1240 } 1241 }
1241 1242
1242 /** 1243 /**
1243 * drm_handle_vblank - handle a vblank event 1244 * drm_handle_vblank - handle a vblank event
1244 * @dev: DRM device 1245 * @dev: DRM device
1245 * @crtc: where this event occurred 1246 * @crtc: where this event occurred
1246 * 1247 *
1247 * Drivers should call this routine in their vblank interrupt handlers to 1248 * Drivers should call this routine in their vblank interrupt handlers to
1248 * update the vblank counter and send any signals that may be pending. 1249 * update the vblank counter and send any signals that may be pending.
1249 */ 1250 */
1250 bool drm_handle_vblank(struct drm_device *dev, int crtc) 1251 bool drm_handle_vblank(struct drm_device *dev, int crtc)
1251 { 1252 {
1252 u32 vblcount; 1253 u32 vblcount;
1253 s64 diff_ns; 1254 s64 diff_ns;
1254 struct timeval tvblank; 1255 struct timeval tvblank;
1255 unsigned long irqflags; 1256 unsigned long irqflags;
1256 1257
1257 if (!dev->num_crtcs) 1258 if (!dev->num_crtcs)
1258 return false; 1259 return false;
1259 1260
1260 /* Need timestamp lock to prevent concurrent execution with 1261 /* Need timestamp lock to prevent concurrent execution with
1261 * vblank enable/disable, as this would cause inconsistent 1262 * vblank enable/disable, as this would cause inconsistent
1262 * or corrupted timestamps and vblank counts. 1263 * or corrupted timestamps and vblank counts.
1263 */ 1264 */
1264 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 1265 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
1265 1266
1266 /* Vblank irq handling disabled. Nothing to do. */ 1267 /* Vblank irq handling disabled. Nothing to do. */
1267 if (!dev->vblank_enabled[crtc]) { 1268 if (!dev->vblank_enabled[crtc]) {
1268 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 1269 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
1269 return false; 1270 return false;
1270 } 1271 }
1271 1272
1272 /* Fetch corresponding timestamp for this vblank interval from 1273 /* Fetch corresponding timestamp for this vblank interval from
1273 * driver and store it in proper slot of timestamp ringbuffer. 1274 * driver and store it in proper slot of timestamp ringbuffer.
1274 */ 1275 */
1275 1276
1276 /* Get current timestamp and count. */ 1277 /* Get current timestamp and count. */
1277 vblcount = atomic_read(&dev->_vblank_count[crtc]); 1278 vblcount = atomic_read(&dev->_vblank_count[crtc]);
1278 drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ); 1279 drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
1279 1280
1280 /* Compute time difference to timestamp of last vblank */ 1281 /* Compute time difference to timestamp of last vblank */
1281 diff_ns = timeval_to_ns(&tvblank) - 1282 diff_ns = timeval_to_ns(&tvblank) -
1282 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); 1283 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
1283 1284
1284 /* Update vblank timestamp and count if at least 1285 /* Update vblank timestamp and count if at least
1285 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds 1286 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
1286 * difference between last stored timestamp and current 1287 * difference between last stored timestamp and current
1287 * timestamp. A smaller difference means basically 1288 * timestamp. A smaller difference means basically
1288 * identical timestamps. Happens if this vblank has 1289 * identical timestamps. Happens if this vblank has
1289 * already been processed and this is a redundant call, 1290 * already been processed and this is a redundant call,
1290 * e.g., due to spurious vblank interrupts. We need to 1291 * e.g., due to spurious vblank interrupts. We need to
1291 * ignore those for accounting. 1292 * ignore those for accounting.
1292 */ 1293 */
1293 if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { 1294 if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
1294 /* Store new timestamp in ringbuffer. */ 1295 /* Store new timestamp in ringbuffer. */
1295 vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; 1296 vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
1296 1297
1297 /* Increment cooked vblank count. This also atomically commits 1298 /* Increment cooked vblank count. This also atomically commits
1298 * the timestamp computed above. 1299 * the timestamp computed above.
1299 */ 1300 */
1300 smp_mb__before_atomic_inc(); 1301 smp_mb__before_atomic_inc();
1301 atomic_inc(&dev->_vblank_count[crtc]); 1302 atomic_inc(&dev->_vblank_count[crtc]);
1302 smp_mb__after_atomic_inc(); 1303 smp_mb__after_atomic_inc();
1303 } else { 1304 } else {
1304 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", 1305 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
1305 crtc, (int) diff_ns); 1306 crtc, (int) diff_ns);
1306 } 1307 }
1307 1308
1308 DRM_WAKEUP(&dev->vbl_queue[crtc]); 1309 DRM_WAKEUP(&dev->vbl_queue[crtc]);
1309 drm_handle_vblank_events(dev, crtc); 1310 drm_handle_vblank_events(dev, crtc);
1310 1311
1311 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 1312 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
1312 return true; 1313 return true;
1313 } 1314 }
1314 EXPORT_SYMBOL(drm_handle_vblank); 1315 EXPORT_SYMBOL(drm_handle_vblank);
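Drivers call drm_handle_vblank() once per vblank from their interrupt handler. A minimal sketch of such a call site (the status register read and the PIPE_*_VBLANK bits are hypothetical placeholders):

	static irqreturn_t example_irq_handler(int irq, void *arg)
	{
		struct drm_device *dev = arg;
		u32 status = read_and_clear_irq_status(dev);	/* hypothetical */

		if (status & PIPE_A_VBLANK)	/* hypothetical status bit */
			drm_handle_vblank(dev, 0);
		if (status & PIPE_B_VBLANK)
			drm_handle_vblank(dev, 1);
		return IRQ_HANDLED;
	}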
1315 1316
drivers/gpu/drm/i915/intel_bios.c
1 /* 1 /*
2 * Copyright © 2006 Intel Corporation 2 * Copyright © 2006 Intel Corporation
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation 6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the 8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions: 9 * Software is furnished to do so, subject to the following conditions:
10 * 10 *
11 * The above copyright notice and this permission notice (including the next 11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the 12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software. 13 * Software.
14 * 14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE. 21 * SOFTWARE.
22 * 22 *
23 * Authors: 23 * Authors:
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 * 25 *
26 */ 26 */
27 #include <drm/drm_dp_helper.h> 27 #include <drm/drm_dp_helper.h>
28 #include "drmP.h" 28 #include "drmP.h"
29 #include "drm.h" 29 #include "drm.h"
30 #include "i915_drm.h" 30 #include "i915_drm.h"
31 #include "i915_drv.h" 31 #include "i915_drv.h"
32 #include "intel_bios.h" 32 #include "intel_bios.h"
33 33
34 #define SLAVE_ADDR1 0x70 34 #define SLAVE_ADDR1 0x70
35 #define SLAVE_ADDR2 0x72 35 #define SLAVE_ADDR2 0x72
36 36
37 static int panel_type; 37 static int panel_type;
38 38
39 static void * 39 static void *
40 find_section(struct bdb_header *bdb, int section_id) 40 find_section(struct bdb_header *bdb, int section_id)
41 { 41 {
42 u8 *base = (u8 *)bdb; 42 u8 *base = (u8 *)bdb;
43 int index = 0; 43 int index = 0;
44 u16 total, current_size; 44 u16 total, current_size;
45 u8 current_id; 45 u8 current_id;
46 46
47 /* skip to first section */ 47 /* skip to first section */
48 index += bdb->header_size; 48 index += bdb->header_size;
49 total = bdb->bdb_size; 49 total = bdb->bdb_size;
50 50
51 /* walk the sections looking for section_id */ 51 /* walk the sections looking for section_id */
52 while (index < total) { 52 while (index < total) {
53 current_id = *(base + index); 53 current_id = *(base + index);
54 index++; 54 index++;
55 current_size = *((u16 *)(base + index)); 55 current_size = *((u16 *)(base + index));
56 index += 2; 56 index += 2;
57 if (current_id == section_id) 57 if (current_id == section_id)
58 return base + index; 58 return base + index;
59 index += current_size; 59 index += current_size;
60 } 60 }
61 61
62 return NULL; 62 return NULL;
63 } 63 }
64 64
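find_section() assumes each BDB block is framed as a one-byte id followed by a little-endian u16 payload size, with blocks packed back to back after bdb->header_size bytes. Schematically (illustrative only, not a struct from the header):

	/*
	 *  +----+------------+----------------------+----+------------+---
	 *  | id | size (u16) | payload (size bytes) | id | size (u16) | ...
	 *  +----+------------+----------------------+----+------------+---
	 */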
65 static u16 65 static u16
66 get_blocksize(void *p) 66 get_blocksize(void *p)
67 { 67 {
68 u16 *block_ptr, block_size; 68 u16 *block_ptr, block_size;
69 69
70 block_ptr = (u16 *)((char *)p - 2); 70 block_ptr = (u16 *)((char *)p - 2);
71 block_size = *block_ptr; 71 block_size = *block_ptr;
72 return block_size; 72 return block_size;
73 } 73 }
74 74
75 static void 75 static void
76 fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, 76 fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
77 struct lvds_dvo_timing *dvo_timing) 77 struct lvds_dvo_timing *dvo_timing)
78 { 78 {
79 panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | 79 panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
80 dvo_timing->hactive_lo; 80 dvo_timing->hactive_lo;
81 panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + 81 panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
82 ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo); 82 ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
83 panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + 83 panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
84 dvo_timing->hsync_pulse_width; 84 dvo_timing->hsync_pulse_width;
85 panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + 85 panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
86 ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo); 86 ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
87 87
88 panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | 88 panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
89 dvo_timing->vactive_lo; 89 dvo_timing->vactive_lo;
90 panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + 90 panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
91 dvo_timing->vsync_off; 91 dvo_timing->vsync_off;
92 panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + 92 panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
93 dvo_timing->vsync_pulse_width; 93 dvo_timing->vsync_pulse_width;
94 panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + 94 panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
95 ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo); 95 ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
96 panel_fixed_mode->clock = dvo_timing->clock * 10; 96 panel_fixed_mode->clock = dvo_timing->clock * 10;
97 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; 97 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
98 98
99 if (dvo_timing->hsync_positive) 99 if (dvo_timing->hsync_positive)
100 panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC; 100 panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
101 else 101 else
102 panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC; 102 panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
103 103
104 if (dvo_timing->vsync_positive) 104 if (dvo_timing->vsync_positive)
105 panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC; 105 panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
106 else 106 else
107 panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; 107 panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
108 108
109 /* Some VBTs have bogus h/vtotal values */ 109 /* Some VBTs have bogus h/vtotal values */
110 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) 110 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
111 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; 111 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
112 if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) 112 if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
113 panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1; 113 panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
114 114
115 drm_mode_set_name(panel_fixed_mode); 115 drm_mode_set_name(panel_fixed_mode);
116 } 116 }
117 117
118 /* Try to find integrated panel data */ 118 /* Try to find integrated panel data */
119 static void 119 static void
120 parse_lfp_panel_data(struct drm_i915_private *dev_priv, 120 parse_lfp_panel_data(struct drm_i915_private *dev_priv,
121 struct bdb_header *bdb) 121 struct bdb_header *bdb)
122 { 122 {
123 struct bdb_lvds_options *lvds_options; 123 struct bdb_lvds_options *lvds_options;
124 struct bdb_lvds_lfp_data *lvds_lfp_data; 124 struct bdb_lvds_lfp_data *lvds_lfp_data;
125 struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; 125 struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
126 struct bdb_lvds_lfp_data_entry *entry; 126 struct bdb_lvds_lfp_data_entry *entry;
127 struct lvds_dvo_timing *dvo_timing; 127 struct lvds_dvo_timing *dvo_timing;
128 struct drm_display_mode *panel_fixed_mode; 128 struct drm_display_mode *panel_fixed_mode;
129 int lfp_data_size, dvo_timing_offset; 129 int lfp_data_size, dvo_timing_offset;
130 int i, temp_downclock; 130 int i, temp_downclock;
131 struct drm_display_mode *temp_mode; 131 struct drm_display_mode *temp_mode;
132 132
133 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); 133 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
134 if (!lvds_options) 134 if (!lvds_options)
135 return; 135 return;
136 136
137 dev_priv->lvds_dither = lvds_options->pixel_dither; 137 dev_priv->lvds_dither = lvds_options->pixel_dither;
138 if (lvds_options->panel_type == 0xff) 138 if (lvds_options->panel_type == 0xff)
139 return; 139 return;
140 140
141 panel_type = lvds_options->panel_type; 141 panel_type = lvds_options->panel_type;
142 142
143 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); 143 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
144 if (!lvds_lfp_data) 144 if (!lvds_lfp_data)
145 return; 145 return;
146 146
147 lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS); 147 lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
148 if (!lvds_lfp_data_ptrs) 148 if (!lvds_lfp_data_ptrs)
149 return; 149 return;
150 150
151 dev_priv->lvds_vbt = 1; 151 dev_priv->lvds_vbt = 1;
152 152
153 lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset - 153 lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
154 lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset; 154 lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
155 entry = (struct bdb_lvds_lfp_data_entry *) 155 entry = (struct bdb_lvds_lfp_data_entry *)
156 ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * 156 ((uint8_t *)lvds_lfp_data->data + (lfp_data_size *
157 lvds_options->panel_type)); 157 lvds_options->panel_type));
158 dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset - 158 dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
159 lvds_lfp_data_ptrs->ptr[0].fp_timing_offset; 159 lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
160 160
161 /* 161 /*
162 * the size of fp_timing varies across platforms, so compute the 162 * the size of fp_timing varies across platforms, so compute the
163 * DVO timing offset relative to the LVDS data entry to locate 163 * DVO timing offset relative to the LVDS data entry to locate
164 * the DVO timing entry 164 * the DVO timing entry
165 */ 165 */
166 dvo_timing = (struct lvds_dvo_timing *) 166 dvo_timing = (struct lvds_dvo_timing *)
167 ((unsigned char *)entry + dvo_timing_offset); 167 ((unsigned char *)entry + dvo_timing_offset);
168 168
169 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); 169 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
170 if (!panel_fixed_mode) 170 if (!panel_fixed_mode)
171 return; 171 return;
172 172
173 fill_detail_timing_data(panel_fixed_mode, dvo_timing); 173 fill_detail_timing_data(panel_fixed_mode, dvo_timing);
174 174
175 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; 175 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
176 176
177 DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n"); 177 DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
178 drm_mode_debug_printmodeline(panel_fixed_mode); 178 drm_mode_debug_printmodeline(panel_fixed_mode);
179 179
180 temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL); 180 temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
181 temp_downclock = panel_fixed_mode->clock; 181 temp_downclock = panel_fixed_mode->clock;
182 /* 182 /*
183 * Enumerate the LVDS panel timing entries in the VBT to check whether 183 * Enumerate the LVDS panel timing entries in the VBT to check whether
184 * a lower LVDS downclock is available. 184 * a lower LVDS downclock is available.
185 */ 185 */
186 for (i = 0; i < 16; i++) { 186 for (i = 0; i < 16; i++) {
187 entry = (struct bdb_lvds_lfp_data_entry *) 187 entry = (struct bdb_lvds_lfp_data_entry *)
188 ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i)); 188 ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
189 dvo_timing = (struct lvds_dvo_timing *) 189 dvo_timing = (struct lvds_dvo_timing *)
190 ((unsigned char *)entry + dvo_timing_offset); 190 ((unsigned char *)entry + dvo_timing_offset);
191 191
192 fill_detail_timing_data(temp_mode, dvo_timing); 192 fill_detail_timing_data(temp_mode, dvo_timing);
193 193
194 if (temp_mode->hdisplay == panel_fixed_mode->hdisplay && 194 if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
195 temp_mode->hsync_start == panel_fixed_mode->hsync_start && 195 temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
196 temp_mode->hsync_end == panel_fixed_mode->hsync_end && 196 temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
197 temp_mode->htotal == panel_fixed_mode->htotal && 197 temp_mode->htotal == panel_fixed_mode->htotal &&
198 temp_mode->vdisplay == panel_fixed_mode->vdisplay && 198 temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
199 temp_mode->vsync_start == panel_fixed_mode->vsync_start && 199 temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
200 temp_mode->vsync_end == panel_fixed_mode->vsync_end && 200 temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
201 temp_mode->vtotal == panel_fixed_mode->vtotal && 201 temp_mode->vtotal == panel_fixed_mode->vtotal &&
202 temp_mode->clock < temp_downclock) { 202 temp_mode->clock < temp_downclock) {
203 /* 203 /*
204 * A downclock was already found, but keep looking 204 * A downclock was already found, but keep looking
205 * for the lowest one. 205 * for the lowest one.
206 */ 206 */
207 temp_downclock = temp_mode->clock; 207 temp_downclock = temp_mode->clock;
208 } 208 }
209 /* clear it to zero */ 209 /* clear it to zero */
210 memset(temp_mode, 0, sizeof(*temp_mode)); 210 memset(temp_mode, 0, sizeof(*temp_mode));
211 } 211 }
212 kfree(temp_mode); 212 kfree(temp_mode);
213 if (temp_downclock < panel_fixed_mode->clock && 213 if (temp_downclock < panel_fixed_mode->clock &&
214 i915_lvds_downclock) { 214 i915_lvds_downclock) {
215 dev_priv->lvds_downclock_avail = 1; 215 dev_priv->lvds_downclock_avail = 1;
216 dev_priv->lvds_downclock = temp_downclock; 216 dev_priv->lvds_downclock = temp_downclock;
217 DRM_DEBUG_KMS("LVDS downclock is found in VBT. ", 217 DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
218 "Normal Clock %dKHz, downclock %dKHz\n", 218 "Normal Clock %dKHz, downclock %dKHz\n",
219 temp_downclock, panel_fixed_mode->clock); 219 temp_downclock, panel_fixed_mode->clock);
220 } 220 }
221 return; 221 return;
222 } 222 }
223 223
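The entry size used above is not taken from a struct; it is inferred from the pointer table, because the leading fp_timing record varies in length. A restatement of the arithmetic with hypothetical local names:

	/* Size of one packed LFP data entry, from two consecutive pointers. */
	lfp_data_size     = ptrs->ptr[1].dvo_timing_offset -
			    ptrs->ptr[0].dvo_timing_offset;
	/* Offset of the dvo_timing record inside any one entry. */
	dvo_timing_offset = ptrs->ptr[0].dvo_timing_offset -
			    ptrs->ptr[0].fp_timing_offset;
	entry             = (u8 *)lfp_data->data + lfp_data_size * i;
	dvo_timing        = (struct lvds_dvo_timing *)
				((u8 *)entry + dvo_timing_offset);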
224 /* Try to find sdvo panel data */ 224 /* Try to find sdvo panel data */
225 static void 225 static void
226 parse_sdvo_panel_data(struct drm_i915_private *dev_priv, 226 parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
227 struct bdb_header *bdb) 227 struct bdb_header *bdb)
228 { 228 {
229 struct lvds_dvo_timing *dvo_timing; 229 struct lvds_dvo_timing *dvo_timing;
230 struct drm_display_mode *panel_fixed_mode; 230 struct drm_display_mode *panel_fixed_mode;
231 int index; 231 int index;
232 232
233 index = i915_vbt_sdvo_panel_type; 233 index = i915_vbt_sdvo_panel_type;
234 if (index == -1) { 234 if (index == -1) {
235 struct bdb_sdvo_lvds_options *sdvo_lvds_options; 235 struct bdb_sdvo_lvds_options *sdvo_lvds_options;
236 236
237 sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); 237 sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
238 if (!sdvo_lvds_options) 238 if (!sdvo_lvds_options)
239 return; 239 return;
240 240
241 index = sdvo_lvds_options->panel_type; 241 index = sdvo_lvds_options->panel_type;
242 } 242 }
243 243
244 dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS); 244 dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
245 if (!dvo_timing) 245 if (!dvo_timing)
246 return; 246 return;
247 247
248 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); 248 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
249 if (!panel_fixed_mode) 249 if (!panel_fixed_mode)
250 return; 250 return;
251 251
252 fill_detail_timing_data(panel_fixed_mode, dvo_timing + index); 252 fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
253 253
254 dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode; 254 dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
255 255
256 DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n"); 256 DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
257 drm_mode_debug_printmodeline(panel_fixed_mode); 257 drm_mode_debug_printmodeline(panel_fixed_mode);
258 } 258 }
259 259
260 static int intel_bios_ssc_frequency(struct drm_device *dev, 260 static int intel_bios_ssc_frequency(struct drm_device *dev,
261 bool alternate) 261 bool alternate)
262 { 262 {
263 switch (INTEL_INFO(dev)->gen) { 263 switch (INTEL_INFO(dev)->gen) {
264 case 2: 264 case 2:
265 return alternate ? 66 : 48; 265 return alternate ? 66 : 48;
266 case 3: 266 case 3:
267 case 4: 267 case 4:
268 return alternate ? 100 : 96; 268 return alternate ? 100 : 96;
269 default: 269 default:
270 return alternate ? 100 : 120; 270 return alternate ? 100 : 120;
271 } 271 }
272 } 272 }
273 273
274 static void 274 static void
275 parse_general_features(struct drm_i915_private *dev_priv, 275 parse_general_features(struct drm_i915_private *dev_priv,
276 struct bdb_header *bdb) 276 struct bdb_header *bdb)
277 { 277 {
278 struct drm_device *dev = dev_priv->dev; 278 struct drm_device *dev = dev_priv->dev;
279 struct bdb_general_features *general; 279 struct bdb_general_features *general;
280 280
281 general = find_section(bdb, BDB_GENERAL_FEATURES); 281 general = find_section(bdb, BDB_GENERAL_FEATURES);
282 if (general) { 282 if (general) {
283 dev_priv->int_tv_support = general->int_tv_support; 283 dev_priv->int_tv_support = general->int_tv_support;
284 dev_priv->int_crt_support = general->int_crt_support; 284 dev_priv->int_crt_support = general->int_crt_support;
285 dev_priv->lvds_use_ssc = general->enable_ssc; 285 dev_priv->lvds_use_ssc = general->enable_ssc;
286 dev_priv->lvds_ssc_freq = 286 dev_priv->lvds_ssc_freq =
287 intel_bios_ssc_frequency(dev, general->ssc_freq); 287 intel_bios_ssc_frequency(dev, general->ssc_freq);
288 } 288 }
289 } 289 }
290 290
291 static void 291 static void
292 parse_general_definitions(struct drm_i915_private *dev_priv, 292 parse_general_definitions(struct drm_i915_private *dev_priv,
293 struct bdb_header *bdb) 293 struct bdb_header *bdb)
294 { 294 {
295 struct bdb_general_definitions *general; 295 struct bdb_general_definitions *general;
296 296
297 general = find_section(bdb, BDB_GENERAL_DEFINITIONS); 297 general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
298 if (general) { 298 if (general) {
299 u16 block_size = get_blocksize(general); 299 u16 block_size = get_blocksize(general);
300 if (block_size >= sizeof(*general)) { 300 if (block_size >= sizeof(*general)) {
301 int bus_pin = general->crt_ddc_gmbus_pin; 301 int bus_pin = general->crt_ddc_gmbus_pin;
302 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); 302 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
303 if (bus_pin >= 1 && bus_pin <= 6) 303 if (bus_pin >= 1 && bus_pin <= 6)
304 dev_priv->crt_ddc_pin = bus_pin; 304 dev_priv->crt_ddc_pin = bus_pin;
305 } else { 305 } else {
306 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", 306 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
307 block_size); 307 block_size);
308 } 308 }
309 } 309 }
310 } 310 }
311 311
312 static void 312 static void
313 parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, 313 parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
314 struct bdb_header *bdb) 314 struct bdb_header *bdb)
315 { 315 {
316 struct sdvo_device_mapping *p_mapping; 316 struct sdvo_device_mapping *p_mapping;
317 struct bdb_general_definitions *p_defs; 317 struct bdb_general_definitions *p_defs;
318 struct child_device_config *p_child; 318 struct child_device_config *p_child;
319 int i, child_device_num, count; 319 int i, child_device_num, count;
320 u16 block_size; 320 u16 block_size;
321 321
322 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); 322 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
323 if (!p_defs) { 323 if (!p_defs) {
324 DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n"); 324 DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
325 return; 325 return;
326 } 326 }
327 /* Check whether the child device size meets expectations. 327 /* Check whether the child device size meets expectations.
328 * If the child device size obtained from the general definitions block 328 * If the child device size obtained from the general definitions block
329 * differs from sizeof(struct child_device_config), skip the 329 * differs from sizeof(struct child_device_config), skip the
330 * parsing of the sdvo device info. 330 * parsing of the sdvo device info.
331 */ 331 */
332 if (p_defs->child_dev_size != sizeof(*p_child)) { 332 if (p_defs->child_dev_size != sizeof(*p_child)) {
333 /* different child dev size. Ignore it */ 333 /* different child dev size. Ignore it */
334 DRM_DEBUG_KMS("different child size is found. Invalid.\n"); 334 DRM_DEBUG_KMS("different child size is found. Invalid.\n");
335 return; 335 return;
336 } 336 }
337 /* get the block size of general definitions */ 337 /* get the block size of general definitions */
338 block_size = get_blocksize(p_defs); 338 block_size = get_blocksize(p_defs);
339 /* get the number of child device */ 339 /* get the number of child device */
340 child_device_num = (block_size - sizeof(*p_defs)) / 340 child_device_num = (block_size - sizeof(*p_defs)) /
341 sizeof(*p_child); 341 sizeof(*p_child);
342 count = 0; 342 count = 0;
343 for (i = 0; i < child_device_num; i++) { 343 for (i = 0; i < child_device_num; i++) {
344 p_child = &(p_defs->devices[i]); 344 p_child = &(p_defs->devices[i]);
345 if (!p_child->device_type) { 345 if (!p_child->device_type) {
346 /* skip the device block if device type is invalid */ 346 /* skip the device block if device type is invalid */
347 continue; 347 continue;
348 } 348 }
349 if (p_child->slave_addr != SLAVE_ADDR1 && 349 if (p_child->slave_addr != SLAVE_ADDR1 &&
350 p_child->slave_addr != SLAVE_ADDR2) { 350 p_child->slave_addr != SLAVE_ADDR2) {
351 /* 351 /*
352 * If the slave address is neither 0x70 nor 0x72, 352 * If the slave address is neither 0x70 nor 0x72,
353 * it is not a SDVO device. Skip it. 353 * it is not a SDVO device. Skip it.
354 */ 354 */
355 continue; 355 continue;
356 } 356 }
357 if (p_child->dvo_port != DEVICE_PORT_DVOB && 357 if (p_child->dvo_port != DEVICE_PORT_DVOB &&
358 p_child->dvo_port != DEVICE_PORT_DVOC) { 358 p_child->dvo_port != DEVICE_PORT_DVOC) {
359 /* skip the incorrect SDVO port */ 359 /* skip the incorrect SDVO port */
360 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n"); 360 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n");
361 continue; 361 continue;
362 } 362 }
363 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" 363 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
364 " %s port\n", 364 " %s port\n",
365 p_child->slave_addr, 365 p_child->slave_addr,
366 (p_child->dvo_port == DEVICE_PORT_DVOB) ? 366 (p_child->dvo_port == DEVICE_PORT_DVOB) ?
367 "SDVOB" : "SDVOC"); 367 "SDVOB" : "SDVOC");
368 p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]); 368 p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
369 if (!p_mapping->initialized) { 369 if (!p_mapping->initialized) {
370 p_mapping->dvo_port = p_child->dvo_port; 370 p_mapping->dvo_port = p_child->dvo_port;
371 p_mapping->slave_addr = p_child->slave_addr; 371 p_mapping->slave_addr = p_child->slave_addr;
372 p_mapping->dvo_wiring = p_child->dvo_wiring; 372 p_mapping->dvo_wiring = p_child->dvo_wiring;
373 p_mapping->ddc_pin = p_child->ddc_pin; 373 p_mapping->ddc_pin = p_child->ddc_pin;
374 p_mapping->i2c_pin = p_child->i2c_pin; 374 p_mapping->i2c_pin = p_child->i2c_pin;
375 p_mapping->i2c_speed = p_child->i2c_speed; 375 p_mapping->i2c_speed = p_child->i2c_speed;
376 p_mapping->initialized = 1; 376 p_mapping->initialized = 1;
377 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n", 377 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
378 p_mapping->dvo_port, 378 p_mapping->dvo_port,
379 p_mapping->slave_addr, 379 p_mapping->slave_addr,
380 p_mapping->dvo_wiring, 380 p_mapping->dvo_wiring,
381 p_mapping->ddc_pin, 381 p_mapping->ddc_pin,
382 p_mapping->i2c_pin, 382 p_mapping->i2c_pin,
383 p_mapping->i2c_speed); 383 p_mapping->i2c_speed);
384 } else { 384 } else {
385 DRM_DEBUG_KMS("Maybe one SDVO port is shared by " 385 DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
386 "two SDVO device.\n"); 386 "two SDVO device.\n");
387 } 387 }
388 if (p_child->slave2_addr) { 388 if (p_child->slave2_addr) {
389 /* Maybe this is a SDVO device with multiple inputs */ 389 /* Maybe this is a SDVO device with multiple inputs */
390 /* And the mapping info is not added */ 390 /* And the mapping info is not added */
391 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this" 391 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
392 " is a SDVO device with multiple inputs.\n"); 392 " is a SDVO device with multiple inputs.\n");
393 } 393 }
394 count++; 394 count++;
395 } 395 }
396 396
397 if (!count) { 397 if (!count) {
398 /* No SDVO device info is found */ 398 /* No SDVO device info is found */
399 DRM_DEBUG_KMS("No SDVO device info is found in VBT\n"); 399 DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
400 } 400 }
401 return; 401 return;
402 } 402 }
403 403
404 static void 404 static void
405 parse_driver_features(struct drm_i915_private *dev_priv, 405 parse_driver_features(struct drm_i915_private *dev_priv,
406 struct bdb_header *bdb) 406 struct bdb_header *bdb)
407 { 407 {
408 struct drm_device *dev = dev_priv->dev; 408 struct drm_device *dev = dev_priv->dev;
409 struct bdb_driver_features *driver; 409 struct bdb_driver_features *driver;
410 410
411 driver = find_section(bdb, BDB_DRIVER_FEATURES); 411 driver = find_section(bdb, BDB_DRIVER_FEATURES);
412 if (!driver) 412 if (!driver)
413 return; 413 return;
414 414
415 if (SUPPORTS_EDP(dev) && 415 if (SUPPORTS_EDP(dev) &&
416 driver->lvds_config == BDB_DRIVER_FEATURE_EDP) 416 driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
417 dev_priv->edp.support = 1; 417 dev_priv->edp.support = 1;
418 418
419 if (driver->dual_frequency) 419 if (driver->dual_frequency)
420 dev_priv->render_reclock_avail = true; 420 dev_priv->render_reclock_avail = true;
421 } 421 }
422 422
423 static void 423 static void
424 parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) 424 parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
425 { 425 {
426 struct bdb_edp *edp; 426 struct bdb_edp *edp;
427 struct edp_power_seq *edp_pps; 427 struct edp_power_seq *edp_pps;
428 struct edp_link_params *edp_link_params; 428 struct edp_link_params *edp_link_params;
429 429
430 edp = find_section(bdb, BDB_EDP); 430 edp = find_section(bdb, BDB_EDP);
431 if (!edp) { 431 if (!edp) {
432 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) { 432 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
433 DRM_DEBUG_KMS("No eDP BDB found but eDP panel " 433 DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
434 "supported, assume %dbpp panel color " 434 "supported, assume %dbpp panel color "
435 "depth.\n", 435 "depth.\n",
436 dev_priv->edp.bpp); 436 dev_priv->edp.bpp);
437 } 437 }
438 return; 438 return;
439 } 439 }
440 440
441 switch ((edp->color_depth >> (panel_type * 2)) & 3) { 441 switch ((edp->color_depth >> (panel_type * 2)) & 3) {
442 case EDP_18BPP: 442 case EDP_18BPP:
443 dev_priv->edp.bpp = 18; 443 dev_priv->edp.bpp = 18;
444 break; 444 break;
445 case EDP_24BPP: 445 case EDP_24BPP:
446 dev_priv->edp.bpp = 24; 446 dev_priv->edp.bpp = 24;
447 break; 447 break;
448 case EDP_30BPP: 448 case EDP_30BPP:
449 dev_priv->edp.bpp = 30; 449 dev_priv->edp.bpp = 30;
450 break; 450 break;
451 } 451 }
452 452
453 /* Get the eDP sequencing and link info */ 453 /* Get the eDP sequencing and link info */
454 edp_pps = &edp->power_seqs[panel_type]; 454 edp_pps = &edp->power_seqs[panel_type];
455 edp_link_params = &edp->link_params[panel_type]; 455 edp_link_params = &edp->link_params[panel_type];
456 456
457 dev_priv->edp.pps = *edp_pps; 457 dev_priv->edp.pps = *edp_pps;
458 458
459 dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 : 459 dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
460 DP_LINK_BW_1_62; 460 DP_LINK_BW_1_62;
461 switch (edp_link_params->lanes) { 461 switch (edp_link_params->lanes) {
462 case 0: 462 case 0:
463 dev_priv->edp.lanes = 1; 463 dev_priv->edp.lanes = 1;
464 break; 464 break;
465 case 1: 465 case 1:
466 dev_priv->edp.lanes = 2; 466 dev_priv->edp.lanes = 2;
467 break; 467 break;
468 case 3: 468 case 3:
469 default: 469 default:
470 dev_priv->edp.lanes = 4; 470 dev_priv->edp.lanes = 4;
471 break; 471 break;
472 } 472 }
473 switch (edp_link_params->preemphasis) { 473 switch (edp_link_params->preemphasis) {
474 case 0: 474 case 0:
475 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; 475 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
476 break; 476 break;
477 case 1: 477 case 1:
478 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; 478 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
479 break; 479 break;
480 case 2: 480 case 2:
481 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; 481 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
482 break; 482 break;
483 case 3: 483 case 3:
484 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; 484 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
485 break; 485 break;
486 } 486 }
487 switch (edp_link_params->vswing) { 487 switch (edp_link_params->vswing) {
488 case 0: 488 case 0:
489 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; 489 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
490 break; 490 break;
491 case 1: 491 case 1:
492 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; 492 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
493 break; 493 break;
494 case 2: 494 case 2:
495 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; 495 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
496 break; 496 break;
497 case 3: 497 case 3:
498 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; 498 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
499 break; 499 break;
500 } 500 }
501 } 501 }
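The color-depth switch above decodes a packed field: the VBT stores one 2-bit code per panel in edp->color_depth, and panel_type selects which pair of bits applies. A self-contained sketch, assuming the EDP_18BPP/EDP_24BPP/EDP_30BPP codes are 0, 1 and 2, and using an arbitrary example value:

#include <stdio.h>
#include <stdint.h>

/* Assumed code values: 0 = 18 bpp, 1 = 24 bpp, 2 = 30 bpp. */
static int edp_bpp(uint16_t color_depth, int panel_type)
{
        switch ((color_depth >> (panel_type * 2)) & 3) {
        case 1:  return 24;
        case 2:  return 30;
        default: return 18;     /* code 0, or reserved code 3 */
        }
}

int main(void)
{
        int panel_type;

        for (panel_type = 0; panel_type < 4; panel_type++)
                printf("panel_type %d -> %d bpp\n",
                       panel_type, edp_bpp(0x2481, panel_type));
        return 0;       /* 24, 18, 18, 30 bpp for panel types 0..3 */
}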
502 502
503 static void 503 static void
504 parse_device_mapping(struct drm_i915_private *dev_priv, 504 parse_device_mapping(struct drm_i915_private *dev_priv,
505 struct bdb_header *bdb) 505 struct bdb_header *bdb)
506 { 506 {
507 struct bdb_general_definitions *p_defs; 507 struct bdb_general_definitions *p_defs;
508 struct child_device_config *p_child, *child_dev_ptr; 508 struct child_device_config *p_child, *child_dev_ptr;
509 int i, child_device_num, count; 509 int i, child_device_num, count;
510 u16 block_size; 510 u16 block_size;
511 511
512 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); 512 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
513 if (!p_defs) { 513 if (!p_defs) {
514 DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); 514 DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
515 return; 515 return;
516 } 516 }
517 /* Check whether the child device size meets expectations. 517 /* Check whether the child device size meets expectations.
518 * If the child device size obtained from the general definitions 518 * If the child device size obtained from the general definitions
519 * block differs from sizeof(struct child_device_config), skip 519 * block differs from sizeof(struct child_device_config), skip
520 * parsing the SDVO device info. 520 * parsing the SDVO device info.
521 */ 521 */
522 if (p_defs->child_dev_size != sizeof(*p_child)) { 522 if (p_defs->child_dev_size != sizeof(*p_child)) {
523 /* different child dev size. Ignore it */ 523 /* different child dev size. Ignore it */
524 DRM_DEBUG_KMS("different child dev size found. Invalid.\n"); 524 DRM_DEBUG_KMS("different child dev size found. Invalid.\n");
525 return; 525 return;
526 } 526 }
527 /* get the block size of general definitions */ 527 /* get the block size of general definitions */
528 block_size = get_blocksize(p_defs); 528 block_size = get_blocksize(p_defs);
529 /* get the number of child device */ 529 /* get the number of child device */
530 child_device_num = (block_size - sizeof(*p_defs)) / 530 child_device_num = (block_size - sizeof(*p_defs)) /
531 sizeof(*p_child); 531 sizeof(*p_child);
532 count = 0; 532 count = 0;
533 /* get the number of child device that is present */ 533 /* get the number of child device that is present */
534 for (i = 0; i < child_device_num; i++) { 534 for (i = 0; i < child_device_num; i++) {
535 p_child = &(p_defs->devices[i]); 535 p_child = &(p_defs->devices[i]);
536 if (!p_child->device_type) { 536 if (!p_child->device_type) {
537 /* skip the device block if device type is invalid */ 537 /* skip the device block if device type is invalid */
538 continue; 538 continue;
539 } 539 }
540 count++; 540 count++;
541 } 541 }
542 if (!count) { 542 if (!count) {
543 DRM_DEBUG_KMS("no child dev is parsed from VBT \n"); 543 DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
544 return; 544 return;
545 } 545 }
546 dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL); 546 dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
547 if (!dev_priv->child_dev) { 547 if (!dev_priv->child_dev) {
548 DRM_DEBUG_KMS("No memory space for child device\n"); 548 DRM_DEBUG_KMS("No memory space for child device\n");
549 return; 549 return;
550 } 550 }
551 551
552 dev_priv->child_dev_num = count; 552 dev_priv->child_dev_num = count;
553 count = 0; 553 count = 0;
554 for (i = 0; i < child_device_num; i++) { 554 for (i = 0; i < child_device_num; i++) {
555 p_child = &(p_defs->devices[i]); 555 p_child = &(p_defs->devices[i]);
556 if (!p_child->device_type) { 556 if (!p_child->device_type) {
557 /* skip the device block if device type is invalid */ 557 /* skip the device block if device type is invalid */
558 continue; 558 continue;
559 } 559 }
560 child_dev_ptr = dev_priv->child_dev + count; 560 child_dev_ptr = dev_priv->child_dev + count;
561 count++; 561 count++;
562 memcpy((void *)child_dev_ptr, (void *)p_child, 562 memcpy((void *)child_dev_ptr, (void *)p_child,
563 sizeof(*p_child)); 563 sizeof(*p_child));
564 } 564 }
565 return; 565 return;
566 } 566 }
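The child-device count above is pure layout arithmetic: the general definitions block is a fixed header followed by an array of child device configs, so the array length is the leftover bytes divided by the per-entry size. A worked example with hypothetical sizes:

#include <stdio.h>

int main(void)
{
        /* Hypothetical sizes, for illustration only. */
        unsigned int block_size = 305;  /* get_blocksize(p_defs) */
        unsigned int defs_size = 5;     /* fixed part of *p_defs */
        unsigned int child_size = 33;   /* sizeof(struct child_device_config) */

        unsigned int child_device_num = (block_size - defs_size) / child_size;

        printf("%u child device slots\n", child_device_num);   /* 9 */
        return 0;
}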
567 567
568 static void 568 static void
569 init_vbt_defaults(struct drm_i915_private *dev_priv) 569 init_vbt_defaults(struct drm_i915_private *dev_priv)
570 { 570 {
571 struct drm_device *dev = dev_priv->dev; 571 struct drm_device *dev = dev_priv->dev;
572 572
573 dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC; 573 dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
574 574
575 /* LFP panel data */ 575 /* LFP panel data */
576 dev_priv->lvds_dither = 1; 576 dev_priv->lvds_dither = 1;
577 dev_priv->lvds_vbt = 0; 577 dev_priv->lvds_vbt = 0;
578 578
579 /* SDVO panel data */ 579 /* SDVO panel data */
580 dev_priv->sdvo_lvds_vbt_mode = NULL; 580 dev_priv->sdvo_lvds_vbt_mode = NULL;
581 581
582 /* general features */ 582 /* general features */
583 dev_priv->int_tv_support = 1; 583 dev_priv->int_tv_support = 1;
584 dev_priv->int_crt_support = 1; 584 dev_priv->int_crt_support = 1;
585 585
586 /* Default to using SSC */ 586 /* Default to using SSC */
587 dev_priv->lvds_use_ssc = 1; 587 dev_priv->lvds_use_ssc = 1;
588 dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); 588 dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
589 DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); 589 DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
590 590
591 /* eDP data */ 591 /* eDP data */
592 dev_priv->edp.bpp = 18; 592 dev_priv->edp.bpp = 18;
593 } 593 }
594 594
595 /** 595 /**
596 * intel_parse_bios - find VBT and initialize settings from the BIOS 596 * intel_parse_bios - find VBT and initialize settings from the BIOS
597 * @dev: DRM device 597 * @dev: DRM device
598 * 598 *
599 * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers 599 * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
600 * to appropriate values. 600 * to appropriate values.
601 * 601 *
602 * Returns 0 on success, nonzero on failure. 602 * Returns 0 on success, nonzero on failure.
603 */ 603 */
604 bool 604 bool
605 intel_parse_bios(struct drm_device *dev) 605 intel_parse_bios(struct drm_device *dev)
606 { 606 {
607 struct drm_i915_private *dev_priv = dev->dev_private; 607 struct drm_i915_private *dev_priv = dev->dev_private;
608 struct pci_dev *pdev = dev->pdev; 608 struct pci_dev *pdev = dev->pdev;
609 struct bdb_header *bdb = NULL; 609 struct bdb_header *bdb = NULL;
610 u8 __iomem *bios = NULL; 610 u8 __iomem *bios = NULL;
611 611
612 init_vbt_defaults(dev_priv); 612 init_vbt_defaults(dev_priv);
613 613
614 /* XXX Should this validation be moved to intel_opregion.c? */ 614 /* XXX Should this validation be moved to intel_opregion.c? */
615 if (dev_priv->opregion.vbt) { 615 if (dev_priv->opregion.vbt) {
616 struct vbt_header *vbt = dev_priv->opregion.vbt; 616 struct vbt_header *vbt = dev_priv->opregion.vbt;
617 if (memcmp(vbt->signature, "$VBT", 4) == 0) { 617 if (memcmp(vbt->signature, "$VBT", 4) == 0) {
618 DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n", 618 DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
619 vbt->signature); 619 vbt->signature);
620 bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset); 620 bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
621 } else 621 } else
622 dev_priv->opregion.vbt = NULL; 622 dev_priv->opregion.vbt = NULL;
623 } 623 }
624 624
625 if (bdb == NULL) { 625 if (bdb == NULL) {
626 struct vbt_header *vbt = NULL; 626 struct vbt_header *vbt = NULL;
627 size_t size; 627 size_t size;
628 int i; 628 int i;
629 629
630 bios = pci_map_rom(pdev, &size); 630 bios = pci_map_rom(pdev, &size);
631 if (!bios) 631 if (!bios)
632 return -1; 632 return -1;
633 633
634 /* Scour memory looking for the VBT signature */ 634 /* Scour memory looking for the VBT signature */
635 for (i = 0; i + 4 < size; i++) { 635 for (i = 0; i + 4 < size; i++) {
636 if (!memcmp(bios + i, "$VBT", 4)) { 636 if (!memcmp(bios + i, "$VBT", 4)) {
637 vbt = (struct vbt_header *)(bios + i); 637 vbt = (struct vbt_header *)(bios + i);
638 break; 638 break;
639 } 639 }
640 } 640 }
641 641
642 if (!vbt) { 642 if (!vbt) {
643 DRM_ERROR("VBT signature missing\n"); 643 DRM_ERROR("VBT signature missing\n");
644 pci_unmap_rom(pdev, bios); 644 pci_unmap_rom(pdev, bios);
645 return -1; 645 return -1;
646 } 646 }
647 647
648 bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); 648 bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
649 } 649 }
650 650
651 /* Grab useful general definitions */ 651 /* Grab useful general definitions */
652 parse_general_features(dev_priv, bdb); 652 parse_general_features(dev_priv, bdb);
653 parse_general_definitions(dev_priv, bdb); 653 parse_general_definitions(dev_priv, bdb);
654 parse_lfp_panel_data(dev_priv, bdb); 654 parse_lfp_panel_data(dev_priv, bdb);
655 parse_sdvo_panel_data(dev_priv, bdb); 655 parse_sdvo_panel_data(dev_priv, bdb);
656 parse_sdvo_device_mapping(dev_priv, bdb); 656 parse_sdvo_device_mapping(dev_priv, bdb);
657 parse_device_mapping(dev_priv, bdb); 657 parse_device_mapping(dev_priv, bdb);
658 parse_driver_features(dev_priv, bdb); 658 parse_driver_features(dev_priv, bdb);
659 parse_edp(dev_priv, bdb); 659 parse_edp(dev_priv, bdb);
660 660
661 if (bios) 661 if (bios)
662 pci_unmap_rom(pdev, bios); 662 pci_unmap_rom(pdev, bios);
663 663
664 return 0; 664 return 0;
665 } 665 }
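When no OpRegion copy exists, the function above falls back to a byte-wise scan of the mapped ROM for the "$VBT" signature, then locates the BDB at bdb_offset bytes past the start of the VBT header. A minimal, self-contained sketch of that scan (the ROM contents here are fabricated):

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Byte-wise signature scan, as in the fallback path above. */
static long find_vbt_offset(const unsigned char *bios, size_t size)
{
        size_t i;

        for (i = 0; i + 4 < size; i++)
                if (!memcmp(bios + i, "$VBT", 4))
                        return (long)i;
        return -1;      /* signature missing */
}

int main(void)
{
        unsigned char rom[64] = { 0 };

        memcpy(rom + 16, "$VBT", 4);    /* pretend a VBT starts at offset 16 */
        /* The BDB then sits at rom + offset + vbt->bdb_offset, which is
         * why the driver code sums both terms. */
        printf("$VBT found at offset %ld\n", find_vbt_offset(rom, sizeof(rom)));
        return 0;
}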
666 666
667 /* Ensure that vital registers have been initialised, even if the BIOS 667 /* Ensure that vital registers have been initialised, even if the BIOS
668 * is absent or just failing to do its job. 668 * is absent or just failing to do its job.
669 */ 669 */
670 void intel_setup_bios(struct drm_device *dev) 670 void intel_setup_bios(struct drm_device *dev)
671 { 671 {
672 struct drm_i915_private *dev_priv = dev->dev_private; 672 struct drm_i915_private *dev_priv = dev->dev_private;
673 673
674 /* Set the Panel Power On/Off timings if uninitialized. */ 674 /* Set the Panel Power On/Off timings if uninitialized. */
675 if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) { 675 if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
676 /* Set T2 to 40ms and T5 to 200ms */ 676 /* Set T2 to 40ms and T5 to 200ms */
677 I915_WRITE(PP_ON_DELAYS, 0x019007d0); 677 I915_WRITE(PP_ON_DELAYS, 0x019007d0);
678 678
679 /* Set T3 to 35ms and Tx to 200ms */ 679 /* Set T3 to 35ms and Tx to 200ms */
680 I915_WRITE(PP_OFF_DELAYS, 0x015e07d0); 680 I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
681 } 681 }
682 } 682 }
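The magic constants above pack two delays into each register. Taking the comments at face value (40 ms and 200 ms), the halves decode in units of 100 us: 0x0190 = 400 and 0x07d0 = 2000. A worked decoding, with the plain 16-bit split being a simplification of the real bitfield layout:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t pp_on = 0x019007d0;            /* the PP_ON_DELAYS value above */
        unsigned hi = (pp_on >> 16) & 0xffff;   /* 0x0190 = 400 */
        unsigned lo = pp_on & 0xffff;           /* 0x07d0 = 2000 */

        /* Units of 100 us: 400 -> 40.0 ms (T2), 2000 -> 200.0 ms (T5). */
        printf("T2 = %u.%u ms, T5 = %u.%u ms\n",
               hi / 10, hi % 10, lo / 10, lo % 10);
        return 0;
}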
683 683
drivers/gpu/drm/i915/intel_display.c
1 /* 1 /*
2 * Copyright © 2006-2007 Intel Corporation 2 * Copyright © 2006-2007 Intel Corporation
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation 6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the 8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions: 9 * Software is furnished to do so, subject to the following conditions:
10 * 10 *
11 * The above copyright notice and this permission notice (including the next 11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the 12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software. 13 * Software.
14 * 14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE. 21 * DEALINGS IN THE SOFTWARE.
22 * 22 *
23 * Authors: 23 * Authors:
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 */ 25 */
26 26
27 #include <linux/module.h> 27 #include <linux/module.h>
28 #include <linux/input.h> 28 #include <linux/input.h>
29 #include <linux/i2c.h> 29 #include <linux/i2c.h>
30 #include <linux/kernel.h> 30 #include <linux/kernel.h>
31 #include <linux/slab.h> 31 #include <linux/slab.h>
32 #include <linux/vgaarb.h> 32 #include <linux/vgaarb.h>
33 #include "drmP.h" 33 #include "drmP.h"
34 #include "intel_drv.h" 34 #include "intel_drv.h"
35 #include "i915_drm.h" 35 #include "i915_drm.h"
36 #include "i915_drv.h" 36 #include "i915_drv.h"
37 #include "i915_trace.h" 37 #include "i915_trace.h"
38 #include "drm_dp_helper.h" 38 #include "drm_dp_helper.h"
39 39
40 #include "drm_crtc_helper.h" 40 #include "drm_crtc_helper.h"
41 41
42 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) 42 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
43 43
44 bool intel_pipe_has_type (struct drm_crtc *crtc, int type); 44 bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
45 static void intel_update_watermarks(struct drm_device *dev); 45 static void intel_update_watermarks(struct drm_device *dev);
46 static void intel_increase_pllclock(struct drm_crtc *crtc); 46 static void intel_increase_pllclock(struct drm_crtc *crtc);
47 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 47 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
48 48
49 typedef struct { 49 typedef struct {
50 /* given values */ 50 /* given values */
51 int n; 51 int n;
52 int m1, m2; 52 int m1, m2;
53 int p1, p2; 53 int p1, p2;
54 /* derived values */ 54 /* derived values */
55 int dot; 55 int dot;
56 int vco; 56 int vco;
57 int m; 57 int m;
58 int p; 58 int p;
59 } intel_clock_t; 59 } intel_clock_t;
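The derived fields of intel_clock_t follow from the usual PLL relations; below is a self-contained sketch of the i9xx-style derivation (the +2 register-encoding offsets are assumed conventions from the rest of this driver, not shown in this excerpt):

#include <stdio.h>

struct clk { int n, m1, m2, p1, p2, dot, vco, m, p; };

/* i9xx-style derivation (sketch; +2 offsets are assumed conventions). */
static void derive(int refclk, struct clk *c)
{
        c->m = 5 * (c->m1 + 2) + (c->m2 + 2);
        c->p = c->p1 * c->p2;
        c->vco = refclk * c->m / (c->n + 2);
        c->dot = c->vco / c->p;
}

int main(void)
{
        struct clk c = { .n = 2, .m1 = 14, .m2 = 6, .p1 = 2, .p2 = 5 };

        derive(96000 /* kHz */, &c);
        printf("m=%d p=%d vco=%d kHz dot=%d kHz\n", c.m, c.p, c.vco, c.dot);
        return 0;       /* vco 2112000 kHz, dot 211200 kHz: inside I9XX limits */
}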
60 60
61 typedef struct { 61 typedef struct {
62 int min, max; 62 int min, max;
63 } intel_range_t; 63 } intel_range_t;
64 64
65 typedef struct { 65 typedef struct {
66 int dot_limit; 66 int dot_limit;
67 int p2_slow, p2_fast; 67 int p2_slow, p2_fast;
68 } intel_p2_t; 68 } intel_p2_t;
69 69
70 #define INTEL_P2_NUM 2 70 #define INTEL_P2_NUM 2
71 typedef struct intel_limit intel_limit_t; 71 typedef struct intel_limit intel_limit_t;
72 struct intel_limit { 72 struct intel_limit {
73 intel_range_t dot, vco, n, m, m1, m2, p, p1; 73 intel_range_t dot, vco, n, m, m1, m2, p, p1;
74 intel_p2_t p2; 74 intel_p2_t p2;
75 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, 75 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
76 int, int, intel_clock_t *); 76 int, int, intel_clock_t *);
77 }; 77 };
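struct intel_limit bundles the legal parameter ranges with the search strategy for them, so call sites stay platform-agnostic through limit->find_pll. A self-contained miniature of the pattern, also showing the slow/fast p2 selection that the dot_limit fields in the tables below roughly drive (all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

typedef struct mini_limit mini_limit_t;
struct mini_limit {
        int dot_limit, p2_slow, p2_fast;
        bool (*find_pll)(const mini_limit_t *, int target, int *p2);
};

/* Slow divider below the dot-clock limit, fast divider above it. */
static bool pick_p2(const mini_limit_t *l, int target, int *p2)
{
        *p2 = (target < l->dot_limit) ? l->p2_slow : l->p2_fast;
        return true;
}

static const mini_limit_t mini_lvds = {
        .dot_limit = 112000, .p2_slow = 14, .p2_fast = 7,
        .find_pll = pick_p2,
};

int main(void)
{
        int p2;

        mini_lvds.find_pll(&mini_lvds, 65000, &p2);
        printf("p2 = %d for a 65 MHz dot clock\n", p2); /* 14 (slow) */
        return 0;
}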
78 78
79 #define I8XX_DOT_MIN 25000 79 #define I8XX_DOT_MIN 25000
80 #define I8XX_DOT_MAX 350000 80 #define I8XX_DOT_MAX 350000
81 #define I8XX_VCO_MIN 930000 81 #define I8XX_VCO_MIN 930000
82 #define I8XX_VCO_MAX 1400000 82 #define I8XX_VCO_MAX 1400000
83 #define I8XX_N_MIN 3 83 #define I8XX_N_MIN 3
84 #define I8XX_N_MAX 16 84 #define I8XX_N_MAX 16
85 #define I8XX_M_MIN 96 85 #define I8XX_M_MIN 96
86 #define I8XX_M_MAX 140 86 #define I8XX_M_MAX 140
87 #define I8XX_M1_MIN 18 87 #define I8XX_M1_MIN 18
88 #define I8XX_M1_MAX 26 88 #define I8XX_M1_MAX 26
89 #define I8XX_M2_MIN 6 89 #define I8XX_M2_MIN 6
90 #define I8XX_M2_MAX 16 90 #define I8XX_M2_MAX 16
91 #define I8XX_P_MIN 4 91 #define I8XX_P_MIN 4
92 #define I8XX_P_MAX 128 92 #define I8XX_P_MAX 128
93 #define I8XX_P1_MIN 2 93 #define I8XX_P1_MIN 2
94 #define I8XX_P1_MAX 33 94 #define I8XX_P1_MAX 33
95 #define I8XX_P1_LVDS_MIN 1 95 #define I8XX_P1_LVDS_MIN 1
96 #define I8XX_P1_LVDS_MAX 6 96 #define I8XX_P1_LVDS_MAX 6
97 #define I8XX_P2_SLOW 4 97 #define I8XX_P2_SLOW 4
98 #define I8XX_P2_FAST 2 98 #define I8XX_P2_FAST 2
99 #define I8XX_P2_LVDS_SLOW 14 99 #define I8XX_P2_LVDS_SLOW 14
100 #define I8XX_P2_LVDS_FAST 7 100 #define I8XX_P2_LVDS_FAST 7
101 #define I8XX_P2_SLOW_LIMIT 165000 101 #define I8XX_P2_SLOW_LIMIT 165000
102 102
103 #define I9XX_DOT_MIN 20000 103 #define I9XX_DOT_MIN 20000
104 #define I9XX_DOT_MAX 400000 104 #define I9XX_DOT_MAX 400000
105 #define I9XX_VCO_MIN 1400000 105 #define I9XX_VCO_MIN 1400000
106 #define I9XX_VCO_MAX 2800000 106 #define I9XX_VCO_MAX 2800000
107 #define PINEVIEW_VCO_MIN 1700000 107 #define PINEVIEW_VCO_MIN 1700000
108 #define PINEVIEW_VCO_MAX 3500000 108 #define PINEVIEW_VCO_MAX 3500000
109 #define I9XX_N_MIN 1 109 #define I9XX_N_MIN 1
110 #define I9XX_N_MAX 6 110 #define I9XX_N_MAX 6
111 /* Pineview's Ncounter is a ring counter */ 111 /* Pineview's Ncounter is a ring counter */
112 #define PINEVIEW_N_MIN 3 112 #define PINEVIEW_N_MIN 3
113 #define PINEVIEW_N_MAX 6 113 #define PINEVIEW_N_MAX 6
114 #define I9XX_M_MIN 70 114 #define I9XX_M_MIN 70
115 #define I9XX_M_MAX 120 115 #define I9XX_M_MAX 120
116 #define PINEVIEW_M_MIN 2 116 #define PINEVIEW_M_MIN 2
117 #define PINEVIEW_M_MAX 256 117 #define PINEVIEW_M_MAX 256
118 #define I9XX_M1_MIN 10 118 #define I9XX_M1_MIN 10
119 #define I9XX_M1_MAX 22 119 #define I9XX_M1_MAX 22
120 #define I9XX_M2_MIN 5 120 #define I9XX_M2_MIN 5
121 #define I9XX_M2_MAX 9 121 #define I9XX_M2_MAX 9
122 /* Pineview M1 is reserved, and must be 0 */ 122 /* Pineview M1 is reserved, and must be 0 */
123 #define PINEVIEW_M1_MIN 0 123 #define PINEVIEW_M1_MIN 0
124 #define PINEVIEW_M1_MAX 0 124 #define PINEVIEW_M1_MAX 0
125 #define PINEVIEW_M2_MIN 0 125 #define PINEVIEW_M2_MIN 0
126 #define PINEVIEW_M2_MAX 254 126 #define PINEVIEW_M2_MAX 254
127 #define I9XX_P_SDVO_DAC_MIN 5 127 #define I9XX_P_SDVO_DAC_MIN 5
128 #define I9XX_P_SDVO_DAC_MAX 80 128 #define I9XX_P_SDVO_DAC_MAX 80
129 #define I9XX_P_LVDS_MIN 7 129 #define I9XX_P_LVDS_MIN 7
130 #define I9XX_P_LVDS_MAX 98 130 #define I9XX_P_LVDS_MAX 98
131 #define PINEVIEW_P_LVDS_MIN 7 131 #define PINEVIEW_P_LVDS_MIN 7
132 #define PINEVIEW_P_LVDS_MAX 112 132 #define PINEVIEW_P_LVDS_MAX 112
133 #define I9XX_P1_MIN 1 133 #define I9XX_P1_MIN 1
134 #define I9XX_P1_MAX 8 134 #define I9XX_P1_MAX 8
135 #define I9XX_P2_SDVO_DAC_SLOW 10 135 #define I9XX_P2_SDVO_DAC_SLOW 10
136 #define I9XX_P2_SDVO_DAC_FAST 5 136 #define I9XX_P2_SDVO_DAC_FAST 5
137 #define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000 137 #define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
138 #define I9XX_P2_LVDS_SLOW 14 138 #define I9XX_P2_LVDS_SLOW 14
139 #define I9XX_P2_LVDS_FAST 7 139 #define I9XX_P2_LVDS_FAST 7
140 #define I9XX_P2_LVDS_SLOW_LIMIT 112000 140 #define I9XX_P2_LVDS_SLOW_LIMIT 112000
141 141
142 /* The parameters below are for SDVO on the G4x platform */ 142 /* The parameters below are for SDVO on the G4x platform */
143 #define G4X_DOT_SDVO_MIN 25000 143 #define G4X_DOT_SDVO_MIN 25000
144 #define G4X_DOT_SDVO_MAX 270000 144 #define G4X_DOT_SDVO_MAX 270000
145 #define G4X_VCO_MIN 1750000 145 #define G4X_VCO_MIN 1750000
146 #define G4X_VCO_MAX 3500000 146 #define G4X_VCO_MAX 3500000
147 #define G4X_N_SDVO_MIN 1 147 #define G4X_N_SDVO_MIN 1
148 #define G4X_N_SDVO_MAX 4 148 #define G4X_N_SDVO_MAX 4
149 #define G4X_M_SDVO_MIN 104 149 #define G4X_M_SDVO_MIN 104
150 #define G4X_M_SDVO_MAX 138 150 #define G4X_M_SDVO_MAX 138
151 #define G4X_M1_SDVO_MIN 17 151 #define G4X_M1_SDVO_MIN 17
152 #define G4X_M1_SDVO_MAX 23 152 #define G4X_M1_SDVO_MAX 23
153 #define G4X_M2_SDVO_MIN 5 153 #define G4X_M2_SDVO_MIN 5
154 #define G4X_M2_SDVO_MAX 11 154 #define G4X_M2_SDVO_MAX 11
155 #define G4X_P_SDVO_MIN 10 155 #define G4X_P_SDVO_MIN 10
156 #define G4X_P_SDVO_MAX 30 156 #define G4X_P_SDVO_MAX 30
157 #define G4X_P1_SDVO_MIN 1 157 #define G4X_P1_SDVO_MIN 1
158 #define G4X_P1_SDVO_MAX 3 158 #define G4X_P1_SDVO_MAX 3
159 #define G4X_P2_SDVO_SLOW 10 159 #define G4X_P2_SDVO_SLOW 10
160 #define G4X_P2_SDVO_FAST 10 160 #define G4X_P2_SDVO_FAST 10
161 #define G4X_P2_SDVO_LIMIT 270000 161 #define G4X_P2_SDVO_LIMIT 270000
162 162
163 /* The parameters below are for HDMI_DAC on the G4x platform */ 163 /* The parameters below are for HDMI_DAC on the G4x platform */
164 #define G4X_DOT_HDMI_DAC_MIN 22000 164 #define G4X_DOT_HDMI_DAC_MIN 22000
165 #define G4X_DOT_HDMI_DAC_MAX 400000 165 #define G4X_DOT_HDMI_DAC_MAX 400000
166 #define G4X_N_HDMI_DAC_MIN 1 166 #define G4X_N_HDMI_DAC_MIN 1
167 #define G4X_N_HDMI_DAC_MAX 4 167 #define G4X_N_HDMI_DAC_MAX 4
168 #define G4X_M_HDMI_DAC_MIN 104 168 #define G4X_M_HDMI_DAC_MIN 104
169 #define G4X_M_HDMI_DAC_MAX 138 169 #define G4X_M_HDMI_DAC_MAX 138
170 #define G4X_M1_HDMI_DAC_MIN 16 170 #define G4X_M1_HDMI_DAC_MIN 16
171 #define G4X_M1_HDMI_DAC_MAX 23 171 #define G4X_M1_HDMI_DAC_MAX 23
172 #define G4X_M2_HDMI_DAC_MIN 5 172 #define G4X_M2_HDMI_DAC_MIN 5
173 #define G4X_M2_HDMI_DAC_MAX 11 173 #define G4X_M2_HDMI_DAC_MAX 11
174 #define G4X_P_HDMI_DAC_MIN 5 174 #define G4X_P_HDMI_DAC_MIN 5
175 #define G4X_P_HDMI_DAC_MAX 80 175 #define G4X_P_HDMI_DAC_MAX 80
176 #define G4X_P1_HDMI_DAC_MIN 1 176 #define G4X_P1_HDMI_DAC_MIN 1
177 #define G4X_P1_HDMI_DAC_MAX 8 177 #define G4X_P1_HDMI_DAC_MAX 8
178 #define G4X_P2_HDMI_DAC_SLOW 10 178 #define G4X_P2_HDMI_DAC_SLOW 10
179 #define G4X_P2_HDMI_DAC_FAST 5 179 #define G4X_P2_HDMI_DAC_FAST 5
180 #define G4X_P2_HDMI_DAC_LIMIT 165000 180 #define G4X_P2_HDMI_DAC_LIMIT 165000
181 181
182 /* The parameters below are for SINGLE_CHANNEL_LVDS on the G4x platform */ 182 /* The parameters below are for SINGLE_CHANNEL_LVDS on the G4x platform */
183 #define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000 183 #define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000
184 #define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000 184 #define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000
185 #define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1 185 #define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1
186 #define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3 186 #define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3
187 #define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104 187 #define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104
188 #define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138 188 #define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138
189 #define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17 189 #define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17
190 #define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23 190 #define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23
191 #define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5 191 #define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5
192 #define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11 192 #define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11
193 #define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28 193 #define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28
194 #define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112 194 #define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112
195 #define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2 195 #define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2
196 #define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8 196 #define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8
197 #define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14 197 #define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14
198 #define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14 198 #define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14
199 #define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0 199 #define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0
200 200
201 /* The parameters below are for DUAL_CHANNEL_LVDS on the G4x platform */ 201 /* The parameters below are for DUAL_CHANNEL_LVDS on the G4x platform */
202 #define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000 202 #define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000
203 #define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000 203 #define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000
204 #define G4X_N_DUAL_CHANNEL_LVDS_MIN 1 204 #define G4X_N_DUAL_CHANNEL_LVDS_MIN 1
205 #define G4X_N_DUAL_CHANNEL_LVDS_MAX 3 205 #define G4X_N_DUAL_CHANNEL_LVDS_MAX 3
206 #define G4X_M_DUAL_CHANNEL_LVDS_MIN 104 206 #define G4X_M_DUAL_CHANNEL_LVDS_MIN 104
207 #define G4X_M_DUAL_CHANNEL_LVDS_MAX 138 207 #define G4X_M_DUAL_CHANNEL_LVDS_MAX 138
208 #define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17 208 #define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17
209 #define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23 209 #define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23
210 #define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5 210 #define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5
211 #define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11 211 #define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11
212 #define G4X_P_DUAL_CHANNEL_LVDS_MIN 14 212 #define G4X_P_DUAL_CHANNEL_LVDS_MIN 14
213 #define G4X_P_DUAL_CHANNEL_LVDS_MAX 42 213 #define G4X_P_DUAL_CHANNEL_LVDS_MAX 42
214 #define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2 214 #define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2
215 #define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6 215 #define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6
216 #define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7 216 #define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7
217 #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 217 #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
218 #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 218 #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
219 219
220 /* The parameters below are for DISPLAY PORT on the G4x platform */ 220 /* The parameters below are for DISPLAY PORT on the G4x platform */
221 #define G4X_DOT_DISPLAY_PORT_MIN 161670 221 #define G4X_DOT_DISPLAY_PORT_MIN 161670
222 #define G4X_DOT_DISPLAY_PORT_MAX 227000 222 #define G4X_DOT_DISPLAY_PORT_MAX 227000
223 #define G4X_N_DISPLAY_PORT_MIN 1 223 #define G4X_N_DISPLAY_PORT_MIN 1
224 #define G4X_N_DISPLAY_PORT_MAX 2 224 #define G4X_N_DISPLAY_PORT_MAX 2
225 #define G4X_M_DISPLAY_PORT_MIN 97 225 #define G4X_M_DISPLAY_PORT_MIN 97
226 #define G4X_M_DISPLAY_PORT_MAX 108 226 #define G4X_M_DISPLAY_PORT_MAX 108
227 #define G4X_M1_DISPLAY_PORT_MIN 0x10 227 #define G4X_M1_DISPLAY_PORT_MIN 0x10
228 #define G4X_M1_DISPLAY_PORT_MAX 0x12 228 #define G4X_M1_DISPLAY_PORT_MAX 0x12
229 #define G4X_M2_DISPLAY_PORT_MIN 0x05 229 #define G4X_M2_DISPLAY_PORT_MIN 0x05
230 #define G4X_M2_DISPLAY_PORT_MAX 0x06 230 #define G4X_M2_DISPLAY_PORT_MAX 0x06
231 #define G4X_P_DISPLAY_PORT_MIN 10 231 #define G4X_P_DISPLAY_PORT_MIN 10
232 #define G4X_P_DISPLAY_PORT_MAX 20 232 #define G4X_P_DISPLAY_PORT_MAX 20
233 #define G4X_P1_DISPLAY_PORT_MIN 1 233 #define G4X_P1_DISPLAY_PORT_MIN 1
234 #define G4X_P1_DISPLAY_PORT_MAX 2 234 #define G4X_P1_DISPLAY_PORT_MAX 2
235 #define G4X_P2_DISPLAY_PORT_SLOW 10 235 #define G4X_P2_DISPLAY_PORT_SLOW 10
236 #define G4X_P2_DISPLAY_PORT_FAST 10 236 #define G4X_P2_DISPLAY_PORT_FAST 10
237 #define G4X_P2_DISPLAY_PORT_LIMIT 0 237 #define G4X_P2_DISPLAY_PORT_LIMIT 0
238 238
239 /* Ironlake / Sandybridge */ 239 /* Ironlake / Sandybridge */
240 /* Since the clock is calculated using (register_value + 2) for 240 /* Since the clock is calculated using (register_value + 2) for
241 N/M1/M2, the range values here are (actual_value - 2). 241 N/M1/M2, the range values here are (actual_value - 2).
242 */ 242 */
243 #define IRONLAKE_DOT_MIN 25000 243 #define IRONLAKE_DOT_MIN 25000
244 #define IRONLAKE_DOT_MAX 350000 244 #define IRONLAKE_DOT_MAX 350000
245 #define IRONLAKE_VCO_MIN 1760000 245 #define IRONLAKE_VCO_MIN 1760000
246 #define IRONLAKE_VCO_MAX 3510000 246 #define IRONLAKE_VCO_MAX 3510000
247 #define IRONLAKE_M1_MIN 12 247 #define IRONLAKE_M1_MIN 12
248 #define IRONLAKE_M1_MAX 22 248 #define IRONLAKE_M1_MAX 22
249 #define IRONLAKE_M2_MIN 5 249 #define IRONLAKE_M2_MIN 5
250 #define IRONLAKE_M2_MAX 9 250 #define IRONLAKE_M2_MAX 9
251 #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225MHz */ 251 #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225MHz */
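A worked instance of the (register_value + 2) convention just described: the IRONLAKE_M1 bounds of 12..22 stored here correspond to actual M1 divider values of 14..24.

#include <stdio.h>

int main(void)
{
        int m1_reg_min = 12, m1_reg_max = 22;   /* IRONLAKE_M1_MIN/_MAX */

        /* Actual divider = register value + 2, per the comment above. */
        printf("actual M1 range: %d..%d\n", m1_reg_min + 2, m1_reg_max + 2);
        return 0;       /* prints 14..24 */
}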
252 252
253 /* We have parameter ranges for different type of outputs. */ 253 /* We have parameter ranges for different type of outputs. */
254 254
255 /* DAC & HDMI Refclk 120Mhz */ 255 /* DAC & HDMI Refclk 120Mhz */
256 #define IRONLAKE_DAC_N_MIN 1 256 #define IRONLAKE_DAC_N_MIN 1
257 #define IRONLAKE_DAC_N_MAX 5 257 #define IRONLAKE_DAC_N_MAX 5
258 #define IRONLAKE_DAC_M_MIN 79 258 #define IRONLAKE_DAC_M_MIN 79
259 #define IRONLAKE_DAC_M_MAX 127 259 #define IRONLAKE_DAC_M_MAX 127
260 #define IRONLAKE_DAC_P_MIN 5 260 #define IRONLAKE_DAC_P_MIN 5
261 #define IRONLAKE_DAC_P_MAX 80 261 #define IRONLAKE_DAC_P_MAX 80
262 #define IRONLAKE_DAC_P1_MIN 1 262 #define IRONLAKE_DAC_P1_MIN 1
263 #define IRONLAKE_DAC_P1_MAX 8 263 #define IRONLAKE_DAC_P1_MAX 8
264 #define IRONLAKE_DAC_P2_SLOW 10 264 #define IRONLAKE_DAC_P2_SLOW 10
265 #define IRONLAKE_DAC_P2_FAST 5 265 #define IRONLAKE_DAC_P2_FAST 5
266 266
267 /* LVDS single-channel 120MHz refclk */ 267 /* LVDS single-channel 120MHz refclk */
268 #define IRONLAKE_LVDS_S_N_MIN 1 268 #define IRONLAKE_LVDS_S_N_MIN 1
269 #define IRONLAKE_LVDS_S_N_MAX 3 269 #define IRONLAKE_LVDS_S_N_MAX 3
270 #define IRONLAKE_LVDS_S_M_MIN 79 270 #define IRONLAKE_LVDS_S_M_MIN 79
271 #define IRONLAKE_LVDS_S_M_MAX 118 271 #define IRONLAKE_LVDS_S_M_MAX 118
272 #define IRONLAKE_LVDS_S_P_MIN 28 272 #define IRONLAKE_LVDS_S_P_MIN 28
273 #define IRONLAKE_LVDS_S_P_MAX 112 273 #define IRONLAKE_LVDS_S_P_MAX 112
274 #define IRONLAKE_LVDS_S_P1_MIN 2 274 #define IRONLAKE_LVDS_S_P1_MIN 2
275 #define IRONLAKE_LVDS_S_P1_MAX 8 275 #define IRONLAKE_LVDS_S_P1_MAX 8
276 #define IRONLAKE_LVDS_S_P2_SLOW 14 276 #define IRONLAKE_LVDS_S_P2_SLOW 14
277 #define IRONLAKE_LVDS_S_P2_FAST 14 277 #define IRONLAKE_LVDS_S_P2_FAST 14
278 278
279 /* LVDS dual-channel 120MHz refclk */ 279 /* LVDS dual-channel 120MHz refclk */
280 #define IRONLAKE_LVDS_D_N_MIN 1 280 #define IRONLAKE_LVDS_D_N_MIN 1
281 #define IRONLAKE_LVDS_D_N_MAX 3 281 #define IRONLAKE_LVDS_D_N_MAX 3
282 #define IRONLAKE_LVDS_D_M_MIN 79 282 #define IRONLAKE_LVDS_D_M_MIN 79
283 #define IRONLAKE_LVDS_D_M_MAX 127 283 #define IRONLAKE_LVDS_D_M_MAX 127
284 #define IRONLAKE_LVDS_D_P_MIN 14 284 #define IRONLAKE_LVDS_D_P_MIN 14
285 #define IRONLAKE_LVDS_D_P_MAX 56 285 #define IRONLAKE_LVDS_D_P_MAX 56
286 #define IRONLAKE_LVDS_D_P1_MIN 2 286 #define IRONLAKE_LVDS_D_P1_MIN 2
287 #define IRONLAKE_LVDS_D_P1_MAX 8 287 #define IRONLAKE_LVDS_D_P1_MAX 8
288 #define IRONLAKE_LVDS_D_P2_SLOW 7 288 #define IRONLAKE_LVDS_D_P2_SLOW 7
289 #define IRONLAKE_LVDS_D_P2_FAST 7 289 #define IRONLAKE_LVDS_D_P2_FAST 7
290 290
291 /* LVDS single-channel 100MHz refclk */ 291 /* LVDS single-channel 100MHz refclk */
292 #define IRONLAKE_LVDS_S_SSC_N_MIN 1 292 #define IRONLAKE_LVDS_S_SSC_N_MIN 1
293 #define IRONLAKE_LVDS_S_SSC_N_MAX 2 293 #define IRONLAKE_LVDS_S_SSC_N_MAX 2
294 #define IRONLAKE_LVDS_S_SSC_M_MIN 79 294 #define IRONLAKE_LVDS_S_SSC_M_MIN 79
295 #define IRONLAKE_LVDS_S_SSC_M_MAX 126 295 #define IRONLAKE_LVDS_S_SSC_M_MAX 126
296 #define IRONLAKE_LVDS_S_SSC_P_MIN 28 296 #define IRONLAKE_LVDS_S_SSC_P_MIN 28
297 #define IRONLAKE_LVDS_S_SSC_P_MAX 112 297 #define IRONLAKE_LVDS_S_SSC_P_MAX 112
298 #define IRONLAKE_LVDS_S_SSC_P1_MIN 2 298 #define IRONLAKE_LVDS_S_SSC_P1_MIN 2
299 #define IRONLAKE_LVDS_S_SSC_P1_MAX 8 299 #define IRONLAKE_LVDS_S_SSC_P1_MAX 8
300 #define IRONLAKE_LVDS_S_SSC_P2_SLOW 14 300 #define IRONLAKE_LVDS_S_SSC_P2_SLOW 14
301 #define IRONLAKE_LVDS_S_SSC_P2_FAST 14 301 #define IRONLAKE_LVDS_S_SSC_P2_FAST 14
302 302
303 /* LVDS dual-channel 100MHz refclk */ 303 /* LVDS dual-channel 100MHz refclk */
304 #define IRONLAKE_LVDS_D_SSC_N_MIN 1 304 #define IRONLAKE_LVDS_D_SSC_N_MIN 1
305 #define IRONLAKE_LVDS_D_SSC_N_MAX 3 305 #define IRONLAKE_LVDS_D_SSC_N_MAX 3
306 #define IRONLAKE_LVDS_D_SSC_M_MIN 79 306 #define IRONLAKE_LVDS_D_SSC_M_MIN 79
307 #define IRONLAKE_LVDS_D_SSC_M_MAX 126 307 #define IRONLAKE_LVDS_D_SSC_M_MAX 126
308 #define IRONLAKE_LVDS_D_SSC_P_MIN 14 308 #define IRONLAKE_LVDS_D_SSC_P_MIN 14
309 #define IRONLAKE_LVDS_D_SSC_P_MAX 42 309 #define IRONLAKE_LVDS_D_SSC_P_MAX 42
310 #define IRONLAKE_LVDS_D_SSC_P1_MIN 2 310 #define IRONLAKE_LVDS_D_SSC_P1_MIN 2
311 #define IRONLAKE_LVDS_D_SSC_P1_MAX 6 311 #define IRONLAKE_LVDS_D_SSC_P1_MAX 6
312 #define IRONLAKE_LVDS_D_SSC_P2_SLOW 7 312 #define IRONLAKE_LVDS_D_SSC_P2_SLOW 7
313 #define IRONLAKE_LVDS_D_SSC_P2_FAST 7 313 #define IRONLAKE_LVDS_D_SSC_P2_FAST 7
314 314
315 /* DisplayPort */ 315 /* DisplayPort */
316 #define IRONLAKE_DP_N_MIN 1 316 #define IRONLAKE_DP_N_MIN 1
317 #define IRONLAKE_DP_N_MAX 2 317 #define IRONLAKE_DP_N_MAX 2
318 #define IRONLAKE_DP_M_MIN 81 318 #define IRONLAKE_DP_M_MIN 81
319 #define IRONLAKE_DP_M_MAX 90 319 #define IRONLAKE_DP_M_MAX 90
320 #define IRONLAKE_DP_P_MIN 10 320 #define IRONLAKE_DP_P_MIN 10
321 #define IRONLAKE_DP_P_MAX 20 321 #define IRONLAKE_DP_P_MAX 20
322 #define IRONLAKE_DP_P2_FAST 10 322 #define IRONLAKE_DP_P2_FAST 10
323 #define IRONLAKE_DP_P2_SLOW 10 323 #define IRONLAKE_DP_P2_SLOW 10
324 #define IRONLAKE_DP_P2_LIMIT 0 324 #define IRONLAKE_DP_P2_LIMIT 0
325 #define IRONLAKE_DP_P1_MIN 1 325 #define IRONLAKE_DP_P1_MIN 1
326 #define IRONLAKE_DP_P1_MAX 2 326 #define IRONLAKE_DP_P1_MAX 2
327 327
328 /* FDI */ 328 /* FDI */
329 #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ 329 #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
330 330
331 static bool 331 static bool
332 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 332 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
333 int target, int refclk, intel_clock_t *best_clock); 333 int target, int refclk, intel_clock_t *best_clock);
334 static bool 334 static bool
335 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 335 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
336 int target, int refclk, intel_clock_t *best_clock); 336 int target, int refclk, intel_clock_t *best_clock);
337 337
338 static bool 338 static bool
339 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, 339 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
340 int target, int refclk, intel_clock_t *best_clock); 340 int target, int refclk, intel_clock_t *best_clock);
341 static bool 341 static bool
342 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, 342 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
343 int target, int refclk, intel_clock_t *best_clock); 343 int target, int refclk, intel_clock_t *best_clock);
344 344
345 static inline u32 /* units of 100MHz */ 345 static inline u32 /* units of 100MHz */
346 intel_fdi_link_freq(struct drm_device *dev) 346 intel_fdi_link_freq(struct drm_device *dev)
347 { 347 {
348 if (IS_GEN5(dev)) { 348 if (IS_GEN5(dev)) {
349 struct drm_i915_private *dev_priv = dev->dev_private; 349 struct drm_i915_private *dev_priv = dev->dev_private;
350 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; 350 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
351 } else 351 } else
352 return 27; 352 return 27;
353 } 353 }
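intel_fdi_link_freq() returns units of 100 MHz, so the non-GEN5 fallback of 27 means a 2.7 GHz FDI link, consistent with IRONLAKE_FDI_FREQ = 2700000 kHz above. A quick unit check:

#include <stdio.h>

int main(void)
{
        unsigned link_freq = 27;        /* non-GEN5 return value, units of 100 MHz */

        printf("FDI link: %u MHz (%u kHz)\n",
               link_freq * 100, link_freq * 100000);    /* 2700 MHz, 2700000 kHz */
        return 0;
}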
354 354
355 static const intel_limit_t intel_limits_i8xx_dvo = { 355 static const intel_limit_t intel_limits_i8xx_dvo = {
356 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 356 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
357 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, 357 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
358 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, 358 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
359 .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, 359 .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
360 .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, 360 .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
361 .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, 361 .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
362 .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, 362 .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
363 .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, 363 .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
364 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 364 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
365 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, 365 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
366 .find_pll = intel_find_best_PLL, 366 .find_pll = intel_find_best_PLL,
367 }; 367 };
368 368
369 static const intel_limit_t intel_limits_i8xx_lvds = { 369 static const intel_limit_t intel_limits_i8xx_lvds = {
370 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 370 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
371 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, 371 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
372 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, 372 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
373 .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, 373 .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
374 .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, 374 .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
375 .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, 375 .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
376 .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, 376 .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
377 .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, 377 .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
378 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 378 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
379 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, 379 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
380 .find_pll = intel_find_best_PLL, 380 .find_pll = intel_find_best_PLL,
381 }; 381 };
382 382
383 static const intel_limit_t intel_limits_i9xx_sdvo = { 383 static const intel_limit_t intel_limits_i9xx_sdvo = {
384 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 384 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
385 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, 385 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
386 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, 386 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
387 .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, 387 .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
388 .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, 388 .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
389 .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, 389 .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
390 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, 390 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
391 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 391 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
392 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 392 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
393 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 393 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
394 .find_pll = intel_find_best_PLL, 394 .find_pll = intel_find_best_PLL,
395 }; 395 };
396 396
397 static const intel_limit_t intel_limits_i9xx_lvds = { 397 static const intel_limit_t intel_limits_i9xx_lvds = {
398 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 398 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
399 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, 399 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
400 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, 400 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
401 .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, 401 .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
402 .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, 402 .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
403 .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, 403 .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
404 .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX }, 404 .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX },
405 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 405 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
406 /* The single-channel range is 25-112MHz, and dual-channel 406 /* The single-channel range is 25-112MHz, and dual-channel
407 * is 80-224MHz. Prefer single channel as much as possible. 407 * is 80-224MHz. Prefer single channel as much as possible.
408 */ 408 */
409 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 409 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
410 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, 410 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
411 .find_pll = intel_find_best_PLL, 411 .find_pll = intel_find_best_PLL,
412 }; 412 };
413 413
414 /* The parameters and functions below are for the G4X chipset family */ 414 /* The parameters and functions below are for the G4X chipset family */
415 static const intel_limit_t intel_limits_g4x_sdvo = { 415 static const intel_limit_t intel_limits_g4x_sdvo = {
416 .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, 416 .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
417 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, 417 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
418 .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, 418 .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
419 .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX }, 419 .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX },
420 .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX }, 420 .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX },
421 .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX }, 421 .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX },
422 .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX }, 422 .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX },
423 .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX}, 423 .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX},
424 .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT, 424 .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT,
425 .p2_slow = G4X_P2_SDVO_SLOW, 425 .p2_slow = G4X_P2_SDVO_SLOW,
426 .p2_fast = G4X_P2_SDVO_FAST 426 .p2_fast = G4X_P2_SDVO_FAST
427 }, 427 },
428 .find_pll = intel_g4x_find_best_PLL, 428 .find_pll = intel_g4x_find_best_PLL,
429 }; 429 };
430 430
431 static const intel_limit_t intel_limits_g4x_hdmi = { 431 static const intel_limit_t intel_limits_g4x_hdmi = {
432 .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, 432 .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
433 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, 433 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
434 .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, 434 .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
435 .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX }, 435 .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX },
436 .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX }, 436 .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX },
437 .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX }, 437 .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX },
438 .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX }, 438 .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX },
439 .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX}, 439 .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX},
440 .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT, 440 .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT,
441 .p2_slow = G4X_P2_HDMI_DAC_SLOW, 441 .p2_slow = G4X_P2_HDMI_DAC_SLOW,
442 .p2_fast = G4X_P2_HDMI_DAC_FAST 442 .p2_fast = G4X_P2_HDMI_DAC_FAST
443 }, 443 },
444 .find_pll = intel_g4x_find_best_PLL, 444 .find_pll = intel_g4x_find_best_PLL,
445 }; 445 };
446 446
447 static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 447 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
448 .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, 448 .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
449 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, 449 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
450 .vco = { .min = G4X_VCO_MIN, 450 .vco = { .min = G4X_VCO_MIN,
451 .max = G4X_VCO_MAX }, 451 .max = G4X_VCO_MAX },
452 .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN, 452 .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN,
453 .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX }, 453 .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX },
454 .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN, 454 .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN,
455 .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX }, 455 .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX },
456 .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN, 456 .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN,
457 .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX }, 457 .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX },
458 .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN, 458 .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
459 .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX }, 459 .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
460 .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN, 460 .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
461 .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX }, 461 .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
462 .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN, 462 .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
463 .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX }, 463 .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
464 .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT, 464 .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
465 .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW, 465 .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
466 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST 466 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
467 }, 467 },
468 .find_pll = intel_g4x_find_best_PLL, 468 .find_pll = intel_g4x_find_best_PLL,
469 }; 469 };
470 470
471 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 471 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
472 .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, 472 .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
473 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, 473 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
474 .vco = { .min = G4X_VCO_MIN, 474 .vco = { .min = G4X_VCO_MIN,
475 .max = G4X_VCO_MAX }, 475 .max = G4X_VCO_MAX },
476 .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN, 476 .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN,
477 .max = G4X_N_DUAL_CHANNEL_LVDS_MAX }, 477 .max = G4X_N_DUAL_CHANNEL_LVDS_MAX },
478 .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN, 478 .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN,
479 .max = G4X_M_DUAL_CHANNEL_LVDS_MAX }, 479 .max = G4X_M_DUAL_CHANNEL_LVDS_MAX },
480 .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN, 480 .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN,
481 .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX }, 481 .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX },
482 .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN, 482 .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
483 .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX }, 483 .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
484 .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN, 484 .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
485 .max = G4X_P_DUAL_CHANNEL_LVDS_MAX }, 485 .max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
486 .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN, 486 .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
487 .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX }, 487 .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
488 .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT, 488 .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
489 .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW, 489 .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
490 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST 490 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
491 }, 491 },
492 .find_pll = intel_g4x_find_best_PLL, 492 .find_pll = intel_g4x_find_best_PLL,
493 }; 493 };
494 494
495 static const intel_limit_t intel_limits_g4x_display_port = { 495 static const intel_limit_t intel_limits_g4x_display_port = {
496 .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN, 496 .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN,
497 .max = G4X_DOT_DISPLAY_PORT_MAX }, 497 .max = G4X_DOT_DISPLAY_PORT_MAX },
498 .vco = { .min = G4X_VCO_MIN, 498 .vco = { .min = G4X_VCO_MIN,
499 .max = G4X_VCO_MAX}, 499 .max = G4X_VCO_MAX},
500 .n = { .min = G4X_N_DISPLAY_PORT_MIN, 500 .n = { .min = G4X_N_DISPLAY_PORT_MIN,
501 .max = G4X_N_DISPLAY_PORT_MAX }, 501 .max = G4X_N_DISPLAY_PORT_MAX },
502 .m = { .min = G4X_M_DISPLAY_PORT_MIN, 502 .m = { .min = G4X_M_DISPLAY_PORT_MIN,
503 .max = G4X_M_DISPLAY_PORT_MAX }, 503 .max = G4X_M_DISPLAY_PORT_MAX },
504 .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN, 504 .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN,
505 .max = G4X_M1_DISPLAY_PORT_MAX }, 505 .max = G4X_M1_DISPLAY_PORT_MAX },
506 .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN, 506 .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN,
507 .max = G4X_M2_DISPLAY_PORT_MAX }, 507 .max = G4X_M2_DISPLAY_PORT_MAX },
508 .p = { .min = G4X_P_DISPLAY_PORT_MIN, 508 .p = { .min = G4X_P_DISPLAY_PORT_MIN,
509 .max = G4X_P_DISPLAY_PORT_MAX }, 509 .max = G4X_P_DISPLAY_PORT_MAX },
510 .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN, 510 .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN,
511 .max = G4X_P1_DISPLAY_PORT_MAX}, 511 .max = G4X_P1_DISPLAY_PORT_MAX},
512 .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT, 512 .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT,
513 .p2_slow = G4X_P2_DISPLAY_PORT_SLOW, 513 .p2_slow = G4X_P2_DISPLAY_PORT_SLOW,
514 .p2_fast = G4X_P2_DISPLAY_PORT_FAST }, 514 .p2_fast = G4X_P2_DISPLAY_PORT_FAST },
515 .find_pll = intel_find_pll_g4x_dp, 515 .find_pll = intel_find_pll_g4x_dp,
516 }; 516 };
517 517
518 static const intel_limit_t intel_limits_pineview_sdvo = { 518 static const intel_limit_t intel_limits_pineview_sdvo = {
519 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, 519 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
520 .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX }, 520 .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
521 .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX }, 521 .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
522 .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX }, 522 .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
523 .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX }, 523 .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
524 .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX }, 524 .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
525 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, 525 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
526 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 526 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
527 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 527 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
528 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 528 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
529 .find_pll = intel_find_best_PLL, 529 .find_pll = intel_find_best_PLL,
530 }; 530 };
531 531
532 static const intel_limit_t intel_limits_pineview_lvds = { 532 static const intel_limit_t intel_limits_pineview_lvds = {
533 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 533 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
534 .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX }, 534 .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
535 .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX }, 535 .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
536 .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX }, 536 .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
537 .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX }, 537 .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
538 .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX }, 538 .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
539 .p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX }, 539 .p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX },
540 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 540 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
541 /* Pineview only supports single-channel mode. */ 541 /* Pineview only supports single-channel mode. */
542 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 542 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
543 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 543 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
544 .find_pll = intel_find_best_PLL, 544 .find_pll = intel_find_best_PLL,
545 }; 545 };
546 546
547 static const intel_limit_t intel_limits_ironlake_dac = { 547 static const intel_limit_t intel_limits_ironlake_dac = {
548 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 548 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
549 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 549 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
550 .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX }, 550 .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
551 .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX }, 551 .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
552 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 552 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
553 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 553 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
554 .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX }, 554 .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
555 .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX }, 555 .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
556 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 556 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
557 .p2_slow = IRONLAKE_DAC_P2_SLOW, 557 .p2_slow = IRONLAKE_DAC_P2_SLOW,
558 .p2_fast = IRONLAKE_DAC_P2_FAST }, 558 .p2_fast = IRONLAKE_DAC_P2_FAST },
559 .find_pll = intel_g4x_find_best_PLL, 559 .find_pll = intel_g4x_find_best_PLL,
560 }; 560 };
561 561
562 static const intel_limit_t intel_limits_ironlake_single_lvds = { 562 static const intel_limit_t intel_limits_ironlake_single_lvds = {
563 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 563 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
564 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 564 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
565 .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX }, 565 .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
566 .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX }, 566 .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
567 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 567 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
568 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 568 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
569 .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX }, 569 .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
570 .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX }, 570 .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
571 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 571 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
572 .p2_slow = IRONLAKE_LVDS_S_P2_SLOW, 572 .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
573 .p2_fast = IRONLAKE_LVDS_S_P2_FAST }, 573 .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
574 .find_pll = intel_g4x_find_best_PLL, 574 .find_pll = intel_g4x_find_best_PLL,
575 }; 575 };
576 576
577 static const intel_limit_t intel_limits_ironlake_dual_lvds = { 577 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
578 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 578 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
579 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 579 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
580 .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX }, 580 .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
581 .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX }, 581 .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
582 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 582 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
583 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 583 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
584 .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX }, 584 .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
585 .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX }, 585 .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
586 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 586 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
587 .p2_slow = IRONLAKE_LVDS_D_P2_SLOW, 587 .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
588 .p2_fast = IRONLAKE_LVDS_D_P2_FAST }, 588 .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
589 .find_pll = intel_g4x_find_best_PLL, 589 .find_pll = intel_g4x_find_best_PLL,
590 }; 590 };
591 591
592 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { 592 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
593 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 593 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
594 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 594 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
595 .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX }, 595 .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
596 .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX }, 596 .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
597 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 597 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
598 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 598 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
599 .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX }, 599 .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
600 .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN, .max = IRONLAKE_LVDS_S_SSC_P1_MAX }, 600 .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN, .max = IRONLAKE_LVDS_S_SSC_P1_MAX },
601 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 601 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
602 .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW, 602 .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
603 .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST }, 603 .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
604 .find_pll = intel_g4x_find_best_PLL, 604 .find_pll = intel_g4x_find_best_PLL,
605 }; 605 };
606 606
607 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { 607 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
608 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 608 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
609 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 609 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
610 .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX }, 610 .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
611 .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX }, 611 .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
612 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 612 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
613 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 613 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
614 .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX }, 614 .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
615 .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN, .max = IRONLAKE_LVDS_D_SSC_P1_MAX }, 615 .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN, .max = IRONLAKE_LVDS_D_SSC_P1_MAX },
616 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 616 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
617 .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW, 617 .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
618 .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST }, 618 .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
619 .find_pll = intel_g4x_find_best_PLL, 619 .find_pll = intel_g4x_find_best_PLL,
620 }; 620 };
621 621
622 static const intel_limit_t intel_limits_ironlake_display_port = { 622 static const intel_limit_t intel_limits_ironlake_display_port = {
623 .dot = { .min = IRONLAKE_DOT_MIN, 623 .dot = { .min = IRONLAKE_DOT_MIN,
624 .max = IRONLAKE_DOT_MAX }, 624 .max = IRONLAKE_DOT_MAX },
625 .vco = { .min = IRONLAKE_VCO_MIN, 625 .vco = { .min = IRONLAKE_VCO_MIN,
626 .max = IRONLAKE_VCO_MAX }, 626 .max = IRONLAKE_VCO_MAX },
627 .n = { .min = IRONLAKE_DP_N_MIN, 627 .n = { .min = IRONLAKE_DP_N_MIN,
628 .max = IRONLAKE_DP_N_MAX }, 628 .max = IRONLAKE_DP_N_MAX },
629 .m = { .min = IRONLAKE_DP_M_MIN, 629 .m = { .min = IRONLAKE_DP_M_MIN,
630 .max = IRONLAKE_DP_M_MAX }, 630 .max = IRONLAKE_DP_M_MAX },
631 .m1 = { .min = IRONLAKE_M1_MIN, 631 .m1 = { .min = IRONLAKE_M1_MIN,
632 .max = IRONLAKE_M1_MAX }, 632 .max = IRONLAKE_M1_MAX },
633 .m2 = { .min = IRONLAKE_M2_MIN, 633 .m2 = { .min = IRONLAKE_M2_MIN,
634 .max = IRONLAKE_M2_MAX }, 634 .max = IRONLAKE_M2_MAX },
635 .p = { .min = IRONLAKE_DP_P_MIN, 635 .p = { .min = IRONLAKE_DP_P_MIN,
636 .max = IRONLAKE_DP_P_MAX }, 636 .max = IRONLAKE_DP_P_MAX },
637 .p1 = { .min = IRONLAKE_DP_P1_MIN, 637 .p1 = { .min = IRONLAKE_DP_P1_MIN,
638 .max = IRONLAKE_DP_P1_MAX }, 638 .max = IRONLAKE_DP_P1_MAX },
639 .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT, 639 .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
640 .p2_slow = IRONLAKE_DP_P2_SLOW, 640 .p2_slow = IRONLAKE_DP_P2_SLOW,
641 .p2_fast = IRONLAKE_DP_P2_FAST }, 641 .p2_fast = IRONLAKE_DP_P2_FAST },
642 .find_pll = intel_find_pll_ironlake_dp, 642 .find_pll = intel_find_pll_ironlake_dp,
643 }; 643 };
644 644
645 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 645 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
646 int refclk) 646 int refclk)
647 { 647 {
648 struct drm_device *dev = crtc->dev; 648 struct drm_device *dev = crtc->dev;
649 struct drm_i915_private *dev_priv = dev->dev_private; 649 struct drm_i915_private *dev_priv = dev->dev_private;
650 const intel_limit_t *limit; 650 const intel_limit_t *limit;
651 651
652 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 652 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
653 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == 653 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
654 LVDS_CLKB_POWER_UP) { 654 LVDS_CLKB_POWER_UP) {
655 /* LVDS dual channel */ 655 /* LVDS dual channel */
656 if (refclk == 100000) 656 if (refclk == 100000)
657 limit = &intel_limits_ironlake_dual_lvds_100m; 657 limit = &intel_limits_ironlake_dual_lvds_100m;
658 else 658 else
659 limit = &intel_limits_ironlake_dual_lvds; 659 limit = &intel_limits_ironlake_dual_lvds;
660 } else { 660 } else {
661 if (refclk == 100000) 661 if (refclk == 100000)
662 limit = &intel_limits_ironlake_single_lvds_100m; 662 limit = &intel_limits_ironlake_single_lvds_100m;
663 else 663 else
664 limit = &intel_limits_ironlake_single_lvds; 664 limit = &intel_limits_ironlake_single_lvds;
665 } 665 }
666 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 666 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
667 HAS_eDP) 667 HAS_eDP)
668 limit = &intel_limits_ironlake_display_port; 668 limit = &intel_limits_ironlake_display_port;
669 else 669 else
670 limit = &intel_limits_ironlake_dac; 670 limit = &intel_limits_ironlake_dac;
671 671
672 return limit; 672 return limit;
673 } 673 }
674 674
675 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) 675 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
676 { 676 {
677 struct drm_device *dev = crtc->dev; 677 struct drm_device *dev = crtc->dev;
678 struct drm_i915_private *dev_priv = dev->dev_private; 678 struct drm_i915_private *dev_priv = dev->dev_private;
679 const intel_limit_t *limit; 679 const intel_limit_t *limit;
680 680
681 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 681 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
682 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 682 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
683 LVDS_CLKB_POWER_UP) 683 LVDS_CLKB_POWER_UP)
684 /* LVDS with dual channel */ 684 /* LVDS with dual channel */
685 limit = &intel_limits_g4x_dual_channel_lvds; 685 limit = &intel_limits_g4x_dual_channel_lvds;
686 else 686 else
687 /* LVDS with single channel */ 687 /* LVDS with single channel */
688 limit = &intel_limits_g4x_single_channel_lvds; 688 limit = &intel_limits_g4x_single_channel_lvds;
689 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || 689 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
690 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { 690 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
691 limit = &intel_limits_g4x_hdmi; 691 limit = &intel_limits_g4x_hdmi;
692 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { 692 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
693 limit = &intel_limits_g4x_sdvo; 693 limit = &intel_limits_g4x_sdvo;
694 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 694 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
695 limit = &intel_limits_g4x_display_port; 695 limit = &intel_limits_g4x_display_port;
696 } else /* fall back for all other output types */ 696 } else /* fall back for all other output types */
697 limit = &intel_limits_i9xx_sdvo; 697 limit = &intel_limits_i9xx_sdvo;
698 698
699 return limit; 699 return limit;
700 } 700 }
701 701
702 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) 702 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
703 { 703 {
704 struct drm_device *dev = crtc->dev; 704 struct drm_device *dev = crtc->dev;
705 const intel_limit_t *limit; 705 const intel_limit_t *limit;
706 706
707 if (HAS_PCH_SPLIT(dev)) 707 if (HAS_PCH_SPLIT(dev))
708 limit = intel_ironlake_limit(crtc, refclk); 708 limit = intel_ironlake_limit(crtc, refclk);
709 else if (IS_G4X(dev)) { 709 else if (IS_G4X(dev)) {
710 limit = intel_g4x_limit(crtc); 710 limit = intel_g4x_limit(crtc);
711 } else if (IS_PINEVIEW(dev)) { 711 } else if (IS_PINEVIEW(dev)) {
712 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 712 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
713 limit = &intel_limits_pineview_lvds; 713 limit = &intel_limits_pineview_lvds;
714 else 714 else
715 limit = &intel_limits_pineview_sdvo; 715 limit = &intel_limits_pineview_sdvo;
716 } else if (!IS_GEN2(dev)) { 716 } else if (!IS_GEN2(dev)) {
717 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 717 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
718 limit = &intel_limits_i9xx_lvds; 718 limit = &intel_limits_i9xx_lvds;
719 else 719 else
720 limit = &intel_limits_i9xx_sdvo; 720 limit = &intel_limits_i9xx_sdvo;
721 } else { 721 } else {
722 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 722 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
723 limit = &intel_limits_i8xx_lvds; 723 limit = &intel_limits_i8xx_lvds;
724 else 724 else
725 limit = &intel_limits_i8xx_dvo; 725 limit = &intel_limits_i8xx_dvo;
726 } 726 }
727 return limit; 727 return limit;
728 } 728 }
729 729
730 /* m1 is reserved as 0 in Pineview, n is a ring counter */ 730 /* m1 is reserved as 0 in Pineview, n is a ring counter */
731 static void pineview_clock(int refclk, intel_clock_t *clock) 731 static void pineview_clock(int refclk, intel_clock_t *clock)
732 { 732 {
733 clock->m = clock->m2 + 2; 733 clock->m = clock->m2 + 2;
734 clock->p = clock->p1 * clock->p2; 734 clock->p = clock->p1 * clock->p2;
735 clock->vco = refclk * clock->m / clock->n; 735 clock->vco = refclk * clock->m / clock->n;
736 clock->dot = clock->vco / clock->p; 736 clock->dot = clock->vco / clock->p;
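	/*
	 * Worked example (illustrative values, not taken from the limit
	 * tables): refclk = 96000 kHz, m2 = 100, n = 3, p1 = 2, p2 = 10
	 * gives m = 102, p = 20, vco = 96000 * 102 / 3 = 3264000 kHz and
	 * dot = 3264000 / 20 = 163200 kHz.
	 */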
737 } 737 }
738 738
739 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) 739 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
740 { 740 {
741 if (IS_PINEVIEW(dev)) { 741 if (IS_PINEVIEW(dev)) {
742 pineview_clock(refclk, clock); 742 pineview_clock(refclk, clock);
743 return; 743 return;
744 } 744 }
745 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 745 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
746 clock->p = clock->p1 * clock->p2; 746 clock->p = clock->p1 * clock->p2;
747 clock->vco = refclk * clock->m / (clock->n + 2); 747 clock->vco = refclk * clock->m / (clock->n + 2);
748 clock->dot = clock->vco / clock->p; 748 clock->dot = clock->vco / clock->p;
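	/*
	 * Worked example (illustrative values): m1 = 10, m2 = 8 gives
	 * m = 5 * 12 + 10 = 70; with refclk = 96000 kHz and n = 4,
	 * vco = 96000 * 70 / 6 = 1120000 kHz, and p1 = 2, p2 = 5 gives
	 * p = 10, so dot = 1120000 / 10 = 112000 kHz.
	 */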
749 } 749 }
750 750
751 /** 751 /**
752 * Returns whether any output on the specified pipe is of the specified type 752 * Returns whether any output on the specified pipe is of the specified type
753 */ 753 */
754 bool intel_pipe_has_type(struct drm_crtc *crtc, int type) 754 bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
755 { 755 {
756 struct drm_device *dev = crtc->dev; 756 struct drm_device *dev = crtc->dev;
757 struct drm_mode_config *mode_config = &dev->mode_config; 757 struct drm_mode_config *mode_config = &dev->mode_config;
758 struct intel_encoder *encoder; 758 struct intel_encoder *encoder;
759 759
760 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 760 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
761 if (encoder->base.crtc == crtc && encoder->type == type) 761 if (encoder->base.crtc == crtc && encoder->type == type)
762 return true; 762 return true;
763 763
764 return false; 764 return false;
765 } 765 }
766 766
767 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 767 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
768 /** 768 /**
769 * Returns whether the given set of divisors is valid for a given refclk with 769 * Returns whether the given set of divisors is valid for a given refclk with
770 * the given connectors. 770 * the given connectors.
771 */ 771 */
772 772
773 static bool intel_PLL_is_valid(struct drm_device *dev, 773 static bool intel_PLL_is_valid(struct drm_device *dev,
774 const intel_limit_t *limit, 774 const intel_limit_t *limit,
775 const intel_clock_t *clock) 775 const intel_clock_t *clock)
776 { 776 {
777 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 777 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
778 INTELPllInvalid ("p1 out of range\n"); 778 INTELPllInvalid ("p1 out of range\n");
779 if (clock->p < limit->p.min || limit->p.max < clock->p) 779 if (clock->p < limit->p.min || limit->p.max < clock->p)
780 INTELPllInvalid ("p out of range\n"); 780 INTELPllInvalid ("p out of range\n");
781 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 781 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
782 INTELPllInvalid ("m2 out of range\n"); 782 INTELPllInvalid ("m2 out of range\n");
783 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 783 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
784 INTELPllInvalid ("m1 out of range\n"); 784 INTELPllInvalid ("m1 out of range\n");
785 if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) 785 if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
786 INTELPllInvalid ("m1 <= m2\n"); 786 INTELPllInvalid ("m1 <= m2\n");
787 if (clock->m < limit->m.min || limit->m.max < clock->m) 787 if (clock->m < limit->m.min || limit->m.max < clock->m)
788 INTELPllInvalid ("m out of range\n"); 788 INTELPllInvalid ("m out of range\n");
789 if (clock->n < limit->n.min || limit->n.max < clock->n) 789 if (clock->n < limit->n.min || limit->n.max < clock->n)
790 INTELPllInvalid ("n out of range\n"); 790 INTELPllInvalid ("n out of range\n");
791 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 791 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
792 INTELPllInvalid ("vco out of range\n"); 792 INTELPllInvalid ("vco out of range\n");
793 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 793 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
794 * connector, etc., rather than just a single range. 794 * connector, etc., rather than just a single range.
795 */ 795 */
796 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 796 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
797 INTELPllInvalid ("dot out of range\n"); 797 INTELPllInvalid ("dot out of range\n");
798 798
799 return true; 799 return true;
800 } 800 }
801 801
802 static bool 802 static bool
803 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 803 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
804 int target, int refclk, intel_clock_t *best_clock) 804 int target, int refclk, intel_clock_t *best_clock)
805 805
806 { 806 {
807 struct drm_device *dev = crtc->dev; 807 struct drm_device *dev = crtc->dev;
808 struct drm_i915_private *dev_priv = dev->dev_private; 808 struct drm_i915_private *dev_priv = dev->dev_private;
809 intel_clock_t clock; 809 intel_clock_t clock;
810 int err = target; 810 int err = target;
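	/*
	 * Seeding err with target means any valid candidate whose dot clock
	 * is within target kHz of the request improves on it; if the loops
	 * below never improve it, err == target and the return below
	 * reports failure.
	 */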
811 811
812 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 812 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
813 (I915_READ(LVDS)) != 0) { 813 (I915_READ(LVDS)) != 0) {
814 /* 814 /*
815 * For LVDS, if the panel is on, just rely on its current 815 * For LVDS, if the panel is on, just rely on its current
816 * settings for dual-channel. We haven't figured out how to 816 * settings for dual-channel. We haven't figured out how to
817 * reliably set up different single/dual channel state, if we 817 * reliably set up different single/dual channel state, if we
818 * even can. 818 * even can.
819 */ 819 */
820 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 820 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
821 LVDS_CLKB_POWER_UP) 821 LVDS_CLKB_POWER_UP)
822 clock.p2 = limit->p2.p2_fast; 822 clock.p2 = limit->p2.p2_fast;
823 else 823 else
824 clock.p2 = limit->p2.p2_slow; 824 clock.p2 = limit->p2.p2_slow;
825 } else { 825 } else {
826 if (target < limit->p2.dot_limit) 826 if (target < limit->p2.dot_limit)
827 clock.p2 = limit->p2.p2_slow; 827 clock.p2 = limit->p2.p2_slow;
828 else 828 else
829 clock.p2 = limit->p2.p2_fast; 829 clock.p2 = limit->p2.p2_fast;
830 } 830 }
831 831
832 memset(best_clock, 0, sizeof(*best_clock)); 832 memset(best_clock, 0, sizeof(*best_clock));
833 833
834 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 834 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
835 clock.m1++) { 835 clock.m1++) {
836 for (clock.m2 = limit->m2.min; 836 for (clock.m2 = limit->m2.min;
837 clock.m2 <= limit->m2.max; clock.m2++) { 837 clock.m2 <= limit->m2.max; clock.m2++) {
838 /* m1 is always 0 in Pineview */ 838 /* m1 is always 0 in Pineview */
839 if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev)) 839 if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
840 break; 840 break;
841 for (clock.n = limit->n.min; 841 for (clock.n = limit->n.min;
842 clock.n <= limit->n.max; clock.n++) { 842 clock.n <= limit->n.max; clock.n++) {
843 for (clock.p1 = limit->p1.min; 843 for (clock.p1 = limit->p1.min;
844 clock.p1 <= limit->p1.max; clock.p1++) { 844 clock.p1 <= limit->p1.max; clock.p1++) {
845 int this_err; 845 int this_err;
846 846
847 intel_clock(dev, refclk, &clock); 847 intel_clock(dev, refclk, &clock);
848 if (!intel_PLL_is_valid(dev, limit, 848 if (!intel_PLL_is_valid(dev, limit,
849 &clock)) 849 &clock))
850 continue; 850 continue;
851 851
852 this_err = abs(clock.dot - target); 852 this_err = abs(clock.dot - target);
853 if (this_err < err) { 853 if (this_err < err) {
854 *best_clock = clock; 854 *best_clock = clock;
855 err = this_err; 855 err = this_err;
856 } 856 }
857 } 857 }
858 } 858 }
859 } 859 }
860 } 860 }
861 861
862 return (err != target); 862 return (err != target);
863 } 863 }
864 864
865 static bool 865 static bool
866 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 866 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
867 int target, int refclk, intel_clock_t *best_clock) 867 int target, int refclk, intel_clock_t *best_clock)
868 { 868 {
869 struct drm_device *dev = crtc->dev; 869 struct drm_device *dev = crtc->dev;
870 struct drm_i915_private *dev_priv = dev->dev_private; 870 struct drm_i915_private *dev_priv = dev->dev_private;
871 intel_clock_t clock; 871 intel_clock_t clock;
872 int max_n; 872 int max_n;
873 bool found; 873 bool found;
874 /* approximately equals target * 0.00585 */ 874 /* approximately equals target * 0.00585 */
875 int err_most = (target >> 8) + (target >> 9); 875 int err_most = (target >> 8) + (target >> 9);
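	/* target/256 + target/512 = 3*target/512, i.e. ~0.586% of target */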
876 found = false; 876 found = false;
877 877
878 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 878 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
879 int lvds_reg; 879 int lvds_reg;
880 880
881 if (HAS_PCH_SPLIT(dev)) 881 if (HAS_PCH_SPLIT(dev))
882 lvds_reg = PCH_LVDS; 882 lvds_reg = PCH_LVDS;
883 else 883 else
884 lvds_reg = LVDS; 884 lvds_reg = LVDS;
885 if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == 885 if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
886 LVDS_CLKB_POWER_UP) 886 LVDS_CLKB_POWER_UP)
887 clock.p2 = limit->p2.p2_fast; 887 clock.p2 = limit->p2.p2_fast;
888 else 888 else
889 clock.p2 = limit->p2.p2_slow; 889 clock.p2 = limit->p2.p2_slow;
890 } else { 890 } else {
891 if (target < limit->p2.dot_limit) 891 if (target < limit->p2.dot_limit)
892 clock.p2 = limit->p2.p2_slow; 892 clock.p2 = limit->p2.p2_slow;
893 else 893 else
894 clock.p2 = limit->p2.p2_fast; 894 clock.p2 = limit->p2.p2_fast;
895 } 895 }
896 896
897 memset(best_clock, 0, sizeof(*best_clock)); 897 memset(best_clock, 0, sizeof(*best_clock));
898 max_n = limit->n.max; 898 max_n = limit->n.max;
899 /* based on hardware requirement, prefer smaller n for better precision */ 899 /* based on hardware requirement, prefer smaller n for better precision */
900 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 900 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
901 /* based on hardware requirement, prefer larger m1, m2 */ 901 /* based on hardware requirement, prefer larger m1, m2 */
902 for (clock.m1 = limit->m1.max; 902 for (clock.m1 = limit->m1.max;
903 clock.m1 >= limit->m1.min; clock.m1--) { 903 clock.m1 >= limit->m1.min; clock.m1--) {
904 for (clock.m2 = limit->m2.max; 904 for (clock.m2 = limit->m2.max;
905 clock.m2 >= limit->m2.min; clock.m2--) { 905 clock.m2 >= limit->m2.min; clock.m2--) {
906 for (clock.p1 = limit->p1.max; 906 for (clock.p1 = limit->p1.max;
907 clock.p1 >= limit->p1.min; clock.p1--) { 907 clock.p1 >= limit->p1.min; clock.p1--) {
908 int this_err; 908 int this_err;
909 909
910 intel_clock(dev, refclk, &clock); 910 intel_clock(dev, refclk, &clock);
911 if (!intel_PLL_is_valid(dev, limit, 911 if (!intel_PLL_is_valid(dev, limit,
912 &clock)) 912 &clock))
913 continue; 913 continue;
914 914
915 this_err = abs(clock.dot - target); 915 this_err = abs(clock.dot - target);
916 if (this_err < err_most) { 916 if (this_err < err_most) {
917 *best_clock = clock; 917 *best_clock = clock;
918 err_most = this_err; 918 err_most = this_err;
919 max_n = clock.n; 919 max_n = clock.n;
920 found = true; 920 found = true;
921 } 921 }
922 } 922 }
923 } 923 }
924 } 924 }
925 } 925 }
926 return found; 926 return found;
927 } 927 }
928 928
929 static bool 929 static bool
930 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 930 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
931 int target, int refclk, intel_clock_t *best_clock) 931 int target, int refclk, intel_clock_t *best_clock)
932 { 932 {
933 struct drm_device *dev = crtc->dev; 933 struct drm_device *dev = crtc->dev;
934 intel_clock_t clock; 934 intel_clock_t clock;
935 935
936 if (target < 200000) { 936 if (target < 200000) {
937 clock.n = 1; 937 clock.n = 1;
938 clock.p1 = 2; 938 clock.p1 = 2;
939 clock.p2 = 10; 939 clock.p2 = 10;
940 clock.m1 = 12; 940 clock.m1 = 12;
941 clock.m2 = 9; 941 clock.m2 = 9;
942 } else { 942 } else {
943 clock.n = 2; 943 clock.n = 2;
944 clock.p1 = 1; 944 clock.p1 = 1;
945 clock.p2 = 10; 945 clock.p2 = 10;
946 clock.m1 = 14; 946 clock.m1 = 14;
947 clock.m2 = 8; 947 clock.m2 = 8;
948 } 948 }
949 intel_clock(dev, refclk, &clock); 949 intel_clock(dev, refclk, &clock);
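	/*
	 * Sanity check (assuming the usual 120000 kHz Ironlake PCH refclk):
	 * the slow divisors give m = 5 * 14 + 11 = 81 and p = 20, so
	 * dot = 120000 * 81 / 3 / 20 = 162000 kHz; the fast divisors give
	 * m = 90 and p = 10, so dot = 120000 * 90 / 4 / 10 = 270000 kHz --
	 * the two DisplayPort link rates.
	 */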
950 memcpy(best_clock, &clock, sizeof(intel_clock_t)); 950 memcpy(best_clock, &clock, sizeof(intel_clock_t));
951 return true; 951 return true;
952 } 952 }
953 953
954 /* DisplayPort has only two frequencies, 162MHz and 270MHz */ 954 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
955 static bool 955 static bool
956 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 956 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
957 int target, int refclk, intel_clock_t *best_clock) 957 int target, int refclk, intel_clock_t *best_clock)
958 { 958 {
959 intel_clock_t clock; 959 intel_clock_t clock;
960 if (target < 200000) { 960 if (target < 200000) {
961 clock.p1 = 2; 961 clock.p1 = 2;
962 clock.p2 = 10; 962 clock.p2 = 10;
963 clock.n = 2; 963 clock.n = 2;
964 clock.m1 = 23; 964 clock.m1 = 23;
965 clock.m2 = 8; 965 clock.m2 = 8;
966 } else { 966 } else {
967 clock.p1 = 1; 967 clock.p1 = 1;
968 clock.p2 = 10; 968 clock.p2 = 10;
969 clock.n = 1; 969 clock.n = 1;
970 clock.m1 = 14; 970 clock.m1 = 14;
971 clock.m2 = 2; 971 clock.m2 = 2;
972 } 972 }
973 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); 973 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
974 clock.p = (clock.p1 * clock.p2); 974 clock.p = (clock.p1 * clock.p2);
975 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; 975 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
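	/*
	 * Sanity check: the slow divisors give m = 5 * 25 + 10 = 135,
	 * n = 2, p = 20, so dot = 96000 * 135 / 4 / 20 = 162000 kHz; the
	 * fast divisors give m = 84, n = 1, p = 10, so
	 * dot = 96000 * 84 / 3 / 10 = 268800 kHz (the ~270MHz link rate).
	 */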
976 clock.vco = 0; 976 clock.vco = 0;
977 memcpy(best_clock, &clock, sizeof(intel_clock_t)); 977 memcpy(best_clock, &clock, sizeof(intel_clock_t));
978 return true; 978 return true;
979 } 979 }
980 980
981 /** 981 /**
982 * intel_wait_for_vblank - wait for vblank on a given pipe 982 * intel_wait_for_vblank - wait for vblank on a given pipe
983 * @dev: drm device 983 * @dev: drm device
984 * @pipe: pipe to wait for 984 * @pipe: pipe to wait for
985 * 985 *
986 * Wait for vblank to occur on a given pipe. Needed for various bits of 986 * Wait for vblank to occur on a given pipe. Needed for various bits of
987 * mode setting code. 987 * mode setting code.
988 */ 988 */
989 void intel_wait_for_vblank(struct drm_device *dev, int pipe) 989 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
990 { 990 {
991 struct drm_i915_private *dev_priv = dev->dev_private; 991 struct drm_i915_private *dev_priv = dev->dev_private;
992 int pipestat_reg = PIPESTAT(pipe); 992 int pipestat_reg = PIPESTAT(pipe);
993 993
994 /* Clear existing vblank status. Note this will clear any other 994 /* Clear existing vblank status. Note this will clear any other
995 * sticky status fields as well. 995 * sticky status fields as well.
996 * 996 *
997 * This races with i915_driver_irq_handler() with the result 997 * This races with i915_driver_irq_handler() with the result
998 * that either function could miss a vblank event. Here it is not 998 * that either function could miss a vblank event. Here it is not
999 * fatal, as we will either wait upon the next vblank interrupt or 999 * fatal, as we will either wait upon the next vblank interrupt or
1000 * timeout. Generally speaking intel_wait_for_vblank() is only 1000 * timeout. Generally speaking intel_wait_for_vblank() is only
1001 * called during modeset at which time the GPU should be idle and 1001 * called during modeset at which time the GPU should be idle and
1002 * should *not* be performing page flips and thus not waiting on 1002 * should *not* be performing page flips and thus not waiting on
1003 * vblanks... 1003 * vblanks...
1004 * Currently, the result of us stealing a vblank from the irq 1004 * Currently, the result of us stealing a vblank from the irq
1005 * handler is that a single frame will be skipped during swapbuffers. 1005 * handler is that a single frame will be skipped during swapbuffers.
1006 */ 1006 */
1007 I915_WRITE(pipestat_reg, 1007 I915_WRITE(pipestat_reg,
1008 I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); 1008 I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
1009 1009
1010 /* Wait for the vblank interrupt status bit to be set */ 1010 /* Wait for the vblank interrupt status bit to be set */
1011 if (wait_for(I915_READ(pipestat_reg) & 1011 if (wait_for(I915_READ(pipestat_reg) &
1012 PIPE_VBLANK_INTERRUPT_STATUS, 1012 PIPE_VBLANK_INTERRUPT_STATUS,
1013 50)) 1013 50))
1014 DRM_DEBUG_KMS("vblank wait timed out\n"); 1014 DRM_DEBUG_KMS("vblank wait timed out\n");
1015 } 1015 }
1016 1016
1017 /* 1017 /*
1018 * intel_wait_for_pipe_off - wait for pipe to turn off 1018 * intel_wait_for_pipe_off - wait for pipe to turn off
1019 * @dev: drm device 1019 * @dev: drm device
1020 * @pipe: pipe to wait for 1020 * @pipe: pipe to wait for
1021 * 1021 *
1022 * After disabling a pipe, we can't wait for vblank in the usual way, 1022 * After disabling a pipe, we can't wait for vblank in the usual way,
1023 * spinning on the vblank interrupt status bit, since we won't actually 1023 * spinning on the vblank interrupt status bit, since we won't actually
1024 * see an interrupt when the pipe is disabled. 1024 * see an interrupt when the pipe is disabled.
1025 * 1025 *
1026 * On Gen4 and above: 1026 * On Gen4 and above:
1027 * wait for the pipe register state bit to turn off 1027 * wait for the pipe register state bit to turn off
1028 * 1028 *
1029 * Otherwise: 1029 * Otherwise:
1030 * wait for the display line value to settle (it usually 1030 * wait for the display line value to settle (it usually
1031 * ends up stopping at the start of the next frame). 1031 * ends up stopping at the start of the next frame).
1032 * 1032 *
1033 */ 1033 */
1034 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) 1034 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1035 { 1035 {
1036 struct drm_i915_private *dev_priv = dev->dev_private; 1036 struct drm_i915_private *dev_priv = dev->dev_private;
1037 1037
1038 if (INTEL_INFO(dev)->gen >= 4) { 1038 if (INTEL_INFO(dev)->gen >= 4) {
1039 int reg = PIPECONF(pipe); 1039 int reg = PIPECONF(pipe);
1040 1040
1041 /* Wait for the Pipe State to go off */ 1041 /* Wait for the Pipe State to go off */
1042 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 1042 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1043 100)) 1043 100))
1044 DRM_DEBUG_KMS("pipe_off wait timed out\n"); 1044 DRM_DEBUG_KMS("pipe_off wait timed out\n");
1045 } else { 1045 } else {
1046 u32 last_line; 1046 u32 last_line;
1047 int reg = PIPEDSL(pipe); 1047 int reg = PIPEDSL(pipe);
1048 unsigned long timeout = jiffies + msecs_to_jiffies(100); 1048 unsigned long timeout = jiffies + msecs_to_jiffies(100);
1049 1049
1050 /* Wait for the display line to settle */ 1050 /* Wait for the display line to settle */
1051 do { 1051 do {
1052 last_line = I915_READ(reg) & DSL_LINEMASK; 1052 last_line = I915_READ(reg) & DSL_LINEMASK;
1053 mdelay(5); 1053 mdelay(5);
1054 } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) && 1054 } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
1055 time_after(timeout, jiffies)); 1055 time_after(timeout, jiffies));
1056 if (time_after(jiffies, timeout)) 1056 if (time_after(jiffies, timeout))
1057 DRM_DEBUG_KMS("pipe_off wait timed out\n"); 1057 DRM_DEBUG_KMS("pipe_off wait timed out\n");
1058 } 1058 }
1059 } 1059 }
1060 1060
1061 static const char *state_string(bool enabled) 1061 static const char *state_string(bool enabled)
1062 { 1062 {
1063 return enabled ? "on" : "off"; 1063 return enabled ? "on" : "off";
1064 } 1064 }
1065 1065
1066 /* Only for pre-ILK configs */ 1066 /* Only for pre-ILK configs */
1067 static void assert_pll(struct drm_i915_private *dev_priv, 1067 static void assert_pll(struct drm_i915_private *dev_priv,
1068 enum pipe pipe, bool state) 1068 enum pipe pipe, bool state)
1069 { 1069 {
1070 int reg; 1070 int reg;
1071 u32 val; 1071 u32 val;
1072 bool cur_state; 1072 bool cur_state;
1073 1073
1074 reg = DPLL(pipe); 1074 reg = DPLL(pipe);
1075 val = I915_READ(reg); 1075 val = I915_READ(reg);
1076 cur_state = !!(val & DPLL_VCO_ENABLE); 1076 cur_state = !!(val & DPLL_VCO_ENABLE);
1077 WARN(cur_state != state, 1077 WARN(cur_state != state,
1078 "PLL state assertion failure (expected %s, current %s)\n", 1078 "PLL state assertion failure (expected %s, current %s)\n",
1079 state_string(state), state_string(cur_state)); 1079 state_string(state), state_string(cur_state));
1080 } 1080 }
1081 #define assert_pll_enabled(d, p) assert_pll(d, p, true) 1081 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
1082 #define assert_pll_disabled(d, p) assert_pll(d, p, false) 1082 #define assert_pll_disabled(d, p) assert_pll(d, p, false)
1083 1083
1084 /* For ILK+ */ 1084 /* For ILK+ */
1085 static void assert_pch_pll(struct drm_i915_private *dev_priv, 1085 static void assert_pch_pll(struct drm_i915_private *dev_priv,
1086 enum pipe pipe, bool state) 1086 enum pipe pipe, bool state)
1087 { 1087 {
1088 int reg; 1088 int reg;
1089 u32 val; 1089 u32 val;
1090 bool cur_state; 1090 bool cur_state;
1091 1091
1092 reg = PCH_DPLL(pipe); 1092 reg = PCH_DPLL(pipe);
1093 val = I915_READ(reg); 1093 val = I915_READ(reg);
1094 cur_state = !!(val & DPLL_VCO_ENABLE); 1094 cur_state = !!(val & DPLL_VCO_ENABLE);
1095 WARN(cur_state != state, 1095 WARN(cur_state != state,
1096 "PCH PLL state assertion failure (expected %s, current %s)\n", 1096 "PCH PLL state assertion failure (expected %s, current %s)\n",
1097 state_string(state), state_string(cur_state)); 1097 state_string(state), state_string(cur_state));
1098 } 1098 }
1099 #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true) 1099 #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
1100 #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false) 1100 #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
1101 1101
1102 static void assert_fdi_tx(struct drm_i915_private *dev_priv, 1102 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1103 enum pipe pipe, bool state) 1103 enum pipe pipe, bool state)
1104 { 1104 {
1105 int reg; 1105 int reg;
1106 u32 val; 1106 u32 val;
1107 bool cur_state; 1107 bool cur_state;
1108 1108
1109 reg = FDI_TX_CTL(pipe); 1109 reg = FDI_TX_CTL(pipe);
1110 val = I915_READ(reg); 1110 val = I915_READ(reg);
1111 cur_state = !!(val & FDI_TX_ENABLE); 1111 cur_state = !!(val & FDI_TX_ENABLE);
1112 WARN(cur_state != state, 1112 WARN(cur_state != state,
1113 "FDI TX state assertion failure (expected %s, current %s)\n", 1113 "FDI TX state assertion failure (expected %s, current %s)\n",
1114 state_string(state), state_string(cur_state)); 1114 state_string(state), state_string(cur_state));
1115 } 1115 }
1116 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) 1116 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1117 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) 1117 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1118 1118
1119 static void assert_fdi_rx(struct drm_i915_private *dev_priv, 1119 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1120 enum pipe pipe, bool state) 1120 enum pipe pipe, bool state)
1121 { 1121 {
1122 int reg; 1122 int reg;
1123 u32 val; 1123 u32 val;
1124 bool cur_state; 1124 bool cur_state;
1125 1125
1126 reg = FDI_RX_CTL(pipe); 1126 reg = FDI_RX_CTL(pipe);
1127 val = I915_READ(reg); 1127 val = I915_READ(reg);
1128 cur_state = !!(val & FDI_RX_ENABLE); 1128 cur_state = !!(val & FDI_RX_ENABLE);
1129 WARN(cur_state != state, 1129 WARN(cur_state != state,
1130 "FDI RX state assertion failure (expected %s, current %s)\n", 1130 "FDI RX state assertion failure (expected %s, current %s)\n",
1131 state_string(state), state_string(cur_state)); 1131 state_string(state), state_string(cur_state));
1132 } 1132 }
1133 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) 1133 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1134 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) 1134 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1135 1135
1136 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, 1136 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1137 enum pipe pipe) 1137 enum pipe pipe)
1138 { 1138 {
1139 int reg; 1139 int reg;
1140 u32 val; 1140 u32 val;
1141 1141
1142 /* ILK FDI PLL is always enabled */ 1142 /* ILK FDI PLL is always enabled */
1143 if (dev_priv->info->gen == 5) 1143 if (dev_priv->info->gen == 5)
1144 return; 1144 return;
1145 1145
1146 reg = FDI_TX_CTL(pipe); 1146 reg = FDI_TX_CTL(pipe);
1147 val = I915_READ(reg); 1147 val = I915_READ(reg);
1148 WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 1148 WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1149 } 1149 }
1150 1150
1151 static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, 1151 static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
1152 enum pipe pipe) 1152 enum pipe pipe)
1153 { 1153 {
1154 int reg; 1154 int reg;
1155 u32 val; 1155 u32 val;
1156 1156
1157 reg = FDI_RX_CTL(pipe); 1157 reg = FDI_RX_CTL(pipe);
1158 val = I915_READ(reg); 1158 val = I915_READ(reg);
1159 WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); 1159 WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
1160 } 1160 }
1161 1161
1162 static void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1162 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1163 enum pipe pipe) 1163 enum pipe pipe)
1164 { 1164 {
1165 int pp_reg, lvds_reg; 1165 int pp_reg, lvds_reg;
1166 u32 val; 1166 u32 val;
1167 enum pipe panel_pipe = PIPE_A; 1167 enum pipe panel_pipe = PIPE_A;
1168 bool locked = true; /* assume locked until we see it unlocked */ 1168 bool locked = true; /* assume locked until we see it unlocked */
1169 1169
1170 if (HAS_PCH_SPLIT(dev_priv->dev)) { 1170 if (HAS_PCH_SPLIT(dev_priv->dev)) {
1171 pp_reg = PCH_PP_CONTROL; 1171 pp_reg = PCH_PP_CONTROL;
1172 lvds_reg = PCH_LVDS; 1172 lvds_reg = PCH_LVDS;
1173 } else { 1173 } else {
1174 pp_reg = PP_CONTROL; 1174 pp_reg = PP_CONTROL;
1175 lvds_reg = LVDS; 1175 lvds_reg = LVDS;
1176 } 1176 }
1177 1177
1178 val = I915_READ(pp_reg); 1178 val = I915_READ(pp_reg);
1179 if (!(val & PANEL_POWER_ON) || 1179 if (!(val & PANEL_POWER_ON) ||
1180 ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)) 1180 ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1181 locked = false; 1181 locked = false;
1182 1182
1183 if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT) 1183 if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1184 panel_pipe = PIPE_B; 1184 panel_pipe = PIPE_B;
1185 1185
1186 WARN(panel_pipe == pipe && locked, 1186 WARN(panel_pipe == pipe && locked,
1187 "panel assertion failure, pipe %c regs locked\n", 1187 "panel assertion failure, pipe %c regs locked\n",
1188 pipe_name(pipe)); 1188 pipe_name(pipe));
1189 } 1189 }
1190 1190
1191 static void assert_pipe(struct drm_i915_private *dev_priv, 1191 static void assert_pipe(struct drm_i915_private *dev_priv,
1192 enum pipe pipe, bool state) 1192 enum pipe pipe, bool state)
1193 { 1193 {
1194 int reg; 1194 int reg;
1195 u32 val; 1195 u32 val;
1196 bool cur_state; 1196 bool cur_state;
1197 1197
1198 reg = PIPECONF(pipe); 1198 reg = PIPECONF(pipe);
1199 val = I915_READ(reg); 1199 val = I915_READ(reg);
1200 cur_state = !!(val & PIPECONF_ENABLE); 1200 cur_state = !!(val & PIPECONF_ENABLE);
1201 WARN(cur_state != state, 1201 WARN(cur_state != state,
1202 "pipe %c assertion failure (expected %s, current %s)\n", 1202 "pipe %c assertion failure (expected %s, current %s)\n",
1203 pipe_name(pipe), state_string(state), state_string(cur_state)); 1203 pipe_name(pipe), state_string(state), state_string(cur_state));
1204 } 1204 }
1205 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) 1205 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
1206 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) 1206 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
1207 1207
1208 static void assert_plane_enabled(struct drm_i915_private *dev_priv, 1208 static void assert_plane_enabled(struct drm_i915_private *dev_priv,
1209 enum plane plane) 1209 enum plane plane)
1210 { 1210 {
1211 int reg; 1211 int reg;
1212 u32 val; 1212 u32 val;
1213 1213
1214 reg = DSPCNTR(plane); 1214 reg = DSPCNTR(plane);
1215 val = I915_READ(reg); 1215 val = I915_READ(reg);
1216 WARN(!(val & DISPLAY_PLANE_ENABLE), 1216 WARN(!(val & DISPLAY_PLANE_ENABLE),
1217 "plane %c assertion failure, should be active but is disabled\n", 1217 "plane %c assertion failure, should be active but is disabled\n",
1218 plane_name(plane)); 1218 plane_name(plane));
1219 } 1219 }
1220 1220
1221 static void assert_planes_disabled(struct drm_i915_private *dev_priv, 1221 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1222 enum pipe pipe) 1222 enum pipe pipe)
1223 { 1223 {
1224 int reg, i; 1224 int reg, i;
1225 u32 val; 1225 u32 val;
1226 int cur_pipe; 1226 int cur_pipe;
1227 1227
1228 /* Planes are fixed to pipes on ILK+ */ 1228 /* Planes are fixed to pipes on ILK+ */
1229 if (HAS_PCH_SPLIT(dev_priv->dev)) 1229 if (HAS_PCH_SPLIT(dev_priv->dev))
1230 return; 1230 return;
1231 1231
1232 /* Need to check both planes against the pipe */ 1232 /* Need to check both planes against the pipe */
1233 for (i = 0; i < 2; i++) { 1233 for (i = 0; i < 2; i++) {
1234 reg = DSPCNTR(i); 1234 reg = DSPCNTR(i);
1235 val = I915_READ(reg); 1235 val = I915_READ(reg);
1236 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 1236 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1237 DISPPLANE_SEL_PIPE_SHIFT; 1237 DISPPLANE_SEL_PIPE_SHIFT;
1238 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, 1238 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1239 "plane %c assertion failure, should be off on pipe %c but is still active\n", 1239 "plane %c assertion failure, should be off on pipe %c but is still active\n",
1240 plane_name(i), pipe_name(pipe)); 1240 plane_name(i), pipe_name(pipe));
1241 } 1241 }
1242 } 1242 }
1243 1243
1244 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) 1244 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1245 { 1245 {
1246 u32 val; 1246 u32 val;
1247 bool enabled; 1247 bool enabled;
1248 1248
1249 val = I915_READ(PCH_DREF_CONTROL); 1249 val = I915_READ(PCH_DREF_CONTROL);
1250 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 1250 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1251 DREF_SUPERSPREAD_SOURCE_MASK)); 1251 DREF_SUPERSPREAD_SOURCE_MASK));
1252 WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); 1252 WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1253 } 1253 }
1254 1254
1255 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, 1255 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1256 enum pipe pipe) 1256 enum pipe pipe)
1257 { 1257 {
1258 int reg; 1258 int reg;
1259 u32 val; 1259 u32 val;
1260 bool enabled; 1260 bool enabled;
1261 1261
1262 reg = TRANSCONF(pipe); 1262 reg = TRANSCONF(pipe);
1263 val = I915_READ(reg); 1263 val = I915_READ(reg);
1264 enabled = !!(val & TRANS_ENABLE); 1264 enabled = !!(val & TRANS_ENABLE);
1265 WARN(enabled, 1265 WARN(enabled,
1266 "transcoder assertion failed, should be off on pipe %c but is still active\n", 1266 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1267 pipe_name(pipe)); 1267 pipe_name(pipe));
1268 } 1268 }
1269 1269
1270 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1270 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1271 enum pipe pipe, int reg) 1271 enum pipe pipe, int reg)
1272 { 1272 {
1273 u32 val = I915_READ(reg); 1273 u32 val = I915_READ(reg);
1274 WARN(DP_PIPE_ENABLED(val, pipe), 1274 WARN(DP_PIPE_ENABLED(val, pipe),
1275 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1275 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1276 reg, pipe_name(pipe)); 1276 reg, pipe_name(pipe));
1277 } 1277 }
1278 1278
1279 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1279 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1280 enum pipe pipe, int reg) 1280 enum pipe pipe, int reg)
1281 { 1281 {
1282 u32 val = I915_READ(reg); 1282 u32 val = I915_READ(reg);
1283 WARN(HDMI_PIPE_ENABLED(val, pipe), 1283 WARN(HDMI_PIPE_ENABLED(val, pipe),
1284 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1284 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1285 reg, pipe_name(pipe)); 1285 reg, pipe_name(pipe));
1286 } 1286 }
1287 1287
1288 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 1288 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1289 enum pipe pipe) 1289 enum pipe pipe)
1290 { 1290 {
1291 int reg; 1291 int reg;
1292 u32 val; 1292 u32 val;
1293 1293
1294 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B); 1294 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B);
1295 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C); 1295 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C);
1296 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D); 1296 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D);
1297 1297
1298 reg = PCH_ADPA; 1298 reg = PCH_ADPA;
1299 val = I915_READ(reg); 1299 val = I915_READ(reg);
1300 WARN(ADPA_PIPE_ENABLED(val, pipe), 1300 WARN(ADPA_PIPE_ENABLED(val, pipe),
1301 "PCH VGA enabled on transcoder %c, should be disabled\n", 1301 "PCH VGA enabled on transcoder %c, should be disabled\n",
1302 pipe_name(pipe)); 1302 pipe_name(pipe));
1303 1303
1304 reg = PCH_LVDS; 1304 reg = PCH_LVDS;
1305 val = I915_READ(reg); 1305 val = I915_READ(reg);
1306 WARN(LVDS_PIPE_ENABLED(val, pipe), 1306 WARN(LVDS_PIPE_ENABLED(val, pipe),
1307 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1307 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1308 pipe_name(pipe)); 1308 pipe_name(pipe));
1309 1309
1310 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); 1310 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1311 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); 1311 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1312 assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); 1312 assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1313 } 1313 }
1314 1314
1315 /** 1315 /**
1316 * intel_enable_pll - enable a PLL 1316 * intel_enable_pll - enable a PLL
1317 * @dev_priv: i915 private structure 1317 * @dev_priv: i915 private structure
1318 * @pipe: pipe PLL to enable 1318 * @pipe: pipe PLL to enable
1319 * 1319 *
1320 * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to 1320 * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
1321 * make sure the PLL reg is writable first though, since the panel write 1321 * make sure the PLL reg is writable first though, since the panel write
1322 * protect mechanism may be enabled. 1322 * protect mechanism may be enabled.
1323 * 1323 *
1324 * Note! This is for pre-ILK only. 1324 * Note! This is for pre-ILK only.
1325 */ 1325 */
1326 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 1326 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1327 { 1327 {
1328 int reg; 1328 int reg;
1329 u32 val; 1329 u32 val;
1330 1330
1331 /* No really, not for ILK+ */ 1331 /* No really, not for ILK+ */
1332 BUG_ON(dev_priv->info->gen >= 5); 1332 BUG_ON(dev_priv->info->gen >= 5);
1333 1333
1334 /* PLL is protected by panel, make sure we can write it */ 1334 /* PLL is protected by panel, make sure we can write it */
1335 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) 1335 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1336 assert_panel_unlocked(dev_priv, pipe); 1336 assert_panel_unlocked(dev_priv, pipe);
1337 1337
1338 reg = DPLL(pipe); 1338 reg = DPLL(pipe);
1339 val = I915_READ(reg); 1339 val = I915_READ(reg);
1340 val |= DPLL_VCO_ENABLE; 1340 val |= DPLL_VCO_ENABLE;
1341 1341
1342 /* We do this three times for luck */ 1342 /* We do this three times for luck */
1343 I915_WRITE(reg, val); 1343 I915_WRITE(reg, val);
1344 POSTING_READ(reg); 1344 POSTING_READ(reg);
1345 udelay(150); /* wait for warmup */ 1345 udelay(150); /* wait for warmup */
1346 I915_WRITE(reg, val); 1346 I915_WRITE(reg, val);
1347 POSTING_READ(reg); 1347 POSTING_READ(reg);
1348 udelay(150); /* wait for warmup */ 1348 udelay(150); /* wait for warmup */
1349 I915_WRITE(reg, val); 1349 I915_WRITE(reg, val);
1350 POSTING_READ(reg); 1350 POSTING_READ(reg);
1351 udelay(150); /* wait for warmup */ 1351 udelay(150); /* wait for warmup */
1352 } 1352 }
1353 1353
1354 /** 1354 /**
1355 * intel_disable_pll - disable a PLL 1355 * intel_disable_pll - disable a PLL
1356 * @dev_priv: i915 private structure 1356 * @dev_priv: i915 private structure
1357 * @pipe: pipe PLL to disable 1357 * @pipe: pipe PLL to disable
1358 * 1358 *
1359 * Disable the PLL for @pipe, making sure the pipe is off first. 1359 * Disable the PLL for @pipe, making sure the pipe is off first.
1360 * 1360 *
1361 * Note! This is for pre-ILK only. 1361 * Note! This is for pre-ILK only.
1362 */ 1362 */
1363 static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 1363 static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1364 { 1364 {
1365 int reg; 1365 int reg;
1366 u32 val; 1366 u32 val;
1367 1367
1368 /* Don't disable pipe A or the pipe A PLL if the PIPEA_FORCE quirk is active */ 1368 /* Don't disable pipe A or the pipe A PLL if the PIPEA_FORCE quirk is active */
1369 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 1369 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1370 return; 1370 return;
1371 1371
1372 /* Make sure the pipe isn't still relying on us */ 1372 /* Make sure the pipe isn't still relying on us */
1373 assert_pipe_disabled(dev_priv, pipe); 1373 assert_pipe_disabled(dev_priv, pipe);
1374 1374
1375 reg = DPLL(pipe); 1375 reg = DPLL(pipe);
1376 val = I915_READ(reg); 1376 val = I915_READ(reg);
1377 val &= ~DPLL_VCO_ENABLE; 1377 val &= ~DPLL_VCO_ENABLE;
1378 I915_WRITE(reg, val); 1378 I915_WRITE(reg, val);
1379 POSTING_READ(reg); 1379 POSTING_READ(reg);
1380 } 1380 }
1381 1381
1382 /** 1382 /**
1383 * intel_enable_pch_pll - enable PCH PLL 1383 * intel_enable_pch_pll - enable PCH PLL
1384 * @dev_priv: i915 private structure 1384 * @dev_priv: i915 private structure
1385 * @pipe: pipe PLL to enable 1385 * @pipe: pipe PLL to enable
1386 * 1386 *
1387 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1387 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1388 * drives the transcoder clock. 1388 * drives the transcoder clock.
1389 */ 1389 */
1390 static void intel_enable_pch_pll(struct drm_i915_private *dev_priv, 1390 static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1391 enum pipe pipe) 1391 enum pipe pipe)
1392 { 1392 {
1393 int reg; 1393 int reg;
1394 u32 val; 1394 u32 val;
1395 1395
1396 /* PCH only available on ILK+ */ 1396 /* PCH only available on ILK+ */
1397 BUG_ON(dev_priv->info->gen < 5); 1397 BUG_ON(dev_priv->info->gen < 5);
1398 1398
1399 /* PCH refclock must be enabled first */ 1399 /* PCH refclock must be enabled first */
1400 assert_pch_refclk_enabled(dev_priv); 1400 assert_pch_refclk_enabled(dev_priv);
1401 1401
1402 reg = PCH_DPLL(pipe); 1402 reg = PCH_DPLL(pipe);
1403 val = I915_READ(reg); 1403 val = I915_READ(reg);
1404 val |= DPLL_VCO_ENABLE; 1404 val |= DPLL_VCO_ENABLE;
1405 I915_WRITE(reg, val); 1405 I915_WRITE(reg, val);
1406 POSTING_READ(reg); 1406 POSTING_READ(reg);
1407 udelay(200); 1407 udelay(200);
1408 } 1408 }
1409 1409
1410 static void intel_disable_pch_pll(struct drm_i915_private *dev_priv, 1410 static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1411 enum pipe pipe) 1411 enum pipe pipe)
1412 { 1412 {
1413 int reg; 1413 int reg;
1414 u32 val; 1414 u32 val;
1415 1415
1416 /* PCH only available on ILK+ */ 1416 /* PCH only available on ILK+ */
1417 BUG_ON(dev_priv->info->gen < 5); 1417 BUG_ON(dev_priv->info->gen < 5);
1418 1418
1419 /* Make sure transcoder isn't still depending on us */ 1419 /* Make sure transcoder isn't still depending on us */
1420 assert_transcoder_disabled(dev_priv, pipe); 1420 assert_transcoder_disabled(dev_priv, pipe);
1421 1421
1422 reg = PCH_DPLL(pipe); 1422 reg = PCH_DPLL(pipe);
1423 val = I915_READ(reg); 1423 val = I915_READ(reg);
1424 val &= ~DPLL_VCO_ENABLE; 1424 val &= ~DPLL_VCO_ENABLE;
1425 I915_WRITE(reg, val); 1425 I915_WRITE(reg, val);
1426 POSTING_READ(reg); 1426 POSTING_READ(reg);
1427 udelay(200); 1427 udelay(200);
1428 } 1428 }
1429 1429
1430 static void intel_enable_transcoder(struct drm_i915_private *dev_priv, 1430 static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1431 enum pipe pipe) 1431 enum pipe pipe)
1432 { 1432 {
1433 int reg; 1433 int reg;
1434 u32 val; 1434 u32 val;
1435 1435
1436 /* PCH only available on ILK+ */ 1436 /* PCH only available on ILK+ */
1437 BUG_ON(dev_priv->info->gen < 5); 1437 BUG_ON(dev_priv->info->gen < 5);
1438 1438
1439 /* Make sure PCH DPLL is enabled */ 1439 /* Make sure PCH DPLL is enabled */
1440 assert_pch_pll_enabled(dev_priv, pipe); 1440 assert_pch_pll_enabled(dev_priv, pipe);
1441 1441
1442 /* FDI must be feeding us bits for PCH ports */ 1442 /* FDI must be feeding us bits for PCH ports */
1443 assert_fdi_tx_enabled(dev_priv, pipe); 1443 assert_fdi_tx_enabled(dev_priv, pipe);
1444 assert_fdi_rx_enabled(dev_priv, pipe); 1444 assert_fdi_rx_enabled(dev_priv, pipe);
1445 1445
1446 reg = TRANSCONF(pipe); 1446 reg = TRANSCONF(pipe);
1447 val = I915_READ(reg); 1447 val = I915_READ(reg);
1448 /* 1448 /*
1449 * make the BPC in transcoder be consistent with 1449 * make the BPC in transcoder be consistent with
1450 * that in pipeconf reg. 1450 * that in pipeconf reg.
1451 */ 1451 */
1452 val &= ~PIPE_BPC_MASK; 1452 val &= ~PIPE_BPC_MASK;
1453 val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; 1453 val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
1454 I915_WRITE(reg, val | TRANS_ENABLE); 1454 I915_WRITE(reg, val | TRANS_ENABLE);
1455 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 1455 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1456 DRM_ERROR("failed to enable transcoder %d\n", pipe); 1456 DRM_ERROR("failed to enable transcoder %d\n", pipe);
1457 } 1457 }
1458 1458
1459 static void intel_disable_transcoder(struct drm_i915_private *dev_priv, 1459 static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1460 enum pipe pipe) 1460 enum pipe pipe)
1461 { 1461 {
1462 int reg; 1462 int reg;
1463 u32 val; 1463 u32 val;
1464 1464
1465 /* FDI relies on the transcoder */ 1465 /* FDI relies on the transcoder */
1466 assert_fdi_tx_disabled(dev_priv, pipe); 1466 assert_fdi_tx_disabled(dev_priv, pipe);
1467 assert_fdi_rx_disabled(dev_priv, pipe); 1467 assert_fdi_rx_disabled(dev_priv, pipe);
1468 1468
1469 /* Ports must be off as well */ 1469 /* Ports must be off as well */
1470 assert_pch_ports_disabled(dev_priv, pipe); 1470 assert_pch_ports_disabled(dev_priv, pipe);
1471 1471
1472 reg = TRANSCONF(pipe); 1472 reg = TRANSCONF(pipe);
1473 val = I915_READ(reg); 1473 val = I915_READ(reg);
1474 val &= ~TRANS_ENABLE; 1474 val &= ~TRANS_ENABLE;
1475 I915_WRITE(reg, val); 1475 I915_WRITE(reg, val);
1476 /* wait for PCH transcoder off, transcoder state */ 1476 /* wait for PCH transcoder off, transcoder state */
1477 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 1477 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1478 DRM_ERROR("failed to disable transcoder\n"); 1478 DRM_ERROR("failed to disable transcoder\n");
1479 } 1479 }
1480 1480
1481 /** 1481 /**
1482 * intel_enable_pipe - enable a pipe, asserting requirements 1482 * intel_enable_pipe - enable a pipe, asserting requirements
1483 * @dev_priv: i915 private structure 1483 * @dev_priv: i915 private structure
1484 * @pipe: pipe to enable 1484 * @pipe: pipe to enable
1485 * @pch_port: on ILK+, is this pipe driving a PCH port or not 1485 * @pch_port: on ILK+, is this pipe driving a PCH port or not
1486 * 1486 *
1487 * Enable @pipe, making sure that various hardware specific requirements 1487 * Enable @pipe, making sure that various hardware specific requirements
1488 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. 1488 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1489 * 1489 *
1490 * @pipe should be %PIPE_A or %PIPE_B. 1490 * @pipe should be %PIPE_A or %PIPE_B.
1491 * 1491 *
1492 * Will wait until the pipe is actually running (i.e. first vblank) before 1492 * Will wait until the pipe is actually running (i.e. first vblank) before
1493 * returning. 1493 * returning.
1494 */ 1494 */
1495 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 1495 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1496 bool pch_port) 1496 bool pch_port)
1497 { 1497 {
1498 int reg; 1498 int reg;
1499 u32 val; 1499 u32 val;
1500 1500
1501 /* 1501 /*
1502 * A pipe without a PLL won't actually be able to drive bits from 1502 * A pipe without a PLL won't actually be able to drive bits from
1503 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 1503 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1504 * need the check. 1504 * need the check.
1505 */ 1505 */
1506 if (!HAS_PCH_SPLIT(dev_priv->dev)) 1506 if (!HAS_PCH_SPLIT(dev_priv->dev))
1507 assert_pll_enabled(dev_priv, pipe); 1507 assert_pll_enabled(dev_priv, pipe);
1508 else { 1508 else {
1509 if (pch_port) { 1509 if (pch_port) {
1510 /* if driving the PCH, we need FDI enabled */ 1510 /* if driving the PCH, we need FDI enabled */
1511 assert_fdi_rx_pll_enabled(dev_priv, pipe); 1511 assert_fdi_rx_pll_enabled(dev_priv, pipe);
1512 assert_fdi_tx_pll_enabled(dev_priv, pipe); 1512 assert_fdi_tx_pll_enabled(dev_priv, pipe);
1513 } 1513 }
1514 /* FIXME: assert CPU port conditions for SNB+ */ 1514 /* FIXME: assert CPU port conditions for SNB+ */
1515 } 1515 }
1516 1516
1517 reg = PIPECONF(pipe); 1517 reg = PIPECONF(pipe);
1518 val = I915_READ(reg); 1518 val = I915_READ(reg);
1519 if (val & PIPECONF_ENABLE) 1519 if (val & PIPECONF_ENABLE)
1520 return; 1520 return;
1521 1521
1522 I915_WRITE(reg, val | PIPECONF_ENABLE); 1522 I915_WRITE(reg, val | PIPECONF_ENABLE);
1523 intel_wait_for_vblank(dev_priv->dev, pipe); 1523 intel_wait_for_vblank(dev_priv->dev, pipe);
1524 } 1524 }
1525 1525
1526 /** 1526 /**
1527 * intel_disable_pipe - disable a pipe, asserting requirements 1527 * intel_disable_pipe - disable a pipe, asserting requirements
1528 * @dev_priv: i915 private structure 1528 * @dev_priv: i915 private structure
1529 * @pipe: pipe to disable 1529 * @pipe: pipe to disable
1530 * 1530 *
1531 * Disable @pipe, making sure that various hardware specific requirements 1531 * Disable @pipe, making sure that various hardware specific requirements
1532 * are met, if applicable, e.g. plane disabled, panel fitter off, etc. 1532 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1533 * 1533 *
1534 * @pipe should be %PIPE_A or %PIPE_B. 1534 * @pipe should be %PIPE_A or %PIPE_B.
1535 * 1535 *
1536 * Will wait until the pipe has shut down before returning. 1536 * Will wait until the pipe has shut down before returning.
1537 */ 1537 */
1538 static void intel_disable_pipe(struct drm_i915_private *dev_priv, 1538 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1539 enum pipe pipe) 1539 enum pipe pipe)
1540 { 1540 {
1541 int reg; 1541 int reg;
1542 u32 val; 1542 u32 val;
1543 1543
1544 /* 1544 /*
1545 * Make sure planes won't keep trying to pump pixels to us, 1545 * Make sure planes won't keep trying to pump pixels to us,
1546 * or we might hang the display. 1546 * or we might hang the display.
1547 */ 1547 */
1548 assert_planes_disabled(dev_priv, pipe); 1548 assert_planes_disabled(dev_priv, pipe);
1549 1549
1550 /* Don't disable pipe A or pipe A PLLs if needed */ 1550 /* Don't disable pipe A or pipe A PLLs if needed */
1551 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 1551 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1552 return; 1552 return;
1553 1553
1554 reg = PIPECONF(pipe); 1554 reg = PIPECONF(pipe);
1555 val = I915_READ(reg); 1555 val = I915_READ(reg);
1556 if ((val & PIPECONF_ENABLE) == 0) 1556 if ((val & PIPECONF_ENABLE) == 0)
1557 return; 1557 return;
1558 1558
1559 I915_WRITE(reg, val & ~PIPECONF_ENABLE); 1559 I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1560 intel_wait_for_pipe_off(dev_priv->dev, pipe); 1560 intel_wait_for_pipe_off(dev_priv->dev, pipe);
1561 } 1561 }
1562 1562
1563 /** 1563 /**
1564 * intel_enable_plane - enable a display plane on a given pipe 1564 * intel_enable_plane - enable a display plane on a given pipe
1565 * @dev_priv: i915 private structure 1565 * @dev_priv: i915 private structure
1566 * @plane: plane to enable 1566 * @plane: plane to enable
1567 * @pipe: pipe being fed 1567 * @pipe: pipe being fed
1568 * 1568 *
1569 * Enable @plane on @pipe, making sure that @pipe is running first. 1569 * Enable @plane on @pipe, making sure that @pipe is running first.
1570 */ 1570 */
1571 static void intel_enable_plane(struct drm_i915_private *dev_priv, 1571 static void intel_enable_plane(struct drm_i915_private *dev_priv,
1572 enum plane plane, enum pipe pipe) 1572 enum plane plane, enum pipe pipe)
1573 { 1573 {
1574 int reg; 1574 int reg;
1575 u32 val; 1575 u32 val;
1576 1576
1577 /* If the pipe isn't enabled, we can't pump pixels and may hang */ 1577 /* If the pipe isn't enabled, we can't pump pixels and may hang */
1578 assert_pipe_enabled(dev_priv, pipe); 1578 assert_pipe_enabled(dev_priv, pipe);
1579 1579
1580 reg = DSPCNTR(plane); 1580 reg = DSPCNTR(plane);
1581 val = I915_READ(reg); 1581 val = I915_READ(reg);
1582 if (val & DISPLAY_PLANE_ENABLE) 1582 if (val & DISPLAY_PLANE_ENABLE)
1583 return; 1583 return;
1584 1584
1585 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); 1585 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1586 intel_wait_for_vblank(dev_priv->dev, pipe); 1586 intel_wait_for_vblank(dev_priv->dev, pipe);
1587 } 1587 }
1588 1588
1589 /* 1589 /*
1590 * Plane regs are double buffered, going from enabled->disabled needs a 1590 * Plane regs are double buffered, going from enabled->disabled needs a
1591 * trigger in order to latch. The display address reg provides this. 1591 * trigger in order to latch. The display address reg provides this.
1592 */ 1592 */
1593 static void intel_flush_display_plane(struct drm_i915_private *dev_priv, 1593 static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1594 enum plane plane) 1594 enum plane plane)
1595 { 1595 {
1596 u32 reg = DSPADDR(plane); 1596 u32 reg = DSPADDR(plane);
1597 I915_WRITE(reg, I915_READ(reg)); 1597 I915_WRITE(reg, I915_READ(reg));
1598 } 1598 }
1599 1599
1600 /** 1600 /**
1601 * intel_disable_plane - disable a display plane 1601 * intel_disable_plane - disable a display plane
1602 * @dev_priv: i915 private structure 1602 * @dev_priv: i915 private structure
1603 * @plane: plane to disable 1603 * @plane: plane to disable
1604 * @pipe: pipe consuming the data 1604 * @pipe: pipe consuming the data
1605 * 1605 *
1606 * Disable @plane; should be an independent operation. 1606 * Disable @plane; should be an independent operation.
1607 */ 1607 */
1608 static void intel_disable_plane(struct drm_i915_private *dev_priv, 1608 static void intel_disable_plane(struct drm_i915_private *dev_priv,
1609 enum plane plane, enum pipe pipe) 1609 enum plane plane, enum pipe pipe)
1610 { 1610 {
1611 int reg; 1611 int reg;
1612 u32 val; 1612 u32 val;
1613 1613
1614 reg = DSPCNTR(plane); 1614 reg = DSPCNTR(plane);
1615 val = I915_READ(reg); 1615 val = I915_READ(reg);
1616 if ((val & DISPLAY_PLANE_ENABLE) == 0) 1616 if ((val & DISPLAY_PLANE_ENABLE) == 0)
1617 return; 1617 return;
1618 1618
1619 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); 1619 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1620 intel_flush_display_plane(dev_priv, plane); 1620 intel_flush_display_plane(dev_priv, plane);
1621 intel_wait_for_vblank(dev_priv->dev, pipe); 1621 intel_wait_for_vblank(dev_priv->dev, pipe);
1622 } 1622 }
1623 1623
1624 static void disable_pch_dp(struct drm_i915_private *dev_priv, 1624 static void disable_pch_dp(struct drm_i915_private *dev_priv,
1625 enum pipe pipe, int reg) 1625 enum pipe pipe, int reg)
1626 { 1626 {
1627 u32 val = I915_READ(reg); 1627 u32 val = I915_READ(reg);
1628 if (DP_PIPE_ENABLED(val, pipe)) 1628 if (DP_PIPE_ENABLED(val, pipe))
1629 I915_WRITE(reg, val & ~DP_PORT_EN); 1629 I915_WRITE(reg, val & ~DP_PORT_EN);
1630 } 1630 }
1631 1631
1632 static void disable_pch_hdmi(struct drm_i915_private *dev_priv, 1632 static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1633 enum pipe pipe, int reg) 1633 enum pipe pipe, int reg)
1634 { 1634 {
1635 u32 val = I915_READ(reg); 1635 u32 val = I915_READ(reg);
1636 if (HDMI_PIPE_ENABLED(val, pipe)) 1636 if (HDMI_PIPE_ENABLED(val, pipe))
1637 I915_WRITE(reg, val & ~PORT_ENABLE); 1637 I915_WRITE(reg, val & ~PORT_ENABLE);
1638 } 1638 }
1639 1639
1640 /* Disable any ports connected to this transcoder */ 1640 /* Disable any ports connected to this transcoder */
1641 static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, 1641 static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1642 enum pipe pipe) 1642 enum pipe pipe)
1643 { 1643 {
1644 u32 reg, val; 1644 u32 reg, val;
1645 1645
1646 val = I915_READ(PCH_PP_CONTROL); 1646 val = I915_READ(PCH_PP_CONTROL);
1647 I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); 1647 I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1648 1648
1649 disable_pch_dp(dev_priv, pipe, PCH_DP_B); 1649 disable_pch_dp(dev_priv, pipe, PCH_DP_B);
1650 disable_pch_dp(dev_priv, pipe, PCH_DP_C); 1650 disable_pch_dp(dev_priv, pipe, PCH_DP_C);
1651 disable_pch_dp(dev_priv, pipe, PCH_DP_D); 1651 disable_pch_dp(dev_priv, pipe, PCH_DP_D);
1652 1652
1653 reg = PCH_ADPA; 1653 reg = PCH_ADPA;
1654 val = I915_READ(reg); 1654 val = I915_READ(reg);
1655 if (ADPA_PIPE_ENABLED(val, pipe)) 1655 if (ADPA_PIPE_ENABLED(val, pipe))
1656 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); 1656 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1657 1657
1658 reg = PCH_LVDS; 1658 reg = PCH_LVDS;
1659 val = I915_READ(reg); 1659 val = I915_READ(reg);
1660 if (LVDS_PIPE_ENABLED(val, pipe)) { 1660 if (LVDS_PIPE_ENABLED(val, pipe)) {
1661 I915_WRITE(reg, val & ~LVDS_PORT_EN); 1661 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1662 POSTING_READ(reg); 1662 POSTING_READ(reg);
1663 udelay(100); 1663 udelay(100);
1664 } 1664 }
1665 1665
1666 disable_pch_hdmi(dev_priv, pipe, HDMIB); 1666 disable_pch_hdmi(dev_priv, pipe, HDMIB);
1667 disable_pch_hdmi(dev_priv, pipe, HDMIC); 1667 disable_pch_hdmi(dev_priv, pipe, HDMIC);
1668 disable_pch_hdmi(dev_priv, pipe, HDMID); 1668 disable_pch_hdmi(dev_priv, pipe, HDMID);
1669 } 1669 }
1670 1670
1671 static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1671 static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1672 { 1672 {
1673 struct drm_device *dev = crtc->dev; 1673 struct drm_device *dev = crtc->dev;
1674 struct drm_i915_private *dev_priv = dev->dev_private; 1674 struct drm_i915_private *dev_priv = dev->dev_private;
1675 struct drm_framebuffer *fb = crtc->fb; 1675 struct drm_framebuffer *fb = crtc->fb;
1676 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1676 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1677 struct drm_i915_gem_object *obj = intel_fb->obj; 1677 struct drm_i915_gem_object *obj = intel_fb->obj;
1678 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1678 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1679 int plane, i; 1679 int plane, i;
1680 u32 fbc_ctl, fbc_ctl2; 1680 u32 fbc_ctl, fbc_ctl2;
1681 1681
1682 if (fb->pitch == dev_priv->cfb_pitch && 1682 if (fb->pitch == dev_priv->cfb_pitch &&
1683 obj->fence_reg == dev_priv->cfb_fence && 1683 obj->fence_reg == dev_priv->cfb_fence &&
1684 intel_crtc->plane == dev_priv->cfb_plane && 1684 intel_crtc->plane == dev_priv->cfb_plane &&
1685 I915_READ(FBC_CONTROL) & FBC_CTL_EN) 1685 I915_READ(FBC_CONTROL) & FBC_CTL_EN)
1686 return; 1686 return;
1687 1687
1688 i8xx_disable_fbc(dev); 1688 i8xx_disable_fbc(dev);
1689 1689
1690 dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; 1690 dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1691 1691
1692 if (fb->pitch < dev_priv->cfb_pitch) 1692 if (fb->pitch < dev_priv->cfb_pitch)
1693 dev_priv->cfb_pitch = fb->pitch; 1693 dev_priv->cfb_pitch = fb->pitch;
1694 1694
1695 /* FBC_CTL wants 64B units */ 1695 /* FBC_CTL wants 64B units */
1696 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1696 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1697 dev_priv->cfb_fence = obj->fence_reg; 1697 dev_priv->cfb_fence = obj->fence_reg;
1698 dev_priv->cfb_plane = intel_crtc->plane; 1698 dev_priv->cfb_plane = intel_crtc->plane;
1699 plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; 1699 plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1700 1700
1701 /* Clear old tags */ 1701 /* Clear old tags */
1702 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) 1702 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1703 I915_WRITE(FBC_TAG + (i * 4), 0); 1703 I915_WRITE(FBC_TAG + (i * 4), 0);
1704 1704
1705 /* Set it up... */ 1705 /* Set it up... */
1706 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; 1706 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
1707 if (obj->tiling_mode != I915_TILING_NONE) 1707 if (obj->tiling_mode != I915_TILING_NONE)
1708 fbc_ctl2 |= FBC_CTL_CPU_FENCE; 1708 fbc_ctl2 |= FBC_CTL_CPU_FENCE;
1709 I915_WRITE(FBC_CONTROL2, fbc_ctl2); 1709 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1710 I915_WRITE(FBC_FENCE_OFF, crtc->y); 1710 I915_WRITE(FBC_FENCE_OFF, crtc->y);
1711 1711
1712 /* enable it... */ 1712 /* enable it... */
1713 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; 1713 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1714 if (IS_I945GM(dev)) 1714 if (IS_I945GM(dev))
1715 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ 1715 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1716 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 1716 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1717 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 1717 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1718 if (obj->tiling_mode != I915_TILING_NONE) 1718 if (obj->tiling_mode != I915_TILING_NONE)
1719 fbc_ctl |= dev_priv->cfb_fence; 1719 fbc_ctl |= dev_priv->cfb_fence;
1720 I915_WRITE(FBC_CONTROL, fbc_ctl); 1720 I915_WRITE(FBC_CONTROL, fbc_ctl);
1721 1721
1722 DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ", 1722 DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
1723 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); 1723 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
1724 } 1724 }
1725 1725
1726 void i8xx_disable_fbc(struct drm_device *dev) 1726 void i8xx_disable_fbc(struct drm_device *dev)
1727 { 1727 {
1728 struct drm_i915_private *dev_priv = dev->dev_private; 1728 struct drm_i915_private *dev_priv = dev->dev_private;
1729 u32 fbc_ctl; 1729 u32 fbc_ctl;
1730 1730
1731 /* Disable compression */ 1731 /* Disable compression */
1732 fbc_ctl = I915_READ(FBC_CONTROL); 1732 fbc_ctl = I915_READ(FBC_CONTROL);
1733 if ((fbc_ctl & FBC_CTL_EN) == 0) 1733 if ((fbc_ctl & FBC_CTL_EN) == 0)
1734 return; 1734 return;
1735 1735
1736 fbc_ctl &= ~FBC_CTL_EN; 1736 fbc_ctl &= ~FBC_CTL_EN;
1737 I915_WRITE(FBC_CONTROL, fbc_ctl); 1737 I915_WRITE(FBC_CONTROL, fbc_ctl);
1738 1738
1739 /* Wait for compressing bit to clear */ 1739 /* Wait for compressing bit to clear */
1740 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { 1740 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1741 DRM_DEBUG_KMS("FBC idle timed out\n"); 1741 DRM_DEBUG_KMS("FBC idle timed out\n");
1742 return; 1742 return;
1743 } 1743 }
1744 1744
1745 DRM_DEBUG_KMS("disabled FBC\n"); 1745 DRM_DEBUG_KMS("disabled FBC\n");
1746 } 1746 }
1747 1747
1748 static bool i8xx_fbc_enabled(struct drm_device *dev) 1748 static bool i8xx_fbc_enabled(struct drm_device *dev)
1749 { 1749 {
1750 struct drm_i915_private *dev_priv = dev->dev_private; 1750 struct drm_i915_private *dev_priv = dev->dev_private;
1751 1751
1752 return I915_READ(FBC_CONTROL) & FBC_CTL_EN; 1752 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1753 } 1753 }
1754 1754
1755 static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1755 static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1756 { 1756 {
1757 struct drm_device *dev = crtc->dev; 1757 struct drm_device *dev = crtc->dev;
1758 struct drm_i915_private *dev_priv = dev->dev_private; 1758 struct drm_i915_private *dev_priv = dev->dev_private;
1759 struct drm_framebuffer *fb = crtc->fb; 1759 struct drm_framebuffer *fb = crtc->fb;
1760 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1760 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1761 struct drm_i915_gem_object *obj = intel_fb->obj; 1761 struct drm_i915_gem_object *obj = intel_fb->obj;
1762 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1762 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1763 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; 1763 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1764 unsigned long stall_watermark = 200; 1764 unsigned long stall_watermark = 200;
1765 u32 dpfc_ctl; 1765 u32 dpfc_ctl;
1766 1766
1767 dpfc_ctl = I915_READ(DPFC_CONTROL); 1767 dpfc_ctl = I915_READ(DPFC_CONTROL);
1768 if (dpfc_ctl & DPFC_CTL_EN) { 1768 if (dpfc_ctl & DPFC_CTL_EN) {
1769 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && 1769 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
1770 dev_priv->cfb_fence == obj->fence_reg && 1770 dev_priv->cfb_fence == obj->fence_reg &&
1771 dev_priv->cfb_plane == intel_crtc->plane && 1771 dev_priv->cfb_plane == intel_crtc->plane &&
1772 dev_priv->cfb_y == crtc->y) 1772 dev_priv->cfb_y == crtc->y)
1773 return; 1773 return;
1774 1774
1775 I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); 1775 I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
1776 intel_wait_for_vblank(dev, intel_crtc->pipe); 1776 intel_wait_for_vblank(dev, intel_crtc->pipe);
1777 } 1777 }
1778 1778
1779 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1779 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1780 dev_priv->cfb_fence = obj->fence_reg; 1780 dev_priv->cfb_fence = obj->fence_reg;
1781 dev_priv->cfb_plane = intel_crtc->plane; 1781 dev_priv->cfb_plane = intel_crtc->plane;
1782 dev_priv->cfb_y = crtc->y; 1782 dev_priv->cfb_y = crtc->y;
1783 1783
1784 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; 1784 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1785 if (obj->tiling_mode != I915_TILING_NONE) { 1785 if (obj->tiling_mode != I915_TILING_NONE) {
1786 dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; 1786 dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
1787 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); 1787 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1788 } else { 1788 } else {
1789 I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY); 1789 I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
1790 } 1790 }
1791 1791
1792 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | 1792 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1793 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 1793 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1794 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); 1794 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1795 I915_WRITE(DPFC_FENCE_YOFF, crtc->y); 1795 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
1796 1796
1797 /* enable it... */ 1797 /* enable it... */
1798 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); 1798 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1799 1799
1800 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 1800 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1801 } 1801 }
1802 1802
1803 void g4x_disable_fbc(struct drm_device *dev) 1803 void g4x_disable_fbc(struct drm_device *dev)
1804 { 1804 {
1805 struct drm_i915_private *dev_priv = dev->dev_private; 1805 struct drm_i915_private *dev_priv = dev->dev_private;
1806 u32 dpfc_ctl; 1806 u32 dpfc_ctl;
1807 1807
1808 /* Disable compression */ 1808 /* Disable compression */
1809 dpfc_ctl = I915_READ(DPFC_CONTROL); 1809 dpfc_ctl = I915_READ(DPFC_CONTROL);
1810 if (dpfc_ctl & DPFC_CTL_EN) { 1810 if (dpfc_ctl & DPFC_CTL_EN) {
1811 dpfc_ctl &= ~DPFC_CTL_EN; 1811 dpfc_ctl &= ~DPFC_CTL_EN;
1812 I915_WRITE(DPFC_CONTROL, dpfc_ctl); 1812 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1813 1813
1814 DRM_DEBUG_KMS("disabled FBC\n"); 1814 DRM_DEBUG_KMS("disabled FBC\n");
1815 } 1815 }
1816 } 1816 }
1817 1817
1818 static bool g4x_fbc_enabled(struct drm_device *dev) 1818 static bool g4x_fbc_enabled(struct drm_device *dev)
1819 { 1819 {
1820 struct drm_i915_private *dev_priv = dev->dev_private; 1820 struct drm_i915_private *dev_priv = dev->dev_private;
1821 1821
1822 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; 1822 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1823 } 1823 }
1824 1824
1825 static void sandybridge_blit_fbc_update(struct drm_device *dev) 1825 static void sandybridge_blit_fbc_update(struct drm_device *dev)
1826 { 1826 {
1827 struct drm_i915_private *dev_priv = dev->dev_private; 1827 struct drm_i915_private *dev_priv = dev->dev_private;
1828 u32 blt_ecoskpd; 1828 u32 blt_ecoskpd;
1829 1829
1830 /* Make sure blitter notifies FBC of writes */ 1830 /* Make sure blitter notifies FBC of writes */
1831 __gen6_gt_force_wake_get(dev_priv); 1831 __gen6_gt_force_wake_get(dev_priv);
1832 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); 1832 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1833 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << 1833 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1834 GEN6_BLITTER_LOCK_SHIFT; 1834 GEN6_BLITTER_LOCK_SHIFT;
1835 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); 1835 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1836 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; 1836 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
1837 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); 1837 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1838 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << 1838 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
1839 GEN6_BLITTER_LOCK_SHIFT); 1839 GEN6_BLITTER_LOCK_SHIFT);
1840 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); 1840 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1841 POSTING_READ(GEN6_BLITTER_ECOSKPD); 1841 POSTING_READ(GEN6_BLITTER_ECOSKPD);
1842 __gen6_gt_force_wake_put(dev_priv); 1842 __gen6_gt_force_wake_put(dev_priv);
1843 } 1843 }
1844 1844
1845 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1845 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1846 { 1846 {
1847 struct drm_device *dev = crtc->dev; 1847 struct drm_device *dev = crtc->dev;
1848 struct drm_i915_private *dev_priv = dev->dev_private; 1848 struct drm_i915_private *dev_priv = dev->dev_private;
1849 struct drm_framebuffer *fb = crtc->fb; 1849 struct drm_framebuffer *fb = crtc->fb;
1850 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1850 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1851 struct drm_i915_gem_object *obj = intel_fb->obj; 1851 struct drm_i915_gem_object *obj = intel_fb->obj;
1852 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1852 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1853 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; 1853 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1854 unsigned long stall_watermark = 200; 1854 unsigned long stall_watermark = 200;
1855 u32 dpfc_ctl; 1855 u32 dpfc_ctl;
1856 1856
1857 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 1857 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1858 if (dpfc_ctl & DPFC_CTL_EN) { 1858 if (dpfc_ctl & DPFC_CTL_EN) {
1859 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && 1859 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
1860 dev_priv->cfb_fence == obj->fence_reg && 1860 dev_priv->cfb_fence == obj->fence_reg &&
1861 dev_priv->cfb_plane == intel_crtc->plane && 1861 dev_priv->cfb_plane == intel_crtc->plane &&
1862 dev_priv->cfb_offset == obj->gtt_offset && 1862 dev_priv->cfb_offset == obj->gtt_offset &&
1863 dev_priv->cfb_y == crtc->y) 1863 dev_priv->cfb_y == crtc->y)
1864 return; 1864 return;
1865 1865
1866 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); 1866 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
1867 intel_wait_for_vblank(dev, intel_crtc->pipe); 1867 intel_wait_for_vblank(dev, intel_crtc->pipe);
1868 } 1868 }
1869 1869
1870 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1870 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1871 dev_priv->cfb_fence = obj->fence_reg; 1871 dev_priv->cfb_fence = obj->fence_reg;
1872 dev_priv->cfb_plane = intel_crtc->plane; 1872 dev_priv->cfb_plane = intel_crtc->plane;
1873 dev_priv->cfb_offset = obj->gtt_offset; 1873 dev_priv->cfb_offset = obj->gtt_offset;
1874 dev_priv->cfb_y = crtc->y; 1874 dev_priv->cfb_y = crtc->y;
1875 1875
1876 dpfc_ctl &= DPFC_RESERVED; 1876 dpfc_ctl &= DPFC_RESERVED;
1877 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); 1877 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1878 if (obj->tiling_mode != I915_TILING_NONE) { 1878 if (obj->tiling_mode != I915_TILING_NONE) {
1879 dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); 1879 dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
1880 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); 1880 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1881 } else { 1881 } else {
1882 I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY); 1882 I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
1883 } 1883 }
1884 1884
1885 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | 1885 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1886 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 1886 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1887 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); 1887 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1888 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); 1888 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1889 I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); 1889 I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1890 /* enable it... */ 1890 /* enable it... */
1891 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 1891 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1892 1892
1893 if (IS_GEN6(dev)) { 1893 if (IS_GEN6(dev)) {
1894 I915_WRITE(SNB_DPFC_CTL_SA, 1894 I915_WRITE(SNB_DPFC_CTL_SA,
1895 SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); 1895 SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
1896 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); 1896 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1897 sandybridge_blit_fbc_update(dev); 1897 sandybridge_blit_fbc_update(dev);
1898 } 1898 }
1899 1899
1900 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 1900 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1901 } 1901 }
1902 1902
1903 void ironlake_disable_fbc(struct drm_device *dev) 1903 void ironlake_disable_fbc(struct drm_device *dev)
1904 { 1904 {
1905 struct drm_i915_private *dev_priv = dev->dev_private; 1905 struct drm_i915_private *dev_priv = dev->dev_private;
1906 u32 dpfc_ctl; 1906 u32 dpfc_ctl;
1907 1907
1908 /* Disable compression */ 1908 /* Disable compression */
1909 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 1909 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1910 if (dpfc_ctl & DPFC_CTL_EN) { 1910 if (dpfc_ctl & DPFC_CTL_EN) {
1911 dpfc_ctl &= ~DPFC_CTL_EN; 1911 dpfc_ctl &= ~DPFC_CTL_EN;
1912 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); 1912 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1913 1913
1914 DRM_DEBUG_KMS("disabled FBC\n"); 1914 DRM_DEBUG_KMS("disabled FBC\n");
1915 } 1915 }
1916 } 1916 }
1917 1917
1918 static bool ironlake_fbc_enabled(struct drm_device *dev) 1918 static bool ironlake_fbc_enabled(struct drm_device *dev)
1919 { 1919 {
1920 struct drm_i915_private *dev_priv = dev->dev_private; 1920 struct drm_i915_private *dev_priv = dev->dev_private;
1921 1921
1922 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; 1922 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1923 } 1923 }
1924 1924
1925 bool intel_fbc_enabled(struct drm_device *dev) 1925 bool intel_fbc_enabled(struct drm_device *dev)
1926 { 1926 {
1927 struct drm_i915_private *dev_priv = dev->dev_private; 1927 struct drm_i915_private *dev_priv = dev->dev_private;
1928 1928
1929 if (!dev_priv->display.fbc_enabled) 1929 if (!dev_priv->display.fbc_enabled)
1930 return false; 1930 return false;
1931 1931
1932 return dev_priv->display.fbc_enabled(dev); 1932 return dev_priv->display.fbc_enabled(dev);
1933 } 1933 }
1934 1934
1935 void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1935 void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1936 { 1936 {
1937 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 1937 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
1938 1938
1939 if (!dev_priv->display.enable_fbc) 1939 if (!dev_priv->display.enable_fbc)
1940 return; 1940 return;
1941 1941
1942 dev_priv->display.enable_fbc(crtc, interval); 1942 dev_priv->display.enable_fbc(crtc, interval);
1943 } 1943 }
1944 1944
1945 void intel_disable_fbc(struct drm_device *dev) 1945 void intel_disable_fbc(struct drm_device *dev)
1946 { 1946 {
1947 struct drm_i915_private *dev_priv = dev->dev_private; 1947 struct drm_i915_private *dev_priv = dev->dev_private;
1948 1948
1949 if (!dev_priv->display.disable_fbc) 1949 if (!dev_priv->display.disable_fbc)
1950 return; 1950 return;
1951 1951
1952 dev_priv->display.disable_fbc(dev); 1952 dev_priv->display.disable_fbc(dev);
1953 } 1953 }
1954 1954
1955 /** 1955 /**
1956 * intel_update_fbc - enable/disable FBC as needed 1956 * intel_update_fbc - enable/disable FBC as needed
1957 * @dev: the drm_device 1957 * @dev: the drm_device
1958 * 1958 *
1959 * Set up the framebuffer compression hardware at mode set time. We 1959 * Set up the framebuffer compression hardware at mode set time. We
1960 * enable it if possible: 1960 * enable it if possible:
1961 * - plane A only (on pre-965) 1961 * - plane A only (on pre-965)
1962 * - no pixel mulitply/line duplication 1962 * - no pixel mulitply/line duplication
1963 * - no alpha buffer discard 1963 * - no alpha buffer discard
1964 * - no dual wide 1964 * - no dual wide
1965 * - framebuffer <= 2048 in width, 1536 in height 1965 * - framebuffer <= 2048 in width, 1536 in height
1966 * 1966 *
1967 * We can't assume that any compression will take place (worst case), 1967 * We can't assume that any compression will take place (worst case),
1968 * so the compressed buffer has to be the same size as the uncompressed 1968 * so the compressed buffer has to be the same size as the uncompressed
1969 * one. It also must reside (along with the line length buffer) in 1969 * one. It also must reside (along with the line length buffer) in
1970 * stolen memory. 1970 * stolen memory.
1971 * 1971 *
1972 * We need to enable/disable FBC on a global basis. 1972 * We need to enable/disable FBC on a global basis.
1973 */ 1973 */
1974 static void intel_update_fbc(struct drm_device *dev) 1974 static void intel_update_fbc(struct drm_device *dev)
1975 { 1975 {
1976 struct drm_i915_private *dev_priv = dev->dev_private; 1976 struct drm_i915_private *dev_priv = dev->dev_private;
1977 struct drm_crtc *crtc = NULL, *tmp_crtc; 1977 struct drm_crtc *crtc = NULL, *tmp_crtc;
1978 struct intel_crtc *intel_crtc; 1978 struct intel_crtc *intel_crtc;
1979 struct drm_framebuffer *fb; 1979 struct drm_framebuffer *fb;
1980 struct intel_framebuffer *intel_fb; 1980 struct intel_framebuffer *intel_fb;
1981 struct drm_i915_gem_object *obj; 1981 struct drm_i915_gem_object *obj;
1982 1982
1983 DRM_DEBUG_KMS("\n"); 1983 DRM_DEBUG_KMS("\n");
1984 1984
1985 if (!i915_powersave) 1985 if (!i915_powersave)
1986 return; 1986 return;
1987 1987
1988 if (!I915_HAS_FBC(dev)) 1988 if (!I915_HAS_FBC(dev))
1989 return; 1989 return;
1990 1990
1991 /* 1991 /*
1992 * If FBC is already on, we just have to verify that we can 1992 * If FBC is already on, we just have to verify that we can
1993 * keep it that way... 1993 * keep it that way...
1994 * Need to disable if: 1994 * Need to disable if:
1995 * - more than one pipe is active 1995 * - more than one pipe is active
1996 * - changing FBC params (stride, fence, mode) 1996 * - changing FBC params (stride, fence, mode)
1997 * - new fb is too large to fit in compressed buffer 1997 * - new fb is too large to fit in compressed buffer
1998 * - going to an unsupported config (interlace, pixel multiply, etc.) 1998 * - going to an unsupported config (interlace, pixel multiply, etc.)
1999 */ 1999 */
2000 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { 2000 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
2001 if (tmp_crtc->enabled && tmp_crtc->fb) { 2001 if (tmp_crtc->enabled && tmp_crtc->fb) {
2002 if (crtc) { 2002 if (crtc) {
2003 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); 2003 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
2004 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; 2004 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
2005 goto out_disable; 2005 goto out_disable;
2006 } 2006 }
2007 crtc = tmp_crtc; 2007 crtc = tmp_crtc;
2008 } 2008 }
2009 } 2009 }
2010 2010
2011 if (!crtc || crtc->fb == NULL) { 2011 if (!crtc || crtc->fb == NULL) {
2012 DRM_DEBUG_KMS("no output, disabling\n"); 2012 DRM_DEBUG_KMS("no output, disabling\n");
2013 dev_priv->no_fbc_reason = FBC_NO_OUTPUT; 2013 dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
2014 goto out_disable; 2014 goto out_disable;
2015 } 2015 }
2016 2016
2017 intel_crtc = to_intel_crtc(crtc); 2017 intel_crtc = to_intel_crtc(crtc);
2018 fb = crtc->fb; 2018 fb = crtc->fb;
2019 intel_fb = to_intel_framebuffer(fb); 2019 intel_fb = to_intel_framebuffer(fb);
2020 obj = intel_fb->obj; 2020 obj = intel_fb->obj;
2021 2021
2022 if (intel_fb->obj->base.size > dev_priv->cfb_size) { 2022 if (intel_fb->obj->base.size > dev_priv->cfb_size) {
2023 DRM_DEBUG_KMS("framebuffer too large, disabling " 2023 DRM_DEBUG_KMS("framebuffer too large, disabling "
2024 "compression\n"); 2024 "compression\n");
2025 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 2025 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
2026 goto out_disable; 2026 goto out_disable;
2027 } 2027 }
2028 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || 2028 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
2029 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { 2029 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
2030 DRM_DEBUG_KMS("mode incompatible with compression, " 2030 DRM_DEBUG_KMS("mode incompatible with compression, "
2031 "disabling\n"); 2031 "disabling\n");
2032 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; 2032 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
2033 goto out_disable; 2033 goto out_disable;
2034 } 2034 }
2035 if ((crtc->mode.hdisplay > 2048) || 2035 if ((crtc->mode.hdisplay > 2048) ||
2036 (crtc->mode.vdisplay > 1536)) { 2036 (crtc->mode.vdisplay > 1536)) {
2037 DRM_DEBUG_KMS("mode too large for compression, disabling\n"); 2037 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
2038 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; 2038 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
2039 goto out_disable; 2039 goto out_disable;
2040 } 2040 }
2041 if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { 2041 if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
2042 DRM_DEBUG_KMS("plane not 0, disabling compression\n"); 2042 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
2043 dev_priv->no_fbc_reason = FBC_BAD_PLANE; 2043 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
2044 goto out_disable; 2044 goto out_disable;
2045 } 2045 }
2046 if (obj->tiling_mode != I915_TILING_X) { 2046 if (obj->tiling_mode != I915_TILING_X) {
2047 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); 2047 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
2048 dev_priv->no_fbc_reason = FBC_NOT_TILED; 2048 dev_priv->no_fbc_reason = FBC_NOT_TILED;
2049 goto out_disable; 2049 goto out_disable;
2050 } 2050 }
2051 2051
2052 /* If the kernel debugger is active, always disable compression */ 2052 /* If the kernel debugger is active, always disable compression */
2053 if (in_dbg_master()) 2053 if (in_dbg_master())
2054 goto out_disable; 2054 goto out_disable;
2055 2055
2056 intel_enable_fbc(crtc, 500); 2056 intel_enable_fbc(crtc, 500);
2057 return; 2057 return;
2058 2058
2059 out_disable: 2059 out_disable:
2060 /* Multiple disables should be harmless */ 2060 /* Multiple disables should be harmless */
2061 if (intel_fbc_enabled(dev)) { 2061 if (intel_fbc_enabled(dev)) {
2062 DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); 2062 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
2063 intel_disable_fbc(dev); 2063 intel_disable_fbc(dev);
2064 } 2064 }
2065 } 2065 }
2066 2066
2067 int 2067 int
2068 intel_pin_and_fence_fb_obj(struct drm_device *dev, 2068 intel_pin_and_fence_fb_obj(struct drm_device *dev,
2069 struct drm_i915_gem_object *obj, 2069 struct drm_i915_gem_object *obj,
2070 struct intel_ring_buffer *pipelined) 2070 struct intel_ring_buffer *pipelined)
2071 { 2071 {
2072 struct drm_i915_private *dev_priv = dev->dev_private; 2072 struct drm_i915_private *dev_priv = dev->dev_private;
2073 u32 alignment; 2073 u32 alignment;
2074 int ret; 2074 int ret;
2075 2075
2076 switch (obj->tiling_mode) { 2076 switch (obj->tiling_mode) {
2077 case I915_TILING_NONE: 2077 case I915_TILING_NONE:
2078 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 2078 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2079 alignment = 128 * 1024; 2079 alignment = 128 * 1024;
2080 else if (INTEL_INFO(dev)->gen >= 4) 2080 else if (INTEL_INFO(dev)->gen >= 4)
2081 alignment = 4 * 1024; 2081 alignment = 4 * 1024;
2082 else 2082 else
2083 alignment = 64 * 1024; 2083 alignment = 64 * 1024;
2084 break; 2084 break;
2085 case I915_TILING_X: 2085 case I915_TILING_X:
2086 /* pin() will align the object as required by fence */ 2086 /* pin() will align the object as required by fence */
2087 alignment = 0; 2087 alignment = 0;
2088 break; 2088 break;
2089 case I915_TILING_Y: 2089 case I915_TILING_Y:
2090 /* FIXME: Is this true? */ 2090 /* FIXME: Is this true? */
2091 DRM_ERROR("Y tiled not allowed for scan out buffers\n"); 2091 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
2092 return -EINVAL; 2092 return -EINVAL;
2093 default: 2093 default:
2094 BUG(); 2094 BUG();
2095 } 2095 }
2096 2096
2097 dev_priv->mm.interruptible = false; 2097 dev_priv->mm.interruptible = false;
2098 ret = i915_gem_object_pin(obj, alignment, true); 2098 ret = i915_gem_object_pin(obj, alignment, true);
2099 if (ret) 2099 if (ret)
2100 goto err_interruptible; 2100 goto err_interruptible;
2101 2101
2102 ret = i915_gem_object_set_to_display_plane(obj, pipelined); 2102 ret = i915_gem_object_set_to_display_plane(obj, pipelined);
2103 if (ret) 2103 if (ret)
2104 goto err_unpin; 2104 goto err_unpin;
2105 2105
2106 /* Install a fence for tiled scan-out. Pre-i965 always needs a 2106 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2107 * fence, whereas 965+ only requires a fence if using 2107 * fence, whereas 965+ only requires a fence if using
2108 * framebuffer compression. For simplicity, we always install 2108 * framebuffer compression. For simplicity, we always install
2109 * a fence as the cost is not that onerous. 2109 * a fence as the cost is not that onerous.
2110 */ 2110 */
2111 if (obj->tiling_mode != I915_TILING_NONE) { 2111 if (obj->tiling_mode != I915_TILING_NONE) {
2112 ret = i915_gem_object_get_fence(obj, pipelined); 2112 ret = i915_gem_object_get_fence(obj, pipelined);
2113 if (ret) 2113 if (ret)
2114 goto err_unpin; 2114 goto err_unpin;
2115 } 2115 }
2116 2116
2117 dev_priv->mm.interruptible = true; 2117 dev_priv->mm.interruptible = true;
2118 return 0; 2118 return 0;
2119 2119
2120 err_unpin: 2120 err_unpin:
2121 i915_gem_object_unpin(obj); 2121 i915_gem_object_unpin(obj);
2122 err_interruptible: 2122 err_interruptible:
2123 dev_priv->mm.interruptible = true; 2123 dev_priv->mm.interruptible = true;
2124 return ret; 2124 return ret;
2125 } 2125 }
2126 2126
2127 /* Assume fb object is pinned & idle & fenced and just update base pointers */ 2127 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2128 static int 2128 static int
2129 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, 2129 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2130 int x, int y, enum mode_set_atomic state) 2130 int x, int y, enum mode_set_atomic state)
2131 { 2131 {
2132 struct drm_device *dev = crtc->dev; 2132 struct drm_device *dev = crtc->dev;
2133 struct drm_i915_private *dev_priv = dev->dev_private; 2133 struct drm_i915_private *dev_priv = dev->dev_private;
2134 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2134 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2135 struct intel_framebuffer *intel_fb; 2135 struct intel_framebuffer *intel_fb;
2136 struct drm_i915_gem_object *obj; 2136 struct drm_i915_gem_object *obj;
2137 int plane = intel_crtc->plane; 2137 int plane = intel_crtc->plane;
2138 unsigned long Start, Offset; 2138 unsigned long Start, Offset;
2139 u32 dspcntr; 2139 u32 dspcntr;
2140 u32 reg; 2140 u32 reg;
2141 2141
2142 switch (plane) { 2142 switch (plane) {
2143 case 0: 2143 case 0:
2144 case 1: 2144 case 1:
2145 break; 2145 break;
2146 default: 2146 default:
2147 DRM_ERROR("Can't update plane %d in SAREA\n", plane); 2147 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2148 return -EINVAL; 2148 return -EINVAL;
2149 } 2149 }
2150 2150
2151 intel_fb = to_intel_framebuffer(fb); 2151 intel_fb = to_intel_framebuffer(fb);
2152 obj = intel_fb->obj; 2152 obj = intel_fb->obj;
2153 2153
2154 reg = DSPCNTR(plane); 2154 reg = DSPCNTR(plane);
2155 dspcntr = I915_READ(reg); 2155 dspcntr = I915_READ(reg);
2156 /* Mask out pixel format bits in case we change it */ 2156 /* Mask out pixel format bits in case we change it */
2157 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 2157 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2158 switch (fb->bits_per_pixel) { 2158 switch (fb->bits_per_pixel) {
2159 case 8: 2159 case 8:
2160 dspcntr |= DISPPLANE_8BPP; 2160 dspcntr |= DISPPLANE_8BPP;
2161 break; 2161 break;
2162 case 16: 2162 case 16:
2163 if (fb->depth == 15) 2163 if (fb->depth == 15)
2164 dspcntr |= DISPPLANE_15_16BPP; 2164 dspcntr |= DISPPLANE_15_16BPP;
2165 else 2165 else
2166 dspcntr |= DISPPLANE_16BPP; 2166 dspcntr |= DISPPLANE_16BPP;
2167 break; 2167 break;
2168 case 24: 2168 case 24:
2169 case 32: 2169 case 32:
2170 dspcntr |= DISPPLANE_32BPP_NO_ALPHA; 2170 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2171 break; 2171 break;
2172 default: 2172 default:
2173 DRM_ERROR("Unknown color depth\n"); 2173 DRM_ERROR("Unknown color depth\n");
2174 return -EINVAL; 2174 return -EINVAL;
2175 } 2175 }
2176 if (INTEL_INFO(dev)->gen >= 4) { 2176 if (INTEL_INFO(dev)->gen >= 4) {
2177 if (obj->tiling_mode != I915_TILING_NONE) 2177 if (obj->tiling_mode != I915_TILING_NONE)
2178 dspcntr |= DISPPLANE_TILED; 2178 dspcntr |= DISPPLANE_TILED;
2179 else 2179 else
2180 dspcntr &= ~DISPPLANE_TILED; 2180 dspcntr &= ~DISPPLANE_TILED;
2181 } 2181 }
2182 2182
2183 if (HAS_PCH_SPLIT(dev)) 2183 if (HAS_PCH_SPLIT(dev))
2184 /* must disable */ 2184 /* must disable */
2185 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2185 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2186 2186
2187 I915_WRITE(reg, dspcntr); 2187 I915_WRITE(reg, dspcntr);
2188 2188
2189 Start = obj->gtt_offset; 2189 Start = obj->gtt_offset;
2190 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); 2190 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
2191 2191
2192 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", 2192 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2193 Start, Offset, x, y, fb->pitch); 2193 Start, Offset, x, y, fb->pitch);
2194 I915_WRITE(DSPSTRIDE(plane), fb->pitch); 2194 I915_WRITE(DSPSTRIDE(plane), fb->pitch);
2195 if (INTEL_INFO(dev)->gen >= 4) { 2195 if (INTEL_INFO(dev)->gen >= 4) {
2196 I915_WRITE(DSPSURF(plane), Start); 2196 I915_WRITE(DSPSURF(plane), Start);
2197 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2197 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2198 I915_WRITE(DSPADDR(plane), Offset); 2198 I915_WRITE(DSPADDR(plane), Offset);
2199 } else 2199 } else
2200 I915_WRITE(DSPADDR(plane), Start + Offset); 2200 I915_WRITE(DSPADDR(plane), Start + Offset);
2201 POSTING_READ(reg); 2201 POSTING_READ(reg);
2202 2202
2203 intel_update_fbc(dev); 2203 intel_update_fbc(dev);
2204 intel_increase_pllclock(crtc); 2204 intel_increase_pllclock(crtc);
2205 2205
2206 return 0; 2206 return 0;
2207 } 2207 }
2208 2208
2209 static int 2209 static int
2210 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 2210 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2211 struct drm_framebuffer *old_fb) 2211 struct drm_framebuffer *old_fb)
2212 { 2212 {
2213 struct drm_device *dev = crtc->dev; 2213 struct drm_device *dev = crtc->dev;
2214 struct drm_i915_master_private *master_priv; 2214 struct drm_i915_master_private *master_priv;
2215 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2215 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2216 int ret; 2216 int ret;
2217 2217
2218 /* no fb bound */ 2218 /* no fb bound */
2219 if (!crtc->fb) { 2219 if (!crtc->fb) {
2220 DRM_DEBUG_KMS("No FB bound\n"); 2220 DRM_DEBUG_KMS("No FB bound\n");
2221 return 0; 2221 return 0;
2222 } 2222 }
2223 2223
2224 switch (intel_crtc->plane) { 2224 switch (intel_crtc->plane) {
2225 case 0: 2225 case 0:
2226 case 1: 2226 case 1:
2227 break; 2227 break;
2228 default: 2228 default:
2229 return -EINVAL; 2229 return -EINVAL;
2230 } 2230 }
2231 2231
2232 mutex_lock(&dev->struct_mutex); 2232 mutex_lock(&dev->struct_mutex);
2233 ret = intel_pin_and_fence_fb_obj(dev, 2233 ret = intel_pin_and_fence_fb_obj(dev,
2234 to_intel_framebuffer(crtc->fb)->obj, 2234 to_intel_framebuffer(crtc->fb)->obj,
2235 NULL); 2235 NULL);
2236 if (ret != 0) { 2236 if (ret != 0) {
2237 mutex_unlock(&dev->struct_mutex); 2237 mutex_unlock(&dev->struct_mutex);
2238 return ret; 2238 return ret;
2239 } 2239 }
2240 2240
2241 if (old_fb) { 2241 if (old_fb) {
2242 struct drm_i915_private *dev_priv = dev->dev_private; 2242 struct drm_i915_private *dev_priv = dev->dev_private;
2243 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; 2243 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2244 2244
2245 wait_event(dev_priv->pending_flip_queue, 2245 wait_event(dev_priv->pending_flip_queue,
2246 atomic_read(&dev_priv->mm.wedged) || 2246 atomic_read(&dev_priv->mm.wedged) ||
2247 atomic_read(&obj->pending_flip) == 0); 2247 atomic_read(&obj->pending_flip) == 0);
2248 2248
2249 /* Big Hammer, we also need to ensure that any pending 2249 /* Big Hammer, we also need to ensure that any pending
2250 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 2250 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2251 * current scanout is retired before unpinning the old 2251 * current scanout is retired before unpinning the old
2252 * framebuffer. 2252 * framebuffer.
2253 * 2253 *
2254 * This should only fail upon a hung GPU, in which case we 2254 * This should only fail upon a hung GPU, in which case we
2255 * can safely continue. 2255 * can safely continue.
2256 */ 2256 */
2257 ret = i915_gem_object_flush_gpu(obj); 2257 ret = i915_gem_object_flush_gpu(obj);
2258 (void) ret; 2258 (void) ret;
2259 } 2259 }
2260 2260
2261 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, 2261 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2262 LEAVE_ATOMIC_MODE_SET); 2262 LEAVE_ATOMIC_MODE_SET);
2263 if (ret) { 2263 if (ret) {
2264 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); 2264 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
2265 mutex_unlock(&dev->struct_mutex); 2265 mutex_unlock(&dev->struct_mutex);
2266 return ret; 2266 return ret;
2267 } 2267 }
2268 2268
2269 if (old_fb) { 2269 if (old_fb) {
2270 intel_wait_for_vblank(dev, intel_crtc->pipe); 2270 intel_wait_for_vblank(dev, intel_crtc->pipe);
2271 i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); 2271 i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
2272 } 2272 }
2273 2273
2274 mutex_unlock(&dev->struct_mutex); 2274 mutex_unlock(&dev->struct_mutex);
2275 2275
2276 if (!dev->primary->master) 2276 if (!dev->primary->master)
2277 return 0; 2277 return 0;
2278 2278
2279 master_priv = dev->primary->master->driver_priv; 2279 master_priv = dev->primary->master->driver_priv;
2280 if (!master_priv->sarea_priv) 2280 if (!master_priv->sarea_priv)
2281 return 0; 2281 return 0;
2282 2282
2283 if (intel_crtc->pipe) { 2283 if (intel_crtc->pipe) {
2284 master_priv->sarea_priv->pipeB_x = x; 2284 master_priv->sarea_priv->pipeB_x = x;
2285 master_priv->sarea_priv->pipeB_y = y; 2285 master_priv->sarea_priv->pipeB_y = y;
2286 } else { 2286 } else {
2287 master_priv->sarea_priv->pipeA_x = x; 2287 master_priv->sarea_priv->pipeA_x = x;
2288 master_priv->sarea_priv->pipeA_y = y; 2288 master_priv->sarea_priv->pipeA_y = y;
2289 } 2289 }
2290 2290
2291 return 0; 2291 return 0;
2292 } 2292 }
2293 2293
2294 static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) 2294 static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
2295 { 2295 {
2296 struct drm_device *dev = crtc->dev; 2296 struct drm_device *dev = crtc->dev;
2297 struct drm_i915_private *dev_priv = dev->dev_private; 2297 struct drm_i915_private *dev_priv = dev->dev_private;
2298 u32 dpa_ctl; 2298 u32 dpa_ctl;
2299 2299
2300 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); 2300 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
2301 dpa_ctl = I915_READ(DP_A); 2301 dpa_ctl = I915_READ(DP_A);
2302 dpa_ctl &= ~DP_PLL_FREQ_MASK; 2302 dpa_ctl &= ~DP_PLL_FREQ_MASK;
2303 2303
2304 if (clock < 200000) { 2304 if (clock < 200000) {
2305 u32 temp; 2305 u32 temp;
2306 dpa_ctl |= DP_PLL_FREQ_160MHZ; 2306 dpa_ctl |= DP_PLL_FREQ_160MHZ;
2307 /* workaround for 160Mhz: 2307 /* workaround for 160Mhz:
2308 1) program 0x4600c bits 15:0 = 0x8124 2308 1) program 0x4600c bits 15:0 = 0x8124
2309 2) program 0x46010 bit 0 = 1 2309 2) program 0x46010 bit 0 = 1
2310 3) program 0x46034 bit 24 = 1 2310 3) program 0x46034 bit 24 = 1
2311 4) program 0x64000 bit 14 = 1 2311 4) program 0x64000 bit 14 = 1
2312 */ 2312 */
2313 temp = I915_READ(0x4600c); 2313 temp = I915_READ(0x4600c);
2314 temp &= 0xffff0000; 2314 temp &= 0xffff0000;
2315 I915_WRITE(0x4600c, temp | 0x8124); 2315 I915_WRITE(0x4600c, temp | 0x8124);
2316 2316
2317 temp = I915_READ(0x46010); 2317 temp = I915_READ(0x46010);
2318 I915_WRITE(0x46010, temp | 1); 2318 I915_WRITE(0x46010, temp | 1);
2319 2319
2320 temp = I915_READ(0x46034); 2320 temp = I915_READ(0x46034);
2321 I915_WRITE(0x46034, temp | (1 << 24)); 2321 I915_WRITE(0x46034, temp | (1 << 24));
2322 } else { 2322 } else {
2323 dpa_ctl |= DP_PLL_FREQ_270MHZ; 2323 dpa_ctl |= DP_PLL_FREQ_270MHZ;
2324 } 2324 }
2325 I915_WRITE(DP_A, dpa_ctl); 2325 I915_WRITE(DP_A, dpa_ctl);
2326 2326
2327 POSTING_READ(DP_A); 2327 POSTING_READ(DP_A);
2328 udelay(500); 2328 udelay(500);
2329 } 2329 }
2330 2330
2331 static void intel_fdi_normal_train(struct drm_crtc *crtc) 2331 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2332 { 2332 {
2333 struct drm_device *dev = crtc->dev; 2333 struct drm_device *dev = crtc->dev;
2334 struct drm_i915_private *dev_priv = dev->dev_private; 2334 struct drm_i915_private *dev_priv = dev->dev_private;
2335 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2335 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2336 int pipe = intel_crtc->pipe; 2336 int pipe = intel_crtc->pipe;
2337 u32 reg, temp; 2337 u32 reg, temp;
2338 2338
2339 /* enable normal train */ 2339 /* enable normal train */
2340 reg = FDI_TX_CTL(pipe); 2340 reg = FDI_TX_CTL(pipe);
2341 temp = I915_READ(reg); 2341 temp = I915_READ(reg);
2342 temp &= ~FDI_LINK_TRAIN_NONE; 2342 temp &= ~FDI_LINK_TRAIN_NONE;
2343 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 2343 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2344 I915_WRITE(reg, temp); 2344 I915_WRITE(reg, temp);
2345 2345
2346 reg = FDI_RX_CTL(pipe); 2346 reg = FDI_RX_CTL(pipe);
2347 temp = I915_READ(reg); 2347 temp = I915_READ(reg);
2348 if (HAS_PCH_CPT(dev)) { 2348 if (HAS_PCH_CPT(dev)) {
2349 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2349 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2350 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 2350 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2351 } else { 2351 } else {
2352 temp &= ~FDI_LINK_TRAIN_NONE; 2352 temp &= ~FDI_LINK_TRAIN_NONE;
2353 temp |= FDI_LINK_TRAIN_NONE; 2353 temp |= FDI_LINK_TRAIN_NONE;
2354 } 2354 }
2355 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 2355 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2356 2356
2357 /* wait one idle pattern time */ 2357 /* wait one idle pattern time */
2358 POSTING_READ(reg); 2358 POSTING_READ(reg);
2359 udelay(1000); 2359 udelay(1000);
2360 } 2360 }
2361 2361
2362 /* The FDI link training functions for ILK/Ibexpeak. */ 2362 /* The FDI link training functions for ILK/Ibexpeak. */
2363 static void ironlake_fdi_link_train(struct drm_crtc *crtc) 2363 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2364 { 2364 {
2365 struct drm_device *dev = crtc->dev; 2365 struct drm_device *dev = crtc->dev;
2366 struct drm_i915_private *dev_priv = dev->dev_private; 2366 struct drm_i915_private *dev_priv = dev->dev_private;
2367 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2367 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2368 int pipe = intel_crtc->pipe; 2368 int pipe = intel_crtc->pipe;
2369 int plane = intel_crtc->plane; 2369 int plane = intel_crtc->plane;
2370 u32 reg, temp, tries; 2370 u32 reg, temp, tries;
2371 2371
2372 /* FDI needs bits from pipe & plane first */ 2372 /* FDI needs bits from pipe & plane first */
2373 assert_pipe_enabled(dev_priv, pipe); 2373 assert_pipe_enabled(dev_priv, pipe);
2374 assert_plane_enabled(dev_priv, plane); 2374 assert_plane_enabled(dev_priv, plane);
2375 2375
2376 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 2376 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2377 for train result */ 2377 for train result */
2378 reg = FDI_RX_IMR(pipe); 2378 reg = FDI_RX_IMR(pipe);
2379 temp = I915_READ(reg); 2379 temp = I915_READ(reg);
2380 temp &= ~FDI_RX_SYMBOL_LOCK; 2380 temp &= ~FDI_RX_SYMBOL_LOCK;
2381 temp &= ~FDI_RX_BIT_LOCK; 2381 temp &= ~FDI_RX_BIT_LOCK;
2382 I915_WRITE(reg, temp); 2382 I915_WRITE(reg, temp);
2383 I915_READ(reg); 2383 I915_READ(reg);
2384 udelay(150); 2384 udelay(150);
2385 2385
2386 /* enable CPU FDI TX and PCH FDI RX */ 2386 /* enable CPU FDI TX and PCH FDI RX */
2387 reg = FDI_TX_CTL(pipe); 2387 reg = FDI_TX_CTL(pipe);
2388 temp = I915_READ(reg); 2388 temp = I915_READ(reg);
2389 temp &= ~(7 << 19); 2389 temp &= ~(7 << 19);
2390 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2390 temp |= (intel_crtc->fdi_lanes - 1) << 19;
2391 temp &= ~FDI_LINK_TRAIN_NONE; 2391 temp &= ~FDI_LINK_TRAIN_NONE;
2392 temp |= FDI_LINK_TRAIN_PATTERN_1; 2392 temp |= FDI_LINK_TRAIN_PATTERN_1;
2393 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2393 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2394 2394
2395 reg = FDI_RX_CTL(pipe); 2395 reg = FDI_RX_CTL(pipe);
2396 temp = I915_READ(reg); 2396 temp = I915_READ(reg);
2397 temp &= ~FDI_LINK_TRAIN_NONE; 2397 temp &= ~FDI_LINK_TRAIN_NONE;
2398 temp |= FDI_LINK_TRAIN_PATTERN_1; 2398 temp |= FDI_LINK_TRAIN_PATTERN_1;
2399 I915_WRITE(reg, temp | FDI_RX_ENABLE); 2399 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2400 2400
2401 POSTING_READ(reg); 2401 POSTING_READ(reg);
2402 udelay(150); 2402 udelay(150);
2403 2403
2404 /* Ironlake workaround, enable clock pointer after FDI enable*/ 2404 /* Ironlake workaround, enable clock pointer after FDI enable*/
2405 if (HAS_PCH_IBX(dev)) { 2405 if (HAS_PCH_IBX(dev)) {
2406 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 2406 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2407 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 2407 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2408 FDI_RX_PHASE_SYNC_POINTER_EN); 2408 FDI_RX_PHASE_SYNC_POINTER_EN);
2409 } 2409 }
2410 2410
2411 reg = FDI_RX_IIR(pipe); 2411 reg = FDI_RX_IIR(pipe);
2412 for (tries = 0; tries < 5; tries++) { 2412 for (tries = 0; tries < 5; tries++) {
2413 temp = I915_READ(reg); 2413 temp = I915_READ(reg);
2414 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2414 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2415 2415
2416 if ((temp & FDI_RX_BIT_LOCK)) { 2416 if ((temp & FDI_RX_BIT_LOCK)) {
2417 DRM_DEBUG_KMS("FDI train 1 done.\n"); 2417 DRM_DEBUG_KMS("FDI train 1 done.\n");
2418 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2418 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2419 break; 2419 break;
2420 } 2420 }
2421 } 2421 }
2422 if (tries == 5) 2422 if (tries == 5)
2423 DRM_ERROR("FDI train 1 fail!\n"); 2423 DRM_ERROR("FDI train 1 fail!\n");
2424 2424
2425 /* Train 2 */ 2425 /* Train 2 */
2426 reg = FDI_TX_CTL(pipe); 2426 reg = FDI_TX_CTL(pipe);
2427 temp = I915_READ(reg); 2427 temp = I915_READ(reg);
2428 temp &= ~FDI_LINK_TRAIN_NONE; 2428 temp &= ~FDI_LINK_TRAIN_NONE;
2429 temp |= FDI_LINK_TRAIN_PATTERN_2; 2429 temp |= FDI_LINK_TRAIN_PATTERN_2;
2430 I915_WRITE(reg, temp); 2430 I915_WRITE(reg, temp);
2431 2431
2432 reg = FDI_RX_CTL(pipe); 2432 reg = FDI_RX_CTL(pipe);
2433 temp = I915_READ(reg); 2433 temp = I915_READ(reg);
2434 temp &= ~FDI_LINK_TRAIN_NONE; 2434 temp &= ~FDI_LINK_TRAIN_NONE;
2435 temp |= FDI_LINK_TRAIN_PATTERN_2; 2435 temp |= FDI_LINK_TRAIN_PATTERN_2;
2436 I915_WRITE(reg, temp); 2436 I915_WRITE(reg, temp);
2437 2437
2438 POSTING_READ(reg); 2438 POSTING_READ(reg);
2439 udelay(150); 2439 udelay(150);
2440 2440
2441 reg = FDI_RX_IIR(pipe); 2441 reg = FDI_RX_IIR(pipe);
2442 for (tries = 0; tries < 5; tries++) { 2442 for (tries = 0; tries < 5; tries++) {
2443 temp = I915_READ(reg); 2443 temp = I915_READ(reg);
2444 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2444 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2445 2445
2446 if (temp & FDI_RX_SYMBOL_LOCK) { 2446 if (temp & FDI_RX_SYMBOL_LOCK) {
2447 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2447 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2448 DRM_DEBUG_KMS("FDI train 2 done.\n"); 2448 DRM_DEBUG_KMS("FDI train 2 done.\n");
2449 break; 2449 break;
2450 } 2450 }
2451 } 2451 }
2452 if (tries == 5) 2452 if (tries == 5)
2453 DRM_ERROR("FDI train 2 fail!\n"); 2453 DRM_ERROR("FDI train 2 fail!\n");
2454 2454
2455 DRM_DEBUG_KMS("FDI train done\n"); 2455 DRM_DEBUG_KMS("FDI train done\n");
2456 2456
2457 } 2457 }
2458 2458
2459 static const int snb_b_fdi_train_param [] = { 2459 static const int snb_b_fdi_train_param [] = {
2460 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 2460 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2461 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 2461 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2462 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 2462 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2463 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 2463 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2464 }; 2464 };
2465 2465
2466 /* The FDI link training functions for SNB/Cougarpoint. */ 2466 /* The FDI link training functions for SNB/Cougarpoint. */
2467 static void gen6_fdi_link_train(struct drm_crtc *crtc) 2467 static void gen6_fdi_link_train(struct drm_crtc *crtc)
2468 { 2468 {
2469 struct drm_device *dev = crtc->dev; 2469 struct drm_device *dev = crtc->dev;
2470 struct drm_i915_private *dev_priv = dev->dev_private; 2470 struct drm_i915_private *dev_priv = dev->dev_private;
2471 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2471 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2472 int pipe = intel_crtc->pipe; 2472 int pipe = intel_crtc->pipe;
2473 u32 reg, temp, i; 2473 u32 reg, temp, i;
2474 2474
2475 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 2475 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2476 for train result */ 2476 for train result */
2477 reg = FDI_RX_IMR(pipe); 2477 reg = FDI_RX_IMR(pipe);
2478 temp = I915_READ(reg); 2478 temp = I915_READ(reg);
2479 temp &= ~FDI_RX_SYMBOL_LOCK; 2479 temp &= ~FDI_RX_SYMBOL_LOCK;
2480 temp &= ~FDI_RX_BIT_LOCK; 2480 temp &= ~FDI_RX_BIT_LOCK;
2481 I915_WRITE(reg, temp); 2481 I915_WRITE(reg, temp);
2482 2482
2483 POSTING_READ(reg); 2483 POSTING_READ(reg);
2484 udelay(150); 2484 udelay(150);
2485 2485
2486 /* enable CPU FDI TX and PCH FDI RX */ 2486 /* enable CPU FDI TX and PCH FDI RX */
2487 reg = FDI_TX_CTL(pipe); 2487 reg = FDI_TX_CTL(pipe);
2488 temp = I915_READ(reg); 2488 temp = I915_READ(reg);
2489 temp &= ~(7 << 19); 2489 temp &= ~(7 << 19);
2490 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2490 temp |= (intel_crtc->fdi_lanes - 1) << 19;
2491 temp &= ~FDI_LINK_TRAIN_NONE; 2491 temp &= ~FDI_LINK_TRAIN_NONE;
2492 temp |= FDI_LINK_TRAIN_PATTERN_1; 2492 temp |= FDI_LINK_TRAIN_PATTERN_1;
2493 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2493 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2494 /* SNB-B */ 2494 /* SNB-B */
2495 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2495 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2496 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2496 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2497 2497
2498 reg = FDI_RX_CTL(pipe); 2498 reg = FDI_RX_CTL(pipe);
2499 temp = I915_READ(reg); 2499 temp = I915_READ(reg);
2500 if (HAS_PCH_CPT(dev)) { 2500 if (HAS_PCH_CPT(dev)) {
2501 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2501 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2502 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 2502 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2503 } else { 2503 } else {
2504 temp &= ~FDI_LINK_TRAIN_NONE; 2504 temp &= ~FDI_LINK_TRAIN_NONE;
2505 temp |= FDI_LINK_TRAIN_PATTERN_1; 2505 temp |= FDI_LINK_TRAIN_PATTERN_1;
2506 } 2506 }
2507 I915_WRITE(reg, temp | FDI_RX_ENABLE); 2507 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2508 2508
2509 POSTING_READ(reg); 2509 POSTING_READ(reg);
2510 udelay(150); 2510 udelay(150);
2511 2511
2512 for (i = 0; i < 4; i++ ) { 2512 for (i = 0; i < 4; i++ ) {
2513 reg = FDI_TX_CTL(pipe); 2513 reg = FDI_TX_CTL(pipe);
2514 temp = I915_READ(reg); 2514 temp = I915_READ(reg);
2515 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2515 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2516 temp |= snb_b_fdi_train_param[i]; 2516 temp |= snb_b_fdi_train_param[i];
2517 I915_WRITE(reg, temp); 2517 I915_WRITE(reg, temp);
2518 2518
2519 POSTING_READ(reg); 2519 POSTING_READ(reg);
2520 udelay(500); 2520 udelay(500);
2521 2521
2522 reg = FDI_RX_IIR(pipe); 2522 reg = FDI_RX_IIR(pipe);
2523 temp = I915_READ(reg); 2523 temp = I915_READ(reg);
2524 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2524 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2525 2525
2526 if (temp & FDI_RX_BIT_LOCK) { 2526 if (temp & FDI_RX_BIT_LOCK) {
2527 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2527 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2528 DRM_DEBUG_KMS("FDI train 1 done.\n"); 2528 DRM_DEBUG_KMS("FDI train 1 done.\n");
2529 break; 2529 break;
2530 } 2530 }
2531 } 2531 }
2532 if (i == 4) 2532 if (i == 4)
2533 DRM_ERROR("FDI train 1 fail!\n"); 2533 DRM_ERROR("FDI train 1 fail!\n");
2534 2534
2535 /* Train 2 */ 2535 /* Train 2 */
2536 reg = FDI_TX_CTL(pipe); 2536 reg = FDI_TX_CTL(pipe);
2537 temp = I915_READ(reg); 2537 temp = I915_READ(reg);
2538 temp &= ~FDI_LINK_TRAIN_NONE; 2538 temp &= ~FDI_LINK_TRAIN_NONE;
2539 temp |= FDI_LINK_TRAIN_PATTERN_2; 2539 temp |= FDI_LINK_TRAIN_PATTERN_2;
2540 if (IS_GEN6(dev)) { 2540 if (IS_GEN6(dev)) {
2541 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2541 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2542 /* SNB-B */ 2542 /* SNB-B */
2543 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2543 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2544 } 2544 }
2545 I915_WRITE(reg, temp); 2545 I915_WRITE(reg, temp);
2546 2546
2547 reg = FDI_RX_CTL(pipe); 2547 reg = FDI_RX_CTL(pipe);
2548 temp = I915_READ(reg); 2548 temp = I915_READ(reg);
2549 if (HAS_PCH_CPT(dev)) { 2549 if (HAS_PCH_CPT(dev)) {
2550 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2550 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2551 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 2551 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2552 } else { 2552 } else {
2553 temp &= ~FDI_LINK_TRAIN_NONE; 2553 temp &= ~FDI_LINK_TRAIN_NONE;
2554 temp |= FDI_LINK_TRAIN_PATTERN_2; 2554 temp |= FDI_LINK_TRAIN_PATTERN_2;
2555 } 2555 }
2556 I915_WRITE(reg, temp); 2556 I915_WRITE(reg, temp);
2557 2557
2558 POSTING_READ(reg); 2558 POSTING_READ(reg);
2559 udelay(150); 2559 udelay(150);
2560 2560
2561 for (i = 0; i < 4; i++ ) { 2561 for (i = 0; i < 4; i++ ) {
2562 reg = FDI_TX_CTL(pipe); 2562 reg = FDI_TX_CTL(pipe);
2563 temp = I915_READ(reg); 2563 temp = I915_READ(reg);
2564 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2564 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2565 temp |= snb_b_fdi_train_param[i]; 2565 temp |= snb_b_fdi_train_param[i];
2566 I915_WRITE(reg, temp); 2566 I915_WRITE(reg, temp);
2567 2567
2568 POSTING_READ(reg); 2568 POSTING_READ(reg);
2569 udelay(500); 2569 udelay(500);
2570 2570
2571 reg = FDI_RX_IIR(pipe); 2571 reg = FDI_RX_IIR(pipe);
2572 temp = I915_READ(reg); 2572 temp = I915_READ(reg);
2573 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2573 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2574 2574
2575 if (temp & FDI_RX_SYMBOL_LOCK) { 2575 if (temp & FDI_RX_SYMBOL_LOCK) {
2576 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2576 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2577 DRM_DEBUG_KMS("FDI train 2 done.\n"); 2577 DRM_DEBUG_KMS("FDI train 2 done.\n");
2578 break; 2578 break;
2579 } 2579 }
2580 } 2580 }
2581 if (i == 4) 2581 if (i == 4)
2582 DRM_ERROR("FDI train 2 fail!\n"); 2582 DRM_ERROR("FDI train 2 fail!\n");
2583 2583
2584 DRM_DEBUG_KMS("FDI train done.\n"); 2584 DRM_DEBUG_KMS("FDI train done.\n");
2585 } 2585 }
2586 2586
2587 static void ironlake_fdi_enable(struct drm_crtc *crtc) 2587 static void ironlake_fdi_enable(struct drm_crtc *crtc)
2588 { 2588 {
2589 struct drm_device *dev = crtc->dev; 2589 struct drm_device *dev = crtc->dev;
2590 struct drm_i915_private *dev_priv = dev->dev_private; 2590 struct drm_i915_private *dev_priv = dev->dev_private;
2591 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2591 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2592 int pipe = intel_crtc->pipe; 2592 int pipe = intel_crtc->pipe;
2593 u32 reg, temp; 2593 u32 reg, temp;
2594 2594
2595 /* Write the TU size bits so error detection works */ 2595 /* Write the TU size bits so error detection works */
2596 I915_WRITE(FDI_RX_TUSIZE1(pipe), 2596 I915_WRITE(FDI_RX_TUSIZE1(pipe),
2597 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 2597 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2598 2598
2599 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 2599 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2600 reg = FDI_RX_CTL(pipe); 2600 reg = FDI_RX_CTL(pipe);
2601 temp = I915_READ(reg); 2601 temp = I915_READ(reg);
2602 temp &= ~((0x7 << 19) | (0x7 << 16)); 2602 temp &= ~((0x7 << 19) | (0x7 << 16));
2603 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2603 temp |= (intel_crtc->fdi_lanes - 1) << 19;
2604 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2604 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2605 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 2605 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2606 2606
2607 POSTING_READ(reg); 2607 POSTING_READ(reg);
2608 udelay(200); 2608 udelay(200);
2609 2609
2610 /* Switch from Rawclk to PCDclk */ 2610 /* Switch from Rawclk to PCDclk */
2611 temp = I915_READ(reg); 2611 temp = I915_READ(reg);
2612 I915_WRITE(reg, temp | FDI_PCDCLK); 2612 I915_WRITE(reg, temp | FDI_PCDCLK);
2613 2613
2614 POSTING_READ(reg); 2614 POSTING_READ(reg);
2615 udelay(200); 2615 udelay(200);
2616 2616
2617 /* Enable CPU FDI TX PLL, always on for Ironlake */ 2617 /* Enable CPU FDI TX PLL, always on for Ironlake */
2618 reg = FDI_TX_CTL(pipe); 2618 reg = FDI_TX_CTL(pipe);
2619 temp = I915_READ(reg); 2619 temp = I915_READ(reg);
2620 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 2620 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2621 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 2621 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2622 2622
2623 POSTING_READ(reg); 2623 POSTING_READ(reg);
2624 udelay(100); 2624 udelay(100);
2625 } 2625 }
2626 } 2626 }
2627 2627
2628 static void ironlake_fdi_disable(struct drm_crtc *crtc) 2628 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2629 { 2629 {
2630 struct drm_device *dev = crtc->dev; 2630 struct drm_device *dev = crtc->dev;
2631 struct drm_i915_private *dev_priv = dev->dev_private; 2631 struct drm_i915_private *dev_priv = dev->dev_private;
2632 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2632 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2633 int pipe = intel_crtc->pipe; 2633 int pipe = intel_crtc->pipe;
2634 u32 reg, temp; 2634 u32 reg, temp;
2635 2635
2636 /* disable CPU FDI tx and PCH FDI rx */ 2636 /* disable CPU FDI tx and PCH FDI rx */
2637 reg = FDI_TX_CTL(pipe); 2637 reg = FDI_TX_CTL(pipe);
2638 temp = I915_READ(reg); 2638 temp = I915_READ(reg);
2639 I915_WRITE(reg, temp & ~FDI_TX_ENABLE); 2639 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2640 POSTING_READ(reg); 2640 POSTING_READ(reg);
2641 2641
2642 reg = FDI_RX_CTL(pipe); 2642 reg = FDI_RX_CTL(pipe);
2643 temp = I915_READ(reg); 2643 temp = I915_READ(reg);
2644 temp &= ~(0x7 << 16); 2644 temp &= ~(0x7 << 16);
2645 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2645 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2646 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 2646 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2647 2647
2648 POSTING_READ(reg); 2648 POSTING_READ(reg);
2649 udelay(100); 2649 udelay(100);
2650 2650
2651 /* Ironlake workaround, disable clock pointer after downing FDI */ 2651 /* Ironlake workaround, disable clock pointer after downing FDI */
2652 if (HAS_PCH_IBX(dev)) { 2652 if (HAS_PCH_IBX(dev)) {
2653 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 2653 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2654 I915_WRITE(FDI_RX_CHICKEN(pipe), 2654 I915_WRITE(FDI_RX_CHICKEN(pipe),
2655 I915_READ(FDI_RX_CHICKEN(pipe) & 2655 I915_READ(FDI_RX_CHICKEN(pipe) &
2656 ~FDI_RX_PHASE_SYNC_POINTER_EN)); 2656 ~FDI_RX_PHASE_SYNC_POINTER_EN));
2657 } 2657 }
2658 2658
2659 /* still set train pattern 1 */ 2659 /* still set train pattern 1 */
2660 reg = FDI_TX_CTL(pipe); 2660 reg = FDI_TX_CTL(pipe);
2661 temp = I915_READ(reg); 2661 temp = I915_READ(reg);
2662 temp &= ~FDI_LINK_TRAIN_NONE; 2662 temp &= ~FDI_LINK_TRAIN_NONE;
2663 temp |= FDI_LINK_TRAIN_PATTERN_1; 2663 temp |= FDI_LINK_TRAIN_PATTERN_1;
2664 I915_WRITE(reg, temp); 2664 I915_WRITE(reg, temp);
2665 2665
2666 reg = FDI_RX_CTL(pipe); 2666 reg = FDI_RX_CTL(pipe);
2667 temp = I915_READ(reg); 2667 temp = I915_READ(reg);
2668 if (HAS_PCH_CPT(dev)) { 2668 if (HAS_PCH_CPT(dev)) {
2669 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2669 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2670 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 2670 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2671 } else { 2671 } else {
2672 temp &= ~FDI_LINK_TRAIN_NONE; 2672 temp &= ~FDI_LINK_TRAIN_NONE;
2673 temp |= FDI_LINK_TRAIN_PATTERN_1; 2673 temp |= FDI_LINK_TRAIN_PATTERN_1;
2674 } 2674 }
2675 /* BPC in FDI rx is consistent with that in PIPECONF */ 2675 /* BPC in FDI rx is consistent with that in PIPECONF */
2676 temp &= ~(0x07 << 16); 2676 temp &= ~(0x07 << 16);
2677 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2677 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2678 I915_WRITE(reg, temp); 2678 I915_WRITE(reg, temp);
2679 2679
2680 POSTING_READ(reg); 2680 POSTING_READ(reg);
2681 udelay(100); 2681 udelay(100);
2682 } 2682 }
2683 2683
2684 /* 2684 /*
2685 * When we disable a pipe, we need to clear any pending scanline wait events 2685 * When we disable a pipe, we need to clear any pending scanline wait events
2686 * to avoid hanging the ring, which we assume we are waiting on. 2686 * to avoid hanging the ring, which we assume we are waiting on.
2687 */ 2687 */
2688 static void intel_clear_scanline_wait(struct drm_device *dev) 2688 static void intel_clear_scanline_wait(struct drm_device *dev)
2689 { 2689 {
2690 struct drm_i915_private *dev_priv = dev->dev_private; 2690 struct drm_i915_private *dev_priv = dev->dev_private;
2691 struct intel_ring_buffer *ring; 2691 struct intel_ring_buffer *ring;
2692 u32 tmp; 2692 u32 tmp;
2693 2693
2694 if (IS_GEN2(dev)) 2694 if (IS_GEN2(dev))
2695 /* Can't break the hang on i8xx */ 2695 /* Can't break the hang on i8xx */
2696 return; 2696 return;
2697 2697
2698 ring = LP_RING(dev_priv); 2698 ring = LP_RING(dev_priv);
2699 tmp = I915_READ_CTL(ring); 2699 tmp = I915_READ_CTL(ring);
2700 if (tmp & RING_WAIT) 2700 if (tmp & RING_WAIT)
2701 I915_WRITE_CTL(ring, tmp); 2701 I915_WRITE_CTL(ring, tmp);
2702 } 2702 }
2703 2703
2704 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 2704 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2705 { 2705 {
2706 struct drm_i915_gem_object *obj; 2706 struct drm_i915_gem_object *obj;
2707 struct drm_i915_private *dev_priv; 2707 struct drm_i915_private *dev_priv;
2708 2708
2709 if (crtc->fb == NULL) 2709 if (crtc->fb == NULL)
2710 return; 2710 return;
2711 2711
2712 obj = to_intel_framebuffer(crtc->fb)->obj; 2712 obj = to_intel_framebuffer(crtc->fb)->obj;
2713 dev_priv = crtc->dev->dev_private; 2713 dev_priv = crtc->dev->dev_private;
2714 wait_event(dev_priv->pending_flip_queue, 2714 wait_event(dev_priv->pending_flip_queue,
2715 atomic_read(&obj->pending_flip) == 0); 2715 atomic_read(&obj->pending_flip) == 0);
2716 } 2716 }
2717 2717
2718 static bool intel_crtc_driving_pch(struct drm_crtc *crtc) 2718 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2719 { 2719 {
2720 struct drm_device *dev = crtc->dev; 2720 struct drm_device *dev = crtc->dev;
2721 struct drm_mode_config *mode_config = &dev->mode_config; 2721 struct drm_mode_config *mode_config = &dev->mode_config;
2722 struct intel_encoder *encoder; 2722 struct intel_encoder *encoder;
2723 2723
2724 /* 2724 /*
2725 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that 2725 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2726 * must be driven by its own crtc; no sharing is possible. 2726 * must be driven by its own crtc; no sharing is possible.
2727 */ 2727 */
2728 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 2728 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2729 if (encoder->base.crtc != crtc) 2729 if (encoder->base.crtc != crtc)
2730 continue; 2730 continue;
2731 2731
2732 switch (encoder->type) { 2732 switch (encoder->type) {
2733 case INTEL_OUTPUT_EDP: 2733 case INTEL_OUTPUT_EDP:
2734 if (!intel_encoder_is_pch_edp(&encoder->base)) 2734 if (!intel_encoder_is_pch_edp(&encoder->base))
2735 return false; 2735 return false;
2736 continue; 2736 continue;
2737 } 2737 }
2738 } 2738 }
2739 2739
2740 return true; 2740 return true;
2741 } 2741 }
2742 2742
2743 /* 2743 /*
2744 * Enable PCH resources required for PCH ports: 2744 * Enable PCH resources required for PCH ports:
2745 * - PCH PLLs 2745 * - PCH PLLs
2746 * - FDI training & RX/TX 2746 * - FDI training & RX/TX
2747 * - update transcoder timings 2747 * - update transcoder timings
2748 * - DP transcoding bits 2748 * - DP transcoding bits
2749 * - transcoder 2749 * - transcoder
2750 */ 2750 */
2751 static void ironlake_pch_enable(struct drm_crtc *crtc) 2751 static void ironlake_pch_enable(struct drm_crtc *crtc)
2752 { 2752 {
2753 struct drm_device *dev = crtc->dev; 2753 struct drm_device *dev = crtc->dev;
2754 struct drm_i915_private *dev_priv = dev->dev_private; 2754 struct drm_i915_private *dev_priv = dev->dev_private;
2755 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2755 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2756 int pipe = intel_crtc->pipe; 2756 int pipe = intel_crtc->pipe;
2757 u32 reg, temp; 2757 u32 reg, temp;
2758 2758
2759 /* For PCH output, training FDI link */ 2759 /* For PCH output, training FDI link */
2760 if (IS_GEN6(dev)) 2760 if (IS_GEN6(dev))
2761 gen6_fdi_link_train(crtc); 2761 gen6_fdi_link_train(crtc);
2762 else 2762 else
2763 ironlake_fdi_link_train(crtc); 2763 ironlake_fdi_link_train(crtc);
2764 2764
2765 intel_enable_pch_pll(dev_priv, pipe); 2765 intel_enable_pch_pll(dev_priv, pipe);
2766 2766
2767 if (HAS_PCH_CPT(dev)) { 2767 if (HAS_PCH_CPT(dev)) {
2768 /* Be sure PCH DPLL SEL is set */ 2768 /* Be sure PCH DPLL SEL is set */
2769 temp = I915_READ(PCH_DPLL_SEL); 2769 temp = I915_READ(PCH_DPLL_SEL);
2770 if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0) 2770 if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
2771 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); 2771 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
2772 else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0) 2772 else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
2773 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); 2773 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2774 I915_WRITE(PCH_DPLL_SEL, temp); 2774 I915_WRITE(PCH_DPLL_SEL, temp);
2775 } 2775 }
2776 2776
2777 /* set transcoder timing, panel must allow it */ 2777 /* set transcoder timing, panel must allow it */
2778 assert_panel_unlocked(dev_priv, pipe); 2778 assert_panel_unlocked(dev_priv, pipe);
2779 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); 2779 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2780 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); 2780 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
2781 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); 2781 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
2782 2782
2783 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); 2783 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
2784 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); 2784 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2785 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); 2785 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
2786 2786
2787 intel_fdi_normal_train(crtc); 2787 intel_fdi_normal_train(crtc);
2788 2788
2789 /* For PCH DP, enable TRANS_DP_CTL */ 2789 /* For PCH DP, enable TRANS_DP_CTL */
2790 if (HAS_PCH_CPT(dev) && 2790 if (HAS_PCH_CPT(dev) &&
2791 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 2791 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
2792 reg = TRANS_DP_CTL(pipe); 2792 reg = TRANS_DP_CTL(pipe);
2793 temp = I915_READ(reg); 2793 temp = I915_READ(reg);
2794 temp &= ~(TRANS_DP_PORT_SEL_MASK | 2794 temp &= ~(TRANS_DP_PORT_SEL_MASK |
2795 TRANS_DP_SYNC_MASK | 2795 TRANS_DP_SYNC_MASK |
2796 TRANS_DP_BPC_MASK); 2796 TRANS_DP_BPC_MASK);
2797 temp |= (TRANS_DP_OUTPUT_ENABLE | 2797 temp |= (TRANS_DP_OUTPUT_ENABLE |
2798 TRANS_DP_ENH_FRAMING); 2798 TRANS_DP_ENH_FRAMING);
2799 temp |= TRANS_DP_8BPC; 2799 temp |= TRANS_DP_8BPC;
2800 2800
2801 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 2801 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2802 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 2802 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2803 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) 2803 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
2804 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 2804 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2805 2805
2806 switch (intel_trans_dp_port_sel(crtc)) { 2806 switch (intel_trans_dp_port_sel(crtc)) {
2807 case PCH_DP_B: 2807 case PCH_DP_B:
2808 temp |= TRANS_DP_PORT_SEL_B; 2808 temp |= TRANS_DP_PORT_SEL_B;
2809 break; 2809 break;
2810 case PCH_DP_C: 2810 case PCH_DP_C:
2811 temp |= TRANS_DP_PORT_SEL_C; 2811 temp |= TRANS_DP_PORT_SEL_C;
2812 break; 2812 break;
2813 case PCH_DP_D: 2813 case PCH_DP_D:
2814 temp |= TRANS_DP_PORT_SEL_D; 2814 temp |= TRANS_DP_PORT_SEL_D;
2815 break; 2815 break;
2816 default: 2816 default:
2817 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); 2817 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
2818 temp |= TRANS_DP_PORT_SEL_B; 2818 temp |= TRANS_DP_PORT_SEL_B;
2819 break; 2819 break;
2820 } 2820 }
2821 2821
2822 I915_WRITE(reg, temp); 2822 I915_WRITE(reg, temp);
2823 } 2823 }
2824 2824
2825 intel_enable_transcoder(dev_priv, pipe); 2825 intel_enable_transcoder(dev_priv, pipe);
2826 } 2826 }
2827 2827
2828 static void ironlake_crtc_enable(struct drm_crtc *crtc) 2828 static void ironlake_crtc_enable(struct drm_crtc *crtc)
2829 { 2829 {
2830 struct drm_device *dev = crtc->dev; 2830 struct drm_device *dev = crtc->dev;
2831 struct drm_i915_private *dev_priv = dev->dev_private; 2831 struct drm_i915_private *dev_priv = dev->dev_private;
2832 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2832 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2833 int pipe = intel_crtc->pipe; 2833 int pipe = intel_crtc->pipe;
2834 int plane = intel_crtc->plane; 2834 int plane = intel_crtc->plane;
2835 u32 temp; 2835 u32 temp;
2836 bool is_pch_port; 2836 bool is_pch_port;
2837 2837
2838 if (intel_crtc->active) 2838 if (intel_crtc->active)
2839 return; 2839 return;
2840 2840
2841 intel_crtc->active = true; 2841 intel_crtc->active = true;
2842 intel_update_watermarks(dev); 2842 intel_update_watermarks(dev);
2843 2843
2844 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 2844 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
2845 temp = I915_READ(PCH_LVDS); 2845 temp = I915_READ(PCH_LVDS);
2846 if ((temp & LVDS_PORT_EN) == 0) 2846 if ((temp & LVDS_PORT_EN) == 0)
2847 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); 2847 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
2848 } 2848 }
2849 2849
2850 is_pch_port = intel_crtc_driving_pch(crtc); 2850 is_pch_port = intel_crtc_driving_pch(crtc);
2851 2851
2852 if (is_pch_port) 2852 if (is_pch_port)
2853 ironlake_fdi_enable(crtc); 2853 ironlake_fdi_enable(crtc);
2854 else 2854 else
2855 ironlake_fdi_disable(crtc); 2855 ironlake_fdi_disable(crtc);
2856 2856
2857 /* Enable panel fitting for LVDS */ 2857 /* Enable panel fitting for LVDS */
2858 if (dev_priv->pch_pf_size && 2858 if (dev_priv->pch_pf_size &&
2859 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { 2859 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
2860 /* Force use of hard-coded filter coefficients 2860 /* Force use of hard-coded filter coefficients
2861 * as some pre-programmed values are broken, 2861 * as some pre-programmed values are broken,
2862 * e.g. x201. 2862 * e.g. x201.
2863 */ 2863 */
2864 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 2864 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
2865 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); 2865 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
2866 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); 2866 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
2867 } 2867 }
2868 2868
2869 intel_enable_pipe(dev_priv, pipe, is_pch_port); 2869 intel_enable_pipe(dev_priv, pipe, is_pch_port);
2870 intel_enable_plane(dev_priv, plane, pipe); 2870 intel_enable_plane(dev_priv, plane, pipe);
2871 2871
2872 if (is_pch_port) 2872 if (is_pch_port)
2873 ironlake_pch_enable(crtc); 2873 ironlake_pch_enable(crtc);
2874 2874
2875 intel_crtc_load_lut(crtc); 2875 intel_crtc_load_lut(crtc);
2876 intel_update_fbc(dev); 2876 intel_update_fbc(dev);
2877 intel_crtc_update_cursor(crtc, true); 2877 intel_crtc_update_cursor(crtc, true);
2878 } 2878 }
2879 2879
2880 static void ironlake_crtc_disable(struct drm_crtc *crtc) 2880 static void ironlake_crtc_disable(struct drm_crtc *crtc)
2881 { 2881 {
2882 struct drm_device *dev = crtc->dev; 2882 struct drm_device *dev = crtc->dev;
2883 struct drm_i915_private *dev_priv = dev->dev_private; 2883 struct drm_i915_private *dev_priv = dev->dev_private;
2884 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2884 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2885 int pipe = intel_crtc->pipe; 2885 int pipe = intel_crtc->pipe;
2886 int plane = intel_crtc->plane; 2886 int plane = intel_crtc->plane;
2887 u32 reg, temp; 2887 u32 reg, temp;
2888 2888
2889 if (!intel_crtc->active) 2889 if (!intel_crtc->active)
2890 return; 2890 return;
2891 2891
2892 intel_crtc_wait_for_pending_flips(crtc); 2892 intel_crtc_wait_for_pending_flips(crtc);
2893 drm_vblank_off(dev, pipe); 2893 drm_vblank_off(dev, pipe);
2894 intel_crtc_update_cursor(crtc, false); 2894 intel_crtc_update_cursor(crtc, false);
2895 2895
2896 intel_disable_plane(dev_priv, plane, pipe); 2896 intel_disable_plane(dev_priv, plane, pipe);
2897 2897
2898 if (dev_priv->cfb_plane == plane && 2898 if (dev_priv->cfb_plane == plane &&
2899 dev_priv->display.disable_fbc) 2899 dev_priv->display.disable_fbc)
2900 dev_priv->display.disable_fbc(dev); 2900 dev_priv->display.disable_fbc(dev);
2901 2901
2902 intel_disable_pipe(dev_priv, pipe); 2902 intel_disable_pipe(dev_priv, pipe);
2903 2903
2904 /* Disable PF */ 2904 /* Disable PF */
2905 I915_WRITE(PF_CTL(pipe), 0); 2905 I915_WRITE(PF_CTL(pipe), 0);
2906 I915_WRITE(PF_WIN_SZ(pipe), 0); 2906 I915_WRITE(PF_WIN_SZ(pipe), 0);
2907 2907
2908 ironlake_fdi_disable(crtc); 2908 ironlake_fdi_disable(crtc);
2909 2909
2910 /* This is a horrible layering violation; we should be doing this in 2910 /* This is a horrible layering violation; we should be doing this in
2911 * the connector/encoder ->prepare instead, but we don't always have 2911 * the connector/encoder ->prepare instead, but we don't always have
2912 * enough information there about the config to know whether it will 2912 * enough information there about the config to know whether it will
2913 * actually be necessary or just cause undesired flicker. 2913 * actually be necessary or just cause undesired flicker.
2914 */ 2914 */
2915 intel_disable_pch_ports(dev_priv, pipe); 2915 intel_disable_pch_ports(dev_priv, pipe);
2916 2916
2917 intel_disable_transcoder(dev_priv, pipe); 2917 intel_disable_transcoder(dev_priv, pipe);
2918 2918
2919 if (HAS_PCH_CPT(dev)) { 2919 if (HAS_PCH_CPT(dev)) {
2920 /* disable TRANS_DP_CTL */ 2920 /* disable TRANS_DP_CTL */
2921 reg = TRANS_DP_CTL(pipe); 2921 reg = TRANS_DP_CTL(pipe);
2922 temp = I915_READ(reg); 2922 temp = I915_READ(reg);
2923 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); 2923 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
2924 temp |= TRANS_DP_PORT_SEL_NONE; 2924 temp |= TRANS_DP_PORT_SEL_NONE;
2925 I915_WRITE(reg, temp); 2925 I915_WRITE(reg, temp);
2926 2926
2927 /* disable DPLL_SEL */ 2927 /* disable DPLL_SEL */
2928 temp = I915_READ(PCH_DPLL_SEL); 2928 temp = I915_READ(PCH_DPLL_SEL);
2929 switch (pipe) { 2929 switch (pipe) {
2930 case 0: 2930 case 0:
2931 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); 2931 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
2932 break; 2932 break;
2933 case 1: 2933 case 1:
2934 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); 2934 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2935 break; 2935 break;
2936 case 2: 2936 case 2:
2937 /* FIXME: manage transcoder PLLs? */ 2937 /* FIXME: manage transcoder PLLs? */
2938 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); 2938 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
2939 break; 2939 break;
2940 default: 2940 default:
2941 BUG(); /* wtf */ 2941 BUG(); /* wtf */
2942 } 2942 }
2943 I915_WRITE(PCH_DPLL_SEL, temp); 2943 I915_WRITE(PCH_DPLL_SEL, temp);
2944 } 2944 }
2945 2945
2946 /* disable PCH DPLL */ 2946 /* disable PCH DPLL */
2947 intel_disable_pch_pll(dev_priv, pipe); 2947 intel_disable_pch_pll(dev_priv, pipe);
2948 2948
2949 /* Switch from PCDclk to Rawclk */ 2949 /* Switch from PCDclk to Rawclk */
2950 reg = FDI_RX_CTL(pipe); 2950 reg = FDI_RX_CTL(pipe);
2951 temp = I915_READ(reg); 2951 temp = I915_READ(reg);
2952 I915_WRITE(reg, temp & ~FDI_PCDCLK); 2952 I915_WRITE(reg, temp & ~FDI_PCDCLK);
2953 2953
2954 /* Disable CPU FDI TX PLL */ 2954 /* Disable CPU FDI TX PLL */
2955 reg = FDI_TX_CTL(pipe); 2955 reg = FDI_TX_CTL(pipe);
2956 temp = I915_READ(reg); 2956 temp = I915_READ(reg);
2957 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 2957 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2958 2958
2959 POSTING_READ(reg); 2959 POSTING_READ(reg);
2960 udelay(100); 2960 udelay(100);
2961 2961
2962 reg = FDI_RX_CTL(pipe); 2962 reg = FDI_RX_CTL(pipe);
2963 temp = I915_READ(reg); 2963 temp = I915_READ(reg);
2964 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 2964 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2965 2965
2966 /* Wait for the clocks to turn off. */ 2966 /* Wait for the clocks to turn off. */
2967 POSTING_READ(reg); 2967 POSTING_READ(reg);
2968 udelay(100); 2968 udelay(100);
2969 2969
2970 intel_crtc->active = false; 2970 intel_crtc->active = false;
2971 intel_update_watermarks(dev); 2971 intel_update_watermarks(dev);
2972 intel_update_fbc(dev); 2972 intel_update_fbc(dev);
2973 intel_clear_scanline_wait(dev); 2973 intel_clear_scanline_wait(dev);
2974 } 2974 }
2975 2975
2976 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) 2976 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2977 { 2977 {
2978 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2978 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2979 int pipe = intel_crtc->pipe; 2979 int pipe = intel_crtc->pipe;
2980 int plane = intel_crtc->plane; 2980 int plane = intel_crtc->plane;
2981 2981
2982 /* XXX: When our outputs are all unaware of DPMS modes other than off 2982 /* XXX: When our outputs are all unaware of DPMS modes other than off
2983 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 2983 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
2984 */ 2984 */
2985 switch (mode) { 2985 switch (mode) {
2986 case DRM_MODE_DPMS_ON: 2986 case DRM_MODE_DPMS_ON:
2987 case DRM_MODE_DPMS_STANDBY: 2987 case DRM_MODE_DPMS_STANDBY:
2988 case DRM_MODE_DPMS_SUSPEND: 2988 case DRM_MODE_DPMS_SUSPEND:
2989 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); 2989 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
2990 ironlake_crtc_enable(crtc); 2990 ironlake_crtc_enable(crtc);
2991 break; 2991 break;
2992 2992
2993 case DRM_MODE_DPMS_OFF: 2993 case DRM_MODE_DPMS_OFF:
2994 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); 2994 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
2995 ironlake_crtc_disable(crtc); 2995 ironlake_crtc_disable(crtc);
2996 break; 2996 break;
2997 } 2997 }
2998 } 2998 }
2999 2999
3000 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) 3000 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3001 { 3001 {
3002 if (!enable && intel_crtc->overlay) { 3002 if (!enable && intel_crtc->overlay) {
3003 struct drm_device *dev = intel_crtc->base.dev; 3003 struct drm_device *dev = intel_crtc->base.dev;
3004 struct drm_i915_private *dev_priv = dev->dev_private; 3004 struct drm_i915_private *dev_priv = dev->dev_private;
3005 3005
3006 mutex_lock(&dev->struct_mutex); 3006 mutex_lock(&dev->struct_mutex);
3007 dev_priv->mm.interruptible = false; 3007 dev_priv->mm.interruptible = false;
3008 (void) intel_overlay_switch_off(intel_crtc->overlay); 3008 (void) intel_overlay_switch_off(intel_crtc->overlay);
3009 dev_priv->mm.interruptible = true; 3009 dev_priv->mm.interruptible = true;
3010 mutex_unlock(&dev->struct_mutex); 3010 mutex_unlock(&dev->struct_mutex);
3011 } 3011 }
3012 3012
3013 /* Let userspace switch the overlay on again. In most cases userspace 3013 /* Let userspace switch the overlay on again. In most cases userspace
3014 * has to recompute where to put it anyway. 3014 * has to recompute where to put it anyway.
3015 */ 3015 */
3016 } 3016 }
3017 3017
3018 static void i9xx_crtc_enable(struct drm_crtc *crtc) 3018 static void i9xx_crtc_enable(struct drm_crtc *crtc)
3019 { 3019 {
3020 struct drm_device *dev = crtc->dev; 3020 struct drm_device *dev = crtc->dev;
3021 struct drm_i915_private *dev_priv = dev->dev_private; 3021 struct drm_i915_private *dev_priv = dev->dev_private;
3022 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3022 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3023 int pipe = intel_crtc->pipe; 3023 int pipe = intel_crtc->pipe;
3024 int plane = intel_crtc->plane; 3024 int plane = intel_crtc->plane;
3025 3025
3026 if (intel_crtc->active) 3026 if (intel_crtc->active)
3027 return; 3027 return;
3028 3028
3029 intel_crtc->active = true; 3029 intel_crtc->active = true;
3030 intel_update_watermarks(dev); 3030 intel_update_watermarks(dev);
3031 3031
3032 intel_enable_pll(dev_priv, pipe); 3032 intel_enable_pll(dev_priv, pipe);
3033 intel_enable_pipe(dev_priv, pipe, false); 3033 intel_enable_pipe(dev_priv, pipe, false);
3034 intel_enable_plane(dev_priv, plane, pipe); 3034 intel_enable_plane(dev_priv, plane, pipe);
3035 3035
3036 intel_crtc_load_lut(crtc); 3036 intel_crtc_load_lut(crtc);
3037 intel_update_fbc(dev); 3037 intel_update_fbc(dev);
3038 3038
3039 /* Give the overlay scaler a chance to enable if it's on this pipe */ 3039 /* Give the overlay scaler a chance to enable if it's on this pipe */
3040 intel_crtc_dpms_overlay(intel_crtc, true); 3040 intel_crtc_dpms_overlay(intel_crtc, true);
3041 intel_crtc_update_cursor(crtc, true); 3041 intel_crtc_update_cursor(crtc, true);
3042 } 3042 }
3043 3043
3044 static void i9xx_crtc_disable(struct drm_crtc *crtc) 3044 static void i9xx_crtc_disable(struct drm_crtc *crtc)
3045 { 3045 {
3046 struct drm_device *dev = crtc->dev; 3046 struct drm_device *dev = crtc->dev;
3047 struct drm_i915_private *dev_priv = dev->dev_private; 3047 struct drm_i915_private *dev_priv = dev->dev_private;
3048 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3048 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3049 int pipe = intel_crtc->pipe; 3049 int pipe = intel_crtc->pipe;
3050 int plane = intel_crtc->plane; 3050 int plane = intel_crtc->plane;
3051 3051
3052 if (!intel_crtc->active) 3052 if (!intel_crtc->active)
3053 return; 3053 return;
3054 3054
3055 /* Give the overlay scaler a chance to disable if it's on this pipe */ 3055 /* Give the overlay scaler a chance to disable if it's on this pipe */
3056 intel_crtc_wait_for_pending_flips(crtc); 3056 intel_crtc_wait_for_pending_flips(crtc);
3057 drm_vblank_off(dev, pipe); 3057 drm_vblank_off(dev, pipe);
3058 intel_crtc_dpms_overlay(intel_crtc, false); 3058 intel_crtc_dpms_overlay(intel_crtc, false);
3059 intel_crtc_update_cursor(crtc, false); 3059 intel_crtc_update_cursor(crtc, false);
3060 3060
3061 if (dev_priv->cfb_plane == plane && 3061 if (dev_priv->cfb_plane == plane &&
3062 dev_priv->display.disable_fbc) 3062 dev_priv->display.disable_fbc)
3063 dev_priv->display.disable_fbc(dev); 3063 dev_priv->display.disable_fbc(dev);
3064 3064
3065 intel_disable_plane(dev_priv, plane, pipe); 3065 intel_disable_plane(dev_priv, plane, pipe);
3066 intel_disable_pipe(dev_priv, pipe); 3066 intel_disable_pipe(dev_priv, pipe);
3067 intel_disable_pll(dev_priv, pipe); 3067 intel_disable_pll(dev_priv, pipe);
3068 3068
3069 intel_crtc->active = false; 3069 intel_crtc->active = false;
3070 intel_update_fbc(dev); 3070 intel_update_fbc(dev);
3071 intel_update_watermarks(dev); 3071 intel_update_watermarks(dev);
3072 intel_clear_scanline_wait(dev); 3072 intel_clear_scanline_wait(dev);
3073 } 3073 }
3074 3074
3075 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) 3075 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3076 { 3076 {
3077 /* XXX: When our outputs are all unaware of DPMS modes other than off 3077 /* XXX: When our outputs are all unaware of DPMS modes other than off
3078 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 3078 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3079 */ 3079 */
3080 switch (mode) { 3080 switch (mode) {
3081 case DRM_MODE_DPMS_ON: 3081 case DRM_MODE_DPMS_ON:
3082 case DRM_MODE_DPMS_STANDBY: 3082 case DRM_MODE_DPMS_STANDBY:
3083 case DRM_MODE_DPMS_SUSPEND: 3083 case DRM_MODE_DPMS_SUSPEND:
3084 i9xx_crtc_enable(crtc); 3084 i9xx_crtc_enable(crtc);
3085 break; 3085 break;
3086 case DRM_MODE_DPMS_OFF: 3086 case DRM_MODE_DPMS_OFF:
3087 i9xx_crtc_disable(crtc); 3087 i9xx_crtc_disable(crtc);
3088 break; 3088 break;
3089 } 3089 }
3090 } 3090 }
3091 3091
3092 /** 3092 /**
3093 * Sets the power management mode of the pipe and plane. 3093 * Sets the power management mode of the pipe and plane.
3094 */ 3094 */
3095 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) 3095 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3096 { 3096 {
3097 struct drm_device *dev = crtc->dev; 3097 struct drm_device *dev = crtc->dev;
3098 struct drm_i915_private *dev_priv = dev->dev_private; 3098 struct drm_i915_private *dev_priv = dev->dev_private;
3099 struct drm_i915_master_private *master_priv; 3099 struct drm_i915_master_private *master_priv;
3100 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3100 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3101 int pipe = intel_crtc->pipe; 3101 int pipe = intel_crtc->pipe;
3102 bool enabled; 3102 bool enabled;
3103 3103
3104 if (intel_crtc->dpms_mode == mode) 3104 if (intel_crtc->dpms_mode == mode)
3105 return; 3105 return;
3106 3106
3107 intel_crtc->dpms_mode = mode; 3107 intel_crtc->dpms_mode = mode;
3108 3108
3109 dev_priv->display.dpms(crtc, mode); 3109 dev_priv->display.dpms(crtc, mode);
3110 3110
3111 if (!dev->primary->master) 3111 if (!dev->primary->master)
3112 return; 3112 return;
3113 3113
3114 master_priv = dev->primary->master->driver_priv; 3114 master_priv = dev->primary->master->driver_priv;
3115 if (!master_priv->sarea_priv) 3115 if (!master_priv->sarea_priv)
3116 return; 3116 return;
3117 3117
3118 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; 3118 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3119 3119
3120 switch (pipe) { 3120 switch (pipe) {
3121 case 0: 3121 case 0:
3122 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; 3122 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3123 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; 3123 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3124 break; 3124 break;
3125 case 1: 3125 case 1:
3126 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; 3126 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3127 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; 3127 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3128 break; 3128 break;
3129 default: 3129 default:
3130 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); 3130 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3131 break; 3131 break;
3132 } 3132 }
3133 } 3133 }
3134 3134
3135 static void intel_crtc_disable(struct drm_crtc *crtc) 3135 static void intel_crtc_disable(struct drm_crtc *crtc)
3136 { 3136 {
3137 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 3137 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3138 struct drm_device *dev = crtc->dev; 3138 struct drm_device *dev = crtc->dev;
3139 3139
3140 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); 3140 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3141 3141
3142 if (crtc->fb) { 3142 if (crtc->fb) {
3143 mutex_lock(&dev->struct_mutex); 3143 mutex_lock(&dev->struct_mutex);
3144 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); 3144 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
3145 mutex_unlock(&dev->struct_mutex); 3145 mutex_unlock(&dev->struct_mutex);
3146 } 3146 }
3147 } 3147 }
3148 3148
3149 /* Prepare for a mode set. 3149 /* Prepare for a mode set.
3150 * 3150 *
3151 * Note we could be a lot smarter here. We need to figure out which outputs 3151 * Note we could be a lot smarter here. We need to figure out which outputs
3152 * will be enabled, which disabled (in short, how the config will changes) 3152 * will be enabled, which disabled (in short, how the config will changes)
3153 * and perform the minimum necessary steps to accomplish that, e.g. updating 3153 * and perform the minimum necessary steps to accomplish that, e.g. updating
3154 * watermarks, FBC configuration, making sure PLLs are programmed correctly, 3154 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3155 * panel fitting is in the proper state, etc. 3155 * panel fitting is in the proper state, etc.
3156 */ 3156 */
3157 static void i9xx_crtc_prepare(struct drm_crtc *crtc) 3157 static void i9xx_crtc_prepare(struct drm_crtc *crtc)
3158 { 3158 {
3159 i9xx_crtc_disable(crtc); 3159 i9xx_crtc_disable(crtc);
3160 } 3160 }
3161 3161
3162 static void i9xx_crtc_commit(struct drm_crtc *crtc) 3162 static void i9xx_crtc_commit(struct drm_crtc *crtc)
3163 { 3163 {
3164 i9xx_crtc_enable(crtc); 3164 i9xx_crtc_enable(crtc);
3165 } 3165 }
3166 3166
3167 static void ironlake_crtc_prepare(struct drm_crtc *crtc) 3167 static void ironlake_crtc_prepare(struct drm_crtc *crtc)
3168 { 3168 {
3169 ironlake_crtc_disable(crtc); 3169 ironlake_crtc_disable(crtc);
3170 } 3170 }
3171 3171
3172 static void ironlake_crtc_commit(struct drm_crtc *crtc) 3172 static void ironlake_crtc_commit(struct drm_crtc *crtc)
3173 { 3173 {
3174 ironlake_crtc_enable(crtc); 3174 ironlake_crtc_enable(crtc);
3175 } 3175 }
3176 3176
3177 void intel_encoder_prepare (struct drm_encoder *encoder) 3177 void intel_encoder_prepare (struct drm_encoder *encoder)
3178 { 3178 {
3179 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 3179 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3180 /* lvds has its own version of prepare see intel_lvds_prepare */ 3180 /* lvds has its own version of prepare see intel_lvds_prepare */
3181 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); 3181 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3182 } 3182 }
3183 3183
3184 void intel_encoder_commit (struct drm_encoder *encoder) 3184 void intel_encoder_commit (struct drm_encoder *encoder)
3185 { 3185 {
3186 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 3186 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3187 /* lvds has its own version of commit see intel_lvds_commit */ 3187 /* lvds has its own version of commit see intel_lvds_commit */
3188 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); 3188 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3189 } 3189 }
3190 3190
3191 void intel_encoder_destroy(struct drm_encoder *encoder) 3191 void intel_encoder_destroy(struct drm_encoder *encoder)
3192 { 3192 {
3193 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 3193 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3194 3194
3195 drm_encoder_cleanup(encoder); 3195 drm_encoder_cleanup(encoder);
3196 kfree(intel_encoder); 3196 kfree(intel_encoder);
3197 } 3197 }
3198 3198
3199 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, 3199 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3200 struct drm_display_mode *mode, 3200 struct drm_display_mode *mode,
3201 struct drm_display_mode *adjusted_mode) 3201 struct drm_display_mode *adjusted_mode)
3202 { 3202 {
3203 struct drm_device *dev = crtc->dev; 3203 struct drm_device *dev = crtc->dev;
3204 3204
3205 if (HAS_PCH_SPLIT(dev)) { 3205 if (HAS_PCH_SPLIT(dev)) {
3206 /* FDI link clock is fixed at 2.7G */ 3206 /* FDI link clock is fixed at 2.7G */
3207 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) 3207 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3208 return false; 3208 return false;
3209 } 3209 }
3210 3210
3211 /* XXX some encoders set the crtcinfo, others don't. 3211 /* XXX some encoders set the crtcinfo, others don't.
3212 * Obviously we need some form of conflict resolution here... 3212 * Obviously we need some form of conflict resolution here...
3213 */ 3213 */
3214 if (adjusted_mode->crtc_htotal == 0) 3214 if (adjusted_mode->crtc_htotal == 0)
3215 drm_mode_set_crtcinfo(adjusted_mode, 0); 3215 drm_mode_set_crtcinfo(adjusted_mode, 0);
3216 3216
3217 return true; 3217 return true;
3218 } 3218 }
3219 3219
3220 static int i945_get_display_clock_speed(struct drm_device *dev) 3220 static int i945_get_display_clock_speed(struct drm_device *dev)
3221 { 3221 {
3222 return 400000; 3222 return 400000;
3223 } 3223 }
3224 3224
3225 static int i915_get_display_clock_speed(struct drm_device *dev) 3225 static int i915_get_display_clock_speed(struct drm_device *dev)
3226 { 3226 {
3227 return 333000; 3227 return 333000;
3228 } 3228 }
3229 3229
3230 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) 3230 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3231 { 3231 {
3232 return 200000; 3232 return 200000;
3233 } 3233 }
3234 3234
3235 static int i915gm_get_display_clock_speed(struct drm_device *dev) 3235 static int i915gm_get_display_clock_speed(struct drm_device *dev)
3236 { 3236 {
3237 u16 gcfgc = 0; 3237 u16 gcfgc = 0;
3238 3238
3239 pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 3239 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3240 3240
3241 if (gcfgc & GC_LOW_FREQUENCY_ENABLE) 3241 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3242 return 133000; 3242 return 133000;
3243 else { 3243 else {
3244 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 3244 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3245 case GC_DISPLAY_CLOCK_333_MHZ: 3245 case GC_DISPLAY_CLOCK_333_MHZ:
3246 return 333000; 3246 return 333000;
3247 default: 3247 default:
3248 case GC_DISPLAY_CLOCK_190_200_MHZ: 3248 case GC_DISPLAY_CLOCK_190_200_MHZ:
3249 return 190000; 3249 return 190000;
3250 } 3250 }
3251 } 3251 }
3252 } 3252 }
3253 3253
3254 static int i865_get_display_clock_speed(struct drm_device *dev) 3254 static int i865_get_display_clock_speed(struct drm_device *dev)
3255 { 3255 {
3256 return 266000; 3256 return 266000;
3257 } 3257 }
3258 3258
3259 static int i855_get_display_clock_speed(struct drm_device *dev) 3259 static int i855_get_display_clock_speed(struct drm_device *dev)
3260 { 3260 {
3261 u16 hpllcc = 0; 3261 u16 hpllcc = 0;
3262 /* Assume that the hardware is in the high speed state. This 3262 /* Assume that the hardware is in the high speed state. This
3263 * should be the default. 3263 * should be the default.
3264 */ 3264 */
3265 switch (hpllcc & GC_CLOCK_CONTROL_MASK) { 3265 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3266 case GC_CLOCK_133_200: 3266 case GC_CLOCK_133_200:
3267 case GC_CLOCK_100_200: 3267 case GC_CLOCK_100_200:
3268 return 200000; 3268 return 200000;
3269 case GC_CLOCK_166_250: 3269 case GC_CLOCK_166_250:
3270 return 250000; 3270 return 250000;
3271 case GC_CLOCK_100_133: 3271 case GC_CLOCK_100_133:
3272 return 133000; 3272 return 133000;
3273 } 3273 }
3274 3274
3275 /* Shouldn't happen */ 3275 /* Shouldn't happen */
3276 return 0; 3276 return 0;
3277 } 3277 }
3278 3278
3279 static int i830_get_display_clock_speed(struct drm_device *dev) 3279 static int i830_get_display_clock_speed(struct drm_device *dev)
3280 { 3280 {
3281 return 133000; 3281 return 133000;
3282 } 3282 }
3283 3283
3284 struct fdi_m_n { 3284 struct fdi_m_n {
3285 u32 tu; 3285 u32 tu;
3286 u32 gmch_m; 3286 u32 gmch_m;
3287 u32 gmch_n; 3287 u32 gmch_n;
3288 u32 link_m; 3288 u32 link_m;
3289 u32 link_n; 3289 u32 link_n;
3290 }; 3290 };
3291 3291
3292 static void 3292 static void
3293 fdi_reduce_ratio(u32 *num, u32 *den) 3293 fdi_reduce_ratio(u32 *num, u32 *den)
3294 { 3294 {
3295 while (*num > 0xffffff || *den > 0xffffff) { 3295 while (*num > 0xffffff || *den > 0xffffff) {
3296 *num >>= 1; 3296 *num >>= 1;
3297 *den >>= 1; 3297 *den >>= 1;
3298 } 3298 }
3299 } 3299 }
3300 3300
3301 static void 3301 static void
3302 ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, 3302 ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3303 int link_clock, struct fdi_m_n *m_n) 3303 int link_clock, struct fdi_m_n *m_n)
3304 { 3304 {
3305 m_n->tu = 64; /* default size */ 3305 m_n->tu = 64; /* default size */
3306 3306
3307 /* BUG_ON(pixel_clock > INT_MAX / 36); */ 3307 /* BUG_ON(pixel_clock > INT_MAX / 36); */
3308 m_n->gmch_m = bits_per_pixel * pixel_clock; 3308 m_n->gmch_m = bits_per_pixel * pixel_clock;
3309 m_n->gmch_n = link_clock * nlanes * 8; 3309 m_n->gmch_n = link_clock * nlanes * 8;
3310 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 3310 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3311 3311
3312 m_n->link_m = pixel_clock; 3312 m_n->link_m = pixel_clock;
3313 m_n->link_n = link_clock; 3313 m_n->link_n = link_clock;
3314 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); 3314 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3315 } 3315 }
3316 3316
3317 3317
3318 struct intel_watermark_params { 3318 struct intel_watermark_params {
3319 unsigned long fifo_size; 3319 unsigned long fifo_size;
3320 unsigned long max_wm; 3320 unsigned long max_wm;
3321 unsigned long default_wm; 3321 unsigned long default_wm;
3322 unsigned long guard_size; 3322 unsigned long guard_size;
3323 unsigned long cacheline_size; 3323 unsigned long cacheline_size;
3324 }; 3324 };
3325 3325
3326 /* Pineview has different values for various configs */ 3326 /* Pineview has different values for various configs */
3327 static const struct intel_watermark_params pineview_display_wm = { 3327 static const struct intel_watermark_params pineview_display_wm = {
3328 PINEVIEW_DISPLAY_FIFO, 3328 PINEVIEW_DISPLAY_FIFO,
3329 PINEVIEW_MAX_WM, 3329 PINEVIEW_MAX_WM,
3330 PINEVIEW_DFT_WM, 3330 PINEVIEW_DFT_WM,
3331 PINEVIEW_GUARD_WM, 3331 PINEVIEW_GUARD_WM,
3332 PINEVIEW_FIFO_LINE_SIZE 3332 PINEVIEW_FIFO_LINE_SIZE
3333 }; 3333 };
3334 static const struct intel_watermark_params pineview_display_hplloff_wm = { 3334 static const struct intel_watermark_params pineview_display_hplloff_wm = {
3335 PINEVIEW_DISPLAY_FIFO, 3335 PINEVIEW_DISPLAY_FIFO,
3336 PINEVIEW_MAX_WM, 3336 PINEVIEW_MAX_WM,
3337 PINEVIEW_DFT_HPLLOFF_WM, 3337 PINEVIEW_DFT_HPLLOFF_WM,
3338 PINEVIEW_GUARD_WM, 3338 PINEVIEW_GUARD_WM,
3339 PINEVIEW_FIFO_LINE_SIZE 3339 PINEVIEW_FIFO_LINE_SIZE
3340 }; 3340 };
3341 static const struct intel_watermark_params pineview_cursor_wm = { 3341 static const struct intel_watermark_params pineview_cursor_wm = {
3342 PINEVIEW_CURSOR_FIFO, 3342 PINEVIEW_CURSOR_FIFO,
3343 PINEVIEW_CURSOR_MAX_WM, 3343 PINEVIEW_CURSOR_MAX_WM,
3344 PINEVIEW_CURSOR_DFT_WM, 3344 PINEVIEW_CURSOR_DFT_WM,
3345 PINEVIEW_CURSOR_GUARD_WM, 3345 PINEVIEW_CURSOR_GUARD_WM,
3346 PINEVIEW_FIFO_LINE_SIZE, 3346 PINEVIEW_FIFO_LINE_SIZE,
3347 }; 3347 };
3348 static const struct intel_watermark_params pineview_cursor_hplloff_wm = { 3348 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3349 PINEVIEW_CURSOR_FIFO, 3349 PINEVIEW_CURSOR_FIFO,
3350 PINEVIEW_CURSOR_MAX_WM, 3350 PINEVIEW_CURSOR_MAX_WM,
3351 PINEVIEW_CURSOR_DFT_WM, 3351 PINEVIEW_CURSOR_DFT_WM,
3352 PINEVIEW_CURSOR_GUARD_WM, 3352 PINEVIEW_CURSOR_GUARD_WM,
3353 PINEVIEW_FIFO_LINE_SIZE 3353 PINEVIEW_FIFO_LINE_SIZE
3354 }; 3354 };
3355 static const struct intel_watermark_params g4x_wm_info = { 3355 static const struct intel_watermark_params g4x_wm_info = {
3356 G4X_FIFO_SIZE, 3356 G4X_FIFO_SIZE,
3357 G4X_MAX_WM, 3357 G4X_MAX_WM,
3358 G4X_MAX_WM, 3358 G4X_MAX_WM,
3359 2, 3359 2,
3360 G4X_FIFO_LINE_SIZE, 3360 G4X_FIFO_LINE_SIZE,
3361 }; 3361 };
3362 static const struct intel_watermark_params g4x_cursor_wm_info = { 3362 static const struct intel_watermark_params g4x_cursor_wm_info = {
3363 I965_CURSOR_FIFO, 3363 I965_CURSOR_FIFO,
3364 I965_CURSOR_MAX_WM, 3364 I965_CURSOR_MAX_WM,
3365 I965_CURSOR_DFT_WM, 3365 I965_CURSOR_DFT_WM,
3366 2, 3366 2,
3367 G4X_FIFO_LINE_SIZE, 3367 G4X_FIFO_LINE_SIZE,
3368 }; 3368 };
3369 static const struct intel_watermark_params i965_cursor_wm_info = { 3369 static const struct intel_watermark_params i965_cursor_wm_info = {
3370 I965_CURSOR_FIFO, 3370 I965_CURSOR_FIFO,
3371 I965_CURSOR_MAX_WM, 3371 I965_CURSOR_MAX_WM,
3372 I965_CURSOR_DFT_WM, 3372 I965_CURSOR_DFT_WM,
3373 2, 3373 2,
3374 I915_FIFO_LINE_SIZE, 3374 I915_FIFO_LINE_SIZE,
3375 }; 3375 };
3376 static const struct intel_watermark_params i945_wm_info = { 3376 static const struct intel_watermark_params i945_wm_info = {
3377 I945_FIFO_SIZE, 3377 I945_FIFO_SIZE,
3378 I915_MAX_WM, 3378 I915_MAX_WM,
3379 1, 3379 1,
3380 2, 3380 2,
3381 I915_FIFO_LINE_SIZE 3381 I915_FIFO_LINE_SIZE
3382 }; 3382 };
3383 static const struct intel_watermark_params i915_wm_info = { 3383 static const struct intel_watermark_params i915_wm_info = {
3384 I915_FIFO_SIZE, 3384 I915_FIFO_SIZE,
3385 I915_MAX_WM, 3385 I915_MAX_WM,
3386 1, 3386 1,
3387 2, 3387 2,
3388 I915_FIFO_LINE_SIZE 3388 I915_FIFO_LINE_SIZE
3389 }; 3389 };
3390 static const struct intel_watermark_params i855_wm_info = { 3390 static const struct intel_watermark_params i855_wm_info = {
3391 I855GM_FIFO_SIZE, 3391 I855GM_FIFO_SIZE,
3392 I915_MAX_WM, 3392 I915_MAX_WM,
3393 1, 3393 1,
3394 2, 3394 2,
3395 I830_FIFO_LINE_SIZE 3395 I830_FIFO_LINE_SIZE
3396 }; 3396 };
3397 static const struct intel_watermark_params i830_wm_info = { 3397 static const struct intel_watermark_params i830_wm_info = {
3398 I830_FIFO_SIZE, 3398 I830_FIFO_SIZE,
3399 I915_MAX_WM, 3399 I915_MAX_WM,
3400 1, 3400 1,
3401 2, 3401 2,
3402 I830_FIFO_LINE_SIZE 3402 I830_FIFO_LINE_SIZE
3403 }; 3403 };
3404 3404
3405 static const struct intel_watermark_params ironlake_display_wm_info = { 3405 static const struct intel_watermark_params ironlake_display_wm_info = {
3406 ILK_DISPLAY_FIFO, 3406 ILK_DISPLAY_FIFO,
3407 ILK_DISPLAY_MAXWM, 3407 ILK_DISPLAY_MAXWM,
3408 ILK_DISPLAY_DFTWM, 3408 ILK_DISPLAY_DFTWM,
3409 2, 3409 2,
3410 ILK_FIFO_LINE_SIZE 3410 ILK_FIFO_LINE_SIZE
3411 }; 3411 };
3412 static const struct intel_watermark_params ironlake_cursor_wm_info = { 3412 static const struct intel_watermark_params ironlake_cursor_wm_info = {
3413 ILK_CURSOR_FIFO, 3413 ILK_CURSOR_FIFO,
3414 ILK_CURSOR_MAXWM, 3414 ILK_CURSOR_MAXWM,
3415 ILK_CURSOR_DFTWM, 3415 ILK_CURSOR_DFTWM,
3416 2, 3416 2,
3417 ILK_FIFO_LINE_SIZE 3417 ILK_FIFO_LINE_SIZE
3418 }; 3418 };
3419 static const struct intel_watermark_params ironlake_display_srwm_info = { 3419 static const struct intel_watermark_params ironlake_display_srwm_info = {
3420 ILK_DISPLAY_SR_FIFO, 3420 ILK_DISPLAY_SR_FIFO,
3421 ILK_DISPLAY_MAX_SRWM, 3421 ILK_DISPLAY_MAX_SRWM,
3422 ILK_DISPLAY_DFT_SRWM, 3422 ILK_DISPLAY_DFT_SRWM,
3423 2, 3423 2,
3424 ILK_FIFO_LINE_SIZE 3424 ILK_FIFO_LINE_SIZE
3425 }; 3425 };
3426 static const struct intel_watermark_params ironlake_cursor_srwm_info = { 3426 static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3427 ILK_CURSOR_SR_FIFO, 3427 ILK_CURSOR_SR_FIFO,
3428 ILK_CURSOR_MAX_SRWM, 3428 ILK_CURSOR_MAX_SRWM,
3429 ILK_CURSOR_DFT_SRWM, 3429 ILK_CURSOR_DFT_SRWM,
3430 2, 3430 2,
3431 ILK_FIFO_LINE_SIZE 3431 ILK_FIFO_LINE_SIZE
3432 }; 3432 };
3433 3433
3434 static const struct intel_watermark_params sandybridge_display_wm_info = { 3434 static const struct intel_watermark_params sandybridge_display_wm_info = {
3435 SNB_DISPLAY_FIFO, 3435 SNB_DISPLAY_FIFO,
3436 SNB_DISPLAY_MAXWM, 3436 SNB_DISPLAY_MAXWM,
3437 SNB_DISPLAY_DFTWM, 3437 SNB_DISPLAY_DFTWM,
3438 2, 3438 2,
3439 SNB_FIFO_LINE_SIZE 3439 SNB_FIFO_LINE_SIZE
3440 }; 3440 };
3441 static const struct intel_watermark_params sandybridge_cursor_wm_info = { 3441 static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3442 SNB_CURSOR_FIFO, 3442 SNB_CURSOR_FIFO,
3443 SNB_CURSOR_MAXWM, 3443 SNB_CURSOR_MAXWM,
3444 SNB_CURSOR_DFTWM, 3444 SNB_CURSOR_DFTWM,
3445 2, 3445 2,
3446 SNB_FIFO_LINE_SIZE 3446 SNB_FIFO_LINE_SIZE
3447 }; 3447 };
3448 static const struct intel_watermark_params sandybridge_display_srwm_info = { 3448 static const struct intel_watermark_params sandybridge_display_srwm_info = {
3449 SNB_DISPLAY_SR_FIFO, 3449 SNB_DISPLAY_SR_FIFO,
3450 SNB_DISPLAY_MAX_SRWM, 3450 SNB_DISPLAY_MAX_SRWM,
3451 SNB_DISPLAY_DFT_SRWM, 3451 SNB_DISPLAY_DFT_SRWM,
3452 2, 3452 2,
3453 SNB_FIFO_LINE_SIZE 3453 SNB_FIFO_LINE_SIZE
3454 }; 3454 };
3455 static const struct intel_watermark_params sandybridge_cursor_srwm_info = { 3455 static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3456 SNB_CURSOR_SR_FIFO, 3456 SNB_CURSOR_SR_FIFO,
3457 SNB_CURSOR_MAX_SRWM, 3457 SNB_CURSOR_MAX_SRWM,
3458 SNB_CURSOR_DFT_SRWM, 3458 SNB_CURSOR_DFT_SRWM,
3459 2, 3459 2,
3460 SNB_FIFO_LINE_SIZE 3460 SNB_FIFO_LINE_SIZE
3461 }; 3461 };
3462 3462
3463 3463
3464 /** 3464 /**
3465 * intel_calculate_wm - calculate watermark level 3465 * intel_calculate_wm - calculate watermark level
3466 * @clock_in_khz: pixel clock 3466 * @clock_in_khz: pixel clock
3467 * @wm: chip FIFO params 3467 * @wm: chip FIFO params
3468 * @pixel_size: display pixel size 3468 * @pixel_size: display pixel size
3469 * @latency_ns: memory latency for the platform 3469 * @latency_ns: memory latency for the platform
3470 * 3470 *
3471 * Calculate the watermark level (the level at which the display plane will 3471 * Calculate the watermark level (the level at which the display plane will
3472 * start fetching from memory again). Each chip has a different display 3472 * start fetching from memory again). Each chip has a different display
3473 * FIFO size and allocation, so the caller needs to figure that out and pass 3473 * FIFO size and allocation, so the caller needs to figure that out and pass
3474 * in the correct intel_watermark_params structure. 3474 * in the correct intel_watermark_params structure.
3475 * 3475 *
3476 * As the pixel clock runs, the FIFO will be drained at a rate that depends 3476 * As the pixel clock runs, the FIFO will be drained at a rate that depends
3477 * on the pixel size. When it reaches the watermark level, it'll start 3477 * on the pixel size. When it reaches the watermark level, it'll start
3478 * fetching FIFO line sized based chunks from memory until the FIFO fills 3478 * fetching FIFO line sized based chunks from memory until the FIFO fills
3479 * past the watermark point. If the FIFO drains completely, a FIFO underrun 3479 * past the watermark point. If the FIFO drains completely, a FIFO underrun
3480 * will occur, and a display engine hang could result. 3480 * will occur, and a display engine hang could result.
3481 */ 3481 */
3482 static unsigned long intel_calculate_wm(unsigned long clock_in_khz, 3482 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3483 const struct intel_watermark_params *wm, 3483 const struct intel_watermark_params *wm,
3484 int fifo_size, 3484 int fifo_size,
3485 int pixel_size, 3485 int pixel_size,
3486 unsigned long latency_ns) 3486 unsigned long latency_ns)
3487 { 3487 {
3488 long entries_required, wm_size; 3488 long entries_required, wm_size;
3489 3489
3490 /* 3490 /*
3491 * Note: we need to make sure we don't overflow for various clock & 3491 * Note: we need to make sure we don't overflow for various clock &
3492 * latency values. 3492 * latency values.
3493 * clocks go from a few thousand to several hundred thousand. 3493 * clocks go from a few thousand to several hundred thousand.
3494 * latency is usually a few thousand 3494 * latency is usually a few thousand
3495 */ 3495 */
3496 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / 3496 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3497 1000; 3497 1000;
3498 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); 3498 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
3499 3499
3500 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); 3500 DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3501 3501
3502 wm_size = fifo_size - (entries_required + wm->guard_size); 3502 wm_size = fifo_size - (entries_required + wm->guard_size);
3503 3503
3504 DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); 3504 DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3505 3505
3506 /* Don't promote wm_size to unsigned... */ 3506 /* Don't promote wm_size to unsigned... */
3507 if (wm_size > (long)wm->max_wm) 3507 if (wm_size > (long)wm->max_wm)
3508 wm_size = wm->max_wm; 3508 wm_size = wm->max_wm;
3509 if (wm_size <= 0) 3509 if (wm_size <= 0)
3510 wm_size = wm->default_wm; 3510 wm_size = wm->default_wm;
3511 return wm_size; 3511 return wm_size;
3512 } 3512 }
3513 3513
3514 struct cxsr_latency { 3514 struct cxsr_latency {
3515 int is_desktop; 3515 int is_desktop;
3516 int is_ddr3; 3516 int is_ddr3;
3517 unsigned long fsb_freq; 3517 unsigned long fsb_freq;
3518 unsigned long mem_freq; 3518 unsigned long mem_freq;
3519 unsigned long display_sr; 3519 unsigned long display_sr;
3520 unsigned long display_hpll_disable; 3520 unsigned long display_hpll_disable;
3521 unsigned long cursor_sr; 3521 unsigned long cursor_sr;
3522 unsigned long cursor_hpll_disable; 3522 unsigned long cursor_hpll_disable;
3523 }; 3523 };
3524 3524
3525 static const struct cxsr_latency cxsr_latency_table[] = { 3525 static const struct cxsr_latency cxsr_latency_table[] = {
3526 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ 3526 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
3527 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ 3527 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
3528 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ 3528 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
3529 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ 3529 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
3530 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ 3530 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
3531 3531
3532 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ 3532 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
3533 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ 3533 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
3534 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ 3534 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
3535 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ 3535 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
3536 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ 3536 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
3537 3537
3538 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ 3538 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
3539 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ 3539 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
3540 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ 3540 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
3541 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ 3541 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
3542 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ 3542 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
3543 3543
3544 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ 3544 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
3545 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ 3545 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
3546 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ 3546 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
3547 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ 3547 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
3548 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ 3548 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
3549 3549
3550 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ 3550 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
3551 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ 3551 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
3552 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ 3552 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
3553 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ 3553 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
3554 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ 3554 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
3555 3555
3556 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ 3556 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
3557 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ 3557 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
3558 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ 3558 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
3559 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ 3559 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
3560 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ 3560 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
3561 }; 3561 };
3562 3562
3563 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, 3563 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3564 int is_ddr3, 3564 int is_ddr3,
3565 int fsb, 3565 int fsb,
3566 int mem) 3566 int mem)
3567 { 3567 {
3568 const struct cxsr_latency *latency; 3568 const struct cxsr_latency *latency;
3569 int i; 3569 int i;
3570 3570
3571 if (fsb == 0 || mem == 0) 3571 if (fsb == 0 || mem == 0)
3572 return NULL; 3572 return NULL;
3573 3573
3574 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { 3574 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3575 latency = &cxsr_latency_table[i]; 3575 latency = &cxsr_latency_table[i];
3576 if (is_desktop == latency->is_desktop && 3576 if (is_desktop == latency->is_desktop &&
3577 is_ddr3 == latency->is_ddr3 && 3577 is_ddr3 == latency->is_ddr3 &&
3578 fsb == latency->fsb_freq && mem == latency->mem_freq) 3578 fsb == latency->fsb_freq && mem == latency->mem_freq)
3579 return latency; 3579 return latency;
3580 } 3580 }
3581 3581
3582 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); 3582 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3583 3583
3584 return NULL; 3584 return NULL;
3585 } 3585 }
3586 3586
3587 static void pineview_disable_cxsr(struct drm_device *dev) 3587 static void pineview_disable_cxsr(struct drm_device *dev)
3588 { 3588 {
3589 struct drm_i915_private *dev_priv = dev->dev_private; 3589 struct drm_i915_private *dev_priv = dev->dev_private;
3590 3590
3591 /* deactivate cxsr */ 3591 /* deactivate cxsr */
3592 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN); 3592 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
3593 } 3593 }
3594 3594
3595 /* 3595 /*
3596 * Latency for FIFO fetches is dependent on several factors: 3596 * Latency for FIFO fetches is dependent on several factors:
3597 * - memory configuration (speed, channels) 3597 * - memory configuration (speed, channels)
3598 * - chipset 3598 * - chipset
3599 * - current MCH state 3599 * - current MCH state
3600 * It can be fairly high in some situations, so here we assume a fairly 3600 * It can be fairly high in some situations, so here we assume a fairly
3601 * pessimal value. It's a tradeoff between extra memory fetches (if we 3601 * pessimal value. It's a tradeoff between extra memory fetches (if we
3602 * set this value too high, the FIFO will fetch frequently to stay full) 3602 * set this value too high, the FIFO will fetch frequently to stay full)
3603 * and power consumption (set it too low to save power and we might see 3603 * and power consumption (set it too low to save power and we might see
3604 * FIFO underruns and display "flicker"). 3604 * FIFO underruns and display "flicker").
3605 * 3605 *
3606 * A value of 5us seems to be a good balance; safe for very low end 3606 * A value of 5us seems to be a good balance; safe for very low end
3607 * platforms but not overly aggressive on lower latency configs. 3607 * platforms but not overly aggressive on lower latency configs.
3608 */ 3608 */
3609 static const int latency_ns = 5000; 3609 static const int latency_ns = 5000;
3610 3610
3611 static int i9xx_get_fifo_size(struct drm_device *dev, int plane) 3611 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3612 { 3612 {
3613 struct drm_i915_private *dev_priv = dev->dev_private; 3613 struct drm_i915_private *dev_priv = dev->dev_private;
3614 uint32_t dsparb = I915_READ(DSPARB); 3614 uint32_t dsparb = I915_READ(DSPARB);
3615 int size; 3615 int size;
3616 3616
3617 size = dsparb & 0x7f; 3617 size = dsparb & 0x7f;
3618 if (plane) 3618 if (plane)
3619 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; 3619 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3620 3620
3621 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 3621 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3622 plane ? "B" : "A", size); 3622 plane ? "B" : "A", size);
3623 3623
3624 return size; 3624 return size;
3625 } 3625 }
3626 3626
3627 static int i85x_get_fifo_size(struct drm_device *dev, int plane) 3627 static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3628 { 3628 {
3629 struct drm_i915_private *dev_priv = dev->dev_private; 3629 struct drm_i915_private *dev_priv = dev->dev_private;
3630 uint32_t dsparb = I915_READ(DSPARB); 3630 uint32_t dsparb = I915_READ(DSPARB);
3631 int size; 3631 int size;
3632 3632
3633 size = dsparb & 0x1ff; 3633 size = dsparb & 0x1ff;
3634 if (plane) 3634 if (plane)
3635 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; 3635 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3636 size >>= 1; /* Convert to cachelines */ 3636 size >>= 1; /* Convert to cachelines */
3637 3637
3638 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 3638 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3639 plane ? "B" : "A", size); 3639 plane ? "B" : "A", size);
3640 3640
3641 return size; 3641 return size;
3642 } 3642 }
3643 3643
3644 static int i845_get_fifo_size(struct drm_device *dev, int plane) 3644 static int i845_get_fifo_size(struct drm_device *dev, int plane)
3645 { 3645 {
3646 struct drm_i915_private *dev_priv = dev->dev_private; 3646 struct drm_i915_private *dev_priv = dev->dev_private;
3647 uint32_t dsparb = I915_READ(DSPARB); 3647 uint32_t dsparb = I915_READ(DSPARB);
3648 int size; 3648 int size;
3649 3649
3650 size = dsparb & 0x7f; 3650 size = dsparb & 0x7f;
3651 size >>= 2; /* Convert to cachelines */ 3651 size >>= 2; /* Convert to cachelines */
3652 3652
3653 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 3653 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3654 plane ? "B" : "A", 3654 plane ? "B" : "A",
3655 size); 3655 size);
3656 3656
3657 return size; 3657 return size;
3658 } 3658 }
3659 3659
3660 static int i830_get_fifo_size(struct drm_device *dev, int plane) 3660 static int i830_get_fifo_size(struct drm_device *dev, int plane)
3661 { 3661 {
3662 struct drm_i915_private *dev_priv = dev->dev_private; 3662 struct drm_i915_private *dev_priv = dev->dev_private;
3663 uint32_t dsparb = I915_READ(DSPARB); 3663 uint32_t dsparb = I915_READ(DSPARB);
3664 int size; 3664 int size;
3665 3665
3666 size = dsparb & 0x7f; 3666 size = dsparb & 0x7f;
3667 size >>= 1; /* Convert to cachelines */ 3667 size >>= 1; /* Convert to cachelines */
3668 3668
3669 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 3669 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3670 plane ? "B" : "A", size); 3670 plane ? "B" : "A", size);
3671 3671
3672 return size; 3672 return size;
3673 } 3673 }
3674 3674
3675 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) 3675 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3676 { 3676 {
3677 struct drm_crtc *crtc, *enabled = NULL; 3677 struct drm_crtc *crtc, *enabled = NULL;
3678 3678
3679 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 3679 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3680 if (crtc->enabled && crtc->fb) { 3680 if (crtc->enabled && crtc->fb) {
3681 if (enabled) 3681 if (enabled)
3682 return NULL; 3682 return NULL;
3683 enabled = crtc; 3683 enabled = crtc;
3684 } 3684 }
3685 } 3685 }
3686 3686
3687 return enabled; 3687 return enabled;
3688 } 3688 }
3689 3689
3690 static void pineview_update_wm(struct drm_device *dev) 3690 static void pineview_update_wm(struct drm_device *dev)
3691 { 3691 {
3692 struct drm_i915_private *dev_priv = dev->dev_private; 3692 struct drm_i915_private *dev_priv = dev->dev_private;
3693 struct drm_crtc *crtc; 3693 struct drm_crtc *crtc;
3694 const struct cxsr_latency *latency; 3694 const struct cxsr_latency *latency;
3695 u32 reg; 3695 u32 reg;
3696 unsigned long wm; 3696 unsigned long wm;
3697 3697
3698 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, 3698 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
3699 dev_priv->fsb_freq, dev_priv->mem_freq); 3699 dev_priv->fsb_freq, dev_priv->mem_freq);
3700 if (!latency) { 3700 if (!latency) {
3701 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); 3701 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3702 pineview_disable_cxsr(dev); 3702 pineview_disable_cxsr(dev);
3703 return; 3703 return;
3704 } 3704 }
3705 3705
3706 crtc = single_enabled_crtc(dev); 3706 crtc = single_enabled_crtc(dev);
3707 if (crtc) { 3707 if (crtc) {
3708 int clock = crtc->mode.clock; 3708 int clock = crtc->mode.clock;
3709 int pixel_size = crtc->fb->bits_per_pixel / 8; 3709 int pixel_size = crtc->fb->bits_per_pixel / 8;
3710 3710
3711 /* Display SR */ 3711 /* Display SR */
3712 wm = intel_calculate_wm(clock, &pineview_display_wm, 3712 wm = intel_calculate_wm(clock, &pineview_display_wm,
3713 pineview_display_wm.fifo_size, 3713 pineview_display_wm.fifo_size,
3714 pixel_size, latency->display_sr); 3714 pixel_size, latency->display_sr);
3715 reg = I915_READ(DSPFW1); 3715 reg = I915_READ(DSPFW1);
3716 reg &= ~DSPFW_SR_MASK; 3716 reg &= ~DSPFW_SR_MASK;
3717 reg |= wm << DSPFW_SR_SHIFT; 3717 reg |= wm << DSPFW_SR_SHIFT;
3718 I915_WRITE(DSPFW1, reg); 3718 I915_WRITE(DSPFW1, reg);
3719 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); 3719 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
3720 3720
3721 /* cursor SR */ 3721 /* cursor SR */
3722 wm = intel_calculate_wm(clock, &pineview_cursor_wm, 3722 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
3723 pineview_display_wm.fifo_size, 3723 pineview_display_wm.fifo_size,
3724 pixel_size, latency->cursor_sr); 3724 pixel_size, latency->cursor_sr);
3725 reg = I915_READ(DSPFW3); 3725 reg = I915_READ(DSPFW3);
3726 reg &= ~DSPFW_CURSOR_SR_MASK; 3726 reg &= ~DSPFW_CURSOR_SR_MASK;
3727 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; 3727 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
3728 I915_WRITE(DSPFW3, reg); 3728 I915_WRITE(DSPFW3, reg);
3729 3729
3730 /* Display HPLL off SR */ 3730 /* Display HPLL off SR */
3731 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, 3731 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
3732 pineview_display_hplloff_wm.fifo_size, 3732 pineview_display_hplloff_wm.fifo_size,
3733 pixel_size, latency->display_hpll_disable); 3733 pixel_size, latency->display_hpll_disable);
3734 reg = I915_READ(DSPFW3); 3734 reg = I915_READ(DSPFW3);
3735 reg &= ~DSPFW_HPLL_SR_MASK; 3735 reg &= ~DSPFW_HPLL_SR_MASK;
3736 reg |= wm & DSPFW_HPLL_SR_MASK; 3736 reg |= wm & DSPFW_HPLL_SR_MASK;
3737 I915_WRITE(DSPFW3, reg); 3737 I915_WRITE(DSPFW3, reg);
3738 3738
3739 /* cursor HPLL off SR */ 3739 /* cursor HPLL off SR */
3740 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, 3740 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
3741 pineview_display_hplloff_wm.fifo_size, 3741 pineview_display_hplloff_wm.fifo_size,
3742 pixel_size, latency->cursor_hpll_disable); 3742 pixel_size, latency->cursor_hpll_disable);
3743 reg = I915_READ(DSPFW3); 3743 reg = I915_READ(DSPFW3);
3744 reg &= ~DSPFW_HPLL_CURSOR_MASK; 3744 reg &= ~DSPFW_HPLL_CURSOR_MASK;
3745 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; 3745 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
3746 I915_WRITE(DSPFW3, reg); 3746 I915_WRITE(DSPFW3, reg);
3747 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); 3747 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
3748 3748
3749 /* activate cxsr */ 3749 /* activate cxsr */
3750 I915_WRITE(DSPFW3, 3750 I915_WRITE(DSPFW3,
3751 I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN); 3751 I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
3752 DRM_DEBUG_KMS("Self-refresh is enabled\n"); 3752 DRM_DEBUG_KMS("Self-refresh is enabled\n");
3753 } else { 3753 } else {
3754 pineview_disable_cxsr(dev); 3754 pineview_disable_cxsr(dev);
3755 DRM_DEBUG_KMS("Self-refresh is disabled\n"); 3755 DRM_DEBUG_KMS("Self-refresh is disabled\n");
3756 } 3756 }
3757 } 3757 }
3758 3758
3759 static bool g4x_compute_wm0(struct drm_device *dev, 3759 static bool g4x_compute_wm0(struct drm_device *dev,
3760 int plane, 3760 int plane,
3761 const struct intel_watermark_params *display, 3761 const struct intel_watermark_params *display,
3762 int display_latency_ns, 3762 int display_latency_ns,
3763 const struct intel_watermark_params *cursor, 3763 const struct intel_watermark_params *cursor,
3764 int cursor_latency_ns, 3764 int cursor_latency_ns,
3765 int *plane_wm, 3765 int *plane_wm,
3766 int *cursor_wm) 3766 int *cursor_wm)
3767 { 3767 {
3768 struct drm_crtc *crtc; 3768 struct drm_crtc *crtc;
3769 int htotal, hdisplay, clock, pixel_size; 3769 int htotal, hdisplay, clock, pixel_size;
3770 int line_time_us, line_count; 3770 int line_time_us, line_count;
3771 int entries, tlb_miss; 3771 int entries, tlb_miss;
3772 3772
3773 crtc = intel_get_crtc_for_plane(dev, plane); 3773 crtc = intel_get_crtc_for_plane(dev, plane);
3774 if (crtc->fb == NULL || !crtc->enabled) { 3774 if (crtc->fb == NULL || !crtc->enabled) {
3775 *cursor_wm = cursor->guard_size; 3775 *cursor_wm = cursor->guard_size;
3776 *plane_wm = display->guard_size; 3776 *plane_wm = display->guard_size;
3777 return false; 3777 return false;
3778 } 3778 }
3779 3779
3780 htotal = crtc->mode.htotal; 3780 htotal = crtc->mode.htotal;
3781 hdisplay = crtc->mode.hdisplay; 3781 hdisplay = crtc->mode.hdisplay;
3782 clock = crtc->mode.clock; 3782 clock = crtc->mode.clock;
3783 pixel_size = crtc->fb->bits_per_pixel / 8; 3783 pixel_size = crtc->fb->bits_per_pixel / 8;
3784 3784
3785 /* Use the small buffer method to calculate plane watermark */ 3785 /* Use the small buffer method to calculate plane watermark */
3786 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; 3786 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
3787 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; 3787 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
3788 if (tlb_miss > 0) 3788 if (tlb_miss > 0)
3789 entries += tlb_miss; 3789 entries += tlb_miss;
3790 entries = DIV_ROUND_UP(entries, display->cacheline_size); 3790 entries = DIV_ROUND_UP(entries, display->cacheline_size);
3791 *plane_wm = entries + display->guard_size; 3791 *plane_wm = entries + display->guard_size;
3792 if (*plane_wm > (int)display->max_wm) 3792 if (*plane_wm > (int)display->max_wm)
3793 *plane_wm = display->max_wm; 3793 *plane_wm = display->max_wm;
3794 3794
3795 /* Use the large buffer method to calculate cursor watermark */ 3795 /* Use the large buffer method to calculate cursor watermark */
3796 line_time_us = ((htotal * 1000) / clock); 3796 line_time_us = ((htotal * 1000) / clock);
3797 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; 3797 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
3798 entries = line_count * 64 * pixel_size; 3798 entries = line_count * 64 * pixel_size;
3799 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; 3799 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
3800 if (tlb_miss > 0) 3800 if (tlb_miss > 0)
3801 entries += tlb_miss; 3801 entries += tlb_miss;
3802 entries = DIV_ROUND_UP(entries, cursor->cacheline_size); 3802 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
3803 *cursor_wm = entries + cursor->guard_size; 3803 *cursor_wm = entries + cursor->guard_size;
3804 if (*cursor_wm > (int)cursor->max_wm) 3804 if (*cursor_wm > (int)cursor->max_wm)
3805 *cursor_wm = (int)cursor->max_wm; 3805 *cursor_wm = (int)cursor->max_wm;
3806 3806
3807 return true; 3807 return true;
3808 } 3808 }
3809 3809
3810 /* 3810 /*
3811 * Check the wm result. 3811 * Check the wm result.
3812 * 3812 *
3813 * If any calculated watermark values is larger than the maximum value that 3813 * If any calculated watermark values is larger than the maximum value that
3814 * can be programmed into the associated watermark register, that watermark 3814 * can be programmed into the associated watermark register, that watermark
3815 * must be disabled. 3815 * must be disabled.
3816 */ 3816 */
3817 static bool g4x_check_srwm(struct drm_device *dev, 3817 static bool g4x_check_srwm(struct drm_device *dev,
3818 int display_wm, int cursor_wm, 3818 int display_wm, int cursor_wm,
3819 const struct intel_watermark_params *display, 3819 const struct intel_watermark_params *display,
3820 const struct intel_watermark_params *cursor) 3820 const struct intel_watermark_params *cursor)
3821 { 3821 {
3822 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", 3822 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
3823 display_wm, cursor_wm); 3823 display_wm, cursor_wm);
3824 3824
3825 if (display_wm > display->max_wm) { 3825 if (display_wm > display->max_wm) {
3826 DRM_DEBUG_KMS("display watermark is too large(%d), disabling\n", 3826 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
3827 display_wm, display->max_wm); 3827 display_wm, display->max_wm);
3828 return false; 3828 return false;
3829 } 3829 }
3830 3830
3831 if (cursor_wm > cursor->max_wm) { 3831 if (cursor_wm > cursor->max_wm) {
3832 DRM_DEBUG_KMS("cursor watermark is too large(%d), disabling\n", 3832 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
3833 cursor_wm, cursor->max_wm); 3833 cursor_wm, cursor->max_wm);
3834 return false; 3834 return false;
3835 } 3835 }
3836 3836
3837 if (!(display_wm || cursor_wm)) { 3837 if (!(display_wm || cursor_wm)) {
3838 DRM_DEBUG_KMS("SR latency is 0, disabling\n"); 3838 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
3839 return false; 3839 return false;
3840 } 3840 }
3841 3841
3842 return true; 3842 return true;
3843 } 3843 }
3844 3844
3845 static bool g4x_compute_srwm(struct drm_device *dev, 3845 static bool g4x_compute_srwm(struct drm_device *dev,
3846 int plane, 3846 int plane,
3847 int latency_ns, 3847 int latency_ns,
3848 const struct intel_watermark_params *display, 3848 const struct intel_watermark_params *display,
3849 const struct intel_watermark_params *cursor, 3849 const struct intel_watermark_params *cursor,
3850 int *display_wm, int *cursor_wm) 3850 int *display_wm, int *cursor_wm)
3851 { 3851 {
3852 struct drm_crtc *crtc; 3852 struct drm_crtc *crtc;
3853 int hdisplay, htotal, pixel_size, clock; 3853 int hdisplay, htotal, pixel_size, clock;
3854 unsigned long line_time_us; 3854 unsigned long line_time_us;
3855 int line_count, line_size; 3855 int line_count, line_size;
3856 int small, large; 3856 int small, large;
3857 int entries; 3857 int entries;
3858 3858
3859 if (!latency_ns) { 3859 if (!latency_ns) {
3860 *display_wm = *cursor_wm = 0; 3860 *display_wm = *cursor_wm = 0;
3861 return false; 3861 return false;
3862 } 3862 }
3863 3863
3864 crtc = intel_get_crtc_for_plane(dev, plane); 3864 crtc = intel_get_crtc_for_plane(dev, plane);
3865 hdisplay = crtc->mode.hdisplay; 3865 hdisplay = crtc->mode.hdisplay;
3866 htotal = crtc->mode.htotal; 3866 htotal = crtc->mode.htotal;
3867 clock = crtc->mode.clock; 3867 clock = crtc->mode.clock;
3868 pixel_size = crtc->fb->bits_per_pixel / 8; 3868 pixel_size = crtc->fb->bits_per_pixel / 8;
3869 3869
3870 line_time_us = (htotal * 1000) / clock; 3870 line_time_us = (htotal * 1000) / clock;
3871 line_count = (latency_ns / line_time_us + 1000) / 1000; 3871 line_count = (latency_ns / line_time_us + 1000) / 1000;
3872 line_size = hdisplay * pixel_size; 3872 line_size = hdisplay * pixel_size;
3873 3873
3874 /* Use the minimum of the small and large buffer method for primary */ 3874 /* Use the minimum of the small and large buffer method for primary */
3875 small = ((clock * pixel_size / 1000) * latency_ns) / 1000; 3875 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
3876 large = line_count * line_size; 3876 large = line_count * line_size;
3877 3877
3878 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); 3878 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
3879 *display_wm = entries + display->guard_size; 3879 *display_wm = entries + display->guard_size;
3880 3880
3881 /* calculate the self-refresh watermark for display cursor */ 3881 /* calculate the self-refresh watermark for display cursor */
3882 entries = line_count * pixel_size * 64; 3882 entries = line_count * pixel_size * 64;
3883 entries = DIV_ROUND_UP(entries, cursor->cacheline_size); 3883 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
3884 *cursor_wm = entries + cursor->guard_size; 3884 *cursor_wm = entries + cursor->guard_size;
3885 3885
3886 return g4x_check_srwm(dev, 3886 return g4x_check_srwm(dev,
3887 *display_wm, *cursor_wm, 3887 *display_wm, *cursor_wm,
3888 display, cursor); 3888 display, cursor);
3889 } 3889 }
3890 3890
3891 #define single_plane_enabled(mask) is_power_of_2(mask) 3891 #define single_plane_enabled(mask) is_power_of_2(mask)
3892 3892
3893 static void g4x_update_wm(struct drm_device *dev) 3893 static void g4x_update_wm(struct drm_device *dev)
3894 { 3894 {
3895 static const int sr_latency_ns = 12000; 3895 static const int sr_latency_ns = 12000;
3896 struct drm_i915_private *dev_priv = dev->dev_private; 3896 struct drm_i915_private *dev_priv = dev->dev_private;
3897 int planea_wm, planeb_wm, cursora_wm, cursorb_wm; 3897 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
3898 int plane_sr, cursor_sr; 3898 int plane_sr, cursor_sr;
3899 unsigned int enabled = 0; 3899 unsigned int enabled = 0;
3900 3900
3901 if (g4x_compute_wm0(dev, 0, 3901 if (g4x_compute_wm0(dev, 0,
3902 &g4x_wm_info, latency_ns, 3902 &g4x_wm_info, latency_ns,
3903 &g4x_cursor_wm_info, latency_ns, 3903 &g4x_cursor_wm_info, latency_ns,
3904 &planea_wm, &cursora_wm)) 3904 &planea_wm, &cursora_wm))
3905 enabled |= 1; 3905 enabled |= 1;
3906 3906
3907 if (g4x_compute_wm0(dev, 1, 3907 if (g4x_compute_wm0(dev, 1,
3908 &g4x_wm_info, latency_ns, 3908 &g4x_wm_info, latency_ns,
3909 &g4x_cursor_wm_info, latency_ns, 3909 &g4x_cursor_wm_info, latency_ns,
3910 &planeb_wm, &cursorb_wm)) 3910 &planeb_wm, &cursorb_wm))
3911 enabled |= 2; 3911 enabled |= 2;
3912 3912
3913 plane_sr = cursor_sr = 0; 3913 plane_sr = cursor_sr = 0;
3914 if (single_plane_enabled(enabled) && 3914 if (single_plane_enabled(enabled) &&
3915 g4x_compute_srwm(dev, ffs(enabled) - 1, 3915 g4x_compute_srwm(dev, ffs(enabled) - 1,
3916 sr_latency_ns, 3916 sr_latency_ns,
3917 &g4x_wm_info, 3917 &g4x_wm_info,
3918 &g4x_cursor_wm_info, 3918 &g4x_cursor_wm_info,
3919 &plane_sr, &cursor_sr)) 3919 &plane_sr, &cursor_sr))
3920 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 3920 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
3921 else 3921 else
3922 I915_WRITE(FW_BLC_SELF, 3922 I915_WRITE(FW_BLC_SELF,
3923 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); 3923 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
3924 3924
3925 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", 3925 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
3926 planea_wm, cursora_wm, 3926 planea_wm, cursora_wm,
3927 planeb_wm, cursorb_wm, 3927 planeb_wm, cursorb_wm,
3928 plane_sr, cursor_sr); 3928 plane_sr, cursor_sr);
3929 3929
3930 I915_WRITE(DSPFW1, 3930 I915_WRITE(DSPFW1,
3931 (plane_sr << DSPFW_SR_SHIFT) | 3931 (plane_sr << DSPFW_SR_SHIFT) |
3932 (cursorb_wm << DSPFW_CURSORB_SHIFT) | 3932 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
3933 (planeb_wm << DSPFW_PLANEB_SHIFT) | 3933 (planeb_wm << DSPFW_PLANEB_SHIFT) |
3934 planea_wm); 3934 planea_wm);
3935 I915_WRITE(DSPFW2, 3935 I915_WRITE(DSPFW2,
3936 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | 3936 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
3937 (cursora_wm << DSPFW_CURSORA_SHIFT)); 3937 (cursora_wm << DSPFW_CURSORA_SHIFT));
3938 /* HPLL off in SR has some issues on G4x... disable it */ 3938 /* HPLL off in SR has some issues on G4x... disable it */
3939 I915_WRITE(DSPFW3, 3939 I915_WRITE(DSPFW3,
3940 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | 3940 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
3941 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 3941 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
3942 } 3942 }
3943 3943
3944 static void i965_update_wm(struct drm_device *dev) 3944 static void i965_update_wm(struct drm_device *dev)
3945 { 3945 {
3946 struct drm_i915_private *dev_priv = dev->dev_private; 3946 struct drm_i915_private *dev_priv = dev->dev_private;
3947 struct drm_crtc *crtc; 3947 struct drm_crtc *crtc;
3948 int srwm = 1; 3948 int srwm = 1;
3949 int cursor_sr = 16; 3949 int cursor_sr = 16;
3950 3950
3951 /* Calc sr entries for one plane configs */ 3951 /* Calc sr entries for one plane configs */
3952 crtc = single_enabled_crtc(dev); 3952 crtc = single_enabled_crtc(dev);
3953 if (crtc) { 3953 if (crtc) {
3954 /* self-refresh has much higher latency */ 3954 /* self-refresh has much higher latency */
3955 static const int sr_latency_ns = 12000; 3955 static const int sr_latency_ns = 12000;
3956 int clock = crtc->mode.clock; 3956 int clock = crtc->mode.clock;
3957 int htotal = crtc->mode.htotal; 3957 int htotal = crtc->mode.htotal;
3958 int hdisplay = crtc->mode.hdisplay; 3958 int hdisplay = crtc->mode.hdisplay;
3959 int pixel_size = crtc->fb->bits_per_pixel / 8; 3959 int pixel_size = crtc->fb->bits_per_pixel / 8;
3960 unsigned long line_time_us; 3960 unsigned long line_time_us;
3961 int entries; 3961 int entries;
3962 3962
3963 line_time_us = ((htotal * 1000) / clock); 3963 line_time_us = ((htotal * 1000) / clock);
3964 3964
3965 /* Use ns/us then divide to preserve precision */ 3965 /* Use ns/us then divide to preserve precision */
3966 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 3966 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
3967 pixel_size * hdisplay; 3967 pixel_size * hdisplay;
3968 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); 3968 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
3969 srwm = I965_FIFO_SIZE - entries; 3969 srwm = I965_FIFO_SIZE - entries;
3970 if (srwm < 0) 3970 if (srwm < 0)
3971 srwm = 1; 3971 srwm = 1;
3972 srwm &= 0x1ff; 3972 srwm &= 0x1ff;
3973 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", 3973 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
3974 entries, srwm); 3974 entries, srwm);
3975 3975
3976 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 3976 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
3977 pixel_size * 64; 3977 pixel_size * 64;
3978 entries = DIV_ROUND_UP(entries, 3978 entries = DIV_ROUND_UP(entries,
3979 i965_cursor_wm_info.cacheline_size); 3979 i965_cursor_wm_info.cacheline_size);
3980 cursor_sr = i965_cursor_wm_info.fifo_size - 3980 cursor_sr = i965_cursor_wm_info.fifo_size -
3981 (entries + i965_cursor_wm_info.guard_size); 3981 (entries + i965_cursor_wm_info.guard_size);
3982 3982
3983 if (cursor_sr > i965_cursor_wm_info.max_wm) 3983 if (cursor_sr > i965_cursor_wm_info.max_wm)
3984 cursor_sr = i965_cursor_wm_info.max_wm; 3984 cursor_sr = i965_cursor_wm_info.max_wm;
3985 3985
3986 DRM_DEBUG_KMS("self-refresh watermark: display plane %d " 3986 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
3987 "cursor %d\n", srwm, cursor_sr); 3987 "cursor %d\n", srwm, cursor_sr);
3988 3988
3989 if (IS_CRESTLINE(dev)) 3989 if (IS_CRESTLINE(dev))
3990 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 3990 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
3991 } else { 3991 } else {
3992 /* Turn off self refresh if both pipes are enabled */ 3992 /* Turn off self refresh if both pipes are enabled */
3993 if (IS_CRESTLINE(dev)) 3993 if (IS_CRESTLINE(dev))
3994 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 3994 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
3995 & ~FW_BLC_SELF_EN); 3995 & ~FW_BLC_SELF_EN);
3996 } 3996 }
3997 3997
3998 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 3998 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
3999 srwm); 3999 srwm);
4000 4000
4001 /* 965 has limitations... */ 4001 /* 965 has limitations... */
4002 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | 4002 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
4003 (8 << 16) | (8 << 8) | (8 << 0)); 4003 (8 << 16) | (8 << 8) | (8 << 0));
4004 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); 4004 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
4005 /* update cursor SR watermark */ 4005 /* update cursor SR watermark */
4006 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 4006 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4007 } 4007 }
4008 4008
4009 static void i9xx_update_wm(struct drm_device *dev) 4009 static void i9xx_update_wm(struct drm_device *dev)
4010 { 4010 {
4011 struct drm_i915_private *dev_priv = dev->dev_private; 4011 struct drm_i915_private *dev_priv = dev->dev_private;
4012 const struct intel_watermark_params *wm_info; 4012 const struct intel_watermark_params *wm_info;
4013 uint32_t fwater_lo; 4013 uint32_t fwater_lo;
4014 uint32_t fwater_hi; 4014 uint32_t fwater_hi;
4015 int cwm, srwm = 1; 4015 int cwm, srwm = 1;
4016 int fifo_size; 4016 int fifo_size;
4017 int planea_wm, planeb_wm; 4017 int planea_wm, planeb_wm;
4018 struct drm_crtc *crtc, *enabled = NULL; 4018 struct drm_crtc *crtc, *enabled = NULL;
4019 4019
4020 if (IS_I945GM(dev)) 4020 if (IS_I945GM(dev))
4021 wm_info = &i945_wm_info; 4021 wm_info = &i945_wm_info;
4022 else if (!IS_GEN2(dev)) 4022 else if (!IS_GEN2(dev))
4023 wm_info = &i915_wm_info; 4023 wm_info = &i915_wm_info;
4024 else 4024 else
4025 wm_info = &i855_wm_info; 4025 wm_info = &i855_wm_info;
4026 4026
4027 fifo_size = dev_priv->display.get_fifo_size(dev, 0); 4027 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
4028 crtc = intel_get_crtc_for_plane(dev, 0); 4028 crtc = intel_get_crtc_for_plane(dev, 0);
4029 if (crtc->enabled && crtc->fb) { 4029 if (crtc->enabled && crtc->fb) {
4030 planea_wm = intel_calculate_wm(crtc->mode.clock, 4030 planea_wm = intel_calculate_wm(crtc->mode.clock,
4031 wm_info, fifo_size, 4031 wm_info, fifo_size,
4032 crtc->fb->bits_per_pixel / 8, 4032 crtc->fb->bits_per_pixel / 8,
4033 latency_ns); 4033 latency_ns);
4034 enabled = crtc; 4034 enabled = crtc;
4035 } else 4035 } else
4036 planea_wm = fifo_size - wm_info->guard_size; 4036 planea_wm = fifo_size - wm_info->guard_size;
4037 4037
4038 fifo_size = dev_priv->display.get_fifo_size(dev, 1); 4038 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
4039 crtc = intel_get_crtc_for_plane(dev, 1); 4039 crtc = intel_get_crtc_for_plane(dev, 1);
4040 if (crtc->enabled && crtc->fb) { 4040 if (crtc->enabled && crtc->fb) {
4041 planeb_wm = intel_calculate_wm(crtc->mode.clock, 4041 planeb_wm = intel_calculate_wm(crtc->mode.clock,
4042 wm_info, fifo_size, 4042 wm_info, fifo_size,
4043 crtc->fb->bits_per_pixel / 8, 4043 crtc->fb->bits_per_pixel / 8,
4044 latency_ns); 4044 latency_ns);
4045 if (enabled == NULL) 4045 if (enabled == NULL)
4046 enabled = crtc; 4046 enabled = crtc;
4047 else 4047 else
4048 enabled = NULL; 4048 enabled = NULL;
4049 } else 4049 } else
4050 planeb_wm = fifo_size - wm_info->guard_size; 4050 planeb_wm = fifo_size - wm_info->guard_size;
4051 4051
4052 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 4052 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
4053 4053
4054 /* 4054 /*
4055 * Overlay gets an aggressive default since video jitter is bad. 4055 * Overlay gets an aggressive default since video jitter is bad.
4056 */ 4056 */
4057 cwm = 2; 4057 cwm = 2;
4058 4058
4059 /* Play safe and disable self-refresh before adjusting watermarks. */ 4059 /* Play safe and disable self-refresh before adjusting watermarks. */
4060 if (IS_I945G(dev) || IS_I945GM(dev)) 4060 if (IS_I945G(dev) || IS_I945GM(dev))
4061 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); 4061 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
4062 else if (IS_I915GM(dev)) 4062 else if (IS_I915GM(dev))
4063 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); 4063 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
4064 4064
4065 /* Calc sr entries for one plane configs */ 4065 /* Calc sr entries for one plane configs */
4066 if (HAS_FW_BLC(dev) && enabled) { 4066 if (HAS_FW_BLC(dev) && enabled) {
4067 /* self-refresh has much higher latency */ 4067 /* self-refresh has much higher latency */
4068 static const int sr_latency_ns = 6000; 4068 static const int sr_latency_ns = 6000;
4069 int clock = enabled->mode.clock; 4069 int clock = enabled->mode.clock;
4070 int htotal = enabled->mode.htotal; 4070 int htotal = enabled->mode.htotal;
4071 int hdisplay = enabled->mode.hdisplay; 4071 int hdisplay = enabled->mode.hdisplay;
4072 int pixel_size = enabled->fb->bits_per_pixel / 8; 4072 int pixel_size = enabled->fb->bits_per_pixel / 8;
4073 unsigned long line_time_us; 4073 unsigned long line_time_us;
4074 int entries; 4074 int entries;
4075 4075
4076 line_time_us = (htotal * 1000) / clock; 4076 line_time_us = (htotal * 1000) / clock;
4077 4077
4078 /* Use ns/us then divide to preserve precision */ 4078 /* Use ns/us then divide to preserve precision */
4079 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 4079 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4080 pixel_size * hdisplay; 4080 pixel_size * hdisplay;
4081 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); 4081 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
4082 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); 4082 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
4083 srwm = wm_info->fifo_size - entries; 4083 srwm = wm_info->fifo_size - entries;
4084 if (srwm < 0) 4084 if (srwm < 0)
4085 srwm = 1; 4085 srwm = 1;
4086 4086
4087 if (IS_I945G(dev) || IS_I945GM(dev)) 4087 if (IS_I945G(dev) || IS_I945GM(dev))
4088 I915_WRITE(FW_BLC_SELF, 4088 I915_WRITE(FW_BLC_SELF,
4089 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); 4089 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
4090 else if (IS_I915GM(dev)) 4090 else if (IS_I915GM(dev))
4091 I915_WRITE(FW_BLC_SELF, srwm & 0x3f); 4091 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
4092 } 4092 }
4093 4093
4094 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 4094 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
4095 planea_wm, planeb_wm, cwm, srwm); 4095 planea_wm, planeb_wm, cwm, srwm);
4096 4096
4097 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); 4097 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
4098 fwater_hi = (cwm & 0x1f); 4098 fwater_hi = (cwm & 0x1f);
4099 4099
4100 /* Set request length to 8 cachelines per fetch */ 4100 /* Set request length to 8 cachelines per fetch */
4101 fwater_lo = fwater_lo | (1 << 24) | (1 << 8); 4101 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
4102 fwater_hi = fwater_hi | (1 << 8); 4102 fwater_hi = fwater_hi | (1 << 8);
4103 4103
4104 I915_WRITE(FW_BLC, fwater_lo); 4104 I915_WRITE(FW_BLC, fwater_lo);
4105 I915_WRITE(FW_BLC2, fwater_hi); 4105 I915_WRITE(FW_BLC2, fwater_hi);
4106 4106
4107 if (HAS_FW_BLC(dev)) { 4107 if (HAS_FW_BLC(dev)) {
4108 if (enabled) { 4108 if (enabled) {
4109 if (IS_I945G(dev) || IS_I945GM(dev)) 4109 if (IS_I945G(dev) || IS_I945GM(dev))
4110 I915_WRITE(FW_BLC_SELF, 4110 I915_WRITE(FW_BLC_SELF,
4111 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); 4111 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4112 else if (IS_I915GM(dev)) 4112 else if (IS_I915GM(dev))
4113 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); 4113 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
4114 DRM_DEBUG_KMS("memory self refresh enabled\n"); 4114 DRM_DEBUG_KMS("memory self refresh enabled\n");
4115 } else 4115 } else
4116 DRM_DEBUG_KMS("memory self refresh disabled\n"); 4116 DRM_DEBUG_KMS("memory self refresh disabled\n");
4117 } 4117 }
4118 } 4118 }
4119 4119
4120 static void i830_update_wm(struct drm_device *dev) 4120 static void i830_update_wm(struct drm_device *dev)
4121 { 4121 {
4122 struct drm_i915_private *dev_priv = dev->dev_private; 4122 struct drm_i915_private *dev_priv = dev->dev_private;
4123 struct drm_crtc *crtc; 4123 struct drm_crtc *crtc;
4124 uint32_t fwater_lo; 4124 uint32_t fwater_lo;
4125 int planea_wm; 4125 int planea_wm;
4126 4126
4127 crtc = single_enabled_crtc(dev); 4127 crtc = single_enabled_crtc(dev);
4128 if (crtc == NULL) 4128 if (crtc == NULL)
4129 return; 4129 return;
4130 4130
4131 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, 4131 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4132 dev_priv->display.get_fifo_size(dev, 0), 4132 dev_priv->display.get_fifo_size(dev, 0),
4133 crtc->fb->bits_per_pixel / 8, 4133 crtc->fb->bits_per_pixel / 8,
4134 latency_ns); 4134 latency_ns);
4135 fwater_lo = I915_READ(FW_BLC) & ~0xfff; 4135 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4136 fwater_lo |= (3<<8) | planea_wm; 4136 fwater_lo |= (3<<8) | planea_wm;
4137 4137
4138 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); 4138 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4139 4139
4140 I915_WRITE(FW_BLC, fwater_lo); 4140 I915_WRITE(FW_BLC, fwater_lo);
4141 } 4141 }
4142 4142
4143 #define ILK_LP0_PLANE_LATENCY 700 4143 #define ILK_LP0_PLANE_LATENCY 700
4144 #define ILK_LP0_CURSOR_LATENCY 1300 4144 #define ILK_LP0_CURSOR_LATENCY 1300
4145 4145
4146 static bool ironlake_compute_wm0(struct drm_device *dev, 4146 static bool ironlake_compute_wm0(struct drm_device *dev,
4147 int pipe, 4147 int pipe,
4148 const struct intel_watermark_params *display, 4148 const struct intel_watermark_params *display,
4149 int display_latency_ns, 4149 int display_latency_ns,
4150 const struct intel_watermark_params *cursor, 4150 const struct intel_watermark_params *cursor,
4151 int cursor_latency_ns, 4151 int cursor_latency_ns,
4152 int *plane_wm, 4152 int *plane_wm,
4153 int *cursor_wm) 4153 int *cursor_wm)
4154 { 4154 {
4155 struct drm_crtc *crtc; 4155 struct drm_crtc *crtc;
4156 int htotal, hdisplay, clock, pixel_size; 4156 int htotal, hdisplay, clock, pixel_size;
4157 int line_time_us, line_count; 4157 int line_time_us, line_count;
4158 int entries, tlb_miss; 4158 int entries, tlb_miss;
4159 4159
4160 crtc = intel_get_crtc_for_pipe(dev, pipe); 4160 crtc = intel_get_crtc_for_pipe(dev, pipe);
4161 if (crtc->fb == NULL || !crtc->enabled) 4161 if (crtc->fb == NULL || !crtc->enabled)
4162 return false; 4162 return false;
4163 4163
4164 htotal = crtc->mode.htotal; 4164 htotal = crtc->mode.htotal;
4165 hdisplay = crtc->mode.hdisplay; 4165 hdisplay = crtc->mode.hdisplay;
4166 clock = crtc->mode.clock; 4166 clock = crtc->mode.clock;
4167 pixel_size = crtc->fb->bits_per_pixel / 8; 4167 pixel_size = crtc->fb->bits_per_pixel / 8;
4168 4168
4169 /* Use the small buffer method to calculate plane watermark */ 4169 /* Use the small buffer method to calculate plane watermark */
4170 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; 4170 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4171 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; 4171 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
4172 if (tlb_miss > 0) 4172 if (tlb_miss > 0)
4173 entries += tlb_miss; 4173 entries += tlb_miss;
4174 entries = DIV_ROUND_UP(entries, display->cacheline_size); 4174 entries = DIV_ROUND_UP(entries, display->cacheline_size);
4175 *plane_wm = entries + display->guard_size; 4175 *plane_wm = entries + display->guard_size;
4176 if (*plane_wm > (int)display->max_wm) 4176 if (*plane_wm > (int)display->max_wm)
4177 *plane_wm = display->max_wm; 4177 *plane_wm = display->max_wm;
4178 4178
4179 /* Use the large buffer method to calculate cursor watermark */ 4179 /* Use the large buffer method to calculate cursor watermark */
4180 line_time_us = ((htotal * 1000) / clock); 4180 line_time_us = ((htotal * 1000) / clock);
4181 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; 4181 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
4182 entries = line_count * 64 * pixel_size; 4182 entries = line_count * 64 * pixel_size;
4183 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; 4183 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
4184 if (tlb_miss > 0) 4184 if (tlb_miss > 0)
4185 entries += tlb_miss; 4185 entries += tlb_miss;
4186 entries = DIV_ROUND_UP(entries, cursor->cacheline_size); 4186 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4187 *cursor_wm = entries + cursor->guard_size; 4187 *cursor_wm = entries + cursor->guard_size;
4188 if (*cursor_wm > (int)cursor->max_wm) 4188 if (*cursor_wm > (int)cursor->max_wm)
4189 *cursor_wm = (int)cursor->max_wm; 4189 *cursor_wm = (int)cursor->max_wm;
4190 4190
4191 return true; 4191 return true;
4192 } 4192 }
4193 4193
4194 /* 4194 /*
4195 * Check the wm result. 4195 * Check the wm result.
4196 * 4196 *
4197 * If any calculated watermark values is larger than the maximum value that 4197 * If any calculated watermark values is larger than the maximum value that
4198 * can be programmed into the associated watermark register, that watermark 4198 * can be programmed into the associated watermark register, that watermark
4199 * must be disabled. 4199 * must be disabled.
4200 */ 4200 */
4201 static bool ironlake_check_srwm(struct drm_device *dev, int level, 4201 static bool ironlake_check_srwm(struct drm_device *dev, int level,
4202 int fbc_wm, int display_wm, int cursor_wm, 4202 int fbc_wm, int display_wm, int cursor_wm,
4203 const struct intel_watermark_params *display, 4203 const struct intel_watermark_params *display,
4204 const struct intel_watermark_params *cursor) 4204 const struct intel_watermark_params *cursor)
4205 { 4205 {
4206 struct drm_i915_private *dev_priv = dev->dev_private; 4206 struct drm_i915_private *dev_priv = dev->dev_private;
4207 4207
4208 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," 4208 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4209 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); 4209 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4210 4210
4211 if (fbc_wm > SNB_FBC_MAX_SRWM) { 4211 if (fbc_wm > SNB_FBC_MAX_SRWM) {
4212 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", 4212 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4213 fbc_wm, SNB_FBC_MAX_SRWM, level); 4213 fbc_wm, SNB_FBC_MAX_SRWM, level);
4214 4214
4215 /* fbc has it's own way to disable FBC WM */ 4215 /* fbc has it's own way to disable FBC WM */
4216 I915_WRITE(DISP_ARB_CTL, 4216 I915_WRITE(DISP_ARB_CTL,
4217 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); 4217 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4218 return false; 4218 return false;
4219 } 4219 }
4220 4220
4221 if (display_wm > display->max_wm) { 4221 if (display_wm > display->max_wm) {
4222 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", 4222 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4223 display_wm, (int)display->max_wm, level); 4223 display_wm, (int)display->max_wm, level);
4224 return false; 4224 return false;
4225 } 4225 }
4226 4226
4227 if (cursor_wm > cursor->max_wm) { 4227 if (cursor_wm > cursor->max_wm) {
4228 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", 4228 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4229 cursor_wm, (int)cursor->max_wm, level); 4229 cursor_wm, (int)cursor->max_wm, level);
4230 return false; 4230 return false;
4231 } 4231 }
4232 4232
4233 if (!(fbc_wm || display_wm || cursor_wm)) { 4233 if (!(fbc_wm || display_wm || cursor_wm)) {
4234 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); 4234 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4235 return false; 4235 return false;
4236 } 4236 }
4237 4237
4238 return true; 4238 return true;
4239 } 4239 }
4240 4240
4241 /* 4241 /*
4242 * Compute watermark values for WM[1-3]. 4242 * Compute watermark values for WM[1-3].
4243 */ 4243 */
4244 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, 4244 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4245 int latency_ns, 4245 int latency_ns,
4246 const struct intel_watermark_params *display, 4246 const struct intel_watermark_params *display,
4247 const struct intel_watermark_params *cursor, 4247 const struct intel_watermark_params *cursor,
4248 int *fbc_wm, int *display_wm, int *cursor_wm) 4248 int *fbc_wm, int *display_wm, int *cursor_wm)
4249 { 4249 {
4250 struct drm_crtc *crtc; 4250 struct drm_crtc *crtc;
4251 unsigned long line_time_us; 4251 unsigned long line_time_us;
4252 int hdisplay, htotal, pixel_size, clock; 4252 int hdisplay, htotal, pixel_size, clock;
4253 int line_count, line_size; 4253 int line_count, line_size;
4254 int small, large; 4254 int small, large;
4255 int entries; 4255 int entries;
4256 4256
4257 if (!latency_ns) { 4257 if (!latency_ns) {
4258 *fbc_wm = *display_wm = *cursor_wm = 0; 4258 *fbc_wm = *display_wm = *cursor_wm = 0;
4259 return false; 4259 return false;
4260 } 4260 }
4261 4261
4262 crtc = intel_get_crtc_for_plane(dev, plane); 4262 crtc = intel_get_crtc_for_plane(dev, plane);
4263 hdisplay = crtc->mode.hdisplay; 4263 hdisplay = crtc->mode.hdisplay;
4264 htotal = crtc->mode.htotal; 4264 htotal = crtc->mode.htotal;
4265 clock = crtc->mode.clock; 4265 clock = crtc->mode.clock;
4266 pixel_size = crtc->fb->bits_per_pixel / 8; 4266 pixel_size = crtc->fb->bits_per_pixel / 8;
4267 4267
4268 line_time_us = (htotal * 1000) / clock; 4268 line_time_us = (htotal * 1000) / clock;
4269 line_count = (latency_ns / line_time_us + 1000) / 1000; 4269 line_count = (latency_ns / line_time_us + 1000) / 1000;
4270 line_size = hdisplay * pixel_size; 4270 line_size = hdisplay * pixel_size;
4271 4271
4272 /* Use the minimum of the small and large buffer method for primary */ 4272 /* Use the minimum of the small and large buffer method for primary */
4273 small = ((clock * pixel_size / 1000) * latency_ns) / 1000; 4273 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4274 large = line_count * line_size; 4274 large = line_count * line_size;
4275 4275
4276 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); 4276 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4277 *display_wm = entries + display->guard_size; 4277 *display_wm = entries + display->guard_size;
4278 4278
4279 /* 4279 /*
4280 * Spec says: 4280 * Spec says:
4281 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 4281 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4282 */ 4282 */
4283 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; 4283 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
4284 4284
4285 /* calculate the self-refresh watermark for display cursor */ 4285 /* calculate the self-refresh watermark for display cursor */
4286 entries = line_count * pixel_size * 64; 4286 entries = line_count * pixel_size * 64;
4287 entries = DIV_ROUND_UP(entries, cursor->cacheline_size); 4287 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4288 *cursor_wm = entries + cursor->guard_size; 4288 *cursor_wm = entries + cursor->guard_size;
4289 4289
4290 return ironlake_check_srwm(dev, level, 4290 return ironlake_check_srwm(dev, level,
4291 *fbc_wm, *display_wm, *cursor_wm, 4291 *fbc_wm, *display_wm, *cursor_wm,
4292 display, cursor); 4292 display, cursor);
4293 } 4293 }
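For illustration, the same arithmetic can be run standalone with hypothetical inputs (a 1920x1080 mode at a 148500 kHz dot clock, 32 bpp, 12000 ns latency, 64-byte cachelines, guard size 2 -- none of these values are taken from the driver's watermark tables):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int clock = 148500, htotal = 2200, hdisplay = 1920; /* kHz, pixels */
        int pixel_size = 4, latency_ns = 12000;             /* hypothetical */
        int cacheline = 64, guard = 2;                      /* hypothetical */

        int line_time_us = (htotal * 1000) / clock;                  /* 14 */
        int line_count = (latency_ns / line_time_us + 1000) / 1000;  /* 1 */
        int line_size = hdisplay * pixel_size;                       /* 7680 */

        /* minimum of the small and large buffer methods, as above */
        int small = ((clock * pixel_size / 1000) * latency_ns) / 1000; /* 7128 */
        int large = line_count * line_size;                            /* 7680 */

        int display_wm = DIV_ROUND_UP(small < large ? small : large,
                                      cacheline) + guard;
        int fbc_wm = DIV_ROUND_UP(display_wm * 64, line_size) + 2;
        int cursor_wm = DIV_ROUND_UP(line_count * pixel_size * 64,
                                     cacheline) + guard;

        /* prints: display 114, fbc 3, cursor 6 */
        printf("display %d, fbc %d, cursor %d\n",
               display_wm, fbc_wm, cursor_wm);
        return 0;
}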
4294 4294
4295 static void ironlake_update_wm(struct drm_device *dev) 4295 static void ironlake_update_wm(struct drm_device *dev)
4296 { 4296 {
4297 struct drm_i915_private *dev_priv = dev->dev_private; 4297 struct drm_i915_private *dev_priv = dev->dev_private;
4298 int fbc_wm, plane_wm, cursor_wm; 4298 int fbc_wm, plane_wm, cursor_wm;
4299 unsigned int enabled; 4299 unsigned int enabled;
4300 4300
4301 enabled = 0; 4301 enabled = 0;
4302 if (ironlake_compute_wm0(dev, 0, 4302 if (ironlake_compute_wm0(dev, 0,
4303 &ironlake_display_wm_info, 4303 &ironlake_display_wm_info,
4304 ILK_LP0_PLANE_LATENCY, 4304 ILK_LP0_PLANE_LATENCY,
4305 &ironlake_cursor_wm_info, 4305 &ironlake_cursor_wm_info,
4306 ILK_LP0_CURSOR_LATENCY, 4306 ILK_LP0_CURSOR_LATENCY,
4307 &plane_wm, &cursor_wm)) { 4307 &plane_wm, &cursor_wm)) {
4308 I915_WRITE(WM0_PIPEA_ILK, 4308 I915_WRITE(WM0_PIPEA_ILK,
4309 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4309 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4310 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 4310 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4311 " plane %d, " "cursor: %d\n", 4311 " plane %d, " "cursor: %d\n",
4312 plane_wm, cursor_wm); 4312 plane_wm, cursor_wm);
4313 enabled |= 1; 4313 enabled |= 1;
4314 } 4314 }
4315 4315
4316 if (ironlake_compute_wm0(dev, 1, 4316 if (ironlake_compute_wm0(dev, 1,
4317 &ironlake_display_wm_info, 4317 &ironlake_display_wm_info,
4318 ILK_LP0_PLANE_LATENCY, 4318 ILK_LP0_PLANE_LATENCY,
4319 &ironlake_cursor_wm_info, 4319 &ironlake_cursor_wm_info,
4320 ILK_LP0_CURSOR_LATENCY, 4320 ILK_LP0_CURSOR_LATENCY,
4321 &plane_wm, &cursor_wm)) { 4321 &plane_wm, &cursor_wm)) {
4322 I915_WRITE(WM0_PIPEB_ILK, 4322 I915_WRITE(WM0_PIPEB_ILK,
4323 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4323 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4324 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 4324 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4325 " plane %d, cursor: %d\n", 4325 " plane %d, cursor: %d\n",
4326 plane_wm, cursor_wm); 4326 plane_wm, cursor_wm);
4327 enabled |= 2; 4327 enabled |= 2;
4328 } 4328 }
4329 4329
4330 /* 4330 /*
4331 * Calculate and update the self-refresh watermark only when one 4331 * Calculate and update the self-refresh watermark only when one
4332 * display plane is used. 4332 * display plane is used.
4333 */ 4333 */
4334 I915_WRITE(WM3_LP_ILK, 0); 4334 I915_WRITE(WM3_LP_ILK, 0);
4335 I915_WRITE(WM2_LP_ILK, 0); 4335 I915_WRITE(WM2_LP_ILK, 0);
4336 I915_WRITE(WM1_LP_ILK, 0); 4336 I915_WRITE(WM1_LP_ILK, 0);
4337 4337
4338 if (!single_plane_enabled(enabled)) 4338 if (!single_plane_enabled(enabled))
4339 return; 4339 return;
4340 enabled = ffs(enabled) - 1; 4340 enabled = ffs(enabled) - 1;
4341 4341
4342 /* WM1 */ 4342 /* WM1 */
4343 if (!ironlake_compute_srwm(dev, 1, enabled, 4343 if (!ironlake_compute_srwm(dev, 1, enabled,
4344 ILK_READ_WM1_LATENCY() * 500, 4344 ILK_READ_WM1_LATENCY() * 500,
4345 &ironlake_display_srwm_info, 4345 &ironlake_display_srwm_info,
4346 &ironlake_cursor_srwm_info, 4346 &ironlake_cursor_srwm_info,
4347 &fbc_wm, &plane_wm, &cursor_wm)) 4347 &fbc_wm, &plane_wm, &cursor_wm))
4348 return; 4348 return;
4349 4349
4350 I915_WRITE(WM1_LP_ILK, 4350 I915_WRITE(WM1_LP_ILK,
4351 WM1_LP_SR_EN | 4351 WM1_LP_SR_EN |
4352 (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | 4352 (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4353 (fbc_wm << WM1_LP_FBC_SHIFT) | 4353 (fbc_wm << WM1_LP_FBC_SHIFT) |
4354 (plane_wm << WM1_LP_SR_SHIFT) | 4354 (plane_wm << WM1_LP_SR_SHIFT) |
4355 cursor_wm); 4355 cursor_wm);
4356 4356
4357 /* WM2 */ 4357 /* WM2 */
4358 if (!ironlake_compute_srwm(dev, 2, enabled, 4358 if (!ironlake_compute_srwm(dev, 2, enabled,
4359 ILK_READ_WM2_LATENCY() * 500, 4359 ILK_READ_WM2_LATENCY() * 500,
4360 &ironlake_display_srwm_info, 4360 &ironlake_display_srwm_info,
4361 &ironlake_cursor_srwm_info, 4361 &ironlake_cursor_srwm_info,
4362 &fbc_wm, &plane_wm, &cursor_wm)) 4362 &fbc_wm, &plane_wm, &cursor_wm))
4363 return; 4363 return;
4364 4364
4365 I915_WRITE(WM2_LP_ILK, 4365 I915_WRITE(WM2_LP_ILK,
4366 WM2_LP_EN | 4366 WM2_LP_EN |
4367 (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | 4367 (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4368 (fbc_wm << WM1_LP_FBC_SHIFT) | 4368 (fbc_wm << WM1_LP_FBC_SHIFT) |
4369 (plane_wm << WM1_LP_SR_SHIFT) | 4369 (plane_wm << WM1_LP_SR_SHIFT) |
4370 cursor_wm); 4370 cursor_wm);
4371 4371
4372 /* 4372 /*
4373 * WM3 is unsupported on ILK, probably because we don't have latency 4373 * WM3 is unsupported on ILK, probably because we don't have latency
4374 * data for that power state 4374 * data for that power state
4375 */ 4375 */
4376 } 4376 }
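A note on the bookkeeping above: enabled collects one bit per pipe, single_plane_enabled() accepts the mask only when exactly one bit is set (in this version of the driver it is essentially an is_power_of_2() test -- an assumption worth checking), and ffs(enabled) - 1 turns the surviving bit back into a plane index. A minimal sketch:

#include <stdio.h>
#include <strings.h>              /* ffs() */

/* true when exactly one bit of the mask is set */
static int single_plane_enabled(unsigned int mask)
{
        return mask && !(mask & (mask - 1));
}

int main(void)
{
        unsigned int enabled = 2; /* only pipe B produced valid WM0 values */

        if (single_plane_enabled(enabled))
                printf("plane %d\n", ffs(enabled) - 1); /* prints: plane 1 */
        return 0;
}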
4377 4377
4378 static void sandybridge_update_wm(struct drm_device *dev) 4378 static void sandybridge_update_wm(struct drm_device *dev)
4379 { 4379 {
4380 struct drm_i915_private *dev_priv = dev->dev_private; 4380 struct drm_i915_private *dev_priv = dev->dev_private;
4381 int latency = SNB_READ_WM0_LATENCY() * 100; /* 0.1us units -> ns */ 4381 int latency = SNB_READ_WM0_LATENCY() * 100; /* 0.1us units -> ns */
4382 int fbc_wm, plane_wm, cursor_wm; 4382 int fbc_wm, plane_wm, cursor_wm;
4383 unsigned int enabled; 4383 unsigned int enabled;
4384 4384
4385 enabled = 0; 4385 enabled = 0;
4386 if (ironlake_compute_wm0(dev, 0, 4386 if (ironlake_compute_wm0(dev, 0,
4387 &sandybridge_display_wm_info, latency, 4387 &sandybridge_display_wm_info, latency,
4388 &sandybridge_cursor_wm_info, latency, 4388 &sandybridge_cursor_wm_info, latency,
4389 &plane_wm, &cursor_wm)) { 4389 &plane_wm, &cursor_wm)) {
4390 I915_WRITE(WM0_PIPEA_ILK, 4390 I915_WRITE(WM0_PIPEA_ILK,
4391 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4391 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4392 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 4392 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4393 " plane %d, " "cursor: %d\n", 4393 " plane %d, " "cursor: %d\n",
4394 plane_wm, cursor_wm); 4394 plane_wm, cursor_wm);
4395 enabled |= 1; 4395 enabled |= 1;
4396 } 4396 }
4397 4397
4398 if (ironlake_compute_wm0(dev, 1, 4398 if (ironlake_compute_wm0(dev, 1,
4399 &sandybridge_display_wm_info, latency, 4399 &sandybridge_display_wm_info, latency,
4400 &sandybridge_cursor_wm_info, latency, 4400 &sandybridge_cursor_wm_info, latency,
4401 &plane_wm, &cursor_wm)) { 4401 &plane_wm, &cursor_wm)) {
4402 I915_WRITE(WM0_PIPEB_ILK, 4402 I915_WRITE(WM0_PIPEB_ILK,
4403 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4403 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4404 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 4404 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4405 " plane %d, cursor: %d\n", 4405 " plane %d, cursor: %d\n",
4406 plane_wm, cursor_wm); 4406 plane_wm, cursor_wm);
4407 enabled |= 2; 4407 enabled |= 2;
4408 } 4408 }
4409 4409
4410 /* 4410 /*
4411 * Calculate and update the self-refresh watermark only when one 4411 * Calculate and update the self-refresh watermark only when one
4412 * display plane is used. 4412 * display plane is used.
4413 * 4413 *
4414 * SNB supports 3 levels of watermarks. 4414 * SNB supports 3 levels of watermarks.
4415 * 4415 *
4416 * WM1/WM2/WM3 watermarks have to be enabled in ascending order, 4416 * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
4417 * and disabled in descending order. 4417 * and disabled in descending order.
4418 * 4418 *
4419 */ 4419 */
4420 I915_WRITE(WM3_LP_ILK, 0); 4420 I915_WRITE(WM3_LP_ILK, 0);
4421 I915_WRITE(WM2_LP_ILK, 0); 4421 I915_WRITE(WM2_LP_ILK, 0);
4422 I915_WRITE(WM1_LP_ILK, 0); 4422 I915_WRITE(WM1_LP_ILK, 0);
4423 4423
4424 if (!single_plane_enabled(enabled)) 4424 if (!single_plane_enabled(enabled))
4425 return; 4425 return;
4426 enabled = ffs(enabled) - 1; 4426 enabled = ffs(enabled) - 1;
4427 4427
4428 /* WM1 */ 4428 /* WM1 */
4429 if (!ironlake_compute_srwm(dev, 1, enabled, 4429 if (!ironlake_compute_srwm(dev, 1, enabled,
4430 SNB_READ_WM1_LATENCY() * 500, 4430 SNB_READ_WM1_LATENCY() * 500,
4431 &sandybridge_display_srwm_info, 4431 &sandybridge_display_srwm_info,
4432 &sandybridge_cursor_srwm_info, 4432 &sandybridge_cursor_srwm_info,
4433 &fbc_wm, &plane_wm, &cursor_wm)) 4433 &fbc_wm, &plane_wm, &cursor_wm))
4434 return; 4434 return;
4435 4435
4436 I915_WRITE(WM1_LP_ILK, 4436 I915_WRITE(WM1_LP_ILK,
4437 WM1_LP_SR_EN | 4437 WM1_LP_SR_EN |
4438 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | 4438 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4439 (fbc_wm << WM1_LP_FBC_SHIFT) | 4439 (fbc_wm << WM1_LP_FBC_SHIFT) |
4440 (plane_wm << WM1_LP_SR_SHIFT) | 4440 (plane_wm << WM1_LP_SR_SHIFT) |
4441 cursor_wm); 4441 cursor_wm);
4442 4442
4443 /* WM2 */ 4443 /* WM2 */
4444 if (!ironlake_compute_srwm(dev, 2, enabled, 4444 if (!ironlake_compute_srwm(dev, 2, enabled,
4445 SNB_READ_WM2_LATENCY() * 500, 4445 SNB_READ_WM2_LATENCY() * 500,
4446 &sandybridge_display_srwm_info, 4446 &sandybridge_display_srwm_info,
4447 &sandybridge_cursor_srwm_info, 4447 &sandybridge_cursor_srwm_info,
4448 &fbc_wm, &plane_wm, &cursor_wm)) 4448 &fbc_wm, &plane_wm, &cursor_wm))
4449 return; 4449 return;
4450 4450
4451 I915_WRITE(WM2_LP_ILK, 4451 I915_WRITE(WM2_LP_ILK,
4452 WM2_LP_EN | 4452 WM2_LP_EN |
4453 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | 4453 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4454 (fbc_wm << WM1_LP_FBC_SHIFT) | 4454 (fbc_wm << WM1_LP_FBC_SHIFT) |
4455 (plane_wm << WM1_LP_SR_SHIFT) | 4455 (plane_wm << WM1_LP_SR_SHIFT) |
4456 cursor_wm); 4456 cursor_wm);
4457 4457
4458 /* WM3 */ 4458 /* WM3 */
4459 if (!ironlake_compute_srwm(dev, 3, enabled, 4459 if (!ironlake_compute_srwm(dev, 3, enabled,
4460 SNB_READ_WM3_LATENCY() * 500, 4460 SNB_READ_WM3_LATENCY() * 500,
4461 &sandybridge_display_srwm_info, 4461 &sandybridge_display_srwm_info,
4462 &sandybridge_cursor_srwm_info, 4462 &sandybridge_cursor_srwm_info,
4463 &fbc_wm, &plane_wm, &cursor_wm)) 4463 &fbc_wm, &plane_wm, &cursor_wm))
4464 return; 4464 return;
4465 4465
4466 I915_WRITE(WM3_LP_ILK, 4466 I915_WRITE(WM3_LP_ILK,
4467 WM3_LP_EN | 4467 WM3_LP_EN |
4468 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | 4468 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4469 (fbc_wm << WM1_LP_FBC_SHIFT) | 4469 (fbc_wm << WM1_LP_FBC_SHIFT) |
4470 (plane_wm << WM1_LP_SR_SHIFT) | 4470 (plane_wm << WM1_LP_SR_SHIFT) |
4471 cursor_wm); 4471 cursor_wm);
4472 } 4472 }
4473 4473
4474 /** 4474 /**
4475 * intel_update_watermarks - update FIFO watermark values based on current modes 4475 * intel_update_watermarks - update FIFO watermark values based on current modes
4476 * 4476 *
4477 * Calculate watermark values for the various WM regs based on current mode 4477 * Calculate watermark values for the various WM regs based on current mode
4478 * and plane configuration. 4478 * and plane configuration.
4479 * 4479 *
4480 * There are several cases to deal with here: 4480 * There are several cases to deal with here:
4481 * - normal (i.e. non-self-refresh) 4481 * - normal (i.e. non-self-refresh)
4482 * - self-refresh (SR) mode 4482 * - self-refresh (SR) mode
4483 * - lines are large relative to FIFO size (buffer can hold up to 2) 4483 * - lines are large relative to FIFO size (buffer can hold up to 2)
4484 * - lines are small relative to FIFO size (buffer can hold more than 2 4484 * - lines are small relative to FIFO size (buffer can hold more than 2
4485 * lines), so need to account for TLB latency 4485 * lines), so need to account for TLB latency
4486 * 4486 *
4487 * The normal calculation is: 4487 * The normal calculation is:
4488 * watermark = dotclock * bytes per pixel * latency 4488 * watermark = dotclock * bytes per pixel * latency
4489 * where latency is platform & configuration dependent (we assume pessimal 4489 * where latency is platform & configuration dependent (we assume pessimal
4490 * values here). 4490 * values here).
4491 * 4491 *
4492 * The SR calculation is: 4492 * The SR calculation is:
4493 * watermark = (trunc(latency/line time)+1) * surface width * 4493 * watermark = (trunc(latency/line time)+1) * surface width *
4494 * bytes per pixel 4494 * bytes per pixel
4495 * where 4495 * where
4496 * line time = htotal / dotclock 4496 * line time = htotal / dotclock
4497 * surface width = hdisplay for normal plane and 64 for cursor 4497 * surface width = hdisplay for normal plane and 64 for cursor
4498 * and latency is assumed to be high, as above. 4498 * and latency is assumed to be high, as above.
4499 * 4499 *
4500 * The final value programmed to the register should always be rounded up, 4500 * The final value programmed to the register should always be rounded up,
4501 * and include an extra 2 entries to account for clock crossings. 4501 * and include an extra 2 entries to account for clock crossings.
4502 * 4502 *
4503 * We don't use the sprite, so we can ignore that. And on Crestline we have 4503 * We don't use the sprite, so we can ignore that. And on Crestline we have
4504 * to set the non-SR watermarks to 8. 4504 * to set the non-SR watermarks to 8.
4505 */ 4505 */
4506 static void intel_update_watermarks(struct drm_device *dev) 4506 static void intel_update_watermarks(struct drm_device *dev)
4507 { 4507 {
4508 struct drm_i915_private *dev_priv = dev->dev_private; 4508 struct drm_i915_private *dev_priv = dev->dev_private;
4509 4509
4510 if (dev_priv->display.update_wm) 4510 if (dev_priv->display.update_wm)
4511 dev_priv->display.update_wm(dev); 4511 dev_priv->display.update_wm(dev);
4512 } 4512 }
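intel_update_watermarks() itself is only a dispatcher: each platform installs its own update_wm hook at init time (in the driver the table is struct drm_i915_display_funcs). A minimal sketch of the pattern, with invented names:

#include <stdio.h>

struct display_funcs {
        void (*update_wm)(void *dev); /* left NULL when WMs are unsupported */
};

static void sandybridge_wm(void *dev) { printf("SNB watermarks\n"); }

int main(void)
{
        struct display_funcs display = { .update_wm = sandybridge_wm };

        if (display.update_wm)        /* mirrors the NULL check above */
                display.update_wm(NULL);
        return 0;
}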
4513 4513
4514 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 4514 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4515 { 4515 {
4516 return dev_priv->lvds_use_ssc && i915_panel_use_ssc; 4516 return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
4517 } 4517 }
4518 4518
4519 static int intel_crtc_mode_set(struct drm_crtc *crtc, 4519 static int intel_crtc_mode_set(struct drm_crtc *crtc,
4520 struct drm_display_mode *mode, 4520 struct drm_display_mode *mode,
4521 struct drm_display_mode *adjusted_mode, 4521 struct drm_display_mode *adjusted_mode,
4522 int x, int y, 4522 int x, int y,
4523 struct drm_framebuffer *old_fb) 4523 struct drm_framebuffer *old_fb)
4524 { 4524 {
4525 struct drm_device *dev = crtc->dev; 4525 struct drm_device *dev = crtc->dev;
4526 struct drm_i915_private *dev_priv = dev->dev_private; 4526 struct drm_i915_private *dev_priv = dev->dev_private;
4527 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4527 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4528 int pipe = intel_crtc->pipe; 4528 int pipe = intel_crtc->pipe;
4529 int plane = intel_crtc->plane; 4529 int plane = intel_crtc->plane;
4530 u32 fp_reg, dpll_reg; 4530 u32 fp_reg, dpll_reg;
4531 int refclk, num_connectors = 0; 4531 int refclk, num_connectors = 0;
4532 intel_clock_t clock, reduced_clock; 4532 intel_clock_t clock, reduced_clock;
4533 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; 4533 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4534 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; 4534 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
4535 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 4535 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4536 struct intel_encoder *has_edp_encoder = NULL; 4536 struct intel_encoder *has_edp_encoder = NULL;
4537 struct drm_mode_config *mode_config = &dev->mode_config; 4537 struct drm_mode_config *mode_config = &dev->mode_config;
4538 struct intel_encoder *encoder; 4538 struct intel_encoder *encoder;
4539 const intel_limit_t *limit; 4539 const intel_limit_t *limit;
4540 int ret; 4540 int ret;
4541 struct fdi_m_n m_n = {0}; 4541 struct fdi_m_n m_n = {0};
4542 u32 reg, temp; 4542 u32 reg, temp;
4543 u32 lvds_sync = 0; 4543 u32 lvds_sync = 0;
4544 int target_clock; 4544 int target_clock;
4545 4545
4546 drm_vblank_pre_modeset(dev, pipe); 4546 drm_vblank_pre_modeset(dev, pipe);
4547 4547
4548 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 4548 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4549 if (encoder->base.crtc != crtc) 4549 if (encoder->base.crtc != crtc)
4550 continue; 4550 continue;
4551 4551
4552 switch (encoder->type) { 4552 switch (encoder->type) {
4553 case INTEL_OUTPUT_LVDS: 4553 case INTEL_OUTPUT_LVDS:
4554 is_lvds = true; 4554 is_lvds = true;
4555 break; 4555 break;
4556 case INTEL_OUTPUT_SDVO: 4556 case INTEL_OUTPUT_SDVO:
4557 case INTEL_OUTPUT_HDMI: 4557 case INTEL_OUTPUT_HDMI:
4558 is_sdvo = true; 4558 is_sdvo = true;
4559 if (encoder->needs_tv_clock) 4559 if (encoder->needs_tv_clock)
4560 is_tv = true; 4560 is_tv = true;
4561 break; 4561 break;
4562 case INTEL_OUTPUT_DVO: 4562 case INTEL_OUTPUT_DVO:
4563 is_dvo = true; 4563 is_dvo = true;
4564 break; 4564 break;
4565 case INTEL_OUTPUT_TVOUT: 4565 case INTEL_OUTPUT_TVOUT:
4566 is_tv = true; 4566 is_tv = true;
4567 break; 4567 break;
4568 case INTEL_OUTPUT_ANALOG: 4568 case INTEL_OUTPUT_ANALOG:
4569 is_crt = true; 4569 is_crt = true;
4570 break; 4570 break;
4571 case INTEL_OUTPUT_DISPLAYPORT: 4571 case INTEL_OUTPUT_DISPLAYPORT:
4572 is_dp = true; 4572 is_dp = true;
4573 break; 4573 break;
4574 case INTEL_OUTPUT_EDP: 4574 case INTEL_OUTPUT_EDP:
4575 has_edp_encoder = encoder; 4575 has_edp_encoder = encoder;
4576 break; 4576 break;
4577 } 4577 }
4578 4578
4579 num_connectors++; 4579 num_connectors++;
4580 } 4580 }
4581 4581
4582 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 4582 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4583 refclk = dev_priv->lvds_ssc_freq * 1000; 4583 refclk = dev_priv->lvds_ssc_freq * 1000;
4584 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", 4584 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4585 refclk / 1000); 4585 refclk / 1000);
4586 } else if (!IS_GEN2(dev)) { 4586 } else if (!IS_GEN2(dev)) {
4587 refclk = 96000; 4587 refclk = 96000;
4588 if (HAS_PCH_SPLIT(dev) && 4588 if (HAS_PCH_SPLIT(dev) &&
4589 (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base))) 4589 (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)))
4590 refclk = 120000; /* 120MHz refclk */ 4590 refclk = 120000; /* 120MHz refclk */
4591 } else { 4591 } else {
4592 refclk = 48000; 4592 refclk = 48000;
4593 } 4593 }
4594 4594
4595 /* 4595 /*
4596 * Returns a set of divisors for the desired target clock with the given 4596 * Returns a set of divisors for the desired target clock with the given
4597 * refclk, or FALSE. The returned values represent the clock equation: 4597 * refclk, or FALSE. The returned values represent the clock equation:
4598 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 4598 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4599 */ 4599 */
4600 limit = intel_limit(crtc, refclk); 4600 limit = intel_limit(crtc, refclk);
4601 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); 4601 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
4602 if (!ok) { 4602 if (!ok) {
4603 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 4603 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4604 drm_vblank_post_modeset(dev, pipe); 4604 drm_vblank_post_modeset(dev, pipe);
4605 return -EINVAL; 4605 return -EINVAL;
4606 } 4606 }
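A hypothetical evaluation of that clock equation (the divisor values below are invented for illustration, not taken from any real limit table):

#include <stdio.h>

int main(void)
{
        int refclk = 96000;                           /* kHz */
        int n = 4, m1 = 10, m2 = 10, p1 = 2, p2 = 10; /* hypothetical */

        int m = 5 * (m1 + 2) + (m2 + 2);              /* 72 */
        int clock = refclk * m / (n + 2) / p1 / p2;

        printf("%d kHz\n", clock);                    /* prints: 57600 kHz */
        return 0;
}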
4607 4607
4608 /* Ensure that the cursor is valid for the new mode before changing... */ 4608 /* Ensure that the cursor is valid for the new mode before changing... */
4609 intel_crtc_update_cursor(crtc, true); 4609 intel_crtc_update_cursor(crtc, true);
4610 4610
4611 if (is_lvds && dev_priv->lvds_downclock_avail) { 4611 if (is_lvds && dev_priv->lvds_downclock_avail) {
4612 has_reduced_clock = limit->find_pll(limit, crtc, 4612 has_reduced_clock = limit->find_pll(limit, crtc,
4613 dev_priv->lvds_downclock, 4613 dev_priv->lvds_downclock,
4614 refclk, 4614 refclk,
4615 &reduced_clock); 4615 &reduced_clock);
4616 if (has_reduced_clock && (clock.p != reduced_clock.p)) { 4616 if (has_reduced_clock && (clock.p != reduced_clock.p)) {
4617 /* 4617 /*
4618 * If a different P is found, it means that we can't 4618 * If a different P is found, it means that we can't
4619 * switch the display clock using FP0/FP1. 4619 * switch the display clock using FP0/FP1.
4620 * In that case we will disable the LVDS downclock 4620 * In that case we will disable the LVDS downclock
4621 * feature. 4621 * feature.
4622 */ 4622 */
4623 DRM_DEBUG_KMS("Different P is found for " 4623 DRM_DEBUG_KMS("Different P is found for "
4624 "LVDS clock/downclock\n"); 4624 "LVDS clock/downclock\n");
4625 has_reduced_clock = 0; 4625 has_reduced_clock = 0;
4626 } 4626 }
4627 } 4627 }
4628 /* SDVO TV has fixed PLL values that depend on its clock range; 4628 /* SDVO TV has fixed PLL values that depend on its clock range;
4629 this mirrors the VBIOS setting. */ 4629 this mirrors the VBIOS setting. */
4630 if (is_sdvo && is_tv) { 4630 if (is_sdvo && is_tv) {
4631 if (adjusted_mode->clock >= 100000 4631 if (adjusted_mode->clock >= 100000
4632 && adjusted_mode->clock < 140500) { 4632 && adjusted_mode->clock < 140500) {
4633 clock.p1 = 2; 4633 clock.p1 = 2;
4634 clock.p2 = 10; 4634 clock.p2 = 10;
4635 clock.n = 3; 4635 clock.n = 3;
4636 clock.m1 = 16; 4636 clock.m1 = 16;
4637 clock.m2 = 8; 4637 clock.m2 = 8;
4638 } else if (adjusted_mode->clock >= 140500 4638 } else if (adjusted_mode->clock >= 140500
4639 && adjusted_mode->clock <= 200000) { 4639 && adjusted_mode->clock <= 200000) {
4640 clock.p1 = 1; 4640 clock.p1 = 1;
4641 clock.p2 = 10; 4641 clock.p2 = 10;
4642 clock.n = 6; 4642 clock.n = 6;
4643 clock.m1 = 12; 4643 clock.m1 = 12;
4644 clock.m2 = 8; 4644 clock.m2 = 8;
4645 } 4645 }
4646 } 4646 }
4647 4647
4648 /* FDI link */ 4648 /* FDI link */
4649 if (HAS_PCH_SPLIT(dev)) { 4649 if (HAS_PCH_SPLIT(dev)) {
4650 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 4650 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4651 int lane = 0, link_bw, bpp; 4651 int lane = 0, link_bw, bpp;
4652 /* CPU eDP doesn't require an FDI link, so just set the DP M/N 4652 /* CPU eDP doesn't require an FDI link, so just set the DP M/N
4653 according to the current link config */ 4653 according to the current link config */
4654 if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 4654 if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4655 target_clock = mode->clock; 4655 target_clock = mode->clock;
4656 intel_edp_link_config(has_edp_encoder, 4656 intel_edp_link_config(has_edp_encoder,
4657 &lane, &link_bw); 4657 &lane, &link_bw);
4658 } else { 4658 } else {
4659 /* [e]DP over FDI requires target mode clock 4659 /* [e]DP over FDI requires target mode clock
4660 instead of link clock */ 4660 instead of link clock */
4661 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) 4661 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
4662 target_clock = mode->clock; 4662 target_clock = mode->clock;
4663 else 4663 else
4664 target_clock = adjusted_mode->clock; 4664 target_clock = adjusted_mode->clock;
4665 4665
4666 /* FDI is a binary signal running at ~2.7GHz, encoding 4666 /* FDI is a binary signal running at ~2.7GHz, encoding
4667 * each output octet as 10 bits. The actual frequency 4667 * each output octet as 10 bits. The actual frequency
4668 * is stored as a divider into a 100MHz clock, and the 4668 * is stored as a divider into a 100MHz clock, and the
4669 * mode pixel clock is stored in units of 1KHz. 4669 * mode pixel clock is stored in units of 1KHz.
4670 * Hence the bw of each lane in terms of the mode signal 4670 * Hence the bw of each lane in terms of the mode signal
4671 * is: 4671 * is:
4672 */ 4672 */
4673 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; 4673 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4674 } 4674 }
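Working that through with a typical value: assuming intel_fdi_link_freq() reports 27 (i.e. 27 x 100 MHz = 2.7 GHz; the exact return value is an assumption here), each lane provides 270000 kHz of mode clock after the 10:8 encoding overhead:

#include <stdio.h>

#define MHz(x) ((x) * 1000000LL)
#define KHz(x) ((x) * 1000LL)

int main(void)
{
        long long fdi_pll = 27;  /* assumed: 27 x 100 MHz = 2.7 GHz */

        /* 10 bits per octet on the wire; result in 1 kHz units */
        long long link_bw = fdi_pll * MHz(100) / KHz(1) / 10;

        printf("%lld kHz per lane\n", link_bw); /* prints: 270000 */
        return 0;
}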
4675 4675
4676 /* determine panel color depth */ 4676 /* determine panel color depth */
4677 temp = I915_READ(PIPECONF(pipe)); 4677 temp = I915_READ(PIPECONF(pipe));
4678 temp &= ~PIPE_BPC_MASK; 4678 temp &= ~PIPE_BPC_MASK;
4679 if (is_lvds) { 4679 if (is_lvds) {
4680 /* the BPC will be 6 if it is an 18-bit LVDS panel */ 4680 /* the BPC will be 6 if it is an 18-bit LVDS panel */
4681 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) 4681 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
4682 temp |= PIPE_8BPC; 4682 temp |= PIPE_8BPC;
4683 else 4683 else
4684 temp |= PIPE_6BPC; 4684 temp |= PIPE_6BPC;
4685 } else if (has_edp_encoder) { 4685 } else if (has_edp_encoder) {
4686 switch (dev_priv->edp.bpp/3) { 4686 switch (dev_priv->edp.bpp/3) {
4687 case 8: 4687 case 8:
4688 temp |= PIPE_8BPC; 4688 temp |= PIPE_8BPC;
4689 break; 4689 break;
4690 case 10: 4690 case 10:
4691 temp |= PIPE_10BPC; 4691 temp |= PIPE_10BPC;
4692 break; 4692 break;
4693 case 6: 4693 case 6:
4694 temp |= PIPE_6BPC; 4694 temp |= PIPE_6BPC;
4695 break; 4695 break;
4696 case 12: 4696 case 12:
4697 temp |= PIPE_12BPC; 4697 temp |= PIPE_12BPC;
4698 break; 4698 break;
4699 } 4699 }
4700 } else 4700 } else
4701 temp |= PIPE_8BPC; 4701 temp |= PIPE_8BPC;
4702 I915_WRITE(PIPECONF(pipe), temp); 4702 I915_WRITE(PIPECONF(pipe), temp);
4703 4703
4704 switch (temp & PIPE_BPC_MASK) { 4704 switch (temp & PIPE_BPC_MASK) {
4705 case PIPE_8BPC: 4705 case PIPE_8BPC:
4706 bpp = 24; 4706 bpp = 24;
4707 break; 4707 break;
4708 case PIPE_10BPC: 4708 case PIPE_10BPC:
4709 bpp = 30; 4709 bpp = 30;
4710 break; 4710 break;
4711 case PIPE_6BPC: 4711 case PIPE_6BPC:
4712 bpp = 18; 4712 bpp = 18;
4713 break; 4713 break;
4714 case PIPE_12BPC: 4714 case PIPE_12BPC:
4715 bpp = 36; 4715 bpp = 36;
4716 break; 4716 break;
4717 default: 4717 default:
4718 DRM_ERROR("unknown pipe bpc value\n"); 4718 DRM_ERROR("unknown pipe bpc value\n");
4719 bpp = 24; 4719 bpp = 24;
4720 } 4720 }
4721 4721
4722 if (!lane) { 4722 if (!lane) {
4723 /* 4723 /*
4724 * Account for spread spectrum to avoid 4724 * Account for spread spectrum to avoid
4725 * oversubscribing the link. Max center spread 4725 * oversubscribing the link. Max center spread
4726 * is 2.5%; use 5% for safety's sake. 4726 * is 2.5%; use 5% for safety's sake.
4727 */ 4727 */
4728 u32 bps = target_clock * bpp * 21 / 20; 4728 u32 bps = target_clock * bpp * 21 / 20;
4729 lane = bps / (link_bw * 8) + 1; 4729 lane = bps / (link_bw * 8) + 1;
4730 } 4730 }
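With the hypothetical 1080p numbers from above (148500 kHz target clock, 24 bpp, 270000 kHz per lane), the 5% spread headroom works out to two FDI lanes:

#include <stdio.h>

int main(void)
{
        unsigned int target_clock = 148500;      /* kHz, hypothetical */
        unsigned int bpp = 24, link_bw = 270000; /* kHz per lane */

        unsigned int bps = target_clock * bpp * 21 / 20; /* +5% headroom */
        unsigned int lane = bps / (link_bw * 8) + 1;

        printf("%u lanes\n", lane);              /* prints: 2 lanes */
        return 0;
}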
4731 4731
4732 intel_crtc->fdi_lanes = lane; 4732 intel_crtc->fdi_lanes = lane;
4733 4733
4734 if (pixel_multiplier > 1) 4734 if (pixel_multiplier > 1)
4735 link_bw *= pixel_multiplier; 4735 link_bw *= pixel_multiplier;
4736 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); 4736 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
4737 } 4737 }
4738 4738
4739 /* Ironlake: try to set up the display ref clock before DPLL 4739 /* Ironlake: try to set up the display ref clock before DPLL
4740 * enabling. This is only under the driver's control after 4740 * enabling. This is only under the driver's control after
4741 * PCH B stepping; previous chipset steppings should 4741 * PCH B stepping; previous chipset steppings should
4742 * ignore this setting. 4742 * ignore this setting.
4743 */ 4743 */
4744 if (HAS_PCH_SPLIT(dev)) { 4744 if (HAS_PCH_SPLIT(dev)) {
4745 temp = I915_READ(PCH_DREF_CONTROL); 4745 temp = I915_READ(PCH_DREF_CONTROL);
4746 /* Always enable nonspread source */ 4746 /* Always enable nonspread source */
4747 temp &= ~DREF_NONSPREAD_SOURCE_MASK; 4747 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
4748 temp |= DREF_NONSPREAD_SOURCE_ENABLE; 4748 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
4749 temp &= ~DREF_SSC_SOURCE_MASK; 4749 temp &= ~DREF_SSC_SOURCE_MASK;
4750 temp |= DREF_SSC_SOURCE_ENABLE; 4750 temp |= DREF_SSC_SOURCE_ENABLE;
4751 I915_WRITE(PCH_DREF_CONTROL, temp); 4751 I915_WRITE(PCH_DREF_CONTROL, temp);
4752 4752
4753 POSTING_READ(PCH_DREF_CONTROL); 4753 POSTING_READ(PCH_DREF_CONTROL);
4754 udelay(200); 4754 udelay(200);
4755 4755
4756 if (has_edp_encoder) { 4756 if (has_edp_encoder) {
4757 if (intel_panel_use_ssc(dev_priv)) { 4757 if (intel_panel_use_ssc(dev_priv)) {
4758 temp |= DREF_SSC1_ENABLE; 4758 temp |= DREF_SSC1_ENABLE;
4759 I915_WRITE(PCH_DREF_CONTROL, temp); 4759 I915_WRITE(PCH_DREF_CONTROL, temp);
4760 4760
4761 POSTING_READ(PCH_DREF_CONTROL); 4761 POSTING_READ(PCH_DREF_CONTROL);
4762 udelay(200); 4762 udelay(200);
4763 } 4763 }
4764 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 4764 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4765 4765
4766 /* Enable CPU source on CPU attached eDP */ 4766 /* Enable CPU source on CPU attached eDP */
4767 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 4767 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4768 if (intel_panel_use_ssc(dev_priv)) 4768 if (intel_panel_use_ssc(dev_priv))
4769 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 4769 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4770 else 4770 else
4771 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 4771 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4772 } else { 4772 } else {
4773 /* Enable SSC on PCH eDP if needed */ 4773 /* Enable SSC on PCH eDP if needed */
4774 if (intel_panel_use_ssc(dev_priv)) { 4774 if (intel_panel_use_ssc(dev_priv)) {
4775 DRM_ERROR("enabling SSC on PCH\n"); 4775 DRM_ERROR("enabling SSC on PCH\n");
4776 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; 4776 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
4777 } 4777 }
4778 } 4778 }
4779 I915_WRITE(PCH_DREF_CONTROL, temp); 4779 I915_WRITE(PCH_DREF_CONTROL, temp);
4780 POSTING_READ(PCH_DREF_CONTROL); 4780 POSTING_READ(PCH_DREF_CONTROL);
4781 udelay(200); 4781 udelay(200);
4782 } 4782 }
4783 } 4783 }
4784 4784
4785 if (IS_PINEVIEW(dev)) { 4785 if (IS_PINEVIEW(dev)) {
4786 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; 4786 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
4787 if (has_reduced_clock) 4787 if (has_reduced_clock)
4788 fp2 = (1 << reduced_clock.n) << 16 | 4788 fp2 = (1 << reduced_clock.n) << 16 |
4789 reduced_clock.m1 << 8 | reduced_clock.m2; 4789 reduced_clock.m1 << 8 | reduced_clock.m2;
4790 } else { 4790 } else {
4791 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 4791 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
4792 if (has_reduced_clock) 4792 if (has_reduced_clock)
4793 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | 4793 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
4794 reduced_clock.m2; 4794 reduced_clock.m2;
4795 } 4795 }
4796 4796
4797 /* Enable autotuning of the PLL clock (if permissible) */ 4797 /* Enable autotuning of the PLL clock (if permissible) */
4798 if (HAS_PCH_SPLIT(dev)) { 4798 if (HAS_PCH_SPLIT(dev)) {
4799 int factor = 21; 4799 int factor = 21;
4800 4800
4801 if (is_lvds) { 4801 if (is_lvds) {
4802 if ((intel_panel_use_ssc(dev_priv) && 4802 if ((intel_panel_use_ssc(dev_priv) &&
4803 dev_priv->lvds_ssc_freq == 100) || 4803 dev_priv->lvds_ssc_freq == 100) ||
4804 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) 4804 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
4805 factor = 25; 4805 factor = 25;
4806 } else if (is_sdvo && is_tv) 4806 } else if (is_sdvo && is_tv)
4807 factor = 20; 4807 factor = 20;
4808 4808
4809 if (clock.m1 < factor * clock.n) 4809 if (clock.m1 < factor * clock.n)
4810 fp |= FP_CB_TUNE; 4810 fp |= FP_CB_TUNE;
4811 } 4811 }
4812 4812
4813 dpll = 0; 4813 dpll = 0;
4814 if (!HAS_PCH_SPLIT(dev)) 4814 if (!HAS_PCH_SPLIT(dev))
4815 dpll = DPLL_VGA_MODE_DIS; 4815 dpll = DPLL_VGA_MODE_DIS;
4816 4816
4817 if (!IS_GEN2(dev)) { 4817 if (!IS_GEN2(dev)) {
4818 if (is_lvds) 4818 if (is_lvds)
4819 dpll |= DPLLB_MODE_LVDS; 4819 dpll |= DPLLB_MODE_LVDS;
4820 else 4820 else
4821 dpll |= DPLLB_MODE_DAC_SERIAL; 4821 dpll |= DPLLB_MODE_DAC_SERIAL;
4822 if (is_sdvo) { 4822 if (is_sdvo) {
4823 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 4823 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4824 if (pixel_multiplier > 1) { 4824 if (pixel_multiplier > 1) {
4825 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 4825 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4826 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 4826 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
4827 else if (HAS_PCH_SPLIT(dev)) 4827 else if (HAS_PCH_SPLIT(dev))
4828 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 4828 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
4829 } 4829 }
4830 dpll |= DPLL_DVO_HIGH_SPEED; 4830 dpll |= DPLL_DVO_HIGH_SPEED;
4831 } 4831 }
4832 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) 4832 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
4833 dpll |= DPLL_DVO_HIGH_SPEED; 4833 dpll |= DPLL_DVO_HIGH_SPEED;
4834 4834
4835 /* compute bitmask from p1 value */ 4835 /* compute bitmask from p1 value */
4836 if (IS_PINEVIEW(dev)) 4836 if (IS_PINEVIEW(dev))
4837 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 4837 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
4838 else { 4838 else {
4839 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 4839 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4840 /* also FPA1 */ 4840 /* also FPA1 */
4841 if (HAS_PCH_SPLIT(dev)) 4841 if (HAS_PCH_SPLIT(dev))
4842 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 4842 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4843 if (IS_G4X(dev) && has_reduced_clock) 4843 if (IS_G4X(dev) && has_reduced_clock)
4844 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 4844 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4845 } 4845 }
4846 switch (clock.p2) { 4846 switch (clock.p2) {
4847 case 5: 4847 case 5:
4848 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 4848 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4849 break; 4849 break;
4850 case 7: 4850 case 7:
4851 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 4851 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4852 break; 4852 break;
4853 case 10: 4853 case 10:
4854 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 4854 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4855 break; 4855 break;
4856 case 14: 4856 case 14:
4857 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 4857 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4858 break; 4858 break;
4859 } 4859 }
4860 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 4860 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
4861 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 4861 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4862 } else { 4862 } else {
4863 if (is_lvds) { 4863 if (is_lvds) {
4864 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 4864 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4865 } else { 4865 } else {
4866 if (clock.p1 == 2) 4866 if (clock.p1 == 2)
4867 dpll |= PLL_P1_DIVIDE_BY_TWO; 4867 dpll |= PLL_P1_DIVIDE_BY_TWO;
4868 else 4868 else
4869 dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 4869 dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4870 if (clock.p2 == 4) 4870 if (clock.p2 == 4)
4871 dpll |= PLL_P2_DIVIDE_BY_4; 4871 dpll |= PLL_P2_DIVIDE_BY_4;
4872 } 4872 }
4873 } 4873 }
4874 4874
4875 if (is_sdvo && is_tv) 4875 if (is_sdvo && is_tv)
4876 dpll |= PLL_REF_INPUT_TVCLKINBC; 4876 dpll |= PLL_REF_INPUT_TVCLKINBC;
4877 else if (is_tv) 4877 else if (is_tv)
4878 /* XXX: just matching BIOS for now */ 4878 /* XXX: just matching BIOS for now */
4879 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ 4879 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
4880 dpll |= 3; 4880 dpll |= 3;
4881 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) 4881 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4882 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 4882 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4883 else 4883 else
4884 dpll |= PLL_REF_INPUT_DREFCLK; 4884 dpll |= PLL_REF_INPUT_DREFCLK;
4885 4885
4886 /* setup pipeconf */ 4886 /* setup pipeconf */
4887 pipeconf = I915_READ(PIPECONF(pipe)); 4887 pipeconf = I915_READ(PIPECONF(pipe));
4888 4888
4889 /* Set up the display plane register */ 4889 /* Set up the display plane register */
4890 dspcntr = DISPPLANE_GAMMA_ENABLE; 4890 dspcntr = DISPPLANE_GAMMA_ENABLE;
4891 4891
4892 /* Ironlake's plane is forced to its pipe; bit 24 is to 4892 /* Ironlake's plane is forced to its pipe; bit 24 is to
4893 enable color space conversion */ 4893 enable color space conversion */
4894 if (!HAS_PCH_SPLIT(dev)) { 4894 if (!HAS_PCH_SPLIT(dev)) {
4895 if (pipe == 0) 4895 if (pipe == 0)
4896 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 4896 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4897 else 4897 else
4898 dspcntr |= DISPPLANE_SEL_PIPE_B; 4898 dspcntr |= DISPPLANE_SEL_PIPE_B;
4899 } 4899 }
4900 4900
4901 if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { 4901 if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4902 /* Enable pixel doubling when the dot clock is > 90% of the (display) 4902 /* Enable pixel doubling when the dot clock is > 90% of the (display)
4903 * core speed. 4903 * core speed.
4904 * 4904 *
4905 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the 4905 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4906 * pipe == 0 check? 4906 * pipe == 0 check?
4907 */ 4907 */
4908 if (mode->clock > 4908 if (mode->clock >
4909 dev_priv->display.get_display_clock_speed(dev) * 9 / 10) 4909 dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
4910 pipeconf |= PIPECONF_DOUBLE_WIDE; 4910 pipeconf |= PIPECONF_DOUBLE_WIDE;
4911 else 4911 else
4912 pipeconf &= ~PIPECONF_DOUBLE_WIDE; 4912 pipeconf &= ~PIPECONF_DOUBLE_WIDE;
4913 } 4913 }
4914 4914
4915 if (!HAS_PCH_SPLIT(dev)) 4915 if (!HAS_PCH_SPLIT(dev))
4916 dpll |= DPLL_VCO_ENABLE; 4916 dpll |= DPLL_VCO_ENABLE;
4917 4917
4918 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 4918 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
4919 drm_mode_debug_printmodeline(mode); 4919 drm_mode_debug_printmodeline(mode);
4920 4920
4921 /* assign to Ironlake registers */ 4921 /* assign to Ironlake registers */
4922 if (HAS_PCH_SPLIT(dev)) { 4922 if (HAS_PCH_SPLIT(dev)) {
4923 fp_reg = PCH_FP0(pipe); 4923 fp_reg = PCH_FP0(pipe);
4924 dpll_reg = PCH_DPLL(pipe); 4924 dpll_reg = PCH_DPLL(pipe);
4925 } else { 4925 } else {
4926 fp_reg = FP0(pipe); 4926 fp_reg = FP0(pipe);
4927 dpll_reg = DPLL(pipe); 4927 dpll_reg = DPLL(pipe);
4928 } 4928 }
4929 4929
4930 /* PCH eDP needs FDI, but CPU eDP does not */ 4930 /* PCH eDP needs FDI, but CPU eDP does not */
4931 if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 4931 if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4932 I915_WRITE(fp_reg, fp); 4932 I915_WRITE(fp_reg, fp);
4933 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); 4933 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
4934 4934
4935 POSTING_READ(dpll_reg); 4935 POSTING_READ(dpll_reg);
4936 udelay(150); 4936 udelay(150);
4937 } 4937 }
4938 4938
4939 /* enable transcoder DPLL */ 4939 /* enable transcoder DPLL */
4940 if (HAS_PCH_CPT(dev)) { 4940 if (HAS_PCH_CPT(dev)) {
4941 temp = I915_READ(PCH_DPLL_SEL); 4941 temp = I915_READ(PCH_DPLL_SEL);
4942 switch (pipe) { 4942 switch (pipe) {
4943 case 0: 4943 case 0:
4944 temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL; 4944 temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
4945 break; 4945 break;
4946 case 1: 4946 case 1:
4947 temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL; 4947 temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
4948 break; 4948 break;
4949 case 2: 4949 case 2:
4950 /* FIXME: manage transcoder PLLs? */ 4950 /* FIXME: manage transcoder PLLs? */
4951 temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL; 4951 temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
4952 break; 4952 break;
4953 default: 4953 default:
4954 BUG(); 4954 BUG();
4955 } 4955 }
4956 I915_WRITE(PCH_DPLL_SEL, temp); 4956 I915_WRITE(PCH_DPLL_SEL, temp);
4957 4957
4958 POSTING_READ(PCH_DPLL_SEL); 4958 POSTING_READ(PCH_DPLL_SEL);
4959 udelay(150); 4959 udelay(150);
4960 } 4960 }
4961 4961
4962 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 4962 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4963 * This is an exception to the general rule that mode_set doesn't turn 4963 * This is an exception to the general rule that mode_set doesn't turn
4964 * things on. 4964 * things on.
4965 */ 4965 */
4966 if (is_lvds) { 4966 if (is_lvds) {
4967 reg = LVDS; 4967 reg = LVDS;
4968 if (HAS_PCH_SPLIT(dev)) 4968 if (HAS_PCH_SPLIT(dev))
4969 reg = PCH_LVDS; 4969 reg = PCH_LVDS;
4970 4970
4971 temp = I915_READ(reg); 4971 temp = I915_READ(reg);
4972 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 4972 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4973 if (pipe == 1) { 4973 if (pipe == 1) {
4974 if (HAS_PCH_CPT(dev)) 4974 if (HAS_PCH_CPT(dev))
4975 temp |= PORT_TRANS_B_SEL_CPT; 4975 temp |= PORT_TRANS_B_SEL_CPT;
4976 else 4976 else
4977 temp |= LVDS_PIPEB_SELECT; 4977 temp |= LVDS_PIPEB_SELECT;
4978 } else { 4978 } else {
4979 if (HAS_PCH_CPT(dev)) 4979 if (HAS_PCH_CPT(dev))
4980 temp &= ~PORT_TRANS_SEL_MASK; 4980 temp &= ~PORT_TRANS_SEL_MASK;
4981 else 4981 else
4982 temp &= ~LVDS_PIPEB_SELECT; 4982 temp &= ~LVDS_PIPEB_SELECT;
4983 } 4983 }
4984 /* set the corresponding LVDS_BORDER bit */ 4984 /* set the corresponding LVDS_BORDER bit */
4985 temp |= dev_priv->lvds_border_bits; 4985 temp |= dev_priv->lvds_border_bits;
4986 /* Set the B0-B3 data pairs corresponding to whether we're going to 4986 /* Set the B0-B3 data pairs corresponding to whether we're going to
4987 * set the DPLLs for dual-channel mode or not. 4987 * set the DPLLs for dual-channel mode or not.
4988 */ 4988 */
4989 if (clock.p2 == 7) 4989 if (clock.p2 == 7)
4990 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; 4990 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4991 else 4991 else
4992 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); 4992 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4993 4993
4994 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) 4994 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4995 * appropriately here, but we need to look more thoroughly into how 4995 * appropriately here, but we need to look more thoroughly into how
4996 * panels behave in the two modes. 4996 * panels behave in the two modes.
4997 */ 4997 */
4998 /* set the dithering flag on non-PCH LVDS as needed */ 4998 /* set the dithering flag on non-PCH LVDS as needed */
4999 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 4999 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
5000 if (dev_priv->lvds_dither) 5000 if (dev_priv->lvds_dither)
5001 temp |= LVDS_ENABLE_DITHER; 5001 temp |= LVDS_ENABLE_DITHER;
5002 else 5002 else
5003 temp &= ~LVDS_ENABLE_DITHER; 5003 temp &= ~LVDS_ENABLE_DITHER;
5004 } 5004 }
5005 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 5005 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5006 lvds_sync |= LVDS_HSYNC_POLARITY; 5006 lvds_sync |= LVDS_HSYNC_POLARITY;
5007 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 5007 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5008 lvds_sync |= LVDS_VSYNC_POLARITY; 5008 lvds_sync |= LVDS_VSYNC_POLARITY;
5009 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) 5009 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5010 != lvds_sync) { 5010 != lvds_sync) {
5011 char flags[2] = "-+"; 5011 char flags[2] = "-+";
5012 DRM_INFO("Changing LVDS panel from " 5012 DRM_INFO("Changing LVDS panel from "
5013 "(%chsync, %cvsync) to (%chsync, %cvsync)\n", 5013 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5014 flags[!(temp & LVDS_HSYNC_POLARITY)], 5014 flags[!(temp & LVDS_HSYNC_POLARITY)],
5015 flags[!(temp & LVDS_VSYNC_POLARITY)], 5015 flags[!(temp & LVDS_VSYNC_POLARITY)],
5016 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], 5016 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5017 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); 5017 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5018 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 5018 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5019 temp |= lvds_sync; 5019 temp |= lvds_sync;
5020 } 5020 }
5021 I915_WRITE(reg, temp); 5021 I915_WRITE(reg, temp);
5022 } 5022 }
5023 5023
5024 /* set the dithering flag for panels and clear it for everything else. */ 5024 /* set the dithering flag for panels and clear it for everything else. */
5025 if (HAS_PCH_SPLIT(dev)) { 5025 if (HAS_PCH_SPLIT(dev)) {
5026 pipeconf &= ~PIPECONF_DITHER_EN; 5026 pipeconf &= ~PIPECONF_DITHER_EN;
5027 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; 5027 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5028 if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) { 5028 if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
5029 pipeconf |= PIPECONF_DITHER_EN; 5029 pipeconf |= PIPECONF_DITHER_EN;
5030 pipeconf |= PIPECONF_DITHER_TYPE_ST1; 5030 pipeconf |= PIPECONF_DITHER_TYPE_ST1;
5031 } 5031 }
5032 } 5032 }
5033 5033
5034 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5034 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5035 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5035 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5036 } else if (HAS_PCH_SPLIT(dev)) { 5036 } else if (HAS_PCH_SPLIT(dev)) {
5037 /* For non-DP output, clear any trans DP clock recovery setting. */ 5037 /* For non-DP output, clear any trans DP clock recovery setting. */
5038 I915_WRITE(TRANSDATA_M1(pipe), 0); 5038 I915_WRITE(TRANSDATA_M1(pipe), 0);
5039 I915_WRITE(TRANSDATA_N1(pipe), 0); 5039 I915_WRITE(TRANSDATA_N1(pipe), 0);
5040 I915_WRITE(TRANSDPLINK_M1(pipe), 0); 5040 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
5041 I915_WRITE(TRANSDPLINK_N1(pipe), 0); 5041 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
5042 } 5042 }
5043 5043
5044 if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5044 if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5045 I915_WRITE(dpll_reg, dpll); 5045 I915_WRITE(dpll_reg, dpll);
5046 5046
5047 /* Wait for the clocks to stabilize. */ 5047 /* Wait for the clocks to stabilize. */
5048 POSTING_READ(dpll_reg); 5048 POSTING_READ(dpll_reg);
5049 udelay(150); 5049 udelay(150);
5050 5050
5051 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 5051 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
5052 temp = 0; 5052 temp = 0;
5053 if (is_sdvo) { 5053 if (is_sdvo) {
5054 temp = intel_mode_get_pixel_multiplier(adjusted_mode); 5054 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5055 if (temp > 1) 5055 if (temp > 1)
5056 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 5056 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5057 else 5057 else
5058 temp = 0; 5058 temp = 0;
5059 } 5059 }
5060 I915_WRITE(DPLL_MD(pipe), temp); 5060 I915_WRITE(DPLL_MD(pipe), temp);
5061 } else { 5061 } else {
5062 /* The pixel multiplier can only be updated once the 5062 /* The pixel multiplier can only be updated once the
5063 * DPLL is enabled and the clocks are stable. 5063 * DPLL is enabled and the clocks are stable.
5064 * 5064 *
5065 * So write it again. 5065 * So write it again.
5066 */ 5066 */
5067 I915_WRITE(dpll_reg, dpll); 5067 I915_WRITE(dpll_reg, dpll);
5068 } 5068 }
5069 } 5069 }
5070 5070
5071 intel_crtc->lowfreq_avail = false; 5071 intel_crtc->lowfreq_avail = false;
5072 if (is_lvds && has_reduced_clock && i915_powersave) { 5072 if (is_lvds && has_reduced_clock && i915_powersave) {
5073 I915_WRITE(fp_reg + 4, fp2); 5073 I915_WRITE(fp_reg + 4, fp2);
5074 intel_crtc->lowfreq_avail = true; 5074 intel_crtc->lowfreq_avail = true;
5075 if (HAS_PIPE_CXSR(dev)) { 5075 if (HAS_PIPE_CXSR(dev)) {
5076 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 5076 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5077 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 5077 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5078 } 5078 }
5079 } else { 5079 } else {
5080 I915_WRITE(fp_reg + 4, fp); 5080 I915_WRITE(fp_reg + 4, fp);
5081 if (HAS_PIPE_CXSR(dev)) { 5081 if (HAS_PIPE_CXSR(dev)) {
5082 DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 5082 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5083 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; 5083 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5084 } 5084 }
5085 } 5085 }
5086 5086
5087 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 5087 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5088 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 5088 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5089 /* the chip adds 2 halflines automatically */ 5089 /* the chip adds 2 halflines automatically */
5090 adjusted_mode->crtc_vdisplay -= 1; 5090 adjusted_mode->crtc_vdisplay -= 1;
5091 adjusted_mode->crtc_vtotal -= 1; 5091 adjusted_mode->crtc_vtotal -= 1;
5092 adjusted_mode->crtc_vblank_start -= 1; 5092 adjusted_mode->crtc_vblank_start -= 1;
5093 adjusted_mode->crtc_vblank_end -= 1; 5093 adjusted_mode->crtc_vblank_end -= 1;
5094 adjusted_mode->crtc_vsync_end -= 1; 5094 adjusted_mode->crtc_vsync_end -= 1;
5095 adjusted_mode->crtc_vsync_start -= 1; 5095 adjusted_mode->crtc_vsync_start -= 1;
5096 } else 5096 } else
5097 pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ 5097 pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
5098 5098
5099 I915_WRITE(HTOTAL(pipe), 5099 I915_WRITE(HTOTAL(pipe),
5100 (adjusted_mode->crtc_hdisplay - 1) | 5100 (adjusted_mode->crtc_hdisplay - 1) |
5101 ((adjusted_mode->crtc_htotal - 1) << 16)); 5101 ((adjusted_mode->crtc_htotal - 1) << 16));
5102 I915_WRITE(HBLANK(pipe), 5102 I915_WRITE(HBLANK(pipe),
5103 (adjusted_mode->crtc_hblank_start - 1) | 5103 (adjusted_mode->crtc_hblank_start - 1) |
5104 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 5104 ((adjusted_mode->crtc_hblank_end - 1) << 16));
5105 I915_WRITE(HSYNC(pipe), 5105 I915_WRITE(HSYNC(pipe),
5106 (adjusted_mode->crtc_hsync_start - 1) | 5106 (adjusted_mode->crtc_hsync_start - 1) |
5107 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 5107 ((adjusted_mode->crtc_hsync_end - 1) << 16));
5108 5108
5109 I915_WRITE(VTOTAL(pipe), 5109 I915_WRITE(VTOTAL(pipe),
5110 (adjusted_mode->crtc_vdisplay - 1) | 5110 (adjusted_mode->crtc_vdisplay - 1) |
5111 ((adjusted_mode->crtc_vtotal - 1) << 16)); 5111 ((adjusted_mode->crtc_vtotal - 1) << 16));
5112 I915_WRITE(VBLANK(pipe), 5112 I915_WRITE(VBLANK(pipe),
5113 (adjusted_mode->crtc_vblank_start - 1) | 5113 (adjusted_mode->crtc_vblank_start - 1) |
5114 ((adjusted_mode->crtc_vblank_end - 1) << 16)); 5114 ((adjusted_mode->crtc_vblank_end - 1) << 16));
5115 I915_WRITE(VSYNC(pipe), 5115 I915_WRITE(VSYNC(pipe),
5116 (adjusted_mode->crtc_vsync_start - 1) | 5116 (adjusted_mode->crtc_vsync_start - 1) |
5117 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 5117 ((adjusted_mode->crtc_vsync_end - 1) << 16));
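Each timing register packs two zero-based values into one word: start/active in the low 16 bits and end/total in the high 16 bits. For a hypothetical 1920-wide mode with an htotal of 2200:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int hdisplay = 1920, htotal = 2200;      /* hypothetical mode */

        /* low 16 bits: active - 1; high 16 bits: total - 1 */
        uint32_t htotal_reg = (uint32_t)(hdisplay - 1) |
                              ((uint32_t)(htotal - 1) << 16);

        printf("HTOTAL = 0x%08x\n", htotal_reg); /* prints: 0x0897077f */
        return 0;
}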
5118 5118
5119 /* pipesrc and dspsize control the size that is scaled from, 5119 /* pipesrc and dspsize control the size that is scaled from,
5120 * which should always be the user's requested size. 5120 * which should always be the user's requested size.
5121 */ 5121 */
5122 if (!HAS_PCH_SPLIT(dev)) { 5122 if (!HAS_PCH_SPLIT(dev)) {
5123 I915_WRITE(DSPSIZE(plane), 5123 I915_WRITE(DSPSIZE(plane),
5124 ((mode->vdisplay - 1) << 16) | 5124 ((mode->vdisplay - 1) << 16) |
5125 (mode->hdisplay - 1)); 5125 (mode->hdisplay - 1));
5126 I915_WRITE(DSPPOS(plane), 0); 5126 I915_WRITE(DSPPOS(plane), 0);
5127 } 5127 }
5128 I915_WRITE(PIPESRC(pipe), 5128 I915_WRITE(PIPESRC(pipe),
5129 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 5129 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5130 5130
5131 if (HAS_PCH_SPLIT(dev)) { 5131 if (HAS_PCH_SPLIT(dev)) {
5132 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); 5132 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
5133 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); 5133 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
5134 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); 5134 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
5135 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); 5135 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
5136 5136
5137 if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5137 if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5138 ironlake_set_pll_edp(crtc, adjusted_mode->clock); 5138 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
5139 } 5139 }
5140 } 5140 }
5141 5141
5142 I915_WRITE(PIPECONF(pipe), pipeconf); 5142 I915_WRITE(PIPECONF(pipe), pipeconf);
5143 POSTING_READ(PIPECONF(pipe)); 5143 POSTING_READ(PIPECONF(pipe));
5144 if (!HAS_PCH_SPLIT(dev)) 5144 if (!HAS_PCH_SPLIT(dev))
5145 intel_enable_pipe(dev_priv, pipe, false); 5145 intel_enable_pipe(dev_priv, pipe, false);
5146 5146
5147 intel_wait_for_vblank(dev, pipe); 5147 intel_wait_for_vblank(dev, pipe);
5148 5148
5149 if (IS_GEN5(dev)) { 5149 if (IS_GEN5(dev)) {
5150 /* enable address swizzling for tiled buffers */ 5150 /* enable address swizzling for tiled buffers */
5151 temp = I915_READ(DISP_ARB_CTL); 5151 temp = I915_READ(DISP_ARB_CTL);
5152 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); 5152 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
5153 } 5153 }
5154 5154
5155 I915_WRITE(DSPCNTR(plane), dspcntr); 5155 I915_WRITE(DSPCNTR(plane), dspcntr);
5156 POSTING_READ(DSPCNTR(plane)); 5156 POSTING_READ(DSPCNTR(plane));
5157 if (!HAS_PCH_SPLIT(dev)) 5157 if (!HAS_PCH_SPLIT(dev))
5158 intel_enable_plane(dev_priv, plane, pipe); 5158 intel_enable_plane(dev_priv, plane, pipe);
5159 5159
5160 ret = intel_pipe_set_base(crtc, x, y, old_fb); 5160 ret = intel_pipe_set_base(crtc, x, y, old_fb);
5161 5161
5162 intel_update_watermarks(dev); 5162 intel_update_watermarks(dev);
5163 5163
5164 drm_vblank_post_modeset(dev, pipe); 5164 drm_vblank_post_modeset(dev, pipe);
5165 5165
5166 return ret; 5166 return ret;
5167 } 5167 }
5168 5168
5169 /** Loads the palette/gamma unit for the CRTC with the prepared values */ 5169 /** Loads the palette/gamma unit for the CRTC with the prepared values */
5170 void intel_crtc_load_lut(struct drm_crtc *crtc) 5170 void intel_crtc_load_lut(struct drm_crtc *crtc)
5171 { 5171 {
5172 struct drm_device *dev = crtc->dev; 5172 struct drm_device *dev = crtc->dev;
5173 struct drm_i915_private *dev_priv = dev->dev_private; 5173 struct drm_i915_private *dev_priv = dev->dev_private;
5174 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5174 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5175 int palreg = PALETTE(intel_crtc->pipe); 5175 int palreg = PALETTE(intel_crtc->pipe);
5176 int i; 5176 int i;
5177 5177
5178 /* The clocks have to be on to load the palette. */ 5178 /* The clocks have to be on to load the palette. */
5179 if (!crtc->enabled) 5179 if (!crtc->enabled)
5180 return; 5180 return;
5181 5181
5182 /* use legacy palette for Ironlake */ 5182 /* use legacy palette for Ironlake */
5183 if (HAS_PCH_SPLIT(dev)) 5183 if (HAS_PCH_SPLIT(dev))
5184 palreg = LGC_PALETTE(intel_crtc->pipe); 5184 palreg = LGC_PALETTE(intel_crtc->pipe);
5185 5185
5186 for (i = 0; i < 256; i++) { 5186 for (i = 0; i < 256; i++) {
5187 I915_WRITE(palreg + 4 * i, 5187 I915_WRITE(palreg + 4 * i,
5188 (intel_crtc->lut_r[i] << 16) | 5188 (intel_crtc->lut_r[i] << 16) |
5189 (intel_crtc->lut_g[i] << 8) | 5189 (intel_crtc->lut_g[i] << 8) |
5190 intel_crtc->lut_b[i]); 5190 intel_crtc->lut_b[i]);
5191 } 5191 }
5192 } 5192 }
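
The palette loop above packs one 8-bit component per byte of a 32-bit register: red in bits 23:16, green in bits 15:8, blue in bits 7:0. A minimal standalone sketch of that packing (the helper name is ours, not the driver's):

    #include <stdint.h>

    /* Pack 8-bit colour components the way intel_crtc_load_lut() does:
     * red in bits 23:16, green in bits 15:8, blue in bits 7:0. */
    static uint32_t pack_palette_entry(uint8_t r, uint8_t g, uint8_t b)
    {
            return ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
    }
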
5193 5193
5194 static void i845_update_cursor(struct drm_crtc *crtc, u32 base) 5194 static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
5195 { 5195 {
5196 struct drm_device *dev = crtc->dev; 5196 struct drm_device *dev = crtc->dev;
5197 struct drm_i915_private *dev_priv = dev->dev_private; 5197 struct drm_i915_private *dev_priv = dev->dev_private;
5198 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5198 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5199 bool visible = base != 0; 5199 bool visible = base != 0;
5200 u32 cntl; 5200 u32 cntl;
5201 5201
5202 if (intel_crtc->cursor_visible == visible) 5202 if (intel_crtc->cursor_visible == visible)
5203 return; 5203 return;
5204 5204
5205 cntl = I915_READ(_CURACNTR); 5205 cntl = I915_READ(_CURACNTR);
5206 if (visible) { 5206 if (visible) {
5207 /* On these chipsets we can only modify the base whilst 5207 /* On these chipsets we can only modify the base whilst
5208 * the cursor is disabled. 5208 * the cursor is disabled.
5209 */ 5209 */
5210 I915_WRITE(_CURABASE, base); 5210 I915_WRITE(_CURABASE, base);
5211 5211
5212 cntl &= ~(CURSOR_FORMAT_MASK); 5212 cntl &= ~(CURSOR_FORMAT_MASK);
5213 /* XXX width must be 64, stride 256 => 0x00 << 28 */ 5213 /* XXX width must be 64, stride 256 => 0x00 << 28 */
5214 cntl |= CURSOR_ENABLE | 5214 cntl |= CURSOR_ENABLE |
5215 CURSOR_GAMMA_ENABLE | 5215 CURSOR_GAMMA_ENABLE |
5216 CURSOR_FORMAT_ARGB; 5216 CURSOR_FORMAT_ARGB;
5217 } else 5217 } else
5218 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); 5218 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
5219 I915_WRITE(_CURACNTR, cntl); 5219 I915_WRITE(_CURACNTR, cntl);
5220 5220
5221 intel_crtc->cursor_visible = visible; 5221 intel_crtc->cursor_visible = visible;
5222 } 5222 }
5223 5223
5224 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) 5224 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
5225 { 5225 {
5226 struct drm_device *dev = crtc->dev; 5226 struct drm_device *dev = crtc->dev;
5227 struct drm_i915_private *dev_priv = dev->dev_private; 5227 struct drm_i915_private *dev_priv = dev->dev_private;
5228 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5228 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5229 int pipe = intel_crtc->pipe; 5229 int pipe = intel_crtc->pipe;
5230 bool visible = base != 0; 5230 bool visible = base != 0;
5231 5231
5232 if (intel_crtc->cursor_visible != visible) { 5232 if (intel_crtc->cursor_visible != visible) {
5233 uint32_t cntl = I915_READ(CURCNTR(pipe)); 5233 uint32_t cntl = I915_READ(CURCNTR(pipe));
5234 if (base) { 5234 if (base) {
5235 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); 5235 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
5236 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; 5236 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
5237 cntl |= pipe << 28; /* Connect to correct pipe */ 5237 cntl |= pipe << 28; /* Connect to correct pipe */
5238 } else { 5238 } else {
5239 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 5239 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
5240 cntl |= CURSOR_MODE_DISABLE; 5240 cntl |= CURSOR_MODE_DISABLE;
5241 } 5241 }
5242 I915_WRITE(CURCNTR(pipe), cntl); 5242 I915_WRITE(CURCNTR(pipe), cntl);
5243 5243
5244 intel_crtc->cursor_visible = visible; 5244 intel_crtc->cursor_visible = visible;
5245 } 5245 }
5246 /* and commit changes on next vblank */ 5246 /* and commit changes on next vblank */
5247 I915_WRITE(CURBASE(pipe), base); 5247 I915_WRITE(CURBASE(pipe), base);
5248 } 5248 }
5249 5249
5250 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ 5250 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
5251 static void intel_crtc_update_cursor(struct drm_crtc *crtc, 5251 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
5252 bool on) 5252 bool on)
5253 { 5253 {
5254 struct drm_device *dev = crtc->dev; 5254 struct drm_device *dev = crtc->dev;
5255 struct drm_i915_private *dev_priv = dev->dev_private; 5255 struct drm_i915_private *dev_priv = dev->dev_private;
5256 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5256 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5257 int pipe = intel_crtc->pipe; 5257 int pipe = intel_crtc->pipe;
5258 int x = intel_crtc->cursor_x; 5258 int x = intel_crtc->cursor_x;
5259 int y = intel_crtc->cursor_y; 5259 int y = intel_crtc->cursor_y;
5260 u32 base, pos; 5260 u32 base, pos;
5261 bool visible; 5261 bool visible;
5262 5262
5263 pos = 0; 5263 pos = 0;
5264 5264
5265 if (on && crtc->enabled && crtc->fb) { 5265 if (on && crtc->enabled && crtc->fb) {
5266 base = intel_crtc->cursor_addr; 5266 base = intel_crtc->cursor_addr;
5267 if (x > (int) crtc->fb->width) 5267 if (x > (int) crtc->fb->width)
5268 base = 0; 5268 base = 0;
5269 5269
5270 if (y > (int) crtc->fb->height) 5270 if (y > (int) crtc->fb->height)
5271 base = 0; 5271 base = 0;
5272 } else 5272 } else
5273 base = 0; 5273 base = 0;
5274 5274
5275 if (x < 0) { 5275 if (x < 0) {
5276 if (x + intel_crtc->cursor_width < 0) 5276 if (x + intel_crtc->cursor_width < 0)
5277 base = 0; 5277 base = 0;
5278 5278
5279 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 5279 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
5280 x = -x; 5280 x = -x;
5281 } 5281 }
5282 pos |= x << CURSOR_X_SHIFT; 5282 pos |= x << CURSOR_X_SHIFT;
5283 5283
5284 if (y < 0) { 5284 if (y < 0) {
5285 if (y + intel_crtc->cursor_height < 0) 5285 if (y + intel_crtc->cursor_height < 0)
5286 base = 0; 5286 base = 0;
5287 5287
5288 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 5288 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
5289 y = -y; 5289 y = -y;
5290 } 5290 }
5291 pos |= y << CURSOR_Y_SHIFT; 5291 pos |= y << CURSOR_Y_SHIFT;
5292 5292
5293 visible = base != 0; 5293 visible = base != 0;
5294 if (!visible && !intel_crtc->cursor_visible) 5294 if (!visible && !intel_crtc->cursor_visible)
5295 return; 5295 return;
5296 5296
5297 I915_WRITE(CURPOS(pipe), pos); 5297 I915_WRITE(CURPOS(pipe), pos);
5298 if (IS_845G(dev) || IS_I865G(dev)) 5298 if (IS_845G(dev) || IS_I865G(dev))
5299 i845_update_cursor(crtc, base); 5299 i845_update_cursor(crtc, base);
5300 else 5300 else
5301 i9xx_update_cursor(crtc, base); 5301 i9xx_update_cursor(crtc, base);
5302 5302
5303 if (visible) 5303 if (visible)
5304 intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); 5304 intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
5305 } 5305 }
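
CURPOS uses sign/magnitude rather than two's complement: the function above stores |x| and |y| in the fields and carries the sign in a separate per-axis flag. A self-contained sketch, assuming the usual values from the driver headers (X field at bit 0, Y field at bit 16, sign flag 0x8000):

    #include <stdint.h>

    #define POS_SIGN 0x8000u   /* per-axis sign flag (assumed CURSOR_POS_SIGN) */
    #define X_SHIFT  0u
    #define Y_SHIFT  16u

    /* Encode a cursor position the way intel_crtc_update_cursor() does:
     * magnitude in the field, sign carried by a separate flag bit. */
    static uint32_t encode_cursor_pos(int x, int y)
    {
            uint32_t pos = 0;

            if (x < 0) {
                    pos |= POS_SIGN << X_SHIFT;
                    x = -x;
            }
            pos |= (uint32_t)x << X_SHIFT;

            if (y < 0) {
                    pos |= POS_SIGN << Y_SHIFT;
                    y = -y;
            }
            pos |= (uint32_t)y << Y_SHIFT;

            return pos;
    }
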
5306 5306
5307 static int intel_crtc_cursor_set(struct drm_crtc *crtc, 5307 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
5308 struct drm_file *file, 5308 struct drm_file *file,
5309 uint32_t handle, 5309 uint32_t handle,
5310 uint32_t width, uint32_t height) 5310 uint32_t width, uint32_t height)
5311 { 5311 {
5312 struct drm_device *dev = crtc->dev; 5312 struct drm_device *dev = crtc->dev;
5313 struct drm_i915_private *dev_priv = dev->dev_private; 5313 struct drm_i915_private *dev_priv = dev->dev_private;
5314 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5314 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5315 struct drm_i915_gem_object *obj; 5315 struct drm_i915_gem_object *obj;
5316 uint32_t addr; 5316 uint32_t addr;
5317 int ret; 5317 int ret;
5318 5318
5319 DRM_DEBUG_KMS("\n"); 5319 DRM_DEBUG_KMS("\n");
5320 5320
5321 /* if we want to turn off the cursor ignore width and height */ 5321 /* if we want to turn off the cursor ignore width and height */
5322 if (!handle) { 5322 if (!handle) {
5323 DRM_DEBUG_KMS("cursor off\n"); 5323 DRM_DEBUG_KMS("cursor off\n");
5324 addr = 0; 5324 addr = 0;
5325 obj = NULL; 5325 obj = NULL;
5326 mutex_lock(&dev->struct_mutex); 5326 mutex_lock(&dev->struct_mutex);
5327 goto finish; 5327 goto finish;
5328 } 5328 }
5329 5329
5330 /* Currently we only support 64x64 cursors */ 5330 /* Currently we only support 64x64 cursors */
5331 if (width != 64 || height != 64) { 5331 if (width != 64 || height != 64) {
5332 DRM_ERROR("we currently only support 64x64 cursors\n"); 5332 DRM_ERROR("we currently only support 64x64 cursors\n");
5333 return -EINVAL; 5333 return -EINVAL;
5334 } 5334 }
5335 5335
5336 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); 5336 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
5337 if (&obj->base == NULL) 5337 if (&obj->base == NULL)
5338 return -ENOENT; 5338 return -ENOENT;
5339 5339
5340 if (obj->base.size < width * height * 4) { 5340 if (obj->base.size < width * height * 4) {
5341 DRM_ERROR("buffer is to small\n"); 5341 DRM_ERROR("buffer is to small\n");
5342 ret = -ENOMEM; 5342 ret = -ENOMEM;
5343 goto fail; 5343 goto fail;
5344 } 5344 }
5345 5345
5346 /* we only need to pin inside the GTT if the cursor is non-physical */ 5346 /* we only need to pin inside the GTT if the cursor is non-physical */
5347 mutex_lock(&dev->struct_mutex); 5347 mutex_lock(&dev->struct_mutex);
5348 if (!dev_priv->info->cursor_needs_physical) { 5348 if (!dev_priv->info->cursor_needs_physical) {
5349 if (obj->tiling_mode) { 5349 if (obj->tiling_mode) {
5350 DRM_ERROR("cursor cannot be tiled\n"); 5350 DRM_ERROR("cursor cannot be tiled\n");
5351 ret = -EINVAL; 5351 ret = -EINVAL;
5352 goto fail_locked; 5352 goto fail_locked;
5353 } 5353 }
5354 5354
5355 ret = i915_gem_object_pin(obj, PAGE_SIZE, true); 5355 ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
5356 if (ret) { 5356 if (ret) {
5357 DRM_ERROR("failed to pin cursor bo\n"); 5357 DRM_ERROR("failed to pin cursor bo\n");
5358 goto fail_locked; 5358 goto fail_locked;
5359 } 5359 }
5360 5360
5361 ret = i915_gem_object_set_to_gtt_domain(obj, 0); 5361 ret = i915_gem_object_set_to_gtt_domain(obj, 0);
5362 if (ret) { 5362 if (ret) {
5363 DRM_ERROR("failed to move cursor bo into the GTT\n"); 5363 DRM_ERROR("failed to move cursor bo into the GTT\n");
5364 goto fail_unpin; 5364 goto fail_unpin;
5365 } 5365 }
5366 5366
5367 ret = i915_gem_object_put_fence(obj); 5367 ret = i915_gem_object_put_fence(obj);
5368 if (ret) { 5368 if (ret) {
5369 DRM_ERROR("failed to move cursor bo into the GTT\n"); 5369 DRM_ERROR("failed to move cursor bo into the GTT\n");
5370 goto fail_unpin; 5370 goto fail_unpin;
5371 } 5371 }
5372 5372
5373 addr = obj->gtt_offset; 5373 addr = obj->gtt_offset;
5374 } else { 5374 } else {
5375 int align = IS_I830(dev) ? 16 * 1024 : 256; 5375 int align = IS_I830(dev) ? 16 * 1024 : 256;
5376 ret = i915_gem_attach_phys_object(dev, obj, 5376 ret = i915_gem_attach_phys_object(dev, obj,
5377 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, 5377 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
5378 align); 5378 align);
5379 if (ret) { 5379 if (ret) {
5380 DRM_ERROR("failed to attach phys object\n"); 5380 DRM_ERROR("failed to attach phys object\n");
5381 goto fail_locked; 5381 goto fail_locked;
5382 } 5382 }
5383 addr = obj->phys_obj->handle->busaddr; 5383 addr = obj->phys_obj->handle->busaddr;
5384 } 5384 }
5385 5385
5386 if (IS_GEN2(dev)) 5386 if (IS_GEN2(dev))
5387 I915_WRITE(CURSIZE, (height << 12) | width); 5387 I915_WRITE(CURSIZE, (height << 12) | width);
5388 5388
5389 finish: 5389 finish:
5390 if (intel_crtc->cursor_bo) { 5390 if (intel_crtc->cursor_bo) {
5391 if (dev_priv->info->cursor_needs_physical) { 5391 if (dev_priv->info->cursor_needs_physical) {
5392 if (intel_crtc->cursor_bo != obj) 5392 if (intel_crtc->cursor_bo != obj)
5393 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 5393 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
5394 } else 5394 } else
5395 i915_gem_object_unpin(intel_crtc->cursor_bo); 5395 i915_gem_object_unpin(intel_crtc->cursor_bo);
5396 drm_gem_object_unreference(&intel_crtc->cursor_bo->base); 5396 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
5397 } 5397 }
5398 5398
5399 mutex_unlock(&dev->struct_mutex); 5399 mutex_unlock(&dev->struct_mutex);
5400 5400
5401 intel_crtc->cursor_addr = addr; 5401 intel_crtc->cursor_addr = addr;
5402 intel_crtc->cursor_bo = obj; 5402 intel_crtc->cursor_bo = obj;
5403 intel_crtc->cursor_width = width; 5403 intel_crtc->cursor_width = width;
5404 intel_crtc->cursor_height = height; 5404 intel_crtc->cursor_height = height;
5405 5405
5406 intel_crtc_update_cursor(crtc, true); 5406 intel_crtc_update_cursor(crtc, true);
5407 5407
5408 return 0; 5408 return 0;
5409 fail_unpin: 5409 fail_unpin:
5410 i915_gem_object_unpin(obj); 5410 i915_gem_object_unpin(obj);
5411 fail_locked: 5411 fail_locked:
5412 mutex_unlock(&dev->struct_mutex); 5412 mutex_unlock(&dev->struct_mutex);
5413 fail: 5413 fail:
5414 drm_gem_object_unreference_unlocked(&obj->base); 5414 drm_gem_object_unreference_unlocked(&obj->base);
5415 return ret; 5415 return ret;
5416 } 5416 }
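
Two bits of arithmetic above are worth isolating: an ARGB cursor needs width * height * 4 bytes of backing storage (hence the size check), and on GEN2 the CURSIZE register packs height above width. A sketch, with helper names ours:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* An ARGB cursor is 4 bytes per pixel, so 64x64 needs 16384 bytes. */
    static bool cursor_bo_big_enough(size_t bo_size, uint32_t w, uint32_t h)
    {
            return bo_size >= (size_t)w * h * 4;
    }

    /* GEN2 CURSIZE layout as written above: height shifted up by 12
     * (assumed field width), width in the low bits. */
    static uint32_t encode_cursize(uint32_t w, uint32_t h)
    {
            return (h << 12) | w;
    }
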
5417 5417
5418 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 5418 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
5419 { 5419 {
5420 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5420 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5421 5421
5422 intel_crtc->cursor_x = x; 5422 intel_crtc->cursor_x = x;
5423 intel_crtc->cursor_y = y; 5423 intel_crtc->cursor_y = y;
5424 5424
5425 intel_crtc_update_cursor(crtc, true); 5425 intel_crtc_update_cursor(crtc, true);
5426 5426
5427 return 0; 5427 return 0;
5428 } 5428 }
5429 5429
5430 /** Sets the color ramps on behalf of RandR */ 5430 /** Sets the color ramps on behalf of RandR */
5431 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 5431 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
5432 u16 blue, int regno) 5432 u16 blue, int regno)
5433 { 5433 {
5434 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5434 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5435 5435
5436 intel_crtc->lut_r[regno] = red >> 8; 5436 intel_crtc->lut_r[regno] = red >> 8;
5437 intel_crtc->lut_g[regno] = green >> 8; 5437 intel_crtc->lut_g[regno] = green >> 8;
5438 intel_crtc->lut_b[regno] = blue >> 8; 5438 intel_crtc->lut_b[regno] = blue >> 8;
5439 } 5439 }
5440 5440
5441 void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 5441 void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
5442 u16 *blue, int regno) 5442 u16 *blue, int regno)
5443 { 5443 {
5444 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5444 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5445 5445
5446 *red = intel_crtc->lut_r[regno] << 8; 5446 *red = intel_crtc->lut_r[regno] << 8;
5447 *green = intel_crtc->lut_g[regno] << 8; 5447 *green = intel_crtc->lut_g[regno] << 8;
5448 *blue = intel_crtc->lut_b[regno] << 8; 5448 *blue = intel_crtc->lut_b[regno] << 8;
5449 } 5449 }
5450 5450
5451 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 5451 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
5452 u16 *blue, uint32_t start, uint32_t size) 5452 u16 *blue, uint32_t start, uint32_t size)
5453 { 5453 {
5454 int end = (start + size > 256) ? 256 : start + size, i; 5454 int end = (start + size > 256) ? 256 : start + size, i;
5455 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5455 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5456 5456
5457 for (i = start; i < end; i++) { 5457 for (i = start; i < end; i++) {
5458 intel_crtc->lut_r[i] = red[i] >> 8; 5458 intel_crtc->lut_r[i] = red[i] >> 8;
5459 intel_crtc->lut_g[i] = green[i] >> 8; 5459 intel_crtc->lut_g[i] = green[i] >> 8;
5460 intel_crtc->lut_b[i] = blue[i] >> 8; 5460 intel_crtc->lut_b[i] = blue[i] >> 8;
5461 } 5461 }
5462 5462
5463 intel_crtc_load_lut(crtc); 5463 intel_crtc_load_lut(crtc);
5464 } 5464 }
5465 5465
5466 /** 5466 /**
5467 * Get a pipe with a simple mode set on it for doing load-based monitor 5467 * Get a pipe with a simple mode set on it for doing load-based monitor
5468 * detection. 5468 * detection.
5469 * 5469 *
5470 * It will be up to the load-detect code to adjust the pipe as appropriate for 5470 * It will be up to the load-detect code to adjust the pipe as appropriate for
5471 * its requirements. The pipe will be connected to no other encoders. 5471 * its requirements. The pipe will be connected to no other encoders.
5472 * 5472 *
5473 * Currently this code will only succeed if there is a pipe with no encoders 5473 * Currently this code will only succeed if there is a pipe with no encoders
5474 * configured for it. In the future, it could choose to temporarily disable 5474 * configured for it. In the future, it could choose to temporarily disable
5475 * some outputs to free up a pipe for its use. 5475 * some outputs to free up a pipe for its use.
5476 * 5476 *
5477 * \return crtc, or NULL if no pipes are available. 5477 * \return crtc, or NULL if no pipes are available.
5478 */ 5478 */
5479 5479
5480 /* VESA 640x480x72Hz mode to set on the pipe */ 5480 /* VESA 640x480x72Hz mode to set on the pipe */
5481 static struct drm_display_mode load_detect_mode = { 5481 static struct drm_display_mode load_detect_mode = {
5482 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 5482 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
5483 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 5483 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
5484 }; 5484 };
5485 5485
5486 struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 5486 struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
5487 struct drm_connector *connector, 5487 struct drm_connector *connector,
5488 struct drm_display_mode *mode, 5488 struct drm_display_mode *mode,
5489 int *dpms_mode) 5489 int *dpms_mode)
5490 { 5490 {
5491 struct intel_crtc *intel_crtc; 5491 struct intel_crtc *intel_crtc;
5492 struct drm_crtc *possible_crtc; 5492 struct drm_crtc *possible_crtc;
5493 struct drm_crtc *supported_crtc = NULL; 5493 struct drm_crtc *supported_crtc = NULL;
5494 struct drm_encoder *encoder = &intel_encoder->base; 5494 struct drm_encoder *encoder = &intel_encoder->base;
5495 struct drm_crtc *crtc = NULL; 5495 struct drm_crtc *crtc = NULL;
5496 struct drm_device *dev = encoder->dev; 5496 struct drm_device *dev = encoder->dev;
5497 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 5497 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
5498 struct drm_crtc_helper_funcs *crtc_funcs; 5498 struct drm_crtc_helper_funcs *crtc_funcs;
5499 int i = -1; 5499 int i = -1;
5500 5500
5501 /* 5501 /*
5502 * Algorithm gets a little messy: 5502 * Algorithm gets a little messy:
5503 * - if the connector already has an assigned crtc, use it (but make 5503 * - if the connector already has an assigned crtc, use it (but make
5504 * sure it's on first) 5504 * sure it's on first)
5505 * - try to find the first unused crtc that can drive this connector, 5505 * - try to find the first unused crtc that can drive this connector,
5506 * and use that if we find one 5506 * and use that if we find one
5507 * - if there are no unused crtcs available, try to use the first 5507 * - if there are no unused crtcs available, try to use the first
5508 * one we found that supports the connector 5508 * one we found that supports the connector
5509 */ 5509 */
5510 5510
5511 /* See if we already have a CRTC for this connector */ 5511 /* See if we already have a CRTC for this connector */
5512 if (encoder->crtc) { 5512 if (encoder->crtc) {
5513 crtc = encoder->crtc; 5513 crtc = encoder->crtc;
5514 /* Make sure the crtc and connector are running */ 5514 /* Make sure the crtc and connector are running */
5515 intel_crtc = to_intel_crtc(crtc); 5515 intel_crtc = to_intel_crtc(crtc);
5516 *dpms_mode = intel_crtc->dpms_mode; 5516 *dpms_mode = intel_crtc->dpms_mode;
5517 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { 5517 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
5518 crtc_funcs = crtc->helper_private; 5518 crtc_funcs = crtc->helper_private;
5519 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); 5519 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
5520 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); 5520 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
5521 } 5521 }
5522 return crtc; 5522 return crtc;
5523 } 5523 }
5524 5524
5525 /* Find an unused one (if possible) */ 5525 /* Find an unused one (if possible) */
5526 list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) { 5526 list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
5527 i++; 5527 i++;
5528 if (!(encoder->possible_crtcs & (1 << i))) 5528 if (!(encoder->possible_crtcs & (1 << i)))
5529 continue; 5529 continue;
5530 if (!possible_crtc->enabled) { 5530 if (!possible_crtc->enabled) {
5531 crtc = possible_crtc; 5531 crtc = possible_crtc;
5532 break; 5532 break;
5533 } 5533 }
5534 if (!supported_crtc) 5534 if (!supported_crtc)
5535 supported_crtc = possible_crtc; 5535 supported_crtc = possible_crtc;
5536 } 5536 }
5537 5537
5538 /* 5538 /*
5539 * If we didn't find an unused CRTC, don't use any. 5539 * If we didn't find an unused CRTC, don't use any.
5540 */ 5540 */
5541 if (!crtc) { 5541 if (!crtc) {
5542 return NULL; 5542 return NULL;
5543 } 5543 }
5544 5544
5545 encoder->crtc = crtc; 5545 encoder->crtc = crtc;
5546 connector->encoder = encoder; 5546 connector->encoder = encoder;
5547 intel_encoder->load_detect_temp = true; 5547 intel_encoder->load_detect_temp = true;
5548 5548
5549 intel_crtc = to_intel_crtc(crtc); 5549 intel_crtc = to_intel_crtc(crtc);
5550 *dpms_mode = intel_crtc->dpms_mode; 5550 *dpms_mode = intel_crtc->dpms_mode;
5551 5551
5552 if (!crtc->enabled) { 5552 if (!crtc->enabled) {
5553 if (!mode) 5553 if (!mode)
5554 mode = &load_detect_mode; 5554 mode = &load_detect_mode;
5555 drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb); 5555 drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb);
5556 } else { 5556 } else {
5557 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { 5557 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
5558 crtc_funcs = crtc->helper_private; 5558 crtc_funcs = crtc->helper_private;
5559 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); 5559 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
5560 } 5560 }
5561 5561
5562 /* Add this connector to the crtc */ 5562 /* Add this connector to the crtc */
5563 encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode); 5563 encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode);
5564 encoder_funcs->commit(encoder); 5564 encoder_funcs->commit(encoder);
5565 } 5565 }
5566 /* let the connector get through one full cycle before testing */ 5566 /* let the connector get through one full cycle before testing */
5567 intel_wait_for_vblank(dev, intel_crtc->pipe); 5567 intel_wait_for_vblank(dev, intel_crtc->pipe);
5568 5568
5569 return crtc; 5569 return crtc;
5570 } 5570 }
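
The selection loop above treats encoder->possible_crtcs as a bitmask indexed by each CRTC's position in the mode_config list. A simplified model (ours) of that walk; note that the function above records a supported-but-busy fallback yet still returns NULL when no unused CRTC exists, so the fallback is effectively unused in this version:

    #include <stdbool.h>
    #include <stddef.h>

    struct crtc { bool enabled; };

    /* Walk the CRTC array: skip those the encoder cannot drive, take the
     * first unused one, remember the first supported one as a fallback. */
    static struct crtc *pick_crtc(struct crtc *crtcs, size_t n,
                                  unsigned int possible_crtcs)
    {
            struct crtc *fallback = NULL;
            size_t i;

            for (i = 0; i < n; i++) {
                    if (!(possible_crtcs & (1u << i)))
                            continue;
                    if (!crtcs[i].enabled)
                            return &crtcs[i];       /* unused: take it */
                    if (!fallback)
                            fallback = &crtcs[i];
            }
            /* Mirroring the function above: no unused CRTC means give up;
             * the remembered fallback is not (yet) consulted. */
            (void)fallback;
            return NULL;
    }
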
5571 5571
5572 void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, 5572 void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
5573 struct drm_connector *connector, int dpms_mode) 5573 struct drm_connector *connector, int dpms_mode)
5574 { 5574 {
5575 struct drm_encoder *encoder = &intel_encoder->base; 5575 struct drm_encoder *encoder = &intel_encoder->base;
5576 struct drm_device *dev = encoder->dev; 5576 struct drm_device *dev = encoder->dev;
5577 struct drm_crtc *crtc = encoder->crtc; 5577 struct drm_crtc *crtc = encoder->crtc;
5578 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 5578 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
5579 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 5579 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
5580 5580
5581 if (intel_encoder->load_detect_temp) { 5581 if (intel_encoder->load_detect_temp) {
5582 encoder->crtc = NULL; 5582 encoder->crtc = NULL;
5583 connector->encoder = NULL; 5583 connector->encoder = NULL;
5584 intel_encoder->load_detect_temp = false; 5584 intel_encoder->load_detect_temp = false;
5585 crtc->enabled = drm_helper_crtc_in_use(crtc); 5585 crtc->enabled = drm_helper_crtc_in_use(crtc);
5586 drm_helper_disable_unused_functions(dev); 5586 drm_helper_disable_unused_functions(dev);
5587 } 5587 }
5588 5588
5589 /* Switch crtc and encoder back off if necessary */ 5589 /* Switch crtc and encoder back off if necessary */
5590 if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { 5590 if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
5591 if (encoder->crtc == crtc) 5591 if (encoder->crtc == crtc)
5592 encoder_funcs->dpms(encoder, dpms_mode); 5592 encoder_funcs->dpms(encoder, dpms_mode);
5593 crtc_funcs->dpms(crtc, dpms_mode); 5593 crtc_funcs->dpms(crtc, dpms_mode);
5594 } 5594 }
5595 } 5595 }
5596 5596
5597 /* Returns the clock of the currently programmed mode of the given pipe. */ 5597 /* Returns the clock of the currently programmed mode of the given pipe. */
5598 static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) 5598 static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
5599 { 5599 {
5600 struct drm_i915_private *dev_priv = dev->dev_private; 5600 struct drm_i915_private *dev_priv = dev->dev_private;
5601 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5601 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5602 int pipe = intel_crtc->pipe; 5602 int pipe = intel_crtc->pipe;
5603 u32 dpll = I915_READ(DPLL(pipe)); 5603 u32 dpll = I915_READ(DPLL(pipe));
5604 u32 fp; 5604 u32 fp;
5605 intel_clock_t clock; 5605 intel_clock_t clock;
5606 5606
5607 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 5607 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
5608 fp = FP0(pipe); 5608 fp = FP0(pipe);
5609 else 5609 else
5610 fp = FP1(pipe); 5610 fp = FP1(pipe);
5611 5611
5612 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 5612 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
5613 if (IS_PINEVIEW(dev)) { 5613 if (IS_PINEVIEW(dev)) {
5614 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 5614 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
5615 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 5615 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
5616 } else { 5616 } else {
5617 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 5617 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
5618 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 5618 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
5619 } 5619 }
5620 5620
5621 if (!IS_GEN2(dev)) { 5621 if (!IS_GEN2(dev)) {
5622 if (IS_PINEVIEW(dev)) 5622 if (IS_PINEVIEW(dev))
5623 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 5623 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
5624 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 5624 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
5625 else 5625 else
5626 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 5626 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
5627 DPLL_FPA01_P1_POST_DIV_SHIFT); 5627 DPLL_FPA01_P1_POST_DIV_SHIFT);
5628 5628
5629 switch (dpll & DPLL_MODE_MASK) { 5629 switch (dpll & DPLL_MODE_MASK) {
5630 case DPLLB_MODE_DAC_SERIAL: 5630 case DPLLB_MODE_DAC_SERIAL:
5631 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 5631 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
5632 5 : 10; 5632 5 : 10;
5633 break; 5633 break;
5634 case DPLLB_MODE_LVDS: 5634 case DPLLB_MODE_LVDS:
5635 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 5635 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
5636 7 : 14; 5636 7 : 14;
5637 break; 5637 break;
5638 default: 5638 default:
5639 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 5639 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
5640 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 5640 "mode\n", (int)(dpll & DPLL_MODE_MASK));
5641 return 0; 5641 return 0;
5642 } 5642 }
5643 5643
5644 /* XXX: Handle the 100MHz refclk */ 5644 /* XXX: Handle the 100MHz refclk */
5645 intel_clock(dev, 96000, &clock); 5645 intel_clock(dev, 96000, &clock);
5646 } else { 5646 } else {
5647 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); 5647 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
5648 5648
5649 if (is_lvds) { 5649 if (is_lvds) {
5650 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 5650 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
5651 DPLL_FPA01_P1_POST_DIV_SHIFT); 5651 DPLL_FPA01_P1_POST_DIV_SHIFT);
5652 clock.p2 = 14; 5652 clock.p2 = 14;
5653 5653
5654 if ((dpll & PLL_REF_INPUT_MASK) == 5654 if ((dpll & PLL_REF_INPUT_MASK) ==
5655 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 5655 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
5656 /* XXX: might not be 66MHz */ 5656 /* XXX: might not be 66MHz */
5657 intel_clock(dev, 66000, &clock); 5657 intel_clock(dev, 66000, &clock);
5658 } else 5658 } else
5659 intel_clock(dev, 48000, &clock); 5659 intel_clock(dev, 48000, &clock);
5660 } else { 5660 } else {
5661 if (dpll & PLL_P1_DIVIDE_BY_TWO) 5661 if (dpll & PLL_P1_DIVIDE_BY_TWO)
5662 clock.p1 = 2; 5662 clock.p1 = 2;
5663 else { 5663 else {
5664 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 5664 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
5665 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 5665 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
5666 } 5666 }
5667 if (dpll & PLL_P2_DIVIDE_BY_4) 5667 if (dpll & PLL_P2_DIVIDE_BY_4)
5668 clock.p2 = 4; 5668 clock.p2 = 4;
5669 else 5669 else
5670 clock.p2 = 2; 5670 clock.p2 = 2;
5671 5671
5672 intel_clock(dev, 48000, &clock); 5672 intel_clock(dev, 48000, &clock);
5673 } 5673 }
5674 } 5674 }
5675 5675
5676 /* XXX: It would be nice to validate the clocks, but we can't reuse 5676 /* XXX: It would be nice to validate the clocks, but we can't reuse
5677 * i830PllIsValid() because it relies on the xf86_config connector 5677 * i830PllIsValid() because it relies on the xf86_config connector
5678 * configuration being accurate, which it isn't necessarily. 5678 * configuration being accurate, which it isn't necessarily.
5679 */ 5679 */
5680 5680
5681 return clock.dot; 5681 return clock.dot;
5682 } 5682 }
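
For reference, the non-Pineview intel_clock() arithmetic that the function above feeds its decoded divisors into looks roughly like this; the +2 offsets come from the i9xx PLL formula of this driver generation, so treat them as an assumption outside that context:

    /* Dot clock from decoded PLL fields, i9xx style. refclk is in kHz,
     * the result is the pixel clock in kHz. */
    static int i9xx_dot_clock(int refclk, int m1, int m2, int n, int p1, int p2)
    {
            int m = 5 * (m1 + 2) + (m2 + 2);        /* effective feedback divider */
            int vco = refclk * m / (n + 2);         /* VCO frequency */

            return vco / (p1 * p2);                 /* post-dividers */
    }
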
5683 5683
5684 /** Returns the currently programmed mode of the given pipe. */ 5684 /** Returns the currently programmed mode of the given pipe. */
5685 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 5685 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
5686 struct drm_crtc *crtc) 5686 struct drm_crtc *crtc)
5687 { 5687 {
5688 struct drm_i915_private *dev_priv = dev->dev_private; 5688 struct drm_i915_private *dev_priv = dev->dev_private;
5689 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5689 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5690 int pipe = intel_crtc->pipe; 5690 int pipe = intel_crtc->pipe;
5691 struct drm_display_mode *mode; 5691 struct drm_display_mode *mode;
5692 int htot = I915_READ(HTOTAL(pipe)); 5692 int htot = I915_READ(HTOTAL(pipe));
5693 int hsync = I915_READ(HSYNC(pipe)); 5693 int hsync = I915_READ(HSYNC(pipe));
5694 int vtot = I915_READ(VTOTAL(pipe)); 5694 int vtot = I915_READ(VTOTAL(pipe));
5695 int vsync = I915_READ(VSYNC(pipe)); 5695 int vsync = I915_READ(VSYNC(pipe));
5696 5696
5697 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 5697 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
5698 if (!mode) 5698 if (!mode)
5699 return NULL; 5699 return NULL;
5700 5700
5701 mode->clock = intel_crtc_clock_get(dev, crtc); 5701 mode->clock = intel_crtc_clock_get(dev, crtc);
5702 mode->hdisplay = (htot & 0xffff) + 1; 5702 mode->hdisplay = (htot & 0xffff) + 1;
5703 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 5703 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
5704 mode->hsync_start = (hsync & 0xffff) + 1; 5704 mode->hsync_start = (hsync & 0xffff) + 1;
5705 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; 5705 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
5706 mode->vdisplay = (vtot & 0xffff) + 1; 5706 mode->vdisplay = (vtot & 0xffff) + 1;
5707 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; 5707 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
5708 mode->vsync_start = (vsync & 0xffff) + 1; 5708 mode->vsync_start = (vsync & 0xffff) + 1;
5709 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; 5709 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
5710 5710
5711 drm_mode_set_name(mode); 5711 drm_mode_set_name(mode);
5712 drm_mode_set_crtcinfo(mode, 0); 5712 drm_mode_set_crtcinfo(mode, 0);
5713 5713
5714 return mode; 5714 return mode;
5715 } 5715 }
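
The reads above mirror the timing writes at the top of this section: the hardware stores every value minus one, with the start/display count in the low half-word and the end/total in the high half-word. A small decoding sketch:

    #include <stdint.h>

    /* Split a timing register into its two "stored minus one" fields. */
    static void decode_timing(uint32_t reg, int *lo, int *hi)
    {
            *lo = (int)(reg & 0xffff) + 1;
            *hi = (int)((reg >> 16) & 0xffff) + 1;
    }
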
5716 5716
5717 #define GPU_IDLE_TIMEOUT 500 /* ms */ 5717 #define GPU_IDLE_TIMEOUT 500 /* ms */
5718 5718
5719 /* When this timer fires, we've been idle for a while */ 5719 /* When this timer fires, we've been idle for a while */
5720 static void intel_gpu_idle_timer(unsigned long arg) 5720 static void intel_gpu_idle_timer(unsigned long arg)
5721 { 5721 {
5722 struct drm_device *dev = (struct drm_device *)arg; 5722 struct drm_device *dev = (struct drm_device *)arg;
5723 drm_i915_private_t *dev_priv = dev->dev_private; 5723 drm_i915_private_t *dev_priv = dev->dev_private;
5724 5724
5725 if (!list_empty(&dev_priv->mm.active_list)) { 5725 if (!list_empty(&dev_priv->mm.active_list)) {
5726 /* Still processing requests, so just re-arm the timer. */ 5726 /* Still processing requests, so just re-arm the timer. */
5727 mod_timer(&dev_priv->idle_timer, jiffies + 5727 mod_timer(&dev_priv->idle_timer, jiffies +
5728 msecs_to_jiffies(GPU_IDLE_TIMEOUT)); 5728 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
5729 return; 5729 return;
5730 } 5730 }
5731 5731
5732 dev_priv->busy = false; 5732 dev_priv->busy = false;
5733 queue_work(dev_priv->wq, &dev_priv->idle_work); 5733 queue_work(dev_priv->wq, &dev_priv->idle_work);
5734 } 5734 }
5735 5735
5736 #define CRTC_IDLE_TIMEOUT 1000 /* ms */ 5736 #define CRTC_IDLE_TIMEOUT 1000 /* ms */
5737 5737
5738 static void intel_crtc_idle_timer(unsigned long arg) 5738 static void intel_crtc_idle_timer(unsigned long arg)
5739 { 5739 {
5740 struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; 5740 struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
5741 struct drm_crtc *crtc = &intel_crtc->base; 5741 struct drm_crtc *crtc = &intel_crtc->base;
5742 drm_i915_private_t *dev_priv = crtc->dev->dev_private; 5742 drm_i915_private_t *dev_priv = crtc->dev->dev_private;
5743 struct intel_framebuffer *intel_fb; 5743 struct intel_framebuffer *intel_fb;
5744 5744
5745 intel_fb = to_intel_framebuffer(crtc->fb); 5745 intel_fb = to_intel_framebuffer(crtc->fb);
5746 if (intel_fb && intel_fb->obj->active) { 5746 if (intel_fb && intel_fb->obj->active) {
5747 /* The framebuffer is still being accessed by the GPU. */ 5747 /* The framebuffer is still being accessed by the GPU. */
5748 mod_timer(&intel_crtc->idle_timer, jiffies + 5748 mod_timer(&intel_crtc->idle_timer, jiffies +
5749 msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); 5749 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5750 return; 5750 return;
5751 } 5751 }
5752 5752
5753 intel_crtc->busy = false; 5753 intel_crtc->busy = false;
5754 queue_work(dev_priv->wq, &dev_priv->idle_work); 5754 queue_work(dev_priv->wq, &dev_priv->idle_work);
5755 } 5755 }
5756 5756
5757 static void intel_increase_pllclock(struct drm_crtc *crtc) 5757 static void intel_increase_pllclock(struct drm_crtc *crtc)
5758 { 5758 {
5759 struct drm_device *dev = crtc->dev; 5759 struct drm_device *dev = crtc->dev;
5760 drm_i915_private_t *dev_priv = dev->dev_private; 5760 drm_i915_private_t *dev_priv = dev->dev_private;
5761 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5761 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5762 int pipe = intel_crtc->pipe; 5762 int pipe = intel_crtc->pipe;
5763 int dpll_reg = DPLL(pipe); 5763 int dpll_reg = DPLL(pipe);
5764 int dpll; 5764 int dpll;
5765 5765
5766 if (HAS_PCH_SPLIT(dev)) 5766 if (HAS_PCH_SPLIT(dev))
5767 return; 5767 return;
5768 5768
5769 if (!dev_priv->lvds_downclock_avail) 5769 if (!dev_priv->lvds_downclock_avail)
5770 return; 5770 return;
5771 5771
5772 dpll = I915_READ(dpll_reg); 5772 dpll = I915_READ(dpll_reg);
5773 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { 5773 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
5774 DRM_DEBUG_DRIVER("upclocking LVDS\n"); 5774 DRM_DEBUG_DRIVER("upclocking LVDS\n");
5775 5775
5776 /* Unlock panel regs */ 5776 /* Unlock panel regs */
5777 I915_WRITE(PP_CONTROL, 5777 I915_WRITE(PP_CONTROL,
5778 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); 5778 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
5779 5779
5780 dpll &= ~DISPLAY_RATE_SELECT_FPA1; 5780 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
5781 I915_WRITE(dpll_reg, dpll); 5781 I915_WRITE(dpll_reg, dpll);
5782 intel_wait_for_vblank(dev, pipe); 5782 intel_wait_for_vblank(dev, pipe);
5783 5783
5784 dpll = I915_READ(dpll_reg); 5784 dpll = I915_READ(dpll_reg);
5785 if (dpll & DISPLAY_RATE_SELECT_FPA1) 5785 if (dpll & DISPLAY_RATE_SELECT_FPA1)
5786 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); 5786 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
5787 5787
5788 /* ...and lock them again */ 5788 /* ...and lock them again */
5789 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); 5789 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
5790 } 5790 }
5791 5791
5792 /* Schedule downclock */ 5792 /* Schedule downclock */
5793 mod_timer(&intel_crtc->idle_timer, jiffies + 5793 mod_timer(&intel_crtc->idle_timer, jiffies +
5794 msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); 5794 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5795 } 5795 }
5796 5796
5797 static void intel_decrease_pllclock(struct drm_crtc *crtc) 5797 static void intel_decrease_pllclock(struct drm_crtc *crtc)
5798 { 5798 {
5799 struct drm_device *dev = crtc->dev; 5799 struct drm_device *dev = crtc->dev;
5800 drm_i915_private_t *dev_priv = dev->dev_private; 5800 drm_i915_private_t *dev_priv = dev->dev_private;
5801 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5801 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5802 int pipe = intel_crtc->pipe; 5802 int pipe = intel_crtc->pipe;
5803 int dpll_reg = DPLL(pipe); 5803 int dpll_reg = DPLL(pipe);
5804 int dpll = I915_READ(dpll_reg); 5804 int dpll = I915_READ(dpll_reg);
5805 5805
5806 if (HAS_PCH_SPLIT(dev)) 5806 if (HAS_PCH_SPLIT(dev))
5807 return; 5807 return;
5808 5808
5809 if (!dev_priv->lvds_downclock_avail) 5809 if (!dev_priv->lvds_downclock_avail)
5810 return; 5810 return;
5811 5811
5812 /* 5812 /*
5813 * Since this is called by a timer, we should never get here in 5813 * Since this is called by a timer, we should never get here in
5814 * the manual case. 5814 * the manual case.
5815 */ 5815 */
5816 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { 5816 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
5817 DRM_DEBUG_DRIVER("downclocking LVDS\n"); 5817 DRM_DEBUG_DRIVER("downclocking LVDS\n");
5818 5818
5819 /* Unlock panel regs */ 5819 /* Unlock panel regs */
5820 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | 5820 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
5821 PANEL_UNLOCK_REGS); 5821 PANEL_UNLOCK_REGS);
5822 5822
5823 dpll |= DISPLAY_RATE_SELECT_FPA1; 5823 dpll |= DISPLAY_RATE_SELECT_FPA1;
5824 I915_WRITE(dpll_reg, dpll); 5824 I915_WRITE(dpll_reg, dpll);
5825 intel_wait_for_vblank(dev, pipe); 5825 intel_wait_for_vblank(dev, pipe);
5826 dpll = I915_READ(dpll_reg); 5826 dpll = I915_READ(dpll_reg);
5827 if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) 5827 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
5828 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); 5828 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
5829 5829
5830 /* ...and lock them again */ 5830 /* ...and lock them again */
5831 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); 5831 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
5832 } 5832 }
5833 5833
5834 } 5834 }
5835 5835
5836 /** 5836 /**
5837 * intel_idle_update - adjust clocks for idleness 5837 * intel_idle_update - adjust clocks for idleness
5838 * @work: work struct 5838 * @work: work struct
5839 * 5839 *
5840 * Either the GPU or display (or both) went idle. Check the busy status 5840 * Either the GPU or display (or both) went idle. Check the busy status
5841 * here and adjust the CRTC and GPU clocks as necessary. 5841 * here and adjust the CRTC and GPU clocks as necessary.
5842 */ 5842 */
5843 static void intel_idle_update(struct work_struct *work) 5843 static void intel_idle_update(struct work_struct *work)
5844 { 5844 {
5845 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 5845 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
5846 idle_work); 5846 idle_work);
5847 struct drm_device *dev = dev_priv->dev; 5847 struct drm_device *dev = dev_priv->dev;
5848 struct drm_crtc *crtc; 5848 struct drm_crtc *crtc;
5849 struct intel_crtc *intel_crtc; 5849 struct intel_crtc *intel_crtc;
5850 5850
5851 if (!i915_powersave) 5851 if (!i915_powersave)
5852 return; 5852 return;
5853 5853
5854 mutex_lock(&dev->struct_mutex); 5854 mutex_lock(&dev->struct_mutex);
5855 5855
5856 i915_update_gfx_val(dev_priv); 5856 i915_update_gfx_val(dev_priv);
5857 5857
5858 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 5858 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5859 /* Skip inactive CRTCs */ 5859 /* Skip inactive CRTCs */
5860 if (!crtc->fb) 5860 if (!crtc->fb)
5861 continue; 5861 continue;
5862 5862
5863 intel_crtc = to_intel_crtc(crtc); 5863 intel_crtc = to_intel_crtc(crtc);
5864 if (!intel_crtc->busy) 5864 if (!intel_crtc->busy)
5865 intel_decrease_pllclock(crtc); 5865 intel_decrease_pllclock(crtc);
5866 } 5866 }
5867 5867
5868 5868
5869 mutex_unlock(&dev->struct_mutex); 5869 mutex_unlock(&dev->struct_mutex);
5870 } 5870 }
5871 5871
5872 /** 5872 /**
5873 * intel_mark_busy - mark the GPU and possibly the display busy 5873 * intel_mark_busy - mark the GPU and possibly the display busy
5874 * @dev: drm device 5874 * @dev: drm device
5875 * @obj: object we're operating on 5875 * @obj: object we're operating on
5876 * 5876 *
5877 * Callers can use this function to indicate that the GPU is busy processing 5877 * Callers can use this function to indicate that the GPU is busy processing
5878 * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout 5878 * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
5879 * buffer), we'll also mark the display as busy, so we know to increase its 5879 * buffer), we'll also mark the display as busy, so we know to increase its
5880 * clock frequency. 5880 * clock frequency.
5881 */ 5881 */
5882 void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) 5882 void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
5883 { 5883 {
5884 drm_i915_private_t *dev_priv = dev->dev_private; 5884 drm_i915_private_t *dev_priv = dev->dev_private;
5885 struct drm_crtc *crtc = NULL; 5885 struct drm_crtc *crtc = NULL;
5886 struct intel_framebuffer *intel_fb; 5886 struct intel_framebuffer *intel_fb;
5887 struct intel_crtc *intel_crtc; 5887 struct intel_crtc *intel_crtc;
5888 5888
5889 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 5889 if (!drm_core_check_feature(dev, DRIVER_MODESET))
5890 return; 5890 return;
5891 5891
5892 if (!dev_priv->busy) 5892 if (!dev_priv->busy)
5893 dev_priv->busy = true; 5893 dev_priv->busy = true;
5894 else 5894 else
5895 mod_timer(&dev_priv->idle_timer, jiffies + 5895 mod_timer(&dev_priv->idle_timer, jiffies +
5896 msecs_to_jiffies(GPU_IDLE_TIMEOUT)); 5896 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
5897 5897
5898 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 5898 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5899 if (!crtc->fb) 5899 if (!crtc->fb)
5900 continue; 5900 continue;
5901 5901
5902 intel_crtc = to_intel_crtc(crtc); 5902 intel_crtc = to_intel_crtc(crtc);
5903 intel_fb = to_intel_framebuffer(crtc->fb); 5903 intel_fb = to_intel_framebuffer(crtc->fb);
5904 if (intel_fb->obj == obj) { 5904 if (intel_fb->obj == obj) {
5905 if (!intel_crtc->busy) { 5905 if (!intel_crtc->busy) {
5906 /* Non-busy -> busy, upclock */ 5906 /* Non-busy -> busy, upclock */
5907 intel_increase_pllclock(crtc); 5907 intel_increase_pllclock(crtc);
5908 intel_crtc->busy = true; 5908 intel_crtc->busy = true;
5909 } else { 5909 } else {
5910 /* Busy -> busy, put off timer */ 5910 /* Busy -> busy, put off timer */
5911 mod_timer(&intel_crtc->idle_timer, jiffies + 5911 mod_timer(&intel_crtc->idle_timer, jiffies +
5912 msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); 5912 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5913 } 5913 }
5914 } 5914 }
5915 } 5915 }
5916 } 5916 }
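
intel_mark_busy() and the two idle timers implement a debounce: each activity event either flips the idle-to-busy state or pushes the idle deadline out, so the idle work only runs after a genuinely quiet period. A userspace analogue (ours, with explicit timestamps standing in for mod_timer):

    #include <stdbool.h>
    #include <stdint.h>

    #define IDLE_TIMEOUT_MS 500

    struct busy_state {
            bool     busy;
            uint64_t idle_deadline_ms;
    };

    /* Called on every activity event. */
    static void mark_busy(struct busy_state *s, uint64_t now_ms)
    {
            s->busy = true;
            s->idle_deadline_ms = now_ms + IDLE_TIMEOUT_MS;
    }

    /* Called periodically; returns true exactly on the busy->idle edge. */
    static bool check_idle(struct busy_state *s, uint64_t now_ms)
    {
            if (!s->busy || now_ms < s->idle_deadline_ms)
                    return false;
            s->busy = false;
            return true;
    }
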
5917 5917
5918 static void intel_crtc_destroy(struct drm_crtc *crtc) 5918 static void intel_crtc_destroy(struct drm_crtc *crtc)
5919 { 5919 {
5920 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5920 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5921 struct drm_device *dev = crtc->dev; 5921 struct drm_device *dev = crtc->dev;
5922 struct intel_unpin_work *work; 5922 struct intel_unpin_work *work;
5923 unsigned long flags; 5923 unsigned long flags;
5924 5924
5925 spin_lock_irqsave(&dev->event_lock, flags); 5925 spin_lock_irqsave(&dev->event_lock, flags);
5926 work = intel_crtc->unpin_work; 5926 work = intel_crtc->unpin_work;
5927 intel_crtc->unpin_work = NULL; 5927 intel_crtc->unpin_work = NULL;
5928 spin_unlock_irqrestore(&dev->event_lock, flags); 5928 spin_unlock_irqrestore(&dev->event_lock, flags);
5929 5929
5930 if (work) { 5930 if (work) {
5931 cancel_work_sync(&work->work); 5931 cancel_work_sync(&work->work);
5932 kfree(work); 5932 kfree(work);
5933 } 5933 }
5934 5934
5935 drm_crtc_cleanup(crtc); 5935 drm_crtc_cleanup(crtc);
5936 5936
5937 kfree(intel_crtc); 5937 kfree(intel_crtc);
5938 } 5938 }
5939 5939
5940 static void intel_unpin_work_fn(struct work_struct *__work) 5940 static void intel_unpin_work_fn(struct work_struct *__work)
5941 { 5941 {
5942 struct intel_unpin_work *work = 5942 struct intel_unpin_work *work =
5943 container_of(__work, struct intel_unpin_work, work); 5943 container_of(__work, struct intel_unpin_work, work);
5944 5944
5945 mutex_lock(&work->dev->struct_mutex); 5945 mutex_lock(&work->dev->struct_mutex);
5946 i915_gem_object_unpin(work->old_fb_obj); 5946 i915_gem_object_unpin(work->old_fb_obj);
5947 drm_gem_object_unreference(&work->pending_flip_obj->base); 5947 drm_gem_object_unreference(&work->pending_flip_obj->base);
5948 drm_gem_object_unreference(&work->old_fb_obj->base); 5948 drm_gem_object_unreference(&work->old_fb_obj->base);
5949 5949
5950 mutex_unlock(&work->dev->struct_mutex); 5950 mutex_unlock(&work->dev->struct_mutex);
5951 kfree(work); 5951 kfree(work);
5952 } 5952 }
5953 5953
5954 static void do_intel_finish_page_flip(struct drm_device *dev, 5954 static void do_intel_finish_page_flip(struct drm_device *dev,
5955 struct drm_crtc *crtc) 5955 struct drm_crtc *crtc)
5956 { 5956 {
5957 drm_i915_private_t *dev_priv = dev->dev_private; 5957 drm_i915_private_t *dev_priv = dev->dev_private;
5958 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5958 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5959 struct intel_unpin_work *work; 5959 struct intel_unpin_work *work;
5960 struct drm_i915_gem_object *obj; 5960 struct drm_i915_gem_object *obj;
5961 struct drm_pending_vblank_event *e; 5961 struct drm_pending_vblank_event *e;
5962 struct timeval tnow, tvbl; 5962 struct timeval tnow, tvbl;
5963 unsigned long flags; 5963 unsigned long flags;
5964 5964
5965 /* Ignore early vblank irqs */ 5965 /* Ignore early vblank irqs */
5966 if (intel_crtc == NULL) 5966 if (intel_crtc == NULL)
5967 return; 5967 return;
5968 5968
5969 do_gettimeofday(&tnow); 5969 do_gettimeofday(&tnow);
5970 5970
5971 spin_lock_irqsave(&dev->event_lock, flags); 5971 spin_lock_irqsave(&dev->event_lock, flags);
5972 work = intel_crtc->unpin_work; 5972 work = intel_crtc->unpin_work;
5973 if (work == NULL || !work->pending) { 5973 if (work == NULL || !work->pending) {
5974 spin_unlock_irqrestore(&dev->event_lock, flags); 5974 spin_unlock_irqrestore(&dev->event_lock, flags);
5975 return; 5975 return;
5976 } 5976 }
5977 5977
5978 intel_crtc->unpin_work = NULL; 5978 intel_crtc->unpin_work = NULL;
5979 5979
5980 if (work->event) { 5980 if (work->event) {
5981 e = work->event; 5981 e = work->event;
5982 e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); 5982 e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
5983 5983
5984 /* Called before vblank count and timestamps have 5984 /* Called before vblank count and timestamps have
5985 * been updated for the vblank interval of flip 5985 * been updated for the vblank interval of flip
5986 * completion? Need to increment vblank count and 5986 * completion? Need to increment vblank count and
5987 * add one videorefresh duration to returned timestamp 5987 * add one videorefresh duration to returned timestamp
5988 * to account for this. We assume this happened if we 5988 * to account for this. We assume this happened if we
5989 * get called over 0.9 frame durations after the last 5989 * get called over 0.9 frame durations after the last
5990 * timestamped vblank. 5990 * timestamped vblank.
5991 * 5991 *
5992 * This calculation cannot be used with vrefresh rates 5992 * This calculation cannot be used with vrefresh rates
5993 * below 5Hz (10Hz to be on the safe side) without 5993 * below 5Hz (10Hz to be on the safe side) without
5994 * promoting to 64-bit integers. 5994 * promoting to 64-bit integers.
5995 */ 5995 */
5996 if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) > 5996 if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
5997 9 * crtc->framedur_ns) { 5997 9 * crtc->framedur_ns) {
5998 e->event.sequence++; 5998 e->event.sequence++;
5999 tvbl = ns_to_timeval(timeval_to_ns(&tvbl) + 5999 tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
6000 crtc->framedur_ns); 6000 crtc->framedur_ns);
6001 } 6001 }
6002 6002
6003 e->event.tv_sec = tvbl.tv_sec; 6003 e->event.tv_sec = tvbl.tv_sec;
6004 e->event.tv_usec = tvbl.tv_usec; 6004 e->event.tv_usec = tvbl.tv_usec;
6005 6005
6006 list_add_tail(&e->base.link, 6006 list_add_tail(&e->base.link,
6007 &e->base.file_priv->event_list); 6007 &e->base.file_priv->event_list);
6008 wake_up_interruptible(&e->base.file_priv->event_wait); 6008 wake_up_interruptible(&e->base.file_priv->event_wait);
6009 } 6009 }
6010 6010
6011 drm_vblank_put(dev, intel_crtc->pipe); 6011 drm_vblank_put(dev, intel_crtc->pipe);
6012 6012
6013 spin_unlock_irqrestore(&dev->event_lock, flags); 6013 spin_unlock_irqrestore(&dev->event_lock, flags);
6014 6014
6015 obj = work->old_fb_obj; 6015 obj = work->old_fb_obj;
6016 6016
6017 atomic_clear_mask(1 << intel_crtc->plane, 6017 atomic_clear_mask(1 << intel_crtc->plane,
6018 &obj->pending_flip.counter); 6018 &obj->pending_flip.counter);
6019 if (atomic_read(&obj->pending_flip) == 0) 6019 if (atomic_read(&obj->pending_flip) == 0)
6020 wake_up(&dev_priv->pending_flip_queue); 6020 wake_up(&dev_priv->pending_flip_queue);
6021 6021
6022 schedule_work(&work->work); 6022 schedule_work(&work->work);
6023 6023
6024 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); 6024 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
6025 } 6025 }
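
The 0.9-frame heuristic above is easy to lift out: comparing 10 * delta against 9 * frame duration keeps the test in integer math, and a late completion is re-attributed to the following vblank by bumping the sequence and shifting the timestamp one frame. A sketch:

    #include <stdint.h>

    /* If the flip completed more than 0.9 frame durations after the last
     * timestamped vblank, the event belongs to the next vblank interval. */
    static void fixup_flip_event(int64_t now_ns, int64_t *vbl_ns,
                                 uint32_t *sequence, int64_t framedur_ns)
    {
            if (10 * (now_ns - *vbl_ns) > 9 * framedur_ns) {
                    (*sequence)++;
                    *vbl_ns += framedur_ns;
            }
    }
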
6026 6026
6027 void intel_finish_page_flip(struct drm_device *dev, int pipe) 6027 void intel_finish_page_flip(struct drm_device *dev, int pipe)
6028 { 6028 {
6029 drm_i915_private_t *dev_priv = dev->dev_private; 6029 drm_i915_private_t *dev_priv = dev->dev_private;
6030 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 6030 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
6031 6031
6032 do_intel_finish_page_flip(dev, crtc); 6032 do_intel_finish_page_flip(dev, crtc);
6033 } 6033 }
6034 6034
6035 void intel_finish_page_flip_plane(struct drm_device *dev, int plane) 6035 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
6036 { 6036 {
6037 drm_i915_private_t *dev_priv = dev->dev_private; 6037 drm_i915_private_t *dev_priv = dev->dev_private;
6038 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; 6038 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
6039 6039
6040 do_intel_finish_page_flip(dev, crtc); 6040 do_intel_finish_page_flip(dev, crtc);
6041 } 6041 }
6042 6042
6043 void intel_prepare_page_flip(struct drm_device *dev, int plane) 6043 void intel_prepare_page_flip(struct drm_device *dev, int plane)
6044 { 6044 {
6045 drm_i915_private_t *dev_priv = dev->dev_private; 6045 drm_i915_private_t *dev_priv = dev->dev_private;
6046 struct intel_crtc *intel_crtc = 6046 struct intel_crtc *intel_crtc =
6047 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 6047 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
6048 unsigned long flags; 6048 unsigned long flags;
6049 6049
6050 spin_lock_irqsave(&dev->event_lock, flags); 6050 spin_lock_irqsave(&dev->event_lock, flags);
6051 if (intel_crtc->unpin_work) { 6051 if (intel_crtc->unpin_work) {
6052 if ((++intel_crtc->unpin_work->pending) > 1) 6052 if ((++intel_crtc->unpin_work->pending) > 1)
6053 DRM_ERROR("Prepared flip multiple times\n"); 6053 DRM_ERROR("Prepared flip multiple times\n");
6054 } else { 6054 } else {
6055 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); 6055 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
6056 } 6056 }
6057 spin_unlock_irqrestore(&dev->event_lock, flags); 6057 spin_unlock_irqrestore(&dev->event_lock, flags);
6058 } 6058 }
6059 6059
6060 static int intel_crtc_page_flip(struct drm_crtc *crtc, 6060 static int intel_crtc_page_flip(struct drm_crtc *crtc,
6061 struct drm_framebuffer *fb, 6061 struct drm_framebuffer *fb,
6062 struct drm_pending_vblank_event *event) 6062 struct drm_pending_vblank_event *event)
6063 { 6063 {
6064 struct drm_device *dev = crtc->dev; 6064 struct drm_device *dev = crtc->dev;
6065 struct drm_i915_private *dev_priv = dev->dev_private; 6065 struct drm_i915_private *dev_priv = dev->dev_private;
6066 struct intel_framebuffer *intel_fb; 6066 struct intel_framebuffer *intel_fb;
6067 struct drm_i915_gem_object *obj; 6067 struct drm_i915_gem_object *obj;
6068 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6068 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6069 struct intel_unpin_work *work; 6069 struct intel_unpin_work *work;
6070 unsigned long flags, offset; 6070 unsigned long flags, offset;
6071 int pipe = intel_crtc->pipe; 6071 int pipe = intel_crtc->pipe;
6072 u32 pf, pipesrc; 6072 u32 pf, pipesrc;
6073 int ret; 6073 int ret;
6074 6074
6075 work = kzalloc(sizeof *work, GFP_KERNEL); 6075 work = kzalloc(sizeof *work, GFP_KERNEL);
6076 if (work == NULL) 6076 if (work == NULL)
6077 return -ENOMEM; 6077 return -ENOMEM;
6078 6078
6079 work->event = event; 6079 work->event = event;
6080 work->dev = crtc->dev; 6080 work->dev = crtc->dev;
6081 intel_fb = to_intel_framebuffer(crtc->fb); 6081 intel_fb = to_intel_framebuffer(crtc->fb);
6082 work->old_fb_obj = intel_fb->obj; 6082 work->old_fb_obj = intel_fb->obj;
6083 INIT_WORK(&work->work, intel_unpin_work_fn); 6083 INIT_WORK(&work->work, intel_unpin_work_fn);
6084 6084
6085 /* We borrow the event spin lock for protecting unpin_work */ 6085 /* We borrow the event spin lock for protecting unpin_work */
6086 spin_lock_irqsave(&dev->event_lock, flags); 6086 spin_lock_irqsave(&dev->event_lock, flags);
6087 if (intel_crtc->unpin_work) { 6087 if (intel_crtc->unpin_work) {
6088 spin_unlock_irqrestore(&dev->event_lock, flags); 6088 spin_unlock_irqrestore(&dev->event_lock, flags);
6089 kfree(work); 6089 kfree(work);
6090 6090
6091 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 6091 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
6092 return -EBUSY; 6092 return -EBUSY;
6093 } 6093 }
6094 intel_crtc->unpin_work = work; 6094 intel_crtc->unpin_work = work;
6095 spin_unlock_irqrestore(&dev->event_lock, flags); 6095 spin_unlock_irqrestore(&dev->event_lock, flags);
6096 6096
6097 intel_fb = to_intel_framebuffer(fb); 6097 intel_fb = to_intel_framebuffer(fb);
6098 obj = intel_fb->obj; 6098 obj = intel_fb->obj;
6099 6099
6100 mutex_lock(&dev->struct_mutex); 6100 mutex_lock(&dev->struct_mutex);
6101 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 6101 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6102 if (ret) 6102 if (ret)
6103 goto cleanup_work; 6103 goto cleanup_work;
6104 6104
6105 /* Reference the objects for the scheduled work. */ 6105 /* Reference the objects for the scheduled work. */
6106 drm_gem_object_reference(&work->old_fb_obj->base); 6106 drm_gem_object_reference(&work->old_fb_obj->base);
6107 drm_gem_object_reference(&obj->base); 6107 drm_gem_object_reference(&obj->base);
6108 6108
6109 crtc->fb = fb; 6109 crtc->fb = fb;
6110 6110
6111 ret = drm_vblank_get(dev, intel_crtc->pipe); 6111 ret = drm_vblank_get(dev, intel_crtc->pipe);
6112 if (ret) 6112 if (ret)
6113 goto cleanup_objs; 6113 goto cleanup_objs;
6114 6114
6115 if (IS_GEN3(dev) || IS_GEN2(dev)) { 6115 if (IS_GEN3(dev) || IS_GEN2(dev)) {
6116 u32 flip_mask; 6116 u32 flip_mask;
6117 6117
6118 /* Can't queue multiple flips, so wait for the previous 6118 /* Can't queue multiple flips, so wait for the previous
6119 * one to finish before executing the next. 6119 * one to finish before executing the next.
6120 */ 6120 */
6121 ret = BEGIN_LP_RING(2); 6121 ret = BEGIN_LP_RING(2);
6122 if (ret) 6122 if (ret)
6123 goto cleanup_objs; 6123 goto cleanup_objs;
6124 6124
6125 if (intel_crtc->plane) 6125 if (intel_crtc->plane)
6126 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 6126 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6127 else 6127 else
6128 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 6128 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6129 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 6129 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
6130 OUT_RING(MI_NOOP); 6130 OUT_RING(MI_NOOP);
6131 ADVANCE_LP_RING(); 6131 ADVANCE_LP_RING();
6132 } 6132 }
6133 6133
6134 work->pending_flip_obj = obj; 6134 work->pending_flip_obj = obj;
6135 6135
6136 work->enable_stall_check = true; 6136 work->enable_stall_check = true;
6137 6137
6138 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 6138 /* Offset into the new buffer for cases of shared fbs between CRTCs */
6139 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; 6139 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
6140 6140
6141 ret = BEGIN_LP_RING(4); 6141 ret = BEGIN_LP_RING(4);
6142 if (ret) 6142 if (ret)
6143 goto cleanup_objs; 6143 goto cleanup_objs;
6144 6144
6145 /* Block clients from rendering to the new back buffer until 6145 /* Block clients from rendering to the new back buffer until
6146 * the flip occurs and the object is no longer visible. 6146 * the flip occurs and the object is no longer visible.
6147 */ 6147 */
6148 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 6148 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
6149 6149
6150 switch (INTEL_INFO(dev)->gen) { 6150 switch (INTEL_INFO(dev)->gen) {
6151 case 2: 6151 case 2:
6152 OUT_RING(MI_DISPLAY_FLIP | 6152 OUT_RING(MI_DISPLAY_FLIP |
6153 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 6153 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6154 OUT_RING(fb->pitch); 6154 OUT_RING(fb->pitch);
6155 OUT_RING(obj->gtt_offset + offset); 6155 OUT_RING(obj->gtt_offset + offset);
6156 OUT_RING(MI_NOOP); 6156 OUT_RING(MI_NOOP);
6157 break; 6157 break;
6158 6158
6159 case 3: 6159 case 3:
6160 OUT_RING(MI_DISPLAY_FLIP_I915 | 6160 OUT_RING(MI_DISPLAY_FLIP_I915 |
6161 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 6161 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6162 OUT_RING(fb->pitch); 6162 OUT_RING(fb->pitch);
6163 OUT_RING(obj->gtt_offset + offset); 6163 OUT_RING(obj->gtt_offset + offset);
6164 OUT_RING(MI_NOOP); 6164 OUT_RING(MI_NOOP);
6165 break; 6165 break;
6166 6166
6167 case 4: 6167 case 4:
6168 case 5: 6168 case 5:
6169 /* i965+ uses the linear or tiled offsets from the 6169 /* i965+ uses the linear or tiled offsets from the
6170 * Display Registers (which do not change across a page-flip) 6170 * Display Registers (which do not change across a page-flip)
6171 * so we need only reprogram the base address. 6171 * so we need only reprogram the base address.
6172 */ 6172 */
6173 OUT_RING(MI_DISPLAY_FLIP | 6173 OUT_RING(MI_DISPLAY_FLIP |
6174 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 6174 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6175 OUT_RING(fb->pitch); 6175 OUT_RING(fb->pitch);
6176 OUT_RING(obj->gtt_offset | obj->tiling_mode); 6176 OUT_RING(obj->gtt_offset | obj->tiling_mode);
6177 6177
6178 /* XXX Enabling the panel-fitter across page-flip is so far 6178 /* XXX Enabling the panel-fitter across page-flip is so far
6179 * untested on non-native modes, so ignore it for now. 6179 * untested on non-native modes, so ignore it for now.
6180 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; 6180 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
6181 */ 6181 */
6182 pf = 0; 6182 pf = 0;
6183 pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff; 6183 pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
6184 OUT_RING(pf | pipesrc); 6184 OUT_RING(pf | pipesrc);
6185 break; 6185 break;
6186 6186
6187 case 6: 6187 case 6:
6188 OUT_RING(MI_DISPLAY_FLIP | 6188 OUT_RING(MI_DISPLAY_FLIP |
6189 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 6189 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6190 OUT_RING(fb->pitch | obj->tiling_mode); 6190 OUT_RING(fb->pitch | obj->tiling_mode);
6191 OUT_RING(obj->gtt_offset); 6191 OUT_RING(obj->gtt_offset);
6192 6192
6193 pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE; 6193 pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
6194 pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff; 6194 pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
6195 OUT_RING(pf | pipesrc); 6195 OUT_RING(pf | pipesrc);
6196 break; 6196 break;
6197 } 6197 }
6198 ADVANCE_LP_RING(); 6198 ADVANCE_LP_RING();
6199 6199
6200 mutex_unlock(&dev->struct_mutex); 6200 mutex_unlock(&dev->struct_mutex);
6201 6201
6202 trace_i915_flip_request(intel_crtc->plane, obj); 6202 trace_i915_flip_request(intel_crtc->plane, obj);
6203 6203
6204 return 0; 6204 return 0;
6205 6205
6206 cleanup_objs: 6206 cleanup_objs:
6207 drm_gem_object_unreference(&work->old_fb_obj->base); 6207 drm_gem_object_unreference(&work->old_fb_obj->base);
6208 drm_gem_object_unreference(&obj->base); 6208 drm_gem_object_unreference(&obj->base);
6209 cleanup_work: 6209 cleanup_work:
6210 mutex_unlock(&dev->struct_mutex); 6210 mutex_unlock(&dev->struct_mutex);
6211 6211
6212 spin_lock_irqsave(&dev->event_lock, flags); 6212 spin_lock_irqsave(&dev->event_lock, flags);
6213 intel_crtc->unpin_work = NULL; 6213 intel_crtc->unpin_work = NULL;
6214 spin_unlock_irqrestore(&dev->event_lock, flags); 6214 spin_unlock_irqrestore(&dev->event_lock, flags);
6215 6215
6216 kfree(work); 6216 kfree(work);
6217 6217
6218 return ret; 6218 return ret;
6219 } 6219 }
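
intel_crtc_page_flip acquires its state strictly in order (the work struct, the unpin_work slot under event_lock, the pinned object, the GEM references, the vblank reference) and the cleanup_objs/cleanup_work labels unwind in the reverse order when a later step fails. A compilable sketch of that goto-unwind shape, with malloc standing in for each acquisition and made-up names; it shows the error path's structure, not the driver's actual teardown.

    #include <stdlib.h>

    int flip_like_setup(void)
    {
            char *work, *pin, *vblank;

            work = malloc(16);               /* step 1: the work struct */
            if (!work)
                    return -1;

            pin = malloc(16);                /* step 2: pin the object */
            if (!pin)
                    goto cleanup_work;

            vblank = malloc(16);             /* step 3: vblank reference */
            if (!vblank)
                    goto cleanup_pin;

            /* Success: the real driver keeps these until the flip
             * completes; the sketch simply releases them. */
            free(vblank);
            free(pin);
            free(work);
            return 0;

    cleanup_pin:
            free(pin);                       /* undo step 2 */
    cleanup_work:
            free(work);                      /* undo step 1 */
            return -1;
    }
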
6220 6220
6221 static void intel_sanitize_modesetting(struct drm_device *dev, 6221 static void intel_sanitize_modesetting(struct drm_device *dev,
6222 int pipe, int plane) 6222 int pipe, int plane)
6223 { 6223 {
6224 struct drm_i915_private *dev_priv = dev->dev_private; 6224 struct drm_i915_private *dev_priv = dev->dev_private;
6225 u32 reg, val; 6225 u32 reg, val;
6226 6226
6227 if (HAS_PCH_SPLIT(dev)) 6227 if (HAS_PCH_SPLIT(dev))
6228 return; 6228 return;
6229 6229
6230 /* Who knows what state these registers were left in by the BIOS or 6230 /* Who knows what state these registers were left in by the BIOS or
6231 * GRUB? 6231 * GRUB?
6232 * 6232 *
6233 * If we leave the registers in a conflicting state (e.g. with the 6233 * If we leave the registers in a conflicting state (e.g. with the
6234 * display plane reading from the other pipe than the one we intend 6234 * display plane reading from the other pipe than the one we intend
6235 * to use) then when we attempt to tear down the active mode, we will 6235 * to use) then when we attempt to tear down the active mode, we will
6236 * not disable the pipes and planes in the correct order -- leaving 6236 * not disable the pipes and planes in the correct order -- leaving
6237 * a plane reading from a disabled pipe and possibly leading to 6237 * a plane reading from a disabled pipe and possibly leading to
6238 * undefined behaviour. 6238 * undefined behaviour.
6239 */ 6239 */
6240 6240
6241 reg = DSPCNTR(plane); 6241 reg = DSPCNTR(plane);
6242 val = I915_READ(reg); 6242 val = I915_READ(reg);
6243 6243
6244 if ((val & DISPLAY_PLANE_ENABLE) == 0) 6244 if ((val & DISPLAY_PLANE_ENABLE) == 0)
6245 return; 6245 return;
6246 if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) 6246 if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
6247 return; 6247 return;
6248 6248
6249 /* This display plane is active and attached to the other CPU pipe. */ 6249 /* This display plane is active and attached to the other CPU pipe. */
6250 pipe = !pipe; 6250 pipe = !pipe;
6251 6251
6252 /* Disable the plane and wait for it to stop reading from the pipe. */ 6252 /* Disable the plane and wait for it to stop reading from the pipe. */
6253 intel_disable_plane(dev_priv, plane, pipe); 6253 intel_disable_plane(dev_priv, plane, pipe);
6254 intel_disable_pipe(dev_priv, pipe); 6254 intel_disable_pipe(dev_priv, pipe);
6255 } 6255 }
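
intel_sanitize_modesetting needs only two facts from DSPCNTR: is the plane enabled, and which pipe does it scan out from. The `!!` above collapses the multi-bit mask result to 0 or 1 so it can be compared against the pipe index directly. The decode in isolation, with placeholder bit positions (the real layout is in i915_reg.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define PLANE_ENABLE    (1u << 31)   /* assumed enable bit */
    #define PLANE_SEL_PIPE  (1u << 24)   /* assumed pipe select: 0 = A, 1 = B */

    /* True when 'val' shows an enabled plane bound to the other pipe. */
    bool plane_on_wrong_pipe(uint32_t val, int intended_pipe)
    {
            if ((val & PLANE_ENABLE) == 0)
                    return false;        /* plane is off: nothing to fix */

            /* !! turns the mask test into 0/1, comparable to a pipe index */
            return !!(val & PLANE_SEL_PIPE) != intended_pipe;
    }
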
6256 6256
6257 static void intel_crtc_reset(struct drm_crtc *crtc) 6257 static void intel_crtc_reset(struct drm_crtc *crtc)
6258 { 6258 {
6259 struct drm_device *dev = crtc->dev; 6259 struct drm_device *dev = crtc->dev;
6260 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6260 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6261 6261
6262 /* Reset flags back to the 'unknown' status so that they 6262 /* Reset flags back to the 'unknown' status so that they
6263 * will be correctly set on the initial modeset. 6263 * will be correctly set on the initial modeset.
6264 */ 6264 */
6265 intel_crtc->dpms_mode = -1; 6265 intel_crtc->dpms_mode = -1;
6266 6266
6267 /* We need to fix up any BIOS configuration that conflicts with 6267 /* We need to fix up any BIOS configuration that conflicts with
6268 * our expectations. 6268 * our expectations.
6269 */ 6269 */
6270 intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); 6270 intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
6271 } 6271 }
6272 6272
6273 static struct drm_crtc_helper_funcs intel_helper_funcs = { 6273 static struct drm_crtc_helper_funcs intel_helper_funcs = {
6274 .dpms = intel_crtc_dpms, 6274 .dpms = intel_crtc_dpms,
6275 .mode_fixup = intel_crtc_mode_fixup, 6275 .mode_fixup = intel_crtc_mode_fixup,
6276 .mode_set = intel_crtc_mode_set, 6276 .mode_set = intel_crtc_mode_set,
6277 .mode_set_base = intel_pipe_set_base, 6277 .mode_set_base = intel_pipe_set_base,
6278 .mode_set_base_atomic = intel_pipe_set_base_atomic, 6278 .mode_set_base_atomic = intel_pipe_set_base_atomic,
6279 .load_lut = intel_crtc_load_lut, 6279 .load_lut = intel_crtc_load_lut,
6280 .disable = intel_crtc_disable, 6280 .disable = intel_crtc_disable,
6281 }; 6281 };
6282 6282
6283 static const struct drm_crtc_funcs intel_crtc_funcs = { 6283 static const struct drm_crtc_funcs intel_crtc_funcs = {
6284 .reset = intel_crtc_reset, 6284 .reset = intel_crtc_reset,
6285 .cursor_set = intel_crtc_cursor_set, 6285 .cursor_set = intel_crtc_cursor_set,
6286 .cursor_move = intel_crtc_cursor_move, 6286 .cursor_move = intel_crtc_cursor_move,
6287 .gamma_set = intel_crtc_gamma_set, 6287 .gamma_set = intel_crtc_gamma_set,
6288 .set_config = drm_crtc_helper_set_config, 6288 .set_config = drm_crtc_helper_set_config,
6289 .destroy = intel_crtc_destroy, 6289 .destroy = intel_crtc_destroy,
6290 .page_flip = intel_crtc_page_flip, 6290 .page_flip = intel_crtc_page_flip,
6291 }; 6291 };
6292 6292
6293 static void intel_crtc_init(struct drm_device *dev, int pipe) 6293 static void intel_crtc_init(struct drm_device *dev, int pipe)
6294 { 6294 {
6295 drm_i915_private_t *dev_priv = dev->dev_private; 6295 drm_i915_private_t *dev_priv = dev->dev_private;
6296 struct intel_crtc *intel_crtc; 6296 struct intel_crtc *intel_crtc;
6297 int i; 6297 int i;
6298 6298
6299 intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 6299 intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
6300 if (intel_crtc == NULL) 6300 if (intel_crtc == NULL)
6301 return; 6301 return;
6302 6302
6303 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); 6303 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
6304 6304
6305 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); 6305 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
6306 for (i = 0; i < 256; i++) { 6306 for (i = 0; i < 256; i++) {
6307 intel_crtc->lut_r[i] = i; 6307 intel_crtc->lut_r[i] = i;
6308 intel_crtc->lut_g[i] = i; 6308 intel_crtc->lut_g[i] = i;
6309 intel_crtc->lut_b[i] = i; 6309 intel_crtc->lut_b[i] = i;
6310 } 6310 }
6311 6311
6312 /* Swap pipes & planes for FBC on pre-965 */ 6312 /* Swap pipes & planes for FBC on pre-965 */
6313 intel_crtc->pipe = pipe; 6313 intel_crtc->pipe = pipe;
6314 intel_crtc->plane = pipe; 6314 intel_crtc->plane = pipe;
6315 if (IS_MOBILE(dev) && IS_GEN3(dev)) { 6315 if (IS_MOBILE(dev) && IS_GEN3(dev)) {
6316 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 6316 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
6317 intel_crtc->plane = !pipe; 6317 intel_crtc->plane = !pipe;
6318 } 6318 }
6319 6319
6320 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 6320 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
6321 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); 6321 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
6322 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 6322 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
6323 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 6323 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
6324 6324
6325 intel_crtc_reset(&intel_crtc->base); 6325 intel_crtc_reset(&intel_crtc->base);
6326 intel_crtc->active = true; /* force the pipe off on setup_init_config */ 6326 intel_crtc->active = true; /* force the pipe off on setup_init_config */
6327 6327
6328 if (HAS_PCH_SPLIT(dev)) { 6328 if (HAS_PCH_SPLIT(dev)) {
6329 intel_helper_funcs.prepare = ironlake_crtc_prepare; 6329 intel_helper_funcs.prepare = ironlake_crtc_prepare;
6330 intel_helper_funcs.commit = ironlake_crtc_commit; 6330 intel_helper_funcs.commit = ironlake_crtc_commit;
6331 } else { 6331 } else {
6332 intel_helper_funcs.prepare = i9xx_crtc_prepare; 6332 intel_helper_funcs.prepare = i9xx_crtc_prepare;
6333 intel_helper_funcs.commit = i9xx_crtc_commit; 6333 intel_helper_funcs.commit = i9xx_crtc_commit;
6334 } 6334 }
6335 6335
6336 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 6336 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
6337 6337
6338 intel_crtc->busy = false; 6338 intel_crtc->busy = false;
6339 6339
6340 setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, 6340 setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
6341 (unsigned long)intel_crtc); 6341 (unsigned long)intel_crtc);
6342 } 6342 }
6343 6343
6344 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 6344 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
6345 struct drm_file *file) 6345 struct drm_file *file)
6346 { 6346 {
6347 drm_i915_private_t *dev_priv = dev->dev_private; 6347 drm_i915_private_t *dev_priv = dev->dev_private;
6348 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 6348 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
6349 struct drm_mode_object *drmmode_obj; 6349 struct drm_mode_object *drmmode_obj;
6350 struct intel_crtc *crtc; 6350 struct intel_crtc *crtc;
6351 6351
6352 if (!dev_priv) { 6352 if (!dev_priv) {
6353 DRM_ERROR("called with no initialization\n"); 6353 DRM_ERROR("called with no initialization\n");
6354 return -EINVAL; 6354 return -EINVAL;
6355 } 6355 }
6356 6356
6357 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, 6357 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
6358 DRM_MODE_OBJECT_CRTC); 6358 DRM_MODE_OBJECT_CRTC);
6359 6359
6360 if (!drmmode_obj) { 6360 if (!drmmode_obj) {
6361 DRM_ERROR("no such CRTC id\n"); 6361 DRM_ERROR("no such CRTC id\n");
6362 return -EINVAL; 6362 return -EINVAL;
6363 } 6363 }
6364 6364
6365 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); 6365 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
6366 pipe_from_crtc_id->pipe = crtc->pipe; 6366 pipe_from_crtc_id->pipe = crtc->pipe;
6367 6367
6368 return 0; 6368 return 0;
6369 } 6369 }
6370 6370
6371 static int intel_encoder_clones(struct drm_device *dev, int type_mask) 6371 static int intel_encoder_clones(struct drm_device *dev, int type_mask)
6372 { 6372 {
6373 struct intel_encoder *encoder; 6373 struct intel_encoder *encoder;
6374 int index_mask = 0; 6374 int index_mask = 0;
6375 int entry = 0; 6375 int entry = 0;
6376 6376
6377 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 6377 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6378 if (type_mask & encoder->clone_mask) 6378 if (type_mask & encoder->clone_mask)
6379 index_mask |= (1 << entry); 6379 index_mask |= (1 << entry);
6380 entry++; 6380 entry++;
6381 } 6381 }
6382 6382
6383 return index_mask; 6383 return index_mask;
6384 } 6384 }
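
intel_encoder_clones converts clone compatibility from the per-type clone_mask each encoder carries into a positional mask, where bit n stands for the n-th encoder on the mode_config list, which is the shape drm wants in possible_clones. The same walk over a plain array, with a hypothetical struct standing in for the encoder list:

    #include <stdint.h>

    struct enc { uint32_t clone_mask; };  /* invented miniature encoder */

    uint32_t clones_index_mask(const struct enc *encoders, int n,
                               uint32_t type_mask)
    {
            uint32_t index_mask = 0;
            int entry;

            /* Set bit i when encoder i may be cloned with 'type_mask'. */
            for (entry = 0; entry < n; entry++)
                    if (type_mask & encoders[entry].clone_mask)
                            index_mask |= 1u << entry;

            return index_mask;
    }
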
6385 6385
6386 static bool has_edp_a(struct drm_device *dev) 6386 static bool has_edp_a(struct drm_device *dev)
6387 { 6387 {
6388 struct drm_i915_private *dev_priv = dev->dev_private; 6388 struct drm_i915_private *dev_priv = dev->dev_private;
6389 6389
6390 if (!IS_MOBILE(dev)) 6390 if (!IS_MOBILE(dev))
6391 return false; 6391 return false;
6392 6392
6393 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 6393 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
6394 return false; 6394 return false;
6395 6395
6396 if (IS_GEN5(dev) && 6396 if (IS_GEN5(dev) &&
6397 (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) 6397 (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
6398 return false; 6398 return false;
6399 6399
6400 return true; 6400 return true;
6401 } 6401 }
6402 6402
6403 static void intel_setup_outputs(struct drm_device *dev) 6403 static void intel_setup_outputs(struct drm_device *dev)
6404 { 6404 {
6405 struct drm_i915_private *dev_priv = dev->dev_private; 6405 struct drm_i915_private *dev_priv = dev->dev_private;
6406 struct intel_encoder *encoder; 6406 struct intel_encoder *encoder;
6407 bool dpd_is_edp = false; 6407 bool dpd_is_edp = false;
6408 bool has_lvds = false; 6408 bool has_lvds = false;
6409 6409
6410 if (IS_MOBILE(dev) && !IS_I830(dev)) 6410 if (IS_MOBILE(dev) && !IS_I830(dev))
6411 has_lvds = intel_lvds_init(dev); 6411 has_lvds = intel_lvds_init(dev);
6412 if (!has_lvds && !HAS_PCH_SPLIT(dev)) { 6412 if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
6413 /* disable the panel fitter on everything but LVDS */ 6413 /* disable the panel fitter on everything but LVDS */
6414 I915_WRITE(PFIT_CONTROL, 0); 6414 I915_WRITE(PFIT_CONTROL, 0);
6415 } 6415 }
6416 6416
6417 if (HAS_PCH_SPLIT(dev)) { 6417 if (HAS_PCH_SPLIT(dev)) {
6418 dpd_is_edp = intel_dpd_is_edp(dev); 6418 dpd_is_edp = intel_dpd_is_edp(dev);
6419 6419
6420 if (has_edp_a(dev)) 6420 if (has_edp_a(dev))
6421 intel_dp_init(dev, DP_A); 6421 intel_dp_init(dev, DP_A);
6422 6422
6423 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) 6423 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
6424 intel_dp_init(dev, PCH_DP_D); 6424 intel_dp_init(dev, PCH_DP_D);
6425 } 6425 }
6426 6426
6427 intel_crt_init(dev); 6427 intel_crt_init(dev);
6428 6428
6429 if (HAS_PCH_SPLIT(dev)) { 6429 if (HAS_PCH_SPLIT(dev)) {
6430 int found; 6430 int found;
6431 6431
6432 if (I915_READ(HDMIB) & PORT_DETECTED) { 6432 if (I915_READ(HDMIB) & PORT_DETECTED) {
6433 /* PCH SDVOB is multiplexed with HDMIB */ 6433 /* PCH SDVOB is multiplexed with HDMIB */
6434 found = intel_sdvo_init(dev, PCH_SDVOB); 6434 found = intel_sdvo_init(dev, PCH_SDVOB);
6435 if (!found) 6435 if (!found)
6436 intel_hdmi_init(dev, HDMIB); 6436 intel_hdmi_init(dev, HDMIB);
6437 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 6437 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
6438 intel_dp_init(dev, PCH_DP_B); 6438 intel_dp_init(dev, PCH_DP_B);
6439 } 6439 }
6440 6440
6441 if (I915_READ(HDMIC) & PORT_DETECTED) 6441 if (I915_READ(HDMIC) & PORT_DETECTED)
6442 intel_hdmi_init(dev, HDMIC); 6442 intel_hdmi_init(dev, HDMIC);
6443 6443
6444 if (I915_READ(HDMID) & PORT_DETECTED) 6444 if (I915_READ(HDMID) & PORT_DETECTED)
6445 intel_hdmi_init(dev, HDMID); 6445 intel_hdmi_init(dev, HDMID);
6446 6446
6447 if (I915_READ(PCH_DP_C) & DP_DETECTED) 6447 if (I915_READ(PCH_DP_C) & DP_DETECTED)
6448 intel_dp_init(dev, PCH_DP_C); 6448 intel_dp_init(dev, PCH_DP_C);
6449 6449
6450 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) 6450 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
6451 intel_dp_init(dev, PCH_DP_D); 6451 intel_dp_init(dev, PCH_DP_D);
6452 6452
6453 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 6453 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
6454 bool found = false; 6454 bool found = false;
6455 6455
6456 if (I915_READ(SDVOB) & SDVO_DETECTED) { 6456 if (I915_READ(SDVOB) & SDVO_DETECTED) {
6457 DRM_DEBUG_KMS("probing SDVOB\n"); 6457 DRM_DEBUG_KMS("probing SDVOB\n");
6458 found = intel_sdvo_init(dev, SDVOB); 6458 found = intel_sdvo_init(dev, SDVOB);
6459 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { 6459 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
6460 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 6460 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
6461 intel_hdmi_init(dev, SDVOB); 6461 intel_hdmi_init(dev, SDVOB);
6462 } 6462 }
6463 6463
6464 if (!found && SUPPORTS_INTEGRATED_DP(dev)) { 6464 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
6465 DRM_DEBUG_KMS("probing DP_B\n"); 6465 DRM_DEBUG_KMS("probing DP_B\n");
6466 intel_dp_init(dev, DP_B); 6466 intel_dp_init(dev, DP_B);
6467 } 6467 }
6468 } 6468 }
6469 6469
6470 /* Before G4X, SDVOC doesn't have its own detect register */ 6470 /* Before G4X, SDVOC doesn't have its own detect register */
6471 6471
6472 if (I915_READ(SDVOB) & SDVO_DETECTED) { 6472 if (I915_READ(SDVOB) & SDVO_DETECTED) {
6473 DRM_DEBUG_KMS("probing SDVOC\n"); 6473 DRM_DEBUG_KMS("probing SDVOC\n");
6474 found = intel_sdvo_init(dev, SDVOC); 6474 found = intel_sdvo_init(dev, SDVOC);
6475 } 6475 }
6476 6476
6477 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 6477 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
6478 6478
6479 if (SUPPORTS_INTEGRATED_HDMI(dev)) { 6479 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
6480 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 6480 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
6481 intel_hdmi_init(dev, SDVOC); 6481 intel_hdmi_init(dev, SDVOC);
6482 } 6482 }
6483 if (SUPPORTS_INTEGRATED_DP(dev)) { 6483 if (SUPPORTS_INTEGRATED_DP(dev)) {
6484 DRM_DEBUG_KMS("probing DP_C\n"); 6484 DRM_DEBUG_KMS("probing DP_C\n");
6485 intel_dp_init(dev, DP_C); 6485 intel_dp_init(dev, DP_C);
6486 } 6486 }
6487 } 6487 }
6488 6488
6489 if (SUPPORTS_INTEGRATED_DP(dev) && 6489 if (SUPPORTS_INTEGRATED_DP(dev) &&
6490 (I915_READ(DP_D) & DP_DETECTED)) { 6490 (I915_READ(DP_D) & DP_DETECTED)) {
6491 DRM_DEBUG_KMS("probing DP_D\n"); 6491 DRM_DEBUG_KMS("probing DP_D\n");
6492 intel_dp_init(dev, DP_D); 6492 intel_dp_init(dev, DP_D);
6493 } 6493 }
6494 } else if (IS_GEN2(dev)) 6494 } else if (IS_GEN2(dev))
6495 intel_dvo_init(dev); 6495 intel_dvo_init(dev);
6496 6496
6497 if (SUPPORTS_TV(dev)) 6497 if (SUPPORTS_TV(dev))
6498 intel_tv_init(dev); 6498 intel_tv_init(dev);
6499 6499
6500 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 6500 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6501 encoder->base.possible_crtcs = encoder->crtc_mask; 6501 encoder->base.possible_crtcs = encoder->crtc_mask;
6502 encoder->base.possible_clones = 6502 encoder->base.possible_clones =
6503 intel_encoder_clones(dev, encoder->clone_mask); 6503 intel_encoder_clones(dev, encoder->clone_mask);
6504 } 6504 }
6505 6505
6506 intel_panel_setup_backlight(dev); 6506 intel_panel_setup_backlight(dev);
6507 } 6507 }
6508 6508
6509 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 6509 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
6510 { 6510 {
6511 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 6511 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
6512 6512
6513 drm_framebuffer_cleanup(fb); 6513 drm_framebuffer_cleanup(fb);
6514 drm_gem_object_unreference_unlocked(&intel_fb->obj->base); 6514 drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
6515 6515
6516 kfree(intel_fb); 6516 kfree(intel_fb);
6517 } 6517 }
6518 6518
6519 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 6519 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
6520 struct drm_file *file, 6520 struct drm_file *file,
6521 unsigned int *handle) 6521 unsigned int *handle)
6522 { 6522 {
6523 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 6523 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
6524 struct drm_i915_gem_object *obj = intel_fb->obj; 6524 struct drm_i915_gem_object *obj = intel_fb->obj;
6525 6525
6526 return drm_gem_handle_create(file, &obj->base, handle); 6526 return drm_gem_handle_create(file, &obj->base, handle);
6527 } 6527 }
6528 6528
6529 static const struct drm_framebuffer_funcs intel_fb_funcs = { 6529 static const struct drm_framebuffer_funcs intel_fb_funcs = {
6530 .destroy = intel_user_framebuffer_destroy, 6530 .destroy = intel_user_framebuffer_destroy,
6531 .create_handle = intel_user_framebuffer_create_handle, 6531 .create_handle = intel_user_framebuffer_create_handle,
6532 }; 6532 };
6533 6533
6534 int intel_framebuffer_init(struct drm_device *dev, 6534 int intel_framebuffer_init(struct drm_device *dev,
6535 struct intel_framebuffer *intel_fb, 6535 struct intel_framebuffer *intel_fb,
6536 struct drm_mode_fb_cmd *mode_cmd, 6536 struct drm_mode_fb_cmd *mode_cmd,
6537 struct drm_i915_gem_object *obj) 6537 struct drm_i915_gem_object *obj)
6538 { 6538 {
6539 int ret; 6539 int ret;
6540 6540
6541 if (obj->tiling_mode == I915_TILING_Y) 6541 if (obj->tiling_mode == I915_TILING_Y)
6542 return -EINVAL; 6542 return -EINVAL;
6543 6543
6544 if (mode_cmd->pitch & 63) 6544 if (mode_cmd->pitch & 63)
6545 return -EINVAL; 6545 return -EINVAL;
6546 6546
6547 switch (mode_cmd->bpp) { 6547 switch (mode_cmd->bpp) {
6548 case 8: 6548 case 8:
6549 case 16: 6549 case 16:
6550 case 24: 6550 case 24:
6551 case 32: 6551 case 32:
6552 break; 6552 break;
6553 default: 6553 default:
6554 return -EINVAL; 6554 return -EINVAL;
6555 } 6555 }
6556 6556
6557 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 6557 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
6558 if (ret) { 6558 if (ret) {
6559 DRM_ERROR("framebuffer init failed %d\n", ret); 6559 DRM_ERROR("framebuffer init failed %d\n", ret);
6560 return ret; 6560 return ret;
6561 } 6561 }
6562 6562
6563 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); 6563 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
6564 intel_fb->obj = obj; 6564 intel_fb->obj = obj;
6565 return 0; 6565 return 0;
6566 } 6566 }
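
The validation at the top of intel_framebuffer_init is compact enough to miss: `pitch & 63` is the power-of-two remainder trick, rejecting any pitch that is not a multiple of 64 bytes, and the switch whitelists the packed depths the scanout hardware accepts. The same checks as a stand-alone predicate (tiling reduced to a flag for brevity):

    #include <stdbool.h>
    #include <stdint.h>

    bool fb_params_ok(bool tiling_is_y, uint32_t pitch, int bpp)
    {
            if (tiling_is_y)
                    return false;       /* Y tiling not scanned out here */

            if (pitch & 63)             /* same as pitch % 64 != 0 */
                    return false;

            switch (bpp) {
            case 8: case 16: case 24: case 32:
                    return true;        /* packed formats only */
            default:
                    return false;
            }
    }
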
6567 6567
6568 static struct drm_framebuffer * 6568 static struct drm_framebuffer *
6569 intel_user_framebuffer_create(struct drm_device *dev, 6569 intel_user_framebuffer_create(struct drm_device *dev,
6570 struct drm_file *filp, 6570 struct drm_file *filp,
6571 struct drm_mode_fb_cmd *mode_cmd) 6571 struct drm_mode_fb_cmd *mode_cmd)
6572 { 6572 {
6573 struct drm_i915_gem_object *obj; 6573 struct drm_i915_gem_object *obj;
6574 struct intel_framebuffer *intel_fb; 6574 struct intel_framebuffer *intel_fb;
6575 int ret; 6575 int ret;
6576 6576
6577 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); 6577 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
6578 if (&obj->base == NULL) 6578 if (&obj->base == NULL)
6579 return ERR_PTR(-ENOENT); 6579 return ERR_PTR(-ENOENT);
6580 6580
6581 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 6581 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6582 if (!intel_fb) 6582 if (!intel_fb)
6583 return ERR_PTR(-ENOMEM); 6583 return ERR_PTR(-ENOMEM);
6584 6584
6585 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 6585 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
6586 if (ret) { 6586 if (ret) {
6587 drm_gem_object_unreference_unlocked(&obj->base); 6587 drm_gem_object_unreference_unlocked(&obj->base);
6588 kfree(intel_fb); 6588 kfree(intel_fb);
6589 return ERR_PTR(ret); 6589 return ERR_PTR(ret);
6590 } 6590 }
6591 6591
6592 return &intel_fb->base; 6592 return &intel_fb->base;
6593 } 6593 }
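
intel_user_framebuffer_create reports failure through the returned pointer itself: ERR_PTR encodes a small negative errno in the top page of the address space, so the caller receives either a usable framebuffer or an error it can unpack, never a NULL plus a separate code. A user-space imitation of the convention; err_ptr, ptr_err and is_err below mimic the kernel helpers and are not the kernel API.

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    static inline void *err_ptr(long err)      { return (void *)err; }
    static inline long  ptr_err(const void *p) { return (long)p; }
    static inline int   is_err(const void *p)
    {
            /* errno values fit in the last page of the address space */
            return (uintptr_t)p >= (uintptr_t)-4095;
    }

    void *create_thing(void)
    {
            void *obj = malloc(64);

            if (!obj)
                    return err_ptr(-ENOMEM);   /* error rides in the pointer */
            return obj;
    }

    /* A caller unpacks it the same way drm does:
     *     void *fb = create_thing();
     *     if (is_err(fb))
     *             return (int)ptr_err(fb);
     */
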
6594 6594
6595 static const struct drm_mode_config_funcs intel_mode_funcs = { 6595 static const struct drm_mode_config_funcs intel_mode_funcs = {
6596 .fb_create = intel_user_framebuffer_create, 6596 .fb_create = intel_user_framebuffer_create,
6597 .output_poll_changed = intel_fb_output_poll_changed, 6597 .output_poll_changed = intel_fb_output_poll_changed,
6598 }; 6598 };
6599 6599
6600 static struct drm_i915_gem_object * 6600 static struct drm_i915_gem_object *
6601 intel_alloc_context_page(struct drm_device *dev) 6601 intel_alloc_context_page(struct drm_device *dev)
6602 { 6602 {
6603 struct drm_i915_gem_object *ctx; 6603 struct drm_i915_gem_object *ctx;
6604 int ret; 6604 int ret;
6605 6605
6606 ctx = i915_gem_alloc_object(dev, 4096); 6606 ctx = i915_gem_alloc_object(dev, 4096);
6607 if (!ctx) { 6607 if (!ctx) {
6608 DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); 6608 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
6609 return NULL; 6609 return NULL;
6610 } 6610 }
6611 6611
6612 mutex_lock(&dev->struct_mutex); 6612 mutex_lock(&dev->struct_mutex);
6613 ret = i915_gem_object_pin(ctx, 4096, true); 6613 ret = i915_gem_object_pin(ctx, 4096, true);
6614 if (ret) { 6614 if (ret) {
6615 DRM_ERROR("failed to pin power context: %d\n", ret); 6615 DRM_ERROR("failed to pin power context: %d\n", ret);
6616 goto err_unref; 6616 goto err_unref;
6617 } 6617 }
6618 6618
6619 ret = i915_gem_object_set_to_gtt_domain(ctx, 1); 6619 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
6620 if (ret) { 6620 if (ret) {
6621 DRM_ERROR("failed to set-domain on power context: %d\n", ret); 6621 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
6622 goto err_unpin; 6622 goto err_unpin;
6623 } 6623 }
6624 mutex_unlock(&dev->struct_mutex); 6624 mutex_unlock(&dev->struct_mutex);
6625 6625
6626 return ctx; 6626 return ctx;
6627 6627
6628 err_unpin: 6628 err_unpin:
6629 i915_gem_object_unpin(ctx); 6629 i915_gem_object_unpin(ctx);
6630 err_unref: 6630 err_unref:
6631 drm_gem_object_unreference(&ctx->base); 6631 drm_gem_object_unreference(&ctx->base);
6632 mutex_unlock(&dev->struct_mutex); 6632 mutex_unlock(&dev->struct_mutex);
6633 return NULL; 6633 return NULL;
6634 } 6634 }
6635 6635
6636 bool ironlake_set_drps(struct drm_device *dev, u8 val) 6636 bool ironlake_set_drps(struct drm_device *dev, u8 val)
6637 { 6637 {
6638 struct drm_i915_private *dev_priv = dev->dev_private; 6638 struct drm_i915_private *dev_priv = dev->dev_private;
6639 u16 rgvswctl; 6639 u16 rgvswctl;
6640 6640
6641 rgvswctl = I915_READ16(MEMSWCTL); 6641 rgvswctl = I915_READ16(MEMSWCTL);
6642 if (rgvswctl & MEMCTL_CMD_STS) { 6642 if (rgvswctl & MEMCTL_CMD_STS) {
6643 DRM_DEBUG("gpu busy, RCS change rejected\n"); 6643 DRM_DEBUG("gpu busy, RCS change rejected\n");
6644 return false; /* still busy with another command */ 6644 return false; /* still busy with another command */
6645 } 6645 }
6646 6646
6647 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 6647 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
6648 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; 6648 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
6649 I915_WRITE16(MEMSWCTL, rgvswctl); 6649 I915_WRITE16(MEMSWCTL, rgvswctl);
6650 POSTING_READ16(MEMSWCTL); 6650 POSTING_READ16(MEMSWCTL);
6651 6651
6652 rgvswctl |= MEMCTL_CMD_STS; 6652 rgvswctl |= MEMCTL_CMD_STS;
6653 I915_WRITE16(MEMSWCTL, rgvswctl); 6653 I915_WRITE16(MEMSWCTL, rgvswctl);
6654 6654
6655 return true; 6655 return true;
6656 } 6656 }
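
ironlake_set_drps is a request/handshake write: bail out if the previous command's status bit is still set, post the new command, flush it with a read-back, then set the status bit to arm the handshake the hardware clears when done. The same sequence against a fake 16-bit register; the bit position and field shift below are assumptions for the sketch, not the real MEMSWCTL layout.

    #include <stdbool.h>
    #include <stdint.h>

    static volatile uint16_t fake_memswctl;  /* stand-in for the MMIO register */

    #define CMD_STS     (1u << 12)           /* assumed busy/handshake bit */
    #define FREQ_SHIFT  8                    /* assumed frequency field offset */

    bool set_freq(uint8_t val)
    {
            uint16_t ctl = fake_memswctl;

            if (ctl & CMD_STS)
                    return false;            /* still busy with the last command */

            ctl = (uint16_t)(val << FREQ_SHIFT);
            fake_memswctl = ctl;             /* post the request */
            (void)fake_memswctl;             /* read back to flush the write */

            fake_memswctl = ctl | CMD_STS;   /* arm the handshake */
            return true;
    }
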
6657 6657
6658 void ironlake_enable_drps(struct drm_device *dev) 6658 void ironlake_enable_drps(struct drm_device *dev)
6659 { 6659 {
6660 struct drm_i915_private *dev_priv = dev->dev_private; 6660 struct drm_i915_private *dev_priv = dev->dev_private;
6661 u32 rgvmodectl = I915_READ(MEMMODECTL); 6661 u32 rgvmodectl = I915_READ(MEMMODECTL);
6662 u8 fmax, fmin, fstart, vstart; 6662 u8 fmax, fmin, fstart, vstart;
6663 6663
6664 /* Enable temp reporting */ 6664 /* Enable temp reporting */
6665 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); 6665 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
6666 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); 6666 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
6667 6667
6668 /* 100ms RC evaluation intervals */ 6668 /* 100ms RC evaluation intervals */
6669 I915_WRITE(RCUPEI, 100000); 6669 I915_WRITE(RCUPEI, 100000);
6670 I915_WRITE(RCDNEI, 100000); 6670 I915_WRITE(RCDNEI, 100000);
6671 6671
6672 /* Set max/min thresholds to 90ms and 80ms respectively */ 6672 /* Set max/min thresholds to 90ms and 80ms respectively */
6673 I915_WRITE(RCBMAXAVG, 90000); 6673 I915_WRITE(RCBMAXAVG, 90000);
6674 I915_WRITE(RCBMINAVG, 80000); 6674 I915_WRITE(RCBMINAVG, 80000);
6675 6675
6676 I915_WRITE(MEMIHYST, 1); 6676 I915_WRITE(MEMIHYST, 1);
6677 6677
6678 /* Set up min, max, and cur for interrupt handling */ 6678 /* Set up min, max, and cur for interrupt handling */
6679 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; 6679 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
6680 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 6680 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
6681 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 6681 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
6682 MEMMODE_FSTART_SHIFT; 6682 MEMMODE_FSTART_SHIFT;
6683 6683
6684 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> 6684 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
6685 PXVFREQ_PX_SHIFT; 6685 PXVFREQ_PX_SHIFT;
6686 6686
6687 dev_priv->fmax = fmax; /* IPS callback will increase this */ 6687 dev_priv->fmax = fmax; /* IPS callback will increase this */
6688 dev_priv->fstart = fstart; 6688 dev_priv->fstart = fstart;
6689 6689
6690 dev_priv->max_delay = fstart; 6690 dev_priv->max_delay = fstart;
6691 dev_priv->min_delay = fmin; 6691 dev_priv->min_delay = fmin;
6692 dev_priv->cur_delay = fstart; 6692 dev_priv->cur_delay = fstart;
6693 6693
6694 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", 6694 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
6695 fmax, fmin, fstart); 6695 fmax, fmin, fstart);
6696 6696
6697 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 6697 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
6698 6698
6699 /* 6699 /*
6700 * Interrupts will be enabled in ironlake_irq_postinstall 6700 * Interrupts will be enabled in ironlake_irq_postinstall
6701 */ 6701 */
6702 6702
6703 I915_WRITE(VIDSTART, vstart); 6703 I915_WRITE(VIDSTART, vstart);
6704 POSTING_READ(VIDSTART); 6704 POSTING_READ(VIDSTART);
6705 6705
6706 rgvmodectl |= MEMMODE_SWMODE_EN; 6706 rgvmodectl |= MEMMODE_SWMODE_EN;
6707 I915_WRITE(MEMMODECTL, rgvmodectl); 6707 I915_WRITE(MEMMODECTL, rgvmodectl);
6708 6708
6709 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) 6709 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
6710 DRM_ERROR("stuck trying to change perf mode\n"); 6710 DRM_ERROR("stuck trying to change perf mode\n");
6711 msleep(1); 6711 msleep(1);
6712 6712
6713 ironlake_set_drps(dev, fstart); 6713 ironlake_set_drps(dev, fstart);
6714 6714
6715 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + 6715 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
6716 I915_READ(0x112e0); 6716 I915_READ(0x112e0);
6717 dev_priv->last_time1 = jiffies_to_msecs(jiffies); 6717 dev_priv->last_time1 = jiffies_to_msecs(jiffies);
6718 dev_priv->last_count2 = I915_READ(0x112f4); 6718 dev_priv->last_count2 = I915_READ(0x112f4);
6719 getrawmonotonic(&dev_priv->last_time2); 6719 getrawmonotonic(&dev_priv->last_time2);
6720 } 6720 }
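
fmax, fmin and fstart all come out of one MEMMODECTL read by the standard mask-and-shift recipe: AND with the field mask, then shift the field down to bit zero. A runnable toy with made-up masks and a made-up register value (the real MEMMODE_* definitions live in i915_reg.h):

    #include <stdint.h>
    #include <stdio.h>

    #define FMAX_MASK   0x000000f0u   /* assumed 4-bit field at bits 7:4 */
    #define FMAX_SHIFT  4
    #define FMIN_MASK   0x0000000fu   /* assumed 4-bit field at bits 3:0 */

    int main(void)
    {
            uint32_t modectl = 0x0000005au;  /* pretend hardware value */

            uint8_t fmax = (modectl & FMAX_MASK) >> FMAX_SHIFT;  /* = 0x5 */
            uint8_t fmin = (modectl & FMIN_MASK);                /* = 0xa */

            printf("fmax=%u fmin=%u\n", fmax, fmin);
            return 0;
    }
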
6721 6721
6722 void ironlake_disable_drps(struct drm_device *dev) 6722 void ironlake_disable_drps(struct drm_device *dev)
6723 { 6723 {
6724 struct drm_i915_private *dev_priv = dev->dev_private; 6724 struct drm_i915_private *dev_priv = dev->dev_private;
6725 u16 rgvswctl = I915_READ16(MEMSWCTL); 6725 u16 rgvswctl = I915_READ16(MEMSWCTL);
6726 6726
6727 /* Ack interrupts, disable EFC interrupt */ 6727 /* Ack interrupts, disable EFC interrupt */
6728 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); 6728 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
6729 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); 6729 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
6730 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); 6730 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
6731 I915_WRITE(DEIIR, DE_PCU_EVENT); 6731 I915_WRITE(DEIIR, DE_PCU_EVENT);
6732 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 6732 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
6733 6733
6734 /* Go back to the starting frequency */ 6734 /* Go back to the starting frequency */
6735 ironlake_set_drps(dev, dev_priv->fstart); 6735 ironlake_set_drps(dev, dev_priv->fstart);
6736 msleep(1); 6736 msleep(1);
6737 rgvswctl |= MEMCTL_CMD_STS; 6737 rgvswctl |= MEMCTL_CMD_STS;
6738 I915_WRITE(MEMSWCTL, rgvswctl); 6738 I915_WRITE(MEMSWCTL, rgvswctl);
6739 msleep(1); 6739 msleep(1);
6740 6740
6741 } 6741 }
6742 6742
6743 void gen6_set_rps(struct drm_device *dev, u8 val) 6743 void gen6_set_rps(struct drm_device *dev, u8 val)
6744 { 6744 {
6745 struct drm_i915_private *dev_priv = dev->dev_private; 6745 struct drm_i915_private *dev_priv = dev->dev_private;
6746 u32 swreq; 6746 u32 swreq;
6747 6747
6748 swreq = (val & 0x3ff) << 25; 6748 swreq = (val & 0x3ff) << 25;
6749 I915_WRITE(GEN6_RPNSWREQ, swreq); 6749 I915_WRITE(GEN6_RPNSWREQ, swreq);
6750 } 6750 }
6751 6751
6752 void gen6_disable_rps(struct drm_device *dev) 6752 void gen6_disable_rps(struct drm_device *dev)
6753 { 6753 {
6754 struct drm_i915_private *dev_priv = dev->dev_private; 6754 struct drm_i915_private *dev_priv = dev->dev_private;
6755 6755
6756 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 6756 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
6757 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 6757 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
6758 I915_WRITE(GEN6_PMIER, 0); 6758 I915_WRITE(GEN6_PMIER, 0);
6759 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 6759 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
6760 } 6760 }
6761 6761
6762 static unsigned long intel_pxfreq(u32 vidfreq) 6762 static unsigned long intel_pxfreq(u32 vidfreq)
6763 { 6763 {
6764 unsigned long freq; 6764 unsigned long freq;
6765 int div = (vidfreq & 0x3f0000) >> 16; 6765 int div = (vidfreq & 0x3f0000) >> 16;
6766 int post = (vidfreq & 0x3000) >> 12; 6766 int post = (vidfreq & 0x3000) >> 12;
6767 int pre = (vidfreq & 0x7); 6767 int pre = (vidfreq & 0x7);
6768 6768
6769 if (!pre) 6769 if (!pre)
6770 return 0; 6770 return 0;
6771 6771
6772 freq = ((div * 133333) / ((1<<post) * pre)); 6772 freq = ((div * 133333) / ((1<<post) * pre));
6773 6773
6774 return freq; 6774 return freq;
6775 } 6775 }
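
intel_pxfreq unpacks a divider, a power-of-two post-divider and a pre-divider from one register and evaluates div * 133333 / ((1 << post) * pre), treating a zero pre-divider as invalid. A worked example on a made-up register value; the constant suggests a 133.333 MHz reference expressed in kHz, though that unit is an inference, not something the code states.

    #include <stdio.h>

    int main(void)
    {
            unsigned int vidfreq = 0x00122003;      /* hypothetical sample */

            int div  = (vidfreq & 0x3f0000) >> 16;  /* 0x12 = 18 */
            int post = (vidfreq & 0x3000) >> 12;    /* 2 */
            int pre  = (vidfreq & 0x7);             /* 3 */

            /* 18 * 133333 / ((1 << 2) * 3) = 2399994 / 12 = 199999 */
            printf("%d\n", pre ? (div * 133333) / ((1 << post) * pre) : 0);
            return 0;
    }
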
6776 6776
6777 void intel_init_emon(struct drm_device *dev) 6777 void intel_init_emon(struct drm_device *dev)
6778 { 6778 {
6779 struct drm_i915_private *dev_priv = dev->dev_private; 6779 struct drm_i915_private *dev_priv = dev->dev_private;
6780 u32 lcfuse; 6780 u32 lcfuse;
6781 u8 pxw[16]; 6781 u8 pxw[16];
6782 int i; 6782 int i;
6783 6783
6784 /* Disable to program */ 6784 /* Disable to program */
6785 I915_WRITE(ECR, 0); 6785 I915_WRITE(ECR, 0);
6786 POSTING_READ(ECR); 6786 POSTING_READ(ECR);
6787 6787
6788 /* Program energy weights for various events */ 6788 /* Program energy weights for various events */
6789 I915_WRITE(SDEW, 0x15040d00); 6789 I915_WRITE(SDEW, 0x15040d00);
6790 I915_WRITE(CSIEW0, 0x007f0000); 6790 I915_WRITE(CSIEW0, 0x007f0000);
6791 I915_WRITE(CSIEW1, 0x1e220004); 6791 I915_WRITE(CSIEW1, 0x1e220004);
6792 I915_WRITE(CSIEW2, 0x04000004); 6792 I915_WRITE(CSIEW2, 0x04000004);
6793 6793
6794 for (i = 0; i < 5; i++) 6794 for (i = 0; i < 5; i++)
6795 I915_WRITE(PEW + (i * 4), 0); 6795 I915_WRITE(PEW + (i * 4), 0);
6796 for (i = 0; i < 3; i++) 6796 for (i = 0; i < 3; i++)
6797 I915_WRITE(DEW + (i * 4), 0); 6797 I915_WRITE(DEW + (i * 4), 0);
6798 6798
6799 /* Program P-state weights to account for frequency power adjustment */ 6799 /* Program P-state weights to account for frequency power adjustment */
6800 for (i = 0; i < 16; i++) { 6800 for (i = 0; i < 16; i++) {
6801 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); 6801 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
6802 unsigned long freq = intel_pxfreq(pxvidfreq); 6802 unsigned long freq = intel_pxfreq(pxvidfreq);
6803 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> 6803 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
6804 PXVFREQ_PX_SHIFT; 6804 PXVFREQ_PX_SHIFT;
6805 unsigned long val; 6805 unsigned long val;
6806 6806
6807 val = vid * vid; 6807 val = vid * vid;
6808 val *= (freq / 1000); 6808 val *= (freq / 1000);
6809 val *= 255; 6809 val *= 255;
6810 val /= (127*127*900); 6810 val /= (127*127*900);
6811 if (val > 0xff) 6811 if (val > 0xff)
6812 DRM_ERROR("bad pxval: %ld\n", val); 6812 DRM_ERROR("bad pxval: %ld\n", val);
6813 pxw[i] = val; 6813 pxw[i] = val;
6814 } 6814 }
6815 /* Render standby states get 0 weight */ 6815 /* Render standby states get 0 weight */
6816 pxw[14] = 0; 6816 pxw[14] = 0;
6817 pxw[15] = 0; 6817 pxw[15] = 0;
6818 6818
6819 for (i = 0; i < 4; i++) { 6819 for (i = 0; i < 4; i++) {
6820 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | 6820 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
6821 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); 6821 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
6822 I915_WRITE(PXW + (i * 4), val); 6822 I915_WRITE(PXW + (i * 4), val);
6823 } 6823 }
6824 6824
6825 /* Adjust magic regs to magic values (more experimental results) */ 6825 /* Adjust magic regs to magic values (more experimental results) */
6826 I915_WRITE(OGW0, 0); 6826 I915_WRITE(OGW0, 0);
6827 I915_WRITE(OGW1, 0); 6827 I915_WRITE(OGW1, 0);
6828 I915_WRITE(EG0, 0x00007f00); 6828 I915_WRITE(EG0, 0x00007f00);
6829 I915_WRITE(EG1, 0x0000000e); 6829 I915_WRITE(EG1, 0x0000000e);
6830 I915_WRITE(EG2, 0x000e0000); 6830 I915_WRITE(EG2, 0x000e0000);
6831 I915_WRITE(EG3, 0x68000300); 6831 I915_WRITE(EG3, 0x68000300);
6832 I915_WRITE(EG4, 0x42000000); 6832 I915_WRITE(EG4, 0x42000000);
6833 I915_WRITE(EG5, 0x00140031); 6833 I915_WRITE(EG5, 0x00140031);
6834 I915_WRITE(EG6, 0); 6834 I915_WRITE(EG6, 0);
6835 I915_WRITE(EG7, 0); 6835 I915_WRITE(EG7, 0);
6836 6836
6837 for (i = 0; i < 8; i++) 6837 for (i = 0; i < 8; i++)
6838 I915_WRITE(PXWL + (i * 4), 0); 6838 I915_WRITE(PXWL + (i * 4), 0);
6839 6839
6840 /* Enable PMON + select events */ 6840 /* Enable PMON + select events */
6841 I915_WRITE(ECR, 0x80000019); 6841 I915_WRITE(ECR, 0x80000019);
6842 6842
6843 lcfuse = I915_READ(LCFUSE02); 6843 lcfuse = I915_READ(LCFUSE02);
6844 6844
6845 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); 6845 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
6846 } 6846 }
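
The last loop above packs four 8-bit P-state weights into each 32-bit PXW word, most significant byte first. The packing step in isolation:

    #include <stdint.h>

    /* Pack pxw[4*i .. 4*i+3] into one register word, high byte first,
     * matching the PXW write loop above. */
    uint32_t pack_pxw(const uint8_t pxw[16], int i)
    {
            return ((uint32_t)pxw[i * 4]     << 24) |
                   ((uint32_t)pxw[i * 4 + 1] << 16) |
                   ((uint32_t)pxw[i * 4 + 2] <<  8) |
                    (uint32_t)pxw[i * 4 + 3];
    }
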
6847 6847
6848 void gen6_enable_rps(struct drm_i915_private *dev_priv) 6848 void gen6_enable_rps(struct drm_i915_private *dev_priv)
6849 { 6849 {
6850 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 6850 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
6851 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 6851 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
6852 u32 pcu_mbox; 6852 u32 pcu_mbox;
6853 int cur_freq, min_freq, max_freq; 6853 int cur_freq, min_freq, max_freq;
6854 int i; 6854 int i;
6855 6855
6856 /* Here begins a magic sequence of register writes to enable 6856 /* Here begins a magic sequence of register writes to enable
6857 * auto-downclocking. 6857 * auto-downclocking.
6858 * 6858 *
6859 * There might be some value in exposing these to 6859 * There might be some value in exposing these to
6860 * userspace... 6860 * userspace...
6861 */ 6861 */
6862 I915_WRITE(GEN6_RC_STATE, 0); 6862 I915_WRITE(GEN6_RC_STATE, 0);
6863 __gen6_gt_force_wake_get(dev_priv); 6863 __gen6_gt_force_wake_get(dev_priv);
6864 6864
6865 /* disable the counters and set deterministic thresholds */ 6865 /* disable the counters and set deterministic thresholds */
6866 I915_WRITE(GEN6_RC_CONTROL, 0); 6866 I915_WRITE(GEN6_RC_CONTROL, 0);
6867 6867
6868 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); 6868 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
6869 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); 6869 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
6870 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); 6870 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
6871 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 6871 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
6872 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 6872 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
6873 6873
6874 for (i = 0; i < I915_NUM_RINGS; i++) 6874 for (i = 0; i < I915_NUM_RINGS; i++)
6875 I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); 6875 I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
6876 6876
6877 I915_WRITE(GEN6_RC_SLEEP, 0); 6877 I915_WRITE(GEN6_RC_SLEEP, 0);
6878 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 6878 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
6879 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 6879 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
6880 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); 6880 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
6881 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 6881 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
6882 6882
6883 I915_WRITE(GEN6_RC_CONTROL, 6883 I915_WRITE(GEN6_RC_CONTROL,
6884 GEN6_RC_CTL_RC6p_ENABLE | 6884 GEN6_RC_CTL_RC6p_ENABLE |
6885 GEN6_RC_CTL_RC6_ENABLE | 6885 GEN6_RC_CTL_RC6_ENABLE |
6886 GEN6_RC_CTL_EI_MODE(1) | 6886 GEN6_RC_CTL_EI_MODE(1) |
6887 GEN6_RC_CTL_HW_ENABLE); 6887 GEN6_RC_CTL_HW_ENABLE);
6888 6888
6889 I915_WRITE(GEN6_RPNSWREQ, 6889 I915_WRITE(GEN6_RPNSWREQ,
6890 GEN6_FREQUENCY(10) | 6890 GEN6_FREQUENCY(10) |
6891 GEN6_OFFSET(0) | 6891 GEN6_OFFSET(0) |
6892 GEN6_AGGRESSIVE_TURBO); 6892 GEN6_AGGRESSIVE_TURBO);
6893 I915_WRITE(GEN6_RC_VIDEO_FREQ, 6893 I915_WRITE(GEN6_RC_VIDEO_FREQ,
6894 GEN6_FREQUENCY(12)); 6894 GEN6_FREQUENCY(12));
6895 6895
6896 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 6896 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
6897 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 6897 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
6898 18 << 24 | 6898 18 << 24 |
6899 6 << 16); 6899 6 << 16);
6900 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); 6900 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
6901 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); 6901 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
6902 I915_WRITE(GEN6_RP_UP_EI, 100000); 6902 I915_WRITE(GEN6_RP_UP_EI, 100000);
6903 I915_WRITE(GEN6_RP_DOWN_EI, 5000000); 6903 I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
6904 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 6904 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6905 I915_WRITE(GEN6_RP_CONTROL, 6905 I915_WRITE(GEN6_RP_CONTROL,
6906 GEN6_RP_MEDIA_TURBO | 6906 GEN6_RP_MEDIA_TURBO |
6907 GEN6_RP_USE_NORMAL_FREQ | 6907 GEN6_RP_USE_NORMAL_FREQ |
6908 GEN6_RP_MEDIA_IS_GFX | 6908 GEN6_RP_MEDIA_IS_GFX |
6909 GEN6_RP_ENABLE | 6909 GEN6_RP_ENABLE |
6910 GEN6_RP_UP_BUSY_AVG | 6910 GEN6_RP_UP_BUSY_AVG |
6911 GEN6_RP_DOWN_IDLE_CONT); 6911 GEN6_RP_DOWN_IDLE_CONT);
6912 6912
6913 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 6913 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6914 500)) 6914 500))
6915 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); 6915 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
6916 6916
6917 I915_WRITE(GEN6_PCODE_DATA, 0); 6917 I915_WRITE(GEN6_PCODE_DATA, 0);
6918 I915_WRITE(GEN6_PCODE_MAILBOX, 6918 I915_WRITE(GEN6_PCODE_MAILBOX,
6919 GEN6_PCODE_READY | 6919 GEN6_PCODE_READY |
6920 GEN6_PCODE_WRITE_MIN_FREQ_TABLE); 6920 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
6921 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 6921 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6922 500)) 6922 500))
6923 DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); 6923 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
6924 6924
6925 min_freq = (rp_state_cap & 0xff0000) >> 16; 6925 min_freq = (rp_state_cap & 0xff0000) >> 16;
6926 max_freq = rp_state_cap & 0xff; 6926 max_freq = rp_state_cap & 0xff;
6927 cur_freq = (gt_perf_status & 0xff00) >> 8; 6927 cur_freq = (gt_perf_status & 0xff00) >> 8;
6928 6928
6929 /* Check for overclock support */ 6929 /* Check for overclock support */
6930 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 6930 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6931 500)) 6931 500))
6932 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); 6932 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
6933 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); 6933 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
6934 pcu_mbox = I915_READ(GEN6_PCODE_DATA); 6934 pcu_mbox = I915_READ(GEN6_PCODE_DATA);
6935 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 6935 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6936 500)) 6936 500))
6937 DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); 6937 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
6938 if (pcu_mbox & (1<<31)) { /* OC supported */ 6938 if (pcu_mbox & (1<<31)) { /* OC supported */
6939 max_freq = pcu_mbox & 0xff; 6939 max_freq = pcu_mbox & 0xff;
6940 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); 6940 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
6941 } 6941 }
6942 6942
6943 /* In units of 100MHz */ 6943 /* In units of 100MHz */
6944 dev_priv->max_delay = max_freq; 6944 dev_priv->max_delay = max_freq;
6945 dev_priv->min_delay = min_freq; 6945 dev_priv->min_delay = min_freq;
6946 dev_priv->cur_delay = cur_freq; 6946 dev_priv->cur_delay = cur_freq;
6947 6947
6948 /* requires MSI enabled */ 6948 /* requires MSI enabled */
6949 I915_WRITE(GEN6_PMIER, 6949 I915_WRITE(GEN6_PMIER,
6950 GEN6_PM_MBOX_EVENT | 6950 GEN6_PM_MBOX_EVENT |
6951 GEN6_PM_THERMAL_EVENT | 6951 GEN6_PM_THERMAL_EVENT |
6952 GEN6_PM_RP_DOWN_TIMEOUT | 6952 GEN6_PM_RP_DOWN_TIMEOUT |
6953 GEN6_PM_RP_UP_THRESHOLD | 6953 GEN6_PM_RP_UP_THRESHOLD |
6954 GEN6_PM_RP_DOWN_THRESHOLD | 6954 GEN6_PM_RP_DOWN_THRESHOLD |
6955 GEN6_PM_RP_UP_EI_EXPIRED | 6955 GEN6_PM_RP_UP_EI_EXPIRED |
6956 GEN6_PM_RP_DOWN_EI_EXPIRED); 6956 GEN6_PM_RP_DOWN_EI_EXPIRED);
6957 I915_WRITE(GEN6_PMIMR, 0); 6957 I915_WRITE(GEN6_PMIMR, 0);
6958 /* enable all PM interrupts */ 6958 /* enable all PM interrupts */
6959 I915_WRITE(GEN6_PMINTRMSK, 0); 6959 I915_WRITE(GEN6_PMINTRMSK, 0);
6960 6960
6961 __gen6_gt_force_wake_put(dev_priv); 6961 __gen6_gt_force_wake_put(dev_priv);
6962 } 6962 }
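
gen6_enable_rps speaks to the PCU through a mailbox with a fixed etiquette: wait for GEN6_PCODE_READY to be clear, stage the payload in the data register, write the command with READY set, then wait for the firmware to drop READY again; the overclock probe repeats the dance and reads the data register afterwards. A self-contained toy of the protocol, with plain variables standing in for MMIO and a fake firmware that consumes commands instantly.

    #include <stdint.h>
    #include <stdio.h>

    #define PCODE_READY (1u << 31)

    /* Stand-ins for MMIO; a real driver would use register reads/writes. */
    static uint32_t mbox, data;

    static int mbox_idle(void)
    {
            mbox &= ~PCODE_READY;  /* fake firmware: consume instantly */
            return (mbox & PCODE_READY) == 0;
    }

    int pcode_write(uint32_t cmd, uint32_t payload)
    {
            if (!mbox_idle())                /* 1. wait for the mailbox */
                    return -1;
            data = payload;                  /* 2. stage the payload */
            mbox = PCODE_READY | cmd;        /* 3. ring the doorbell */
            return mbox_idle() ? 0 : -1;     /* 4. wait for READY to drop */
    }

    int main(void)
    {
            printf("%d\n", pcode_write(0x8, 0));  /* hypothetical command id */
            return 0;
    }
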
6963 6963
6964 void intel_enable_clock_gating(struct drm_device *dev) 6964 void intel_enable_clock_gating(struct drm_device *dev)
6965 { 6965 {
6966 struct drm_i915_private *dev_priv = dev->dev_private; 6966 struct drm_i915_private *dev_priv = dev->dev_private;
6967 int pipe; 6967 int pipe;
6968 6968
6969 /* 6969 /*
6970 * Disable any clock gating reported by the specs to work incorrectly, 6970 * Disable any clock gating reported by the specs to work incorrectly,
6971 * but enable as much else as we can. 6971 * but enable as much else as we can.
6972 */ 6972 */
6973 if (HAS_PCH_SPLIT(dev)) { 6973 if (HAS_PCH_SPLIT(dev)) {
6974 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; 6974 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
6975 6975
6976 if (IS_GEN5(dev)) { 6976 if (IS_GEN5(dev)) {
6977 /* Required for FBC */ 6977 /* Required for FBC */
6978 dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | 6978 dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
6979 DPFCRUNIT_CLOCK_GATE_DISABLE | 6979 DPFCRUNIT_CLOCK_GATE_DISABLE |
6980 DPFDUNIT_CLOCK_GATE_DISABLE; 6980 DPFDUNIT_CLOCK_GATE_DISABLE;
6981 /* Required for CxSR */ 6981 /* Required for CxSR */
6982 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; 6982 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
6983 6983
6984 I915_WRITE(PCH_3DCGDIS0, 6984 I915_WRITE(PCH_3DCGDIS0,
6985 MARIUNIT_CLOCK_GATE_DISABLE | 6985 MARIUNIT_CLOCK_GATE_DISABLE |
6986 SVSMUNIT_CLOCK_GATE_DISABLE); 6986 SVSMUNIT_CLOCK_GATE_DISABLE);
6987 I915_WRITE(PCH_3DCGDIS1, 6987 I915_WRITE(PCH_3DCGDIS1,
6988 VFMUNIT_CLOCK_GATE_DISABLE); 6988 VFMUNIT_CLOCK_GATE_DISABLE);
6989 } 6989 }
6990 6990
6991 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); 6991 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
6992 6992
6993 /* 6993 /*
6994 * On Ibex Peak and Cougar Point, we need to disable clock 6994 * On Ibex Peak and Cougar Point, we need to disable clock
6995 * gating for the panel power sequencer or it will fail to 6995 * gating for the panel power sequencer or it will fail to
6996 * start up when no ports are active. 6996 * start up when no ports are active.
6997 */ 6997 */
6998 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 6998 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
6999 6999
7000 /* 7000 /*
7001 * According to the spec the following bits should be set in 7001 * According to the spec the following bits should be set in
7002 * order to enable memory self-refresh 7002 * order to enable memory self-refresh
7003 * The bit 22/21 of 0x42004 7003 * The bit 22/21 of 0x42004
7004 * The bit 5 of 0x42020 7004 * The bit 5 of 0x42020
7005 * The bit 15 of 0x45000 7005 * The bit 15 of 0x45000
7006 */ 7006 */
7007 if (IS_GEN5(dev)) { 7007 if (IS_GEN5(dev)) {
7008 I915_WRITE(ILK_DISPLAY_CHICKEN2, 7008 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7009 (I915_READ(ILK_DISPLAY_CHICKEN2) | 7009 (I915_READ(ILK_DISPLAY_CHICKEN2) |
7010 ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 7010 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
7011 I915_WRITE(ILK_DSPCLK_GATE, 7011 I915_WRITE(ILK_DSPCLK_GATE,
7012 (I915_READ(ILK_DSPCLK_GATE) | 7012 (I915_READ(ILK_DSPCLK_GATE) |
7013 ILK_DPARB_CLK_GATE)); 7013 ILK_DPARB_CLK_GATE));
7014 I915_WRITE(DISP_ARB_CTL, 7014 I915_WRITE(DISP_ARB_CTL,
7015 (I915_READ(DISP_ARB_CTL) | 7015 (I915_READ(DISP_ARB_CTL) |
7016 DISP_FBC_WM_DIS)); 7016 DISP_FBC_WM_DIS));
7017 I915_WRITE(WM3_LP_ILK, 0); 7017 I915_WRITE(WM3_LP_ILK, 0);
7018 I915_WRITE(WM2_LP_ILK, 0); 7018 I915_WRITE(WM2_LP_ILK, 0);
7019 I915_WRITE(WM1_LP_ILK, 0); 7019 I915_WRITE(WM1_LP_ILK, 0);
7020 } 7020 }
7021 /* 7021 /*
7022 * According to the hardware documentation, the following bits 7022 * According to the hardware documentation, the following bits
7023 * should be set unconditionally in order to enable FBC: 7023 * should be set unconditionally in order to enable FBC:
7024 * Bit 22 of 0x42000 7024 * Bit 22 of 0x42000
7025 * Bit 22 of 0x42004 7025 * Bit 22 of 0x42004
7026 * Bits 7, 8 and 9 of 0x42020. 7026 * Bits 7, 8 and 9 of 0x42020.
7027 */ 7027 */
7028 if (IS_IRONLAKE_M(dev)) { 7028 if (IS_IRONLAKE_M(dev)) {
7029 I915_WRITE(ILK_DISPLAY_CHICKEN1, 7029 I915_WRITE(ILK_DISPLAY_CHICKEN1,
7030 I915_READ(ILK_DISPLAY_CHICKEN1) | 7030 I915_READ(ILK_DISPLAY_CHICKEN1) |
7031 ILK_FBCQ_DIS); 7031 ILK_FBCQ_DIS);
7032 I915_WRITE(ILK_DISPLAY_CHICKEN2, 7032 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7033 I915_READ(ILK_DISPLAY_CHICKEN2) | 7033 I915_READ(ILK_DISPLAY_CHICKEN2) |
7034 ILK_DPARB_GATE); 7034 ILK_DPARB_GATE);
7035 I915_WRITE(ILK_DSPCLK_GATE, 7035 I915_WRITE(ILK_DSPCLK_GATE,
7036 I915_READ(ILK_DSPCLK_GATE) | 7036 I915_READ(ILK_DSPCLK_GATE) |
7037 ILK_DPFC_DIS1 | 7037 ILK_DPFC_DIS1 |
7038 ILK_DPFC_DIS2 | 7038 ILK_DPFC_DIS2 |
7039 ILK_CLK_FBC); 7039 ILK_CLK_FBC);
7040 } 7040 }
7041 7041
7042 I915_WRITE(ILK_DISPLAY_CHICKEN2, 7042 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7043 I915_READ(ILK_DISPLAY_CHICKEN2) | 7043 I915_READ(ILK_DISPLAY_CHICKEN2) |
7044 ILK_ELPIN_409_SELECT); 7044 ILK_ELPIN_409_SELECT);
7045 7045
7046 if (IS_GEN5(dev)) { 7046 if (IS_GEN5(dev)) {
7047 I915_WRITE(_3D_CHICKEN2, 7047 I915_WRITE(_3D_CHICKEN2,
7048 _3D_CHICKEN2_WM_READ_PIPELINED << 16 | 7048 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
7049 _3D_CHICKEN2_WM_READ_PIPELINED); 7049 _3D_CHICKEN2_WM_READ_PIPELINED);
7050 } 7050 }
7051 7051
7052 if (IS_GEN6(dev)) { 7052 if (IS_GEN6(dev)) {
7053 I915_WRITE(WM3_LP_ILK, 0); 7053 I915_WRITE(WM3_LP_ILK, 0);
7054 I915_WRITE(WM2_LP_ILK, 0); 7054 I915_WRITE(WM2_LP_ILK, 0);
7055 I915_WRITE(WM1_LP_ILK, 0); 7055 I915_WRITE(WM1_LP_ILK, 0);
7056 7056
7057 /* 7057 /*
7058 * According to the spec the following bits should be 7058 * According to the spec the following bits should be
7059 * set in order to enable memory self-refresh and FBC: 7059 * set in order to enable memory self-refresh and FBC:
7060 * Bits 21 and 22 of 0x42000 7060 * Bits 21 and 22 of 0x42000
7061 * Bits 21 and 22 of 0x42004 7061 * Bits 21 and 22 of 0x42004
7062 * Bits 5 and 7 of 0x42020 7062 * Bits 5 and 7 of 0x42020
7063 * Bit 14 of 0x70180 7063 * Bit 14 of 0x70180
7064 * Bit 14 of 0x71180 7064 * Bit 14 of 0x71180
7065 */ 7065 */
7066 I915_WRITE(ILK_DISPLAY_CHICKEN1, 7066 I915_WRITE(ILK_DISPLAY_CHICKEN1,
7067 I915_READ(ILK_DISPLAY_CHICKEN1) | 7067 I915_READ(ILK_DISPLAY_CHICKEN1) |
7068 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 7068 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
7069 I915_WRITE(ILK_DISPLAY_CHICKEN2, 7069 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7070 I915_READ(ILK_DISPLAY_CHICKEN2) | 7070 I915_READ(ILK_DISPLAY_CHICKEN2) |
7071 ILK_DPARB_GATE | ILK_VSDPFD_FULL); 7071 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
7072 I915_WRITE(ILK_DSPCLK_GATE, 7072 I915_WRITE(ILK_DSPCLK_GATE,
7073 I915_READ(ILK_DSPCLK_GATE) | 7073 I915_READ(ILK_DSPCLK_GATE) |
7074 ILK_DPARB_CLK_GATE | 7074 ILK_DPARB_CLK_GATE |
7075 ILK_DPFD_CLK_GATE); 7075 ILK_DPFD_CLK_GATE);
7076 7076
7077 for_each_pipe(pipe) 7077 for_each_pipe(pipe)
7078 I915_WRITE(DSPCNTR(pipe), 7078 I915_WRITE(DSPCNTR(pipe),
7079 I915_READ(DSPCNTR(pipe)) | 7079 I915_READ(DSPCNTR(pipe)) |
7080 DISPPLANE_TRICKLE_FEED_DISABLE); 7080 DISPPLANE_TRICKLE_FEED_DISABLE);
7081 } 7081 }
7082 } else if (IS_G4X(dev)) { 7082 } else if (IS_G4X(dev)) {
7083 uint32_t dspclk_gate; 7083 uint32_t dspclk_gate;
7084 I915_WRITE(RENCLK_GATE_D1, 0); 7084 I915_WRITE(RENCLK_GATE_D1, 0);
7085 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | 7085 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7086 GS_UNIT_CLOCK_GATE_DISABLE | 7086 GS_UNIT_CLOCK_GATE_DISABLE |
7087 CL_UNIT_CLOCK_GATE_DISABLE); 7087 CL_UNIT_CLOCK_GATE_DISABLE);
7088 I915_WRITE(RAMCLK_GATE_D, 0); 7088 I915_WRITE(RAMCLK_GATE_D, 0);
7089 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | 7089 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7090 OVRUNIT_CLOCK_GATE_DISABLE | 7090 OVRUNIT_CLOCK_GATE_DISABLE |
7091 OVCUNIT_CLOCK_GATE_DISABLE; 7091 OVCUNIT_CLOCK_GATE_DISABLE;
7092 if (IS_GM45(dev)) 7092 if (IS_GM45(dev))
7093 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; 7093 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7094 I915_WRITE(DSPCLK_GATE_D, dspclk_gate); 7094 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7095 } else if (IS_CRESTLINE(dev)) { 7095 } else if (IS_CRESTLINE(dev)) {
7096 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); 7096 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7097 I915_WRITE(RENCLK_GATE_D2, 0); 7097 I915_WRITE(RENCLK_GATE_D2, 0);
7098 I915_WRITE(DSPCLK_GATE_D, 0); 7098 I915_WRITE(DSPCLK_GATE_D, 0);
7099 I915_WRITE(RAMCLK_GATE_D, 0); 7099 I915_WRITE(RAMCLK_GATE_D, 0);
7100 I915_WRITE16(DEUC, 0); 7100 I915_WRITE16(DEUC, 0);
7101 } else if (IS_BROADWATER(dev)) { 7101 } else if (IS_BROADWATER(dev)) {
7102 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 7102 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7103 I965_RCC_CLOCK_GATE_DISABLE | 7103 I965_RCC_CLOCK_GATE_DISABLE |
7104 I965_RCPB_CLOCK_GATE_DISABLE | 7104 I965_RCPB_CLOCK_GATE_DISABLE |
7105 I965_ISC_CLOCK_GATE_DISABLE | 7105 I965_ISC_CLOCK_GATE_DISABLE |
7106 I965_FBC_CLOCK_GATE_DISABLE); 7106 I965_FBC_CLOCK_GATE_DISABLE);
7107 I915_WRITE(RENCLK_GATE_D2, 0); 7107 I915_WRITE(RENCLK_GATE_D2, 0);
7108 } else if (IS_GEN3(dev)) { 7108 } else if (IS_GEN3(dev)) {
7109 u32 dstate = I915_READ(D_STATE); 7109 u32 dstate = I915_READ(D_STATE);
7110 7110
7111 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 7111 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7112 DSTATE_DOT_CLOCK_GATING; 7112 DSTATE_DOT_CLOCK_GATING;
7113 I915_WRITE(D_STATE, dstate); 7113 I915_WRITE(D_STATE, dstate);
7114 } else if (IS_I85X(dev) || IS_I865G(dev)) { 7114 } else if (IS_I85X(dev) || IS_I865G(dev)) {
7115 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 7115 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
7116 } else if (IS_I830(dev)) { 7116 } else if (IS_I830(dev)) {
7117 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 7117 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
7118 } 7118 }
7119 } 7119 }
7120 7120
7121 static void ironlake_teardown_rc6(struct drm_device *dev) 7121 static void ironlake_teardown_rc6(struct drm_device *dev)
7122 { 7122 {
7123 struct drm_i915_private *dev_priv = dev->dev_private; 7123 struct drm_i915_private *dev_priv = dev->dev_private;
7124 7124
7125 if (dev_priv->renderctx) { 7125 if (dev_priv->renderctx) {
7126 i915_gem_object_unpin(dev_priv->renderctx); 7126 i915_gem_object_unpin(dev_priv->renderctx);
7127 drm_gem_object_unreference(&dev_priv->renderctx->base); 7127 drm_gem_object_unreference(&dev_priv->renderctx->base);
7128 dev_priv->renderctx = NULL; 7128 dev_priv->renderctx = NULL;
7129 } 7129 }
7130 7130
7131 if (dev_priv->pwrctx) { 7131 if (dev_priv->pwrctx) {
7132 i915_gem_object_unpin(dev_priv->pwrctx); 7132 i915_gem_object_unpin(dev_priv->pwrctx);
7133 drm_gem_object_unreference(&dev_priv->pwrctx->base); 7133 drm_gem_object_unreference(&dev_priv->pwrctx->base);
7134 dev_priv->pwrctx = NULL; 7134 dev_priv->pwrctx = NULL;
7135 } 7135 }
7136 } 7136 }
7137 7137
7138 static void ironlake_disable_rc6(struct drm_device *dev) 7138 static void ironlake_disable_rc6(struct drm_device *dev)
7139 { 7139 {
7140 struct drm_i915_private *dev_priv = dev->dev_private; 7140 struct drm_i915_private *dev_priv = dev->dev_private;
7141 7141
7142 if (I915_READ(PWRCTXA)) { 7142 if (I915_READ(PWRCTXA)) {
7143 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ 7143 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
7144 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); 7144 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
7145 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), 7145 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
7146 50); 7146 50);
7147 7147
7148 I915_WRITE(PWRCTXA, 0); 7148 I915_WRITE(PWRCTXA, 0);
7149 POSTING_READ(PWRCTXA); 7149 POSTING_READ(PWRCTXA);
7150 7150
7151 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 7151 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
7152 POSTING_READ(RSTDBYCTL); 7152 POSTING_READ(RSTDBYCTL);
7153 } 7153 }
7154 7154
7155 ironlake_teardown_rc6(dev); 7155 ironlake_teardown_rc6(dev);
7156 } 7156 }
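
The wait_for() used above polls RSX_STATUS until the GPU reports it has left RC6, giving up after 50 ms. As a rough user-space illustration of that bounded-poll pattern (the helper name, the 1 ms step, and the toy condition below are invented for this sketch and are not kernel API):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Minimal analogue of the wait_for(COND, ms) idiom: poll a condition
 * until it holds or a timeout expires, sleeping ~1 ms per attempt.
 */
static bool poll_until(bool (*cond)(void), int timeout_ms)
{
	struct timespec ts = { 0, 1000 * 1000 }; /* 1 ms */

	while (timeout_ms-- > 0) {
		if (cond())
			return true;
		nanosleep(&ts, NULL);
	}
	return cond(); /* one last check after the deadline */
}

static int calls;
static bool ready_after_three(void) { return ++calls >= 3; }

int main(void)
{
	printf("condition met: %d\n", poll_until(ready_after_three, 50));
	return 0;
}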
7157 7157
7158 static int ironlake_setup_rc6(struct drm_device *dev) 7158 static int ironlake_setup_rc6(struct drm_device *dev)
7159 { 7159 {
7160 struct drm_i915_private *dev_priv = dev->dev_private; 7160 struct drm_i915_private *dev_priv = dev->dev_private;
7161 7161
7162 if (dev_priv->renderctx == NULL) 7162 if (dev_priv->renderctx == NULL)
7163 dev_priv->renderctx = intel_alloc_context_page(dev); 7163 dev_priv->renderctx = intel_alloc_context_page(dev);
7164 if (!dev_priv->renderctx) 7164 if (!dev_priv->renderctx)
7165 return -ENOMEM; 7165 return -ENOMEM;
7166 7166
7167 if (dev_priv->pwrctx == NULL) 7167 if (dev_priv->pwrctx == NULL)
7168 dev_priv->pwrctx = intel_alloc_context_page(dev); 7168 dev_priv->pwrctx = intel_alloc_context_page(dev);
7169 if (!dev_priv->pwrctx) { 7169 if (!dev_priv->pwrctx) {
7170 ironlake_teardown_rc6(dev); 7170 ironlake_teardown_rc6(dev);
7171 return -ENOMEM; 7171 return -ENOMEM;
7172 } 7172 }
7173 7173
7174 return 0; 7174 return 0;
7175 } 7175 }
7176 7176
7177 void ironlake_enable_rc6(struct drm_device *dev) 7177 void ironlake_enable_rc6(struct drm_device *dev)
7178 { 7178 {
7179 struct drm_i915_private *dev_priv = dev->dev_private; 7179 struct drm_i915_private *dev_priv = dev->dev_private;
7180 int ret; 7180 int ret;
7181 7181
7182 /* RC6 is disabled by default due to repeated reports of hangs during 7182 /* RC6 is disabled by default due to repeated reports of hangs during
7183 * boot and resume. 7183 * boot and resume.
7184 */ 7184 */
7185 if (!i915_enable_rc6) 7185 if (!i915_enable_rc6)
7186 return; 7186 return;
7187 7187
7188 ret = ironlake_setup_rc6(dev); 7188 ret = ironlake_setup_rc6(dev);
7189 if (ret) 7189 if (ret)
7190 return; 7190 return;
7191 7191
7192 /* 7192 /*
7193 * The GPU can automatically power down the render unit if given a 7193 * The GPU can automatically power down the render unit if given a
7194 * page in which to save its state. 7194 * page in which to save its state.
7195 */ 7195 */
7196 ret = BEGIN_LP_RING(6); 7196 ret = BEGIN_LP_RING(6);
7197 if (ret) { 7197 if (ret) {
7198 ironlake_teardown_rc6(dev); 7198 ironlake_teardown_rc6(dev);
7199 return; 7199 return;
7200 } 7200 }
7201 7201
7202 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); 7202 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
7203 OUT_RING(MI_SET_CONTEXT); 7203 OUT_RING(MI_SET_CONTEXT);
7204 OUT_RING(dev_priv->renderctx->gtt_offset | 7204 OUT_RING(dev_priv->renderctx->gtt_offset |
7205 MI_MM_SPACE_GTT | 7205 MI_MM_SPACE_GTT |
7206 MI_SAVE_EXT_STATE_EN | 7206 MI_SAVE_EXT_STATE_EN |
7207 MI_RESTORE_EXT_STATE_EN | 7207 MI_RESTORE_EXT_STATE_EN |
7208 MI_RESTORE_INHIBIT); 7208 MI_RESTORE_INHIBIT);
7209 OUT_RING(MI_SUSPEND_FLUSH); 7209 OUT_RING(MI_SUSPEND_FLUSH);
7210 OUT_RING(MI_NOOP); 7210 OUT_RING(MI_NOOP);
7211 OUT_RING(MI_FLUSH); 7211 OUT_RING(MI_FLUSH);
7212 ADVANCE_LP_RING(); 7212 ADVANCE_LP_RING();
7213 7213
7214 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); 7214 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
7215 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 7215 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
7216 } 7216 }
7217 7217
7218 7218
7219 /* Set up chip specific display functions */ 7219 /* Set up chip specific display functions */
7220 static void intel_init_display(struct drm_device *dev) 7220 static void intel_init_display(struct drm_device *dev)
7221 { 7221 {
7222 struct drm_i915_private *dev_priv = dev->dev_private; 7222 struct drm_i915_private *dev_priv = dev->dev_private;
7223 7223
7224 /* We always want a DPMS function */ 7224 /* We always want a DPMS function */
7225 if (HAS_PCH_SPLIT(dev)) 7225 if (HAS_PCH_SPLIT(dev))
7226 dev_priv->display.dpms = ironlake_crtc_dpms; 7226 dev_priv->display.dpms = ironlake_crtc_dpms;
7227 else 7227 else
7228 dev_priv->display.dpms = i9xx_crtc_dpms; 7228 dev_priv->display.dpms = i9xx_crtc_dpms;
7229 7229
7230 if (I915_HAS_FBC(dev)) { 7230 if (I915_HAS_FBC(dev)) {
7231 if (HAS_PCH_SPLIT(dev)) { 7231 if (HAS_PCH_SPLIT(dev)) {
7232 dev_priv->display.fbc_enabled = ironlake_fbc_enabled; 7232 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
7233 dev_priv->display.enable_fbc = ironlake_enable_fbc; 7233 dev_priv->display.enable_fbc = ironlake_enable_fbc;
7234 dev_priv->display.disable_fbc = ironlake_disable_fbc; 7234 dev_priv->display.disable_fbc = ironlake_disable_fbc;
7235 } else if (IS_GM45(dev)) { 7235 } else if (IS_GM45(dev)) {
7236 dev_priv->display.fbc_enabled = g4x_fbc_enabled; 7236 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
7237 dev_priv->display.enable_fbc = g4x_enable_fbc; 7237 dev_priv->display.enable_fbc = g4x_enable_fbc;
7238 dev_priv->display.disable_fbc = g4x_disable_fbc; 7238 dev_priv->display.disable_fbc = g4x_disable_fbc;
7239 } else if (IS_CRESTLINE(dev)) { 7239 } else if (IS_CRESTLINE(dev)) {
7240 dev_priv->display.fbc_enabled = i8xx_fbc_enabled; 7240 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
7241 dev_priv->display.enable_fbc = i8xx_enable_fbc; 7241 dev_priv->display.enable_fbc = i8xx_enable_fbc;
7242 dev_priv->display.disable_fbc = i8xx_disable_fbc; 7242 dev_priv->display.disable_fbc = i8xx_disable_fbc;
7243 } 7243 }
7244 /* 855GM needs testing */ 7244 /* 855GM needs testing */
7245 } 7245 }
7246 7246
7247 /* Returns the core display clock speed */ 7247 /* Returns the core display clock speed */
7248 if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) 7248 if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
7249 dev_priv->display.get_display_clock_speed = 7249 dev_priv->display.get_display_clock_speed =
7250 i945_get_display_clock_speed; 7250 i945_get_display_clock_speed;
7251 else if (IS_I915G(dev)) 7251 else if (IS_I915G(dev))
7252 dev_priv->display.get_display_clock_speed = 7252 dev_priv->display.get_display_clock_speed =
7253 i915_get_display_clock_speed; 7253 i915_get_display_clock_speed;
7254 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) 7254 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
7255 dev_priv->display.get_display_clock_speed = 7255 dev_priv->display.get_display_clock_speed =
7256 i9xx_misc_get_display_clock_speed; 7256 i9xx_misc_get_display_clock_speed;
7257 else if (IS_I915GM(dev)) 7257 else if (IS_I915GM(dev))
7258 dev_priv->display.get_display_clock_speed = 7258 dev_priv->display.get_display_clock_speed =
7259 i915gm_get_display_clock_speed; 7259 i915gm_get_display_clock_speed;
7260 else if (IS_I865G(dev)) 7260 else if (IS_I865G(dev))
7261 dev_priv->display.get_display_clock_speed = 7261 dev_priv->display.get_display_clock_speed =
7262 i865_get_display_clock_speed; 7262 i865_get_display_clock_speed;
7263 else if (IS_I85X(dev)) 7263 else if (IS_I85X(dev))
7264 dev_priv->display.get_display_clock_speed = 7264 dev_priv->display.get_display_clock_speed =
7265 i855_get_display_clock_speed; 7265 i855_get_display_clock_speed;
7266 else /* 852, 830 */ 7266 else /* 852, 830 */
7267 dev_priv->display.get_display_clock_speed = 7267 dev_priv->display.get_display_clock_speed =
7268 i830_get_display_clock_speed; 7268 i830_get_display_clock_speed;
7269 7269
7270 /* For FIFO watermark updates */ 7270 /* For FIFO watermark updates */
7271 if (HAS_PCH_SPLIT(dev)) { 7271 if (HAS_PCH_SPLIT(dev)) {
7272 if (IS_GEN5(dev)) { 7272 if (IS_GEN5(dev)) {
7273 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) 7273 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
7274 dev_priv->display.update_wm = ironlake_update_wm; 7274 dev_priv->display.update_wm = ironlake_update_wm;
7275 else { 7275 else {
7276 DRM_DEBUG_KMS("Failed to get proper latency. " 7276 DRM_DEBUG_KMS("Failed to get proper latency. "
7277 "Disable CxSR\n"); 7277 "Disable CxSR\n");
7278 dev_priv->display.update_wm = NULL; 7278 dev_priv->display.update_wm = NULL;
7279 } 7279 }
7280 } else if (IS_GEN6(dev)) { 7280 } else if (IS_GEN6(dev)) {
7281 if (SNB_READ_WM0_LATENCY()) { 7281 if (SNB_READ_WM0_LATENCY()) {
7282 dev_priv->display.update_wm = sandybridge_update_wm; 7282 dev_priv->display.update_wm = sandybridge_update_wm;
7283 } else { 7283 } else {
7284 DRM_DEBUG_KMS("Failed to read display plane latency. " 7284 DRM_DEBUG_KMS("Failed to read display plane latency. "
7285 "Disable CxSR\n"); 7285 "Disable CxSR\n");
7286 dev_priv->display.update_wm = NULL; 7286 dev_priv->display.update_wm = NULL;
7287 } 7287 }
7288 } else 7288 } else
7289 dev_priv->display.update_wm = NULL; 7289 dev_priv->display.update_wm = NULL;
7290 } else if (IS_PINEVIEW(dev)) { 7290 } else if (IS_PINEVIEW(dev)) {
7291 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), 7291 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
7292 dev_priv->is_ddr3, 7292 dev_priv->is_ddr3,
7293 dev_priv->fsb_freq, 7293 dev_priv->fsb_freq,
7294 dev_priv->mem_freq)) { 7294 dev_priv->mem_freq)) {
7295 DRM_INFO("failed to find known CxSR latency " 7295 DRM_INFO("failed to find known CxSR latency "
7296 "(found ddr%s fsb freq %d, mem freq %d), " 7296 "(found ddr%s fsb freq %d, mem freq %d), "
7297 "disabling CxSR\n", 7297 "disabling CxSR\n",
7298 (dev_priv->is_ddr3 == 1) ? "3" : "2", 7298 (dev_priv->is_ddr3 == 1) ? "3" : "2",
7299 dev_priv->fsb_freq, dev_priv->mem_freq); 7299 dev_priv->fsb_freq, dev_priv->mem_freq);
7300 /* Disable CxSR and never update its watermark again */ 7300 /* Disable CxSR and never update its watermark again */
7301 pineview_disable_cxsr(dev); 7301 pineview_disable_cxsr(dev);
7302 dev_priv->display.update_wm = NULL; 7302 dev_priv->display.update_wm = NULL;
7303 } else 7303 } else
7304 dev_priv->display.update_wm = pineview_update_wm; 7304 dev_priv->display.update_wm = pineview_update_wm;
7305 } else if (IS_G4X(dev)) 7305 } else if (IS_G4X(dev))
7306 dev_priv->display.update_wm = g4x_update_wm; 7306 dev_priv->display.update_wm = g4x_update_wm;
7307 else if (IS_GEN4(dev)) 7307 else if (IS_GEN4(dev))
7308 dev_priv->display.update_wm = i965_update_wm; 7308 dev_priv->display.update_wm = i965_update_wm;
7309 else if (IS_GEN3(dev)) { 7309 else if (IS_GEN3(dev)) {
7310 dev_priv->display.update_wm = i9xx_update_wm; 7310 dev_priv->display.update_wm = i9xx_update_wm;
7311 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 7311 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7312 } else if (IS_I85X(dev)) { 7312 } else if (IS_I85X(dev)) {
7313 dev_priv->display.update_wm = i9xx_update_wm; 7313 dev_priv->display.update_wm = i9xx_update_wm;
7314 dev_priv->display.get_fifo_size = i85x_get_fifo_size; 7314 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
7315 } else { 7315 } else {
7316 dev_priv->display.update_wm = i830_update_wm; 7316 dev_priv->display.update_wm = i830_update_wm;
7317 if (IS_845G(dev)) 7317 if (IS_845G(dev))
7318 dev_priv->display.get_fifo_size = i845_get_fifo_size; 7318 dev_priv->display.get_fifo_size = i845_get_fifo_size;
7319 else 7319 else
7320 dev_priv->display.get_fifo_size = i830_get_fifo_size; 7320 dev_priv->display.get_fifo_size = i830_get_fifo_size;
7321 } 7321 }
7322 } 7322 }
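
intel_init_display() fills dev_priv->display with per-generation implementations once at init time; later code simply calls through the pointers, or skips the call when a pointer is deliberately left NULL (as with update_wm above when latency probing fails). A minimal stand-alone sketch of that vtable idiom, with invented names:

#include <stdio.h>

/* Per-chipset ops chosen once at init, called through pointers after. */
struct display_ops {
	void (*update_wm)(void);
	int (*get_clock_khz)(void);
};

static void gen5_update_wm(void) { puts("gen5 watermarks"); }
static int gen5_clock(void) { return 450000; }

static struct display_ops ops;

static void init_display(int gen)
{
	if (gen == 5) {
		ops.update_wm = gen5_update_wm;
		ops.get_clock_khz = gen5_clock;
	}
	/* other generations would fill in their own implementations */
}

int main(void)
{
	init_display(5);
	if (ops.update_wm)
		ops.update_wm();
	if (ops.get_clock_khz)
		printf("core display clock: %d kHz\n", ops.get_clock_khz());
	return 0;
}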
7323 7323
7324 /* 7324 /*
7325 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, 7325 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
7326 * resume, or other times. This quirk makes sure that's the case for 7326 * resume, or other times. This quirk makes sure that's the case for
7327 * affected systems. 7327 * affected systems.
7328 */ 7328 */
7329 static void quirk_pipea_force(struct drm_device *dev) 7329 static void quirk_pipea_force(struct drm_device *dev)
7330 { 7330 {
7331 struct drm_i915_private *dev_priv = dev->dev_private; 7331 struct drm_i915_private *dev_priv = dev->dev_private;
7332 7332
7333 dev_priv->quirks |= QUIRK_PIPEA_FORCE; 7333 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
7334 DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); 7334 DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
7335 } 7335 }
7336 7336
7337 struct intel_quirk { 7337 struct intel_quirk {
7338 int device; 7338 int device;
7339 int subsystem_vendor; 7339 int subsystem_vendor;
7340 int subsystem_device; 7340 int subsystem_device;
7341 void (*hook)(struct drm_device *dev); 7341 void (*hook)(struct drm_device *dev);
7342 }; 7342 };
7343 7343
7344 struct intel_quirk intel_quirks[] = { 7344 struct intel_quirk intel_quirks[] = {
7345 /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */ 7345 /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
7346 { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force }, 7346 { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
7347 /* HP Mini needs pipe A force quirk (LP: #322104) */ 7347 /* HP Mini needs pipe A force quirk (LP: #322104) */
7348 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, 7348 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
7349 7349
7350 /* Thinkpad R31 needs pipe A force quirk */ 7350 /* Thinkpad R31 needs pipe A force quirk */
7351 { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, 7351 { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
7352 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ 7352 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
7353 { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, 7353 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
7354 7354
7355 /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */ 7355 /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
7356 { 0x3577, 0x1014, 0x0513, quirk_pipea_force }, 7356 { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
7357 /* ThinkPad X40 needs pipe A force quirk */ 7357 /* ThinkPad X40 needs pipe A force quirk */
7358 7358
7359 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 7359 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
7360 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 7360 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
7361 7361
7362 /* 855 & before need to leave pipe A & dpll A up */ 7362 /* 855 & before need to leave pipe A & dpll A up */
7363 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 7363 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7364 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 7364 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7365 }; 7365 };
7366 7366
7367 static void intel_init_quirks(struct drm_device *dev) 7367 static void intel_init_quirks(struct drm_device *dev)
7368 { 7368 {
7369 struct pci_dev *d = dev->pdev; 7369 struct pci_dev *d = dev->pdev;
7370 int i; 7370 int i;
7371 7371
7372 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { 7372 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
7373 struct intel_quirk *q = &intel_quirks[i]; 7373 struct intel_quirk *q = &intel_quirks[i];
7374 7374
7375 if (d->device == q->device && 7375 if (d->device == q->device &&
7376 (d->subsystem_vendor == q->subsystem_vendor || 7376 (d->subsystem_vendor == q->subsystem_vendor ||
7377 q->subsystem_vendor == PCI_ANY_ID) && 7377 q->subsystem_vendor == PCI_ANY_ID) &&
7378 (d->subsystem_device == q->subsystem_device || 7378 (d->subsystem_device == q->subsystem_device ||
7379 q->subsystem_device == PCI_ANY_ID)) 7379 q->subsystem_device == PCI_ANY_ID))
7380 q->hook(dev); 7380 q->hook(dev);
7381 } 7381 }
7382 } 7382 }
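
The quirk table is matched against the PCI device ID plus subsystem vendor/device, with PCI_ANY_ID acting as a wildcard on the subsystem fields. A self-contained sketch of the same matching logic, using made-up IDs and an ANY_ID stand-in rather than the real PCI constants:

#include <stdio.h>

#define ANY_ID 0xffffu /* stand-in for PCI_ANY_ID in this sketch */

struct quirk {
	unsigned device, sub_vendor, sub_device;
	const char *name;
};

static const struct quirk quirks[] = {
	{ 0x2a42, 0x103c, 0x30eb, "pipe A force" },
	{ 0x3582, ANY_ID, ANY_ID, "pipe A force" }, /* wildcard entry */
};

/* A subsystem field matches when it is equal or the quirk uses ANY_ID. */
static int matches(unsigned want, unsigned have)
{
	return want == ANY_ID || want == have;
}

int main(void)
{
	unsigned dev = 0x3582, sv = 0x1028, sd = 0x0001; /* made-up IDs */
	unsigned i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].device == dev &&
		    matches(quirks[i].sub_vendor, sv) &&
		    matches(quirks[i].sub_device, sd))
			printf("applying quirk: %s\n", quirks[i].name);
	return 0;
}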
7383 7383
7384 /* Disable the VGA plane that we never use */ 7384 /* Disable the VGA plane that we never use */
7385 static void i915_disable_vga(struct drm_device *dev) 7385 static void i915_disable_vga(struct drm_device *dev)
7386 { 7386 {
7387 struct drm_i915_private *dev_priv = dev->dev_private; 7387 struct drm_i915_private *dev_priv = dev->dev_private;
7388 u8 sr1; 7388 u8 sr1;
7389 u32 vga_reg; 7389 u32 vga_reg;
7390 7390
7391 if (HAS_PCH_SPLIT(dev)) 7391 if (HAS_PCH_SPLIT(dev))
7392 vga_reg = CPU_VGACNTRL; 7392 vga_reg = CPU_VGACNTRL;
7393 else 7393 else
7394 vga_reg = VGACNTRL; 7394 vga_reg = VGACNTRL;
7395 7395
7396 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 7396 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
7397 outb(1, VGA_SR_INDEX); 7397 outb(1, VGA_SR_INDEX);
7398 sr1 = inb(VGA_SR_DATA); 7398 sr1 = inb(VGA_SR_DATA);
7399 outb(sr1 | 1<<5, VGA_SR_DATA); 7399 outb(sr1 | 1<<5, VGA_SR_DATA);
7400 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 7400 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
7401 udelay(300); 7401 udelay(300);
7402 7402
7403 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 7403 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
7404 POSTING_READ(vga_reg); 7404 POSTING_READ(vga_reg);
7405 } 7405 }
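
The outb/inb pair above uses the classic VGA index/data protocol: write the register index (here 1, i.e. SR01) to the sequencer index port, then read-modify-write the data port; bit 5 of SR01 is the screen-off bit. A compilable sketch of the access pattern with stubbed port I/O (the stub stores one byte per port and ignores the index banking real hardware performs):

#include <stdint.h>
#include <stdio.h>

/* Stubbed port I/O so the sketch builds anywhere; a real driver would
 * use the platform's inb()/outb().
 */
static uint8_t ports[0x10000];
static void outb_stub(uint8_t v, uint16_t port) { ports[port] = v; }
static uint8_t inb_stub(uint16_t port) { return ports[port]; }

#define VGA_SR_INDEX 0x3c4 /* sequencer index port */
#define VGA_SR_DATA  0x3c5 /* sequencer data port */

int main(void)
{
	uint8_t sr1;

	/* Select SR01, then set its screen-off bit (bit 5), exactly the
	 * sequence i915_disable_vga() performs above.
	 */
	outb_stub(1, VGA_SR_INDEX);
	sr1 = inb_stub(VGA_SR_DATA);
	outb_stub(sr1 | 1 << 5, VGA_SR_DATA);

	printf("SR01 = 0x%02x\n", ports[VGA_SR_DATA]);
	return 0;
}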
7406 7406
7407 void intel_modeset_init(struct drm_device *dev) 7407 void intel_modeset_init(struct drm_device *dev)
7408 { 7408 {
7409 struct drm_i915_private *dev_priv = dev->dev_private; 7409 struct drm_i915_private *dev_priv = dev->dev_private;
7410 int i; 7410 int i;
7411 7411
7412 drm_mode_config_init(dev); 7412 drm_mode_config_init(dev);
7413 7413
7414 dev->mode_config.min_width = 0; 7414 dev->mode_config.min_width = 0;
7415 dev->mode_config.min_height = 0; 7415 dev->mode_config.min_height = 0;
7416 7416
7417 dev->mode_config.funcs = (void *)&intel_mode_funcs; 7417 dev->mode_config.funcs = (void *)&intel_mode_funcs;
7418 7418
7419 intel_init_quirks(dev); 7419 intel_init_quirks(dev);
7420 7420
7421 intel_init_display(dev); 7421 intel_init_display(dev);
7422 7422
7423 if (IS_GEN2(dev)) { 7423 if (IS_GEN2(dev)) {
7424 dev->mode_config.max_width = 2048; 7424 dev->mode_config.max_width = 2048;
7425 dev->mode_config.max_height = 2048; 7425 dev->mode_config.max_height = 2048;
7426 } else if (IS_GEN3(dev)) { 7426 } else if (IS_GEN3(dev)) {
7427 dev->mode_config.max_width = 4096; 7427 dev->mode_config.max_width = 4096;
7428 dev->mode_config.max_height = 4096; 7428 dev->mode_config.max_height = 4096;
7429 } else { 7429 } else {
7430 dev->mode_config.max_width = 8192; 7430 dev->mode_config.max_width = 8192;
7431 dev->mode_config.max_height = 8192; 7431 dev->mode_config.max_height = 8192;
7432 } 7432 }
7433 dev->mode_config.fb_base = dev->agp->base; 7433 dev->mode_config.fb_base = dev->agp->base;
7434 7434
7435 DRM_DEBUG_KMS("%d display pipe%s available.\n", 7435 DRM_DEBUG_KMS("%d display pipe%s available.\n",
7436 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); 7436 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
7437 7437
7438 for (i = 0; i < dev_priv->num_pipe; i++) { 7438 for (i = 0; i < dev_priv->num_pipe; i++) {
7439 intel_crtc_init(dev, i); 7439 intel_crtc_init(dev, i);
7440 } 7440 }
7441 7441
7442 intel_setup_outputs(dev); 7442 intel_setup_outputs(dev);
7443 7443
7444 intel_enable_clock_gating(dev); 7444 intel_enable_clock_gating(dev);
7445 7445
7446 /* Just disable it once at startup */ 7446 /* Just disable it once at startup */
7447 i915_disable_vga(dev); 7447 i915_disable_vga(dev);
7448 7448
7449 if (IS_IRONLAKE_M(dev)) { 7449 if (IS_IRONLAKE_M(dev)) {
7450 ironlake_enable_drps(dev); 7450 ironlake_enable_drps(dev);
7451 intel_init_emon(dev); 7451 intel_init_emon(dev);
7452 } 7452 }
7453 7453
7454 if (IS_GEN6(dev)) 7454 if (IS_GEN6(dev))
7455 gen6_enable_rps(dev_priv); 7455 gen6_enable_rps(dev_priv);
7456 7456
7457 if (IS_IRONLAKE_M(dev)) 7457 if (IS_IRONLAKE_M(dev))
7458 ironlake_enable_rc6(dev); 7458 ironlake_enable_rc6(dev);
7459 7459
7460 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 7460 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
7461 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 7461 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
7462 (unsigned long)dev); 7462 (unsigned long)dev);
7463 7463
7464 intel_setup_overlay(dev); 7464 intel_setup_overlay(dev);
7465 } 7465 }
7466 7466
7467 void intel_modeset_cleanup(struct drm_device *dev) 7467 void intel_modeset_cleanup(struct drm_device *dev)
7468 { 7468 {
7469 struct drm_i915_private *dev_priv = dev->dev_private; 7469 struct drm_i915_private *dev_priv = dev->dev_private;
7470 struct drm_crtc *crtc; 7470 struct drm_crtc *crtc;
7471 struct intel_crtc *intel_crtc; 7471 struct intel_crtc *intel_crtc;
7472 7472
7473 drm_kms_helper_poll_fini(dev); 7473 drm_kms_helper_poll_fini(dev);
7474 mutex_lock(&dev->struct_mutex); 7474 mutex_lock(&dev->struct_mutex);
7475 7475
7476 intel_unregister_dsm_handler(); 7476 intel_unregister_dsm_handler();
7477 7477
7478 7478
7479 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 7479 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7480 /* Skip inactive CRTCs */ 7480 /* Skip inactive CRTCs */
7481 if (!crtc->fb) 7481 if (!crtc->fb)
7482 continue; 7482 continue;
7483 7483
7484 intel_crtc = to_intel_crtc(crtc); 7484 intel_crtc = to_intel_crtc(crtc);
7485 intel_increase_pllclock(crtc); 7485 intel_increase_pllclock(crtc);
7486 } 7486 }
7487 7487
7488 if (dev_priv->display.disable_fbc) 7488 if (dev_priv->display.disable_fbc)
7489 dev_priv->display.disable_fbc(dev); 7489 dev_priv->display.disable_fbc(dev);
7490 7490
7491 if (IS_IRONLAKE_M(dev)) 7491 if (IS_IRONLAKE_M(dev))
7492 ironlake_disable_drps(dev); 7492 ironlake_disable_drps(dev);
7493 if (IS_GEN6(dev)) 7493 if (IS_GEN6(dev))
7494 gen6_disable_rps(dev); 7494 gen6_disable_rps(dev);
7495 7495
7496 if (IS_IRONLAKE_M(dev)) 7496 if (IS_IRONLAKE_M(dev))
7497 ironlake_disable_rc6(dev); 7497 ironlake_disable_rc6(dev);
7498 7498
7499 mutex_unlock(&dev->struct_mutex); 7499 mutex_unlock(&dev->struct_mutex);
7500 7500
7501 /* Disable the irq before mode object teardown, as the irq might 7501 /* Disable the irq before mode object teardown, as the irq might
7502 * enqueue unpin/hotplug work. */ 7502 * enqueue unpin/hotplug work. */
7503 drm_irq_uninstall(dev); 7503 drm_irq_uninstall(dev);
7504 cancel_work_sync(&dev_priv->hotplug_work); 7504 cancel_work_sync(&dev_priv->hotplug_work);
7505 7505
7506 /* Shut off idle work before the crtcs get freed. */ 7506 /* Shut off idle work before the crtcs get freed. */
7507 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 7507 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7508 intel_crtc = to_intel_crtc(crtc); 7508 intel_crtc = to_intel_crtc(crtc);
7509 del_timer_sync(&intel_crtc->idle_timer); 7509 del_timer_sync(&intel_crtc->idle_timer);
7510 } 7510 }
7511 del_timer_sync(&dev_priv->idle_timer); 7511 del_timer_sync(&dev_priv->idle_timer);
7512 cancel_work_sync(&dev_priv->idle_work); 7512 cancel_work_sync(&dev_priv->idle_work);
7513 7513
7514 drm_mode_config_cleanup(dev); 7514 drm_mode_config_cleanup(dev);
7515 } 7515 }
7516 7516
7517 /* 7517 /*
7518 * Return the encoder currently attached to the connector. 7518 * Return the encoder currently attached to the connector.
7519 */ 7519 */
7520 struct drm_encoder *intel_best_encoder(struct drm_connector *connector) 7520 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
7521 { 7521 {
7522 return &intel_attached_encoder(connector)->base; 7522 return &intel_attached_encoder(connector)->base;
7523 } 7523 }
7524 7524
7525 void intel_connector_attach_encoder(struct intel_connector *connector, 7525 void intel_connector_attach_encoder(struct intel_connector *connector,
7526 struct intel_encoder *encoder) 7526 struct intel_encoder *encoder)
7527 { 7527 {
7528 connector->encoder = encoder; 7528 connector->encoder = encoder;
7529 drm_mode_connector_attach_encoder(&connector->base, 7529 drm_mode_connector_attach_encoder(&connector->base,
7530 &encoder->base); 7530 &encoder->base);
7531 } 7531 }
7532 7532
7533 /* 7533 /*
7534 * Set VGA decode state - true == enable VGA decode 7534 * Set VGA decode state - true == enable VGA decode
7535 */ 7535 */
7536 int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 7536 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
7537 { 7537 {
7538 struct drm_i915_private *dev_priv = dev->dev_private; 7538 struct drm_i915_private *dev_priv = dev->dev_private;
7539 u16 gmch_ctrl; 7539 u16 gmch_ctrl;
7540 7540
7541 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); 7541 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
7542 if (state) 7542 if (state)
7543 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 7543 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
7544 else 7544 else
7545 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 7545 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
7546 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); 7546 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
7547 return 0; 7547 return 0;
7548 } 7548 }
7549 7549
7550 #ifdef CONFIG_DEBUG_FS 7550 #ifdef CONFIG_DEBUG_FS
7551 #include <linux/seq_file.h> 7551 #include <linux/seq_file.h>
7552 7552
7553 struct intel_display_error_state { 7553 struct intel_display_error_state {
7554 struct intel_cursor_error_state { 7554 struct intel_cursor_error_state {
7555 u32 control; 7555 u32 control;
7556 u32 position; 7556 u32 position;
7557 u32 base; 7557 u32 base;
7558 u32 size; 7558 u32 size;
7559 } cursor[2]; 7559 } cursor[2];
7560 7560
7561 struct intel_pipe_error_state { 7561 struct intel_pipe_error_state {
7562 u32 conf; 7562 u32 conf;
7563 u32 source; 7563 u32 source;
7564 7564
7565 u32 htotal; 7565 u32 htotal;
7566 u32 hblank; 7566 u32 hblank;
7567 u32 hsync; 7567 u32 hsync;
7568 u32 vtotal; 7568 u32 vtotal;
7569 u32 vblank; 7569 u32 vblank;
7570 u32 vsync; 7570 u32 vsync;
7571 } pipe[2]; 7571 } pipe[2];
7572 7572
7573 struct intel_plane_error_state { 7573 struct intel_plane_error_state {
7574 u32 control; 7574 u32 control;
7575 u32 stride; 7575 u32 stride;
7576 u32 size; 7576 u32 size;
7577 u32 pos; 7577 u32 pos;
7578 u32 addr; 7578 u32 addr;
7579 u32 surface; 7579 u32 surface;
7580 u32 tile_offset; 7580 u32 tile_offset;
7581 } plane[2]; 7581 } plane[2];
7582 }; 7582 };
7583 7583
7584 struct intel_display_error_state * 7584 struct intel_display_error_state *
7585 intel_display_capture_error_state(struct drm_device *dev) 7585 intel_display_capture_error_state(struct drm_device *dev)
7586 { 7586 {
7587 drm_i915_private_t *dev_priv = dev->dev_private; 7587 drm_i915_private_t *dev_priv = dev->dev_private;
7588 struct intel_display_error_state *error; 7588 struct intel_display_error_state *error;
7589 int i; 7589 int i;
7590 7590
7591 error = kmalloc(sizeof(*error), GFP_ATOMIC); 7591 error = kmalloc(sizeof(*error), GFP_ATOMIC);
7592 if (error == NULL) 7592 if (error == NULL)
7593 return NULL; 7593 return NULL;
7594 7594
7595 for (i = 0; i < 2; i++) { 7595 for (i = 0; i < 2; i++) {
7596 error->cursor[i].control = I915_READ(CURCNTR(i)); 7596 error->cursor[i].control = I915_READ(CURCNTR(i));
7597 error->cursor[i].position = I915_READ(CURPOS(i)); 7597 error->cursor[i].position = I915_READ(CURPOS(i));
7598 error->cursor[i].base = I915_READ(CURBASE(i)); 7598 error->cursor[i].base = I915_READ(CURBASE(i));
7599 7599
7600 error->plane[i].control = I915_READ(DSPCNTR(i)); 7600 error->plane[i].control = I915_READ(DSPCNTR(i));
7601 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 7601 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
7602 error->plane[i].size = I915_READ(DSPSIZE(i)); 7602 error->plane[i].size = I915_READ(DSPSIZE(i));
7603 error->plane[i].pos = I915_READ(DSPPOS(i)); 7603 error->plane[i].pos = I915_READ(DSPPOS(i));
7604 error->plane[i].addr = I915_READ(DSPADDR(i)); 7604 error->plane[i].addr = I915_READ(DSPADDR(i));
7605 if (INTEL_INFO(dev)->gen >= 4) { 7605 if (INTEL_INFO(dev)->gen >= 4) {
7606 error->plane[i].surface = I915_READ(DSPSURF(i)); 7606 error->plane[i].surface = I915_READ(DSPSURF(i));
7607 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 7607 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
7608 } 7608 }
7609 7609
7610 error->pipe[i].conf = I915_READ(PIPECONF(i)); 7610 error->pipe[i].conf = I915_READ(PIPECONF(i));
7611 error->pipe[i].source = I915_READ(PIPESRC(i)); 7611 error->pipe[i].source = I915_READ(PIPESRC(i));
7612 error->pipe[i].htotal = I915_READ(HTOTAL(i)); 7612 error->pipe[i].htotal = I915_READ(HTOTAL(i));
7613 error->pipe[i].hblank = I915_READ(HBLANK(i)); 7613 error->pipe[i].hblank = I915_READ(HBLANK(i));
7614 error->pipe[i].hsync = I915_READ(HSYNC(i)); 7614 error->pipe[i].hsync = I915_READ(HSYNC(i));
7615 error->pipe[i].vtotal = I915_READ(VTOTAL(i)); 7615 error->pipe[i].vtotal = I915_READ(VTOTAL(i));
7616 error->pipe[i].vblank = I915_READ(VBLANK(i)); 7616 error->pipe[i].vblank = I915_READ(VBLANK(i));
7617 error->pipe[i].vsync = I915_READ(VSYNC(i)); 7617 error->pipe[i].vsync = I915_READ(VSYNC(i));
7618 } 7618 }
7619 7619
7620 return error; 7620 return error;
7621 } 7621 }
7622 7622
7623 void 7623 void
7624 intel_display_print_error_state(struct seq_file *m, 7624 intel_display_print_error_state(struct seq_file *m,
7625 struct drm_device *dev, 7625 struct drm_device *dev,
7626 struct intel_display_error_state *error) 7626 struct intel_display_error_state *error)
7627 { 7627 {
7628 int i; 7628 int i;
7629 7629
7630 for (i = 0; i < 2; i++) { 7630 for (i = 0; i < 2; i++) {
7631 seq_printf(m, "Pipe [%d]:\n", i); 7631 seq_printf(m, "Pipe [%d]:\n", i);
7632 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); 7632 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
7633 seq_printf(m, " SRC: %08x\n", error->pipe[i].source); 7633 seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
7634 seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); 7634 seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
7635 seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); 7635 seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
7636 seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); 7636 seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
7637 seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); 7637 seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
7638 seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); 7638 seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
7639 seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); 7639 seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
7640 7640
7641 seq_printf(m, "Plane [%d]:\n", i); 7641 seq_printf(m, "Plane [%d]:\n", i);
7642 seq_printf(m, " CNTR: %08x\n", error->plane[i].control); 7642 seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
7643 seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 7643 seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
7644 seq_printf(m, " SIZE: %08x\n", error->plane[i].size); 7644 seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
7645 seq_printf(m, " POS: %08x\n", error->plane[i].pos); 7645 seq_printf(m, " POS: %08x\n", error->plane[i].pos);
7646 seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); 7646 seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
7647 if (INTEL_INFO(dev)->gen >= 4) { 7647 if (INTEL_INFO(dev)->gen >= 4) {
7648 seq_printf(m, " SURF: %08x\n", error->plane[i].surface); 7648 seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
7649 seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 7649 seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
7650 } 7650 }
7651 7651
7652 seq_printf(m, "Cursor [%d]:\n", i); 7652 seq_printf(m, "Cursor [%d]:\n", i);
7653 seq_printf(m, " CNTR: %08x\n", error->cursor[i].control); 7653 seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
7654 seq_printf(m, " POS: %08x\n", error->cursor[i].position); 7654 seq_printf(m, " POS: %08x\n", error->cursor[i].position);
7655 seq_printf(m, " BASE: %08x\n", error->cursor[i].base); 7655 seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
7656 } 7656 }
7657 } 7657 }
7658 #endif 7658 #endif
7659 7659
drivers/gpu/drm/radeon/radeon_display.c
1 /* 1 /*
2 * Copyright 2007-8 Advanced Micro Devices, Inc. 2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc. 3 * Copyright 2008 Red Hat Inc.
4 * 4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a 5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"), 6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation 7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the 9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions: 10 * Software is furnished to do so, subject to the following conditions:
11 * 11 *
12 * The above copyright notice and this permission notice shall be included in 12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software. 13 * all copies or substantial portions of the Software.
14 * 14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE. 21 * OTHER DEALINGS IN THE SOFTWARE.
22 * 22 *
23 * Authors: Dave Airlie 23 * Authors: Dave Airlie
24 * Alex Deucher 24 * Alex Deucher
25 */ 25 */
26 #include "drmP.h" 26 #include "drmP.h"
27 #include "radeon_drm.h" 27 #include "radeon_drm.h"
28 #include "radeon.h" 28 #include "radeon.h"
29 29
30 #include "atom.h" 30 #include "atom.h"
31 #include <asm/div64.h> 31 #include <asm/div64.h>
32 32
33 #include "drm_crtc_helper.h" 33 #include "drm_crtc_helper.h"
34 #include "drm_edid.h" 34 #include "drm_edid.h"
35 35
36 static int radeon_ddc_dump(struct drm_connector *connector); 36 static int radeon_ddc_dump(struct drm_connector *connector);
37 37
38 static void avivo_crtc_load_lut(struct drm_crtc *crtc) 38 static void avivo_crtc_load_lut(struct drm_crtc *crtc)
39 { 39 {
40 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 40 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
41 struct drm_device *dev = crtc->dev; 41 struct drm_device *dev = crtc->dev;
42 struct radeon_device *rdev = dev->dev_private; 42 struct radeon_device *rdev = dev->dev_private;
43 int i; 43 int i;
44 44
45 DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); 45 DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
46 WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0); 46 WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);
47 47
48 WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); 48 WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
49 WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); 49 WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
50 WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); 50 WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
51 51
52 WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); 52 WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
53 WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); 53 WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
54 WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); 54 WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
55 55
56 WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id); 56 WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
57 WREG32(AVIVO_DC_LUT_RW_MODE, 0); 57 WREG32(AVIVO_DC_LUT_RW_MODE, 0);
58 WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f); 58 WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);
59 59
60 WREG8(AVIVO_DC_LUT_RW_INDEX, 0); 60 WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
61 for (i = 0; i < 256; i++) { 61 for (i = 0; i < 256; i++) {
62 WREG32(AVIVO_DC_LUT_30_COLOR, 62 WREG32(AVIVO_DC_LUT_30_COLOR,
63 (radeon_crtc->lut_r[i] << 20) | 63 (radeon_crtc->lut_r[i] << 20) |
64 (radeon_crtc->lut_g[i] << 10) | 64 (radeon_crtc->lut_g[i] << 10) |
65 (radeon_crtc->lut_b[i] << 0)); 65 (radeon_crtc->lut_b[i] << 0));
66 } 66 }
67 67
68 WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); 68 WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
69 } 69 }
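
Each AVIVO_DC_LUT_30_COLOR write above packs three 10-bit channels into one word: red in bits 29:20, green in 19:10, blue in 9:0. A small sketch of the packing, with an invented helper name:

#include <stdint.h>
#include <stdio.h>

/* Pack three 10-bit channel values into the 30-bit LUT word layout
 * used above: red in bits 29:20, green in 19:10, blue in 9:0.
 */
static uint32_t pack_lut_30(uint16_t r, uint16_t g, uint16_t b)
{
	return ((uint32_t)(r & 0x3ff) << 20) |
	       ((uint32_t)(g & 0x3ff) << 10) |
	       ((uint32_t)(b & 0x3ff) << 0);
}

int main(void)
{
	/* Full-scale white: all three channels at 0x3ff -> 0x3fffffff. */
	printf("0x%08x\n", pack_lut_30(0x3ff, 0x3ff, 0x3ff));
	return 0;
}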
70 70
71 static void dce4_crtc_load_lut(struct drm_crtc *crtc) 71 static void dce4_crtc_load_lut(struct drm_crtc *crtc)
72 { 72 {
73 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 73 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
74 struct drm_device *dev = crtc->dev; 74 struct drm_device *dev = crtc->dev;
75 struct radeon_device *rdev = dev->dev_private; 75 struct radeon_device *rdev = dev->dev_private;
76 int i; 76 int i;
77 77
78 DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); 78 DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
79 WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0); 79 WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
80 80
81 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); 81 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
82 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); 82 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
83 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); 83 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
84 84
85 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); 85 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
86 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); 86 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
87 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); 87 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
88 88
89 WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0); 89 WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
90 WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007); 90 WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
91 91
92 WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0); 92 WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
93 for (i = 0; i < 256; i++) { 93 for (i = 0; i < 256; i++) {
94 WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset, 94 WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
95 (radeon_crtc->lut_r[i] << 20) | 95 (radeon_crtc->lut_r[i] << 20) |
96 (radeon_crtc->lut_g[i] << 10) | 96 (radeon_crtc->lut_g[i] << 10) |
97 (radeon_crtc->lut_b[i] << 0)); 97 (radeon_crtc->lut_b[i] << 0));
98 } 98 }
99 } 99 }
100 100
101 static void dce5_crtc_load_lut(struct drm_crtc *crtc) 101 static void dce5_crtc_load_lut(struct drm_crtc *crtc)
102 { 102 {
103 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 103 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
104 struct drm_device *dev = crtc->dev; 104 struct drm_device *dev = crtc->dev;
105 struct radeon_device *rdev = dev->dev_private; 105 struct radeon_device *rdev = dev->dev_private;
106 int i; 106 int i;
107 107
108 DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); 108 DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
109 109
110 WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset, 110 WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
111 (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) | 111 (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
112 NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS))); 112 NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
113 WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset, 113 WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
114 NI_GRPH_PRESCALE_BYPASS); 114 NI_GRPH_PRESCALE_BYPASS);
115 WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset, 115 WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
116 NI_OVL_PRESCALE_BYPASS); 116 NI_OVL_PRESCALE_BYPASS);
117 WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset, 117 WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
118 (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) | 118 (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
119 NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT))); 119 NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
120 120
121 WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0); 121 WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
122 122
123 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); 123 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
124 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); 124 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
125 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); 125 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
126 126
127 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); 127 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
128 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); 128 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
129 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); 129 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
130 130
131 WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0); 131 WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
132 WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007); 132 WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
133 133
134 WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0); 134 WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
135 for (i = 0; i < 256; i++) { 135 for (i = 0; i < 256; i++) {
136 WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset, 136 WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
137 (radeon_crtc->lut_r[i] << 20) | 137 (radeon_crtc->lut_r[i] << 20) |
138 (radeon_crtc->lut_g[i] << 10) | 138 (radeon_crtc->lut_g[i] << 10) |
139 (radeon_crtc->lut_b[i] << 0)); 139 (radeon_crtc->lut_b[i] << 0));
140 } 140 }
141 141
142 WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset, 142 WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
143 (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | 143 (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
144 NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | 144 NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
145 NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | 145 NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
146 NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS))); 146 NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
147 WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset, 147 WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
148 (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) | 148 (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
149 NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS))); 149 NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
150 WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset, 150 WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
151 (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) | 151 (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
152 NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS))); 152 NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
153 WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset, 153 WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
154 (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) | 154 (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
155 NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS))); 155 NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
156 /* XXX match this to the depth of the crtc fmt block, move to modeset? */ 156 /* XXX match this to the depth of the crtc fmt block, move to modeset? */
157 WREG32(0x6940 + radeon_crtc->crtc_offset, 0); 157 WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
158 158
159 } 159 }
160 160
161 static void legacy_crtc_load_lut(struct drm_crtc *crtc) 161 static void legacy_crtc_load_lut(struct drm_crtc *crtc)
162 { 162 {
163 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 163 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
164 struct drm_device *dev = crtc->dev; 164 struct drm_device *dev = crtc->dev;
165 struct radeon_device *rdev = dev->dev_private; 165 struct radeon_device *rdev = dev->dev_private;
166 int i; 166 int i;
167 uint32_t dac2_cntl; 167 uint32_t dac2_cntl;
168 168
169 dac2_cntl = RREG32(RADEON_DAC_CNTL2); 169 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
170 if (radeon_crtc->crtc_id == 0) 170 if (radeon_crtc->crtc_id == 0)
171 dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL; 171 dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
172 else 172 else
173 dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL; 173 dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
174 WREG32(RADEON_DAC_CNTL2, dac2_cntl); 174 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
175 175
176 WREG8(RADEON_PALETTE_INDEX, 0); 176 WREG8(RADEON_PALETTE_INDEX, 0);
177 for (i = 0; i < 256; i++) { 177 for (i = 0; i < 256; i++) {
178 WREG32(RADEON_PALETTE_30_DATA, 178 WREG32(RADEON_PALETTE_30_DATA,
179 (radeon_crtc->lut_r[i] << 20) | 179 (radeon_crtc->lut_r[i] << 20) |
180 (radeon_crtc->lut_g[i] << 10) | 180 (radeon_crtc->lut_g[i] << 10) |
181 (radeon_crtc->lut_b[i] << 0)); 181 (radeon_crtc->lut_b[i] << 0));
182 } 182 }
183 } 183 }
184 184
185 void radeon_crtc_load_lut(struct drm_crtc *crtc) 185 void radeon_crtc_load_lut(struct drm_crtc *crtc)
186 { 186 {
187 struct drm_device *dev = crtc->dev; 187 struct drm_device *dev = crtc->dev;
188 struct radeon_device *rdev = dev->dev_private; 188 struct radeon_device *rdev = dev->dev_private;
189 189
190 if (!crtc->enabled) 190 if (!crtc->enabled)
191 return; 191 return;
192 192
193 if (ASIC_IS_DCE5(rdev)) 193 if (ASIC_IS_DCE5(rdev))
194 dce5_crtc_load_lut(crtc); 194 dce5_crtc_load_lut(crtc);
195 else if (ASIC_IS_DCE4(rdev)) 195 else if (ASIC_IS_DCE4(rdev))
196 dce4_crtc_load_lut(crtc); 196 dce4_crtc_load_lut(crtc);
197 else if (ASIC_IS_AVIVO(rdev)) 197 else if (ASIC_IS_AVIVO(rdev))
198 avivo_crtc_load_lut(crtc); 198 avivo_crtc_load_lut(crtc);
199 else 199 else
200 legacy_crtc_load_lut(crtc); 200 legacy_crtc_load_lut(crtc);
201 } 201 }
202 202
203 /** Sets the color ramps on behalf of fbcon */ 203 /** Sets the color ramps on behalf of fbcon */
204 void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 204 void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
205 u16 blue, int regno) 205 u16 blue, int regno)
206 { 206 {
207 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 207 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
208 208
209 radeon_crtc->lut_r[regno] = red >> 6; 209 radeon_crtc->lut_r[regno] = red >> 6;
210 radeon_crtc->lut_g[regno] = green >> 6; 210 radeon_crtc->lut_g[regno] = green >> 6;
211 radeon_crtc->lut_b[regno] = blue >> 6; 211 radeon_crtc->lut_b[regno] = blue >> 6;
212 } 212 }
213 213
214 /** Gets the color ramps on behalf of fbcon */ 214 /** Gets the color ramps on behalf of fbcon */
215 void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 215 void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
216 u16 *blue, int regno) 216 u16 *blue, int regno)
217 { 217 {
218 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 218 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
219 219
220 *red = radeon_crtc->lut_r[regno] << 6; 220 *red = radeon_crtc->lut_r[regno] << 6;
221 *green = radeon_crtc->lut_g[regno] << 6; 221 *green = radeon_crtc->lut_g[regno] << 6;
222 *blue = radeon_crtc->lut_b[regno] << 6; 222 *blue = radeon_crtc->lut_b[regno] << 6;
223 } 223 }
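
A quick worked example of the two helpers above: fbcon hands in 16-bit color components while the hardware LUT stores 10 bits, so the set path drops the low 6 bits and the get path scales back up. The values below are illustrative only:

    u16 red_in  = 0xffff;        /* 16-bit component from fbcon         */
    u16 lut_val = red_in >> 6;   /* 0x03ff, what the 10-bit LUT stores  */
    u16 red_out = lut_val << 6;  /* 0xffc0: round trip loses low 6 bits */

So a set/get round trip through the LUT is lossy by up to 63 (0x3f) out of 65535.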
224 224
225 static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 225 static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
226 u16 *blue, uint32_t start, uint32_t size) 226 u16 *blue, uint32_t start, uint32_t size)
227 { 227 {
228 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 228 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
229 int end = (start + size > 256) ? 256 : start + size, i; 229 int end = (start + size > 256) ? 256 : start + size, i;
230 230
231 /* userspace palettes are always correct as is */ 231 /* userspace palettes are always correct as is */
232 for (i = start; i < end; i++) { 232 for (i = start; i < end; i++) {
233 radeon_crtc->lut_r[i] = red[i] >> 6; 233 radeon_crtc->lut_r[i] = red[i] >> 6;
234 radeon_crtc->lut_g[i] = green[i] >> 6; 234 radeon_crtc->lut_g[i] = green[i] >> 6;
235 radeon_crtc->lut_b[i] = blue[i] >> 6; 235 radeon_crtc->lut_b[i] = blue[i] >> 6;
236 } 236 }
237 radeon_crtc_load_lut(crtc); 237 radeon_crtc_load_lut(crtc);
238 } 238 }
239 239
240 static void radeon_crtc_destroy(struct drm_crtc *crtc) 240 static void radeon_crtc_destroy(struct drm_crtc *crtc)
241 { 241 {
242 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 242 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
243 243
244 drm_crtc_cleanup(crtc); 244 drm_crtc_cleanup(crtc);
245 kfree(radeon_crtc); 245 kfree(radeon_crtc);
246 } 246 }
247 247
248 /* 248 /*
249 * Handle unpin events outside the interrupt handler proper. 249 * Handle unpin events outside the interrupt handler proper.
250 */ 250 */
251 static void radeon_unpin_work_func(struct work_struct *__work) 251 static void radeon_unpin_work_func(struct work_struct *__work)
252 { 252 {
253 struct radeon_unpin_work *work = 253 struct radeon_unpin_work *work =
254 container_of(__work, struct radeon_unpin_work, work); 254 container_of(__work, struct radeon_unpin_work, work);
255 int r; 255 int r;
256 256
257 /* unpin of the old buffer */ 257 /* unpin of the old buffer */
258 r = radeon_bo_reserve(work->old_rbo, false); 258 r = radeon_bo_reserve(work->old_rbo, false);
259 if (likely(r == 0)) { 259 if (likely(r == 0)) {
260 r = radeon_bo_unpin(work->old_rbo); 260 r = radeon_bo_unpin(work->old_rbo);
261 if (unlikely(r != 0)) { 261 if (unlikely(r != 0)) {
262 DRM_ERROR("failed to unpin buffer after flip\n"); 262 DRM_ERROR("failed to unpin buffer after flip\n");
263 } 263 }
264 radeon_bo_unreserve(work->old_rbo); 264 radeon_bo_unreserve(work->old_rbo);
265 } else 265 } else
266 DRM_ERROR("failed to reserve buffer after flip\n"); 266 DRM_ERROR("failed to reserve buffer after flip\n");
267 kfree(work); 267 kfree(work);
268 } 268 }
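
radeon_bo_reserve() can sleep, which is why the unpin is pushed out of the vblank interrupt and into this work function. A minimal, driver-independent sketch of the embedded-work pattern it relies on (the struct and function names here are made up for illustration):

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct example_work {
            struct work_struct work;   /* embedded, not a pointer */
            void *payload;
    };

    static void example_work_func(struct work_struct *__work)
    {
            /* recover the containing object from the embedded member */
            struct example_work *w =
                    container_of(__work, struct example_work, work);
            /* ... sleepable cleanup on w->payload ... */
            kfree(w);
    }

    /* setup:    INIT_WORK(&w->work, example_work_func);
     * trigger:  schedule_work(&w->work);  -- safe from irq context
     */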
269 269
270 void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) 270 void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
271 { 271 {
272 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 272 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
273 struct radeon_unpin_work *work; 273 struct radeon_unpin_work *work;
274 struct drm_pending_vblank_event *e; 274 struct drm_pending_vblank_event *e;
275 struct timeval now; 275 struct timeval now;
276 unsigned long flags; 276 unsigned long flags;
277 u32 update_pending; 277 u32 update_pending;
278 int vpos, hpos; 278 int vpos, hpos;
279 279
280 spin_lock_irqsave(&rdev->ddev->event_lock, flags); 280 spin_lock_irqsave(&rdev->ddev->event_lock, flags);
281 work = radeon_crtc->unpin_work; 281 work = radeon_crtc->unpin_work;
282 if (work == NULL || 282 if (work == NULL ||
283 !radeon_fence_signaled(work->fence)) { 283 !radeon_fence_signaled(work->fence)) {
284 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 284 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
285 return; 285 return;
286 } 286 }
287 /* New pageflip, or just completion of a previous one? */ 287 /* New pageflip, or just completion of a previous one? */
288 if (!radeon_crtc->deferred_flip_completion) { 288 if (!radeon_crtc->deferred_flip_completion) {
289 /* do the flip (mmio) */ 289 /* do the flip (mmio) */
290 update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base); 290 update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
291 } else { 291 } else {
292 /* This is just the completion of a flip queued in the 292 /* This is just the completion of a flip queued in the
293 * crtc on the last invocation. Make sure we go directly 293 * crtc on the last invocation. Make sure we go directly
294 * to the completion routine. 294 * to the completion routine.
295 */ 295 */
296 update_pending = 0; 296 update_pending = 0;
297 radeon_crtc->deferred_flip_completion = 0; 297 radeon_crtc->deferred_flip_completion = 0;
298 } 298 }
299 299
300 /* Has the pageflip already completed in crtc, or is it certain 300 /* Has the pageflip already completed in crtc, or is it certain
301 * to complete in this vblank? 301 * to complete in this vblank?
302 */ 302 */
303 if (update_pending && 303 if (update_pending &&
304 (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 304 (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
305 &vpos, &hpos)) && 305 &vpos, &hpos)) &&
306 (vpos >= 0) && 306 (vpos >= 0) &&
307 (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) { 307 (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) {
308 /* crtc didn't flip in this target vblank interval, 308 /* crtc didn't flip in this target vblank interval,
309 * but a flip is still pending in the crtc. It will 309 * but a flip is still pending in the crtc. It will
310 * complete in the next vblank interval, so finish the 310 * complete in the next vblank interval, so finish the
311 * flip at the next vblank irq. 311 * flip at the next vblank irq.
312 */ 312 */
313 radeon_crtc->deferred_flip_completion = 1; 313 radeon_crtc->deferred_flip_completion = 1;
314 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 314 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
315 return; 315 return;
316 } 316 }
317 317
318 /* Pageflip (will be) certainly completed in this vblank. Clean up. */ 318 /* Pageflip (will be) certainly completed in this vblank. Clean up. */
319 radeon_crtc->unpin_work = NULL; 319 radeon_crtc->unpin_work = NULL;
320 320
321 /* wakeup userspace */ 321 /* wakeup userspace */
322 if (work->event) { 322 if (work->event) {
323 e = work->event; 323 e = work->event;
324 e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now); 324 e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now);
325 e->event.tv_sec = now.tv_sec; 325 e->event.tv_sec = now.tv_sec;
326 e->event.tv_usec = now.tv_usec; 326 e->event.tv_usec = now.tv_usec;
327 list_add_tail(&e->base.link, &e->base.file_priv->event_list); 327 list_add_tail(&e->base.link, &e->base.file_priv->event_list);
328 wake_up_interruptible(&e->base.file_priv->event_wait); 328 wake_up_interruptible(&e->base.file_priv->event_wait);
329 } 329 }
330 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 330 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
331 331
332 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); 332 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
333 radeon_fence_unref(&work->fence); 333 radeon_fence_unref(&work->fence);
334 radeon_post_page_flip(work->rdev, work->crtc_id); 334 radeon_post_page_flip(work->rdev, work->crtc_id);
335 schedule_work(&work->work); 335 schedule_work(&work->work);
336 } 336 }
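
The deferral test above is worth unpacking: with the flip still pending and the scanout position valid, completion is postponed whenever the beam is still inside the top 99% of the active vertical area, and handled immediately otherwise. Worked through for an illustrative 1080-line mode:

    /* crtc_vdisplay = 1080:  threshold = (99 * 1080) / 100 = 1069
     *
     * update_pending && 0 <= vpos < 1069   -> defer, complete at the
     *                                         next vblank irq
     * vpos < 0 (in vblank) or vpos >= 1069 -> flip (will be) done in
     *                                         this vblank, clean up now
     */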
337 337
338 static int radeon_crtc_page_flip(struct drm_crtc *crtc, 338 static int radeon_crtc_page_flip(struct drm_crtc *crtc,
339 struct drm_framebuffer *fb, 339 struct drm_framebuffer *fb,
340 struct drm_pending_vblank_event *event) 340 struct drm_pending_vblank_event *event)
341 { 341 {
342 struct drm_device *dev = crtc->dev; 342 struct drm_device *dev = crtc->dev;
343 struct radeon_device *rdev = dev->dev_private; 343 struct radeon_device *rdev = dev->dev_private;
344 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 344 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
345 struct radeon_framebuffer *old_radeon_fb; 345 struct radeon_framebuffer *old_radeon_fb;
346 struct radeon_framebuffer *new_radeon_fb; 346 struct radeon_framebuffer *new_radeon_fb;
347 struct drm_gem_object *obj; 347 struct drm_gem_object *obj;
348 struct radeon_bo *rbo; 348 struct radeon_bo *rbo;
349 struct radeon_fence *fence; 349 struct radeon_fence *fence;
350 struct radeon_unpin_work *work; 350 struct radeon_unpin_work *work;
351 unsigned long flags; 351 unsigned long flags;
352 u32 tiling_flags, pitch_pixels; 352 u32 tiling_flags, pitch_pixels;
353 u64 base; 353 u64 base;
354 int r; 354 int r;
355 355
356 work = kzalloc(sizeof *work, GFP_KERNEL); 356 work = kzalloc(sizeof *work, GFP_KERNEL);
357 if (work == NULL) 357 if (work == NULL)
358 return -ENOMEM; 358 return -ENOMEM;
359 359
360 r = radeon_fence_create(rdev, &fence); 360 r = radeon_fence_create(rdev, &fence);
361 if (unlikely(r != 0)) { 361 if (unlikely(r != 0)) {
362 kfree(work); 362 kfree(work);
363 DRM_ERROR("flip queue: failed to create fence.\n"); 363 DRM_ERROR("flip queue: failed to create fence.\n");
364 return -ENOMEM; 364 return -ENOMEM;
365 } 365 }
366 work->event = event; 366 work->event = event;
367 work->rdev = rdev; 367 work->rdev = rdev;
368 work->crtc_id = radeon_crtc->crtc_id; 368 work->crtc_id = radeon_crtc->crtc_id;
369 work->fence = radeon_fence_ref(fence); 369 work->fence = radeon_fence_ref(fence);
370 old_radeon_fb = to_radeon_framebuffer(crtc->fb); 370 old_radeon_fb = to_radeon_framebuffer(crtc->fb);
371 new_radeon_fb = to_radeon_framebuffer(fb); 371 new_radeon_fb = to_radeon_framebuffer(fb);
372 /* schedule unpin of the old buffer */ 372 /* schedule unpin of the old buffer */
373 obj = old_radeon_fb->obj; 373 obj = old_radeon_fb->obj;
374 rbo = gem_to_radeon_bo(obj); 374 rbo = gem_to_radeon_bo(obj);
375 work->old_rbo = rbo; 375 work->old_rbo = rbo;
376 INIT_WORK(&work->work, radeon_unpin_work_func); 376 INIT_WORK(&work->work, radeon_unpin_work_func);
377 377
378 /* We borrow the event spin lock for protecting unpin_work */ 378 /* We borrow the event spin lock for protecting unpin_work */
379 spin_lock_irqsave(&dev->event_lock, flags); 379 spin_lock_irqsave(&dev->event_lock, flags);
380 if (radeon_crtc->unpin_work) { 380 if (radeon_crtc->unpin_work) {
381 spin_unlock_irqrestore(&dev->event_lock, flags); 381 spin_unlock_irqrestore(&dev->event_lock, flags);
382 kfree(work); 382 kfree(work);
383 radeon_fence_unref(&fence); 383 radeon_fence_unref(&fence);
384 384
385 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 385 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
386 return -EBUSY; 386 return -EBUSY;
387 } 387 }
388 radeon_crtc->unpin_work = work; 388 radeon_crtc->unpin_work = work;
389 radeon_crtc->deferred_flip_completion = 0; 389 radeon_crtc->deferred_flip_completion = 0;
390 spin_unlock_irqrestore(&dev->event_lock, flags); 390 spin_unlock_irqrestore(&dev->event_lock, flags);
391 391
392 /* pin the new buffer */ 392 /* pin the new buffer */
393 obj = new_radeon_fb->obj; 393 obj = new_radeon_fb->obj;
394 rbo = gem_to_radeon_bo(obj); 394 rbo = gem_to_radeon_bo(obj);
395 395
396 DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", 396 DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
397 work->old_rbo, rbo); 397 work->old_rbo, rbo);
398 398
399 r = radeon_bo_reserve(rbo, false); 399 r = radeon_bo_reserve(rbo, false);
400 if (unlikely(r != 0)) { 400 if (unlikely(r != 0)) {
401 DRM_ERROR("failed to reserve new rbo buffer before flip\n"); 401 DRM_ERROR("failed to reserve new rbo buffer before flip\n");
402 goto pflip_cleanup; 402 goto pflip_cleanup;
403 } 403 }
404 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base); 404 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
405 if (unlikely(r != 0)) { 405 if (unlikely(r != 0)) {
406 radeon_bo_unreserve(rbo); 406 radeon_bo_unreserve(rbo);
407 r = -EINVAL; 407 r = -EINVAL;
408 DRM_ERROR("failed to pin new rbo buffer before flip\n"); 408 DRM_ERROR("failed to pin new rbo buffer before flip\n");
409 goto pflip_cleanup; 409 goto pflip_cleanup;
410 } 410 }
411 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); 411 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
412 radeon_bo_unreserve(rbo); 412 radeon_bo_unreserve(rbo);
413 413
414 if (!ASIC_IS_AVIVO(rdev)) { 414 if (!ASIC_IS_AVIVO(rdev)) {
415 /* crtc offset is from display base addr, not FB location */ 415 /* crtc offset is from display base addr, not FB location */
416 base -= radeon_crtc->legacy_display_base_addr; 416 base -= radeon_crtc->legacy_display_base_addr;
417 pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8); 417 pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
418 418
419 if (tiling_flags & RADEON_TILING_MACRO) { 419 if (tiling_flags & RADEON_TILING_MACRO) {
420 if (ASIC_IS_R300(rdev)) { 420 if (ASIC_IS_R300(rdev)) {
421 base &= ~0x7ff; 421 base &= ~0x7ff;
422 } else { 422 } else {
423 int byteshift = fb->bits_per_pixel >> 4; 423 int byteshift = fb->bits_per_pixel >> 4;
424 int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11; 424 int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
425 base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8); 425 base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
426 } 426 }
427 } else { 427 } else {
428 int offset = crtc->y * pitch_pixels + crtc->x; 428 int offset = crtc->y * pitch_pixels + crtc->x;
429 switch (fb->bits_per_pixel) { 429 switch (fb->bits_per_pixel) {
430 case 8: 430 case 8:
431 default: 431 default:
432 offset *= 1; 432 offset *= 1;
433 break; 433 break;
434 case 15: 434 case 15:
435 case 16: 435 case 16:
436 offset *= 2; 436 offset *= 2;
437 break; 437 break;
438 case 24: 438 case 24:
439 offset *= 3; 439 offset *= 3;
440 break; 440 break;
441 case 32: 441 case 32:
442 offset *= 4; 442 offset *= 4;
443 break; 443 break;
444 } 444 }
445 base += offset; 445 base += offset;
446 } 446 }
447 base &= ~7; 447 base &= ~7;
448 } 448 }
449 449
450 spin_lock_irqsave(&dev->event_lock, flags); 450 spin_lock_irqsave(&dev->event_lock, flags);
451 work->new_crtc_base = base; 451 work->new_crtc_base = base;
452 spin_unlock_irqrestore(&dev->event_lock, flags); 452 spin_unlock_irqrestore(&dev->event_lock, flags);
453 453
454 /* update crtc fb */ 454 /* update crtc fb */
455 crtc->fb = fb; 455 crtc->fb = fb;
456 456
457 r = drm_vblank_get(dev, radeon_crtc->crtc_id); 457 r = drm_vblank_get(dev, radeon_crtc->crtc_id);
458 if (r) { 458 if (r) {
459 DRM_ERROR("failed to get vblank before flip\n"); 459 DRM_ERROR("failed to get vblank before flip\n");
460 goto pflip_cleanup1; 460 goto pflip_cleanup1;
461 } 461 }
462 462
463 /* 32 ought to cover us */ 463 /* 32 ought to cover us */
464 r = radeon_ring_lock(rdev, 32); 464 r = radeon_ring_lock(rdev, 32);
465 if (r) { 465 if (r) {
466 DRM_ERROR("failed to lock the ring before flip\n"); 466 DRM_ERROR("failed to lock the ring before flip\n");
467 goto pflip_cleanup2; 467 goto pflip_cleanup2;
468 } 468 }
469 469
470 /* emit the fence */ 470 /* emit the fence */
471 radeon_fence_emit(rdev, fence); 471 radeon_fence_emit(rdev, fence);
472 /* set the proper interrupt */ 472 /* set the proper interrupt */
473 radeon_pre_page_flip(rdev, radeon_crtc->crtc_id); 473 radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
474 /* fire the ring */ 474 /* fire the ring */
475 radeon_ring_unlock_commit(rdev); 475 radeon_ring_unlock_commit(rdev);
476 476
477 return 0; 477 return 0;
478 478
479 pflip_cleanup2: 479 pflip_cleanup2:
480 drm_vblank_put(dev, radeon_crtc->crtc_id); 480 drm_vblank_put(dev, radeon_crtc->crtc_id);
481 481
482 pflip_cleanup1: 482 pflip_cleanup1:
483 r = radeon_bo_reserve(rbo, false); 483 r = radeon_bo_reserve(rbo, false);
484 if (unlikely(r != 0)) { 484 if (unlikely(r != 0)) {
485 DRM_ERROR("failed to reserve new rbo in error path\n"); 485 DRM_ERROR("failed to reserve new rbo in error path\n");
486 goto pflip_cleanup; 486 goto pflip_cleanup;
487 } 487 }
488 r = radeon_bo_unpin(rbo); 488 r = radeon_bo_unpin(rbo);
489 if (unlikely(r != 0)) { 489 if (unlikely(r != 0)) {
490 radeon_bo_unreserve(rbo); 490 radeon_bo_unreserve(rbo);
491 r = -EINVAL; 491 r = -EINVAL;
492 DRM_ERROR("failed to unpin new rbo in error path\n"); 492 DRM_ERROR("failed to unpin new rbo in error path\n");
493 goto pflip_cleanup; 493 goto pflip_cleanup;
494 } 494 }
495 radeon_bo_unreserve(rbo); 495 radeon_bo_unreserve(rbo);
496 496
497 pflip_cleanup: 497 pflip_cleanup:
498 spin_lock_irqsave(&dev->event_lock, flags); 498 spin_lock_irqsave(&dev->event_lock, flags);
499 radeon_crtc->unpin_work = NULL; 499 radeon_crtc->unpin_work = NULL;
500 spin_unlock_irqrestore(&dev->event_lock, flags); 500 spin_unlock_irqrestore(&dev->event_lock, flags);
501 radeon_fence_unref(&fence); 501 radeon_fence_unref(&fence);
502 kfree(work); 502 kfree(work);
503 503
504 return r; 504 return r;
505 } 505 }
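
For the pre-AVIVO path in the function above, the untiled branch is a plain linear-offset computation; the bits_per_pixel switch is just a bytes-per-pixel multiply. A compact restatement covering the depths listed (8, 15/16, 24, 32; 15 bpp pixels occupy two bytes, which is why 15 and 16 share a case):

    u32 cpp    = (fb->bits_per_pixel + 7) / 8;  /* 8->1, 15/16->2, 24->3, 32->4 */
    u32 offset = (crtc->y * pitch_pixels + crtc->x) * cpp;
    base += offset;
    base &= ~7;   /* clear the low 3 bits, matching the base &= ~7 above */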
506 506
507 static const struct drm_crtc_funcs radeon_crtc_funcs = { 507 static const struct drm_crtc_funcs radeon_crtc_funcs = {
508 .cursor_set = radeon_crtc_cursor_set, 508 .cursor_set = radeon_crtc_cursor_set,
509 .cursor_move = radeon_crtc_cursor_move, 509 .cursor_move = radeon_crtc_cursor_move,
510 .gamma_set = radeon_crtc_gamma_set, 510 .gamma_set = radeon_crtc_gamma_set,
511 .set_config = drm_crtc_helper_set_config, 511 .set_config = drm_crtc_helper_set_config,
512 .destroy = radeon_crtc_destroy, 512 .destroy = radeon_crtc_destroy,
513 .page_flip = radeon_crtc_page_flip, 513 .page_flip = radeon_crtc_page_flip,
514 }; 514 };
515 515
516 static void radeon_crtc_init(struct drm_device *dev, int index) 516 static void radeon_crtc_init(struct drm_device *dev, int index)
517 { 517 {
518 struct radeon_device *rdev = dev->dev_private; 518 struct radeon_device *rdev = dev->dev_private;
519 struct radeon_crtc *radeon_crtc; 519 struct radeon_crtc *radeon_crtc;
520 int i; 520 int i;
521 521
522 radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 522 radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
523 if (radeon_crtc == NULL) 523 if (radeon_crtc == NULL)
524 return; 524 return;
525 525
526 drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs); 526 drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
527 527
528 drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); 528 drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
529 radeon_crtc->crtc_id = index; 529 radeon_crtc->crtc_id = index;
530 rdev->mode_info.crtcs[index] = radeon_crtc; 530 rdev->mode_info.crtcs[index] = radeon_crtc;
531 531
532 #if 0 532 #if 0
533 radeon_crtc->mode_set.crtc = &radeon_crtc->base; 533 radeon_crtc->mode_set.crtc = &radeon_crtc->base;
534 radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); 534 radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
535 radeon_crtc->mode_set.num_connectors = 0; 535 radeon_crtc->mode_set.num_connectors = 0;
536 #endif 536 #endif
537 537
538 for (i = 0; i < 256; i++) { 538 for (i = 0; i < 256; i++) {
539 radeon_crtc->lut_r[i] = i << 2; 539 radeon_crtc->lut_r[i] = i << 2;
540 radeon_crtc->lut_g[i] = i << 2; 540 radeon_crtc->lut_g[i] = i << 2;
541 radeon_crtc->lut_b[i] = i << 2; 541 radeon_crtc->lut_b[i] = i << 2;
542 } 542 }
543 543
544 if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)) 544 if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
545 radeon_atombios_init_crtc(dev, radeon_crtc); 545 radeon_atombios_init_crtc(dev, radeon_crtc);
546 else 546 else
547 radeon_legacy_init_crtc(dev, radeon_crtc); 547 radeon_legacy_init_crtc(dev, radeon_crtc);
548 } 548 }
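
The init loop above seeds a linear gamma ramp, widening the 8-bit index to the 10-bit LUT by a left shift. One side note: i << 2 peaks at 1020 rather than the 10-bit maximum of 1023; replicating the top bits would hit full scale exactly. That variant, as an observation only and not something the driver does:

    u16 v10 = (i << 2) | (i >> 6);   /* i = 255 -> 1023 instead of 1020 */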
549 549
550 static const char *encoder_names[36] = { 550 static const char *encoder_names[36] = {
551 "NONE", 551 "NONE",
552 "INTERNAL_LVDS", 552 "INTERNAL_LVDS",
553 "INTERNAL_TMDS1", 553 "INTERNAL_TMDS1",
554 "INTERNAL_TMDS2", 554 "INTERNAL_TMDS2",
555 "INTERNAL_DAC1", 555 "INTERNAL_DAC1",
556 "INTERNAL_DAC2", 556 "INTERNAL_DAC2",
557 "INTERNAL_SDVOA", 557 "INTERNAL_SDVOA",
558 "INTERNAL_SDVOB", 558 "INTERNAL_SDVOB",
559 "SI170B", 559 "SI170B",
560 "CH7303", 560 "CH7303",
561 "CH7301", 561 "CH7301",
562 "INTERNAL_DVO1", 562 "INTERNAL_DVO1",
563 "EXTERNAL_SDVOA", 563 "EXTERNAL_SDVOA",
564 "EXTERNAL_SDVOB", 564 "EXTERNAL_SDVOB",
565 "TITFP513", 565 "TITFP513",
566 "INTERNAL_LVTM1", 566 "INTERNAL_LVTM1",
567 "VT1623", 567 "VT1623",
568 "HDMI_SI1930", 568 "HDMI_SI1930",
569 "HDMI_INTERNAL", 569 "HDMI_INTERNAL",
570 "INTERNAL_KLDSCP_TMDS1", 570 "INTERNAL_KLDSCP_TMDS1",
571 "INTERNAL_KLDSCP_DVO1", 571 "INTERNAL_KLDSCP_DVO1",
572 "INTERNAL_KLDSCP_DAC1", 572 "INTERNAL_KLDSCP_DAC1",
573 "INTERNAL_KLDSCP_DAC2", 573 "INTERNAL_KLDSCP_DAC2",
574 "SI178", 574 "SI178",
575 "MVPU_FPGA", 575 "MVPU_FPGA",
576 "INTERNAL_DDI", 576 "INTERNAL_DDI",
577 "VT1625", 577 "VT1625",
578 "HDMI_SI1932", 578 "HDMI_SI1932",
579 "DP_AN9801", 579 "DP_AN9801",
580 "DP_DP501", 580 "DP_DP501",
581 "INTERNAL_UNIPHY", 581 "INTERNAL_UNIPHY",
582 "INTERNAL_KLDSCP_LVTMA", 582 "INTERNAL_KLDSCP_LVTMA",
583 "INTERNAL_UNIPHY1", 583 "INTERNAL_UNIPHY1",
584 "INTERNAL_UNIPHY2", 584 "INTERNAL_UNIPHY2",
585 "NUTMEG", 585 "NUTMEG",
586 "TRAVIS", 586 "TRAVIS",
587 }; 587 };
588 588
589 static const char *connector_names[15] = { 589 static const char *connector_names[15] = {
590 "Unknown", 590 "Unknown",
591 "VGA", 591 "VGA",
592 "DVI-I", 592 "DVI-I",
593 "DVI-D", 593 "DVI-D",
594 "DVI-A", 594 "DVI-A",
595 "Composite", 595 "Composite",
596 "S-video", 596 "S-video",
597 "LVDS", 597 "LVDS",
598 "Component", 598 "Component",
599 "DIN", 599 "DIN",
600 "DisplayPort", 600 "DisplayPort",
601 "HDMI-A", 601 "HDMI-A",
602 "HDMI-B", 602 "HDMI-B",
603 "TV", 603 "TV",
604 "eDP", 604 "eDP",
605 }; 605 };
606 606
607 static const char *hpd_names[6] = { 607 static const char *hpd_names[6] = {
608 "HPD1", 608 "HPD1",
609 "HPD2", 609 "HPD2",
610 "HPD3", 610 "HPD3",
611 "HPD4", 611 "HPD4",
612 "HPD5", 612 "HPD5",
613 "HPD6", 613 "HPD6",
614 }; 614 };
615 615
616 static void radeon_print_display_setup(struct drm_device *dev) 616 static void radeon_print_display_setup(struct drm_device *dev)
617 { 617 {
618 struct drm_connector *connector; 618 struct drm_connector *connector;
619 struct radeon_connector *radeon_connector; 619 struct radeon_connector *radeon_connector;
620 struct drm_encoder *encoder; 620 struct drm_encoder *encoder;
621 struct radeon_encoder *radeon_encoder; 621 struct radeon_encoder *radeon_encoder;
622 uint32_t devices; 622 uint32_t devices;
623 int i = 0; 623 int i = 0;
624 624
625 DRM_INFO("Radeon Display Connectors\n"); 625 DRM_INFO("Radeon Display Connectors\n");
626 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 626 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
627 radeon_connector = to_radeon_connector(connector); 627 radeon_connector = to_radeon_connector(connector);
628 DRM_INFO("Connector %d:\n", i); 628 DRM_INFO("Connector %d:\n", i);
629 DRM_INFO(" %s\n", connector_names[connector->connector_type]); 629 DRM_INFO(" %s\n", connector_names[connector->connector_type]);
630 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 630 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
631 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); 631 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
632 if (radeon_connector->ddc_bus) { 632 if (radeon_connector->ddc_bus) {
633 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 633 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
634 radeon_connector->ddc_bus->rec.mask_clk_reg, 634 radeon_connector->ddc_bus->rec.mask_clk_reg,
635 radeon_connector->ddc_bus->rec.mask_data_reg, 635 radeon_connector->ddc_bus->rec.mask_data_reg,
636 radeon_connector->ddc_bus->rec.a_clk_reg, 636 radeon_connector->ddc_bus->rec.a_clk_reg,
637 radeon_connector->ddc_bus->rec.a_data_reg, 637 radeon_connector->ddc_bus->rec.a_data_reg,
638 radeon_connector->ddc_bus->rec.en_clk_reg, 638 radeon_connector->ddc_bus->rec.en_clk_reg,
639 radeon_connector->ddc_bus->rec.en_data_reg, 639 radeon_connector->ddc_bus->rec.en_data_reg,
640 radeon_connector->ddc_bus->rec.y_clk_reg, 640 radeon_connector->ddc_bus->rec.y_clk_reg,
641 radeon_connector->ddc_bus->rec.y_data_reg); 641 radeon_connector->ddc_bus->rec.y_data_reg);
642 if (radeon_connector->router.ddc_valid) 642 if (radeon_connector->router.ddc_valid)
643 DRM_INFO(" DDC Router 0x%x/0x%x\n", 643 DRM_INFO(" DDC Router 0x%x/0x%x\n",
644 radeon_connector->router.ddc_mux_control_pin, 644 radeon_connector->router.ddc_mux_control_pin,
645 radeon_connector->router.ddc_mux_state); 645 radeon_connector->router.ddc_mux_state);
646 if (radeon_connector->router.cd_valid) 646 if (radeon_connector->router.cd_valid)
647 DRM_INFO(" Clock/Data Router 0x%x/0x%x\n", 647 DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
648 radeon_connector->router.cd_mux_control_pin, 648 radeon_connector->router.cd_mux_control_pin,
649 radeon_connector->router.cd_mux_state); 649 radeon_connector->router.cd_mux_state);
650 } else { 650 } else {
651 if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || 651 if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
652 connector->connector_type == DRM_MODE_CONNECTOR_DVII || 652 connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
653 connector->connector_type == DRM_MODE_CONNECTOR_DVID || 653 connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
654 connector->connector_type == DRM_MODE_CONNECTOR_DVIA || 654 connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
655 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || 655 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
656 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) 656 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
657 DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n"); 657 DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
658 } 658 }
659 DRM_INFO(" Encoders:\n"); 659 DRM_INFO(" Encoders:\n");
660 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 660 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
661 radeon_encoder = to_radeon_encoder(encoder); 661 radeon_encoder = to_radeon_encoder(encoder);
662 devices = radeon_encoder->devices & radeon_connector->devices; 662 devices = radeon_encoder->devices & radeon_connector->devices;
663 if (devices) { 663 if (devices) {
664 if (devices & ATOM_DEVICE_CRT1_SUPPORT) 664 if (devices & ATOM_DEVICE_CRT1_SUPPORT)
665 DRM_INFO(" CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]); 665 DRM_INFO(" CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
666 if (devices & ATOM_DEVICE_CRT2_SUPPORT) 666 if (devices & ATOM_DEVICE_CRT2_SUPPORT)
667 DRM_INFO(" CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]); 667 DRM_INFO(" CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
668 if (devices & ATOM_DEVICE_LCD1_SUPPORT) 668 if (devices & ATOM_DEVICE_LCD1_SUPPORT)
669 DRM_INFO(" LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]); 669 DRM_INFO(" LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
670 if (devices & ATOM_DEVICE_DFP1_SUPPORT) 670 if (devices & ATOM_DEVICE_DFP1_SUPPORT)
671 DRM_INFO(" DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]); 671 DRM_INFO(" DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
672 if (devices & ATOM_DEVICE_DFP2_SUPPORT) 672 if (devices & ATOM_DEVICE_DFP2_SUPPORT)
673 DRM_INFO(" DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]); 673 DRM_INFO(" DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
674 if (devices & ATOM_DEVICE_DFP3_SUPPORT) 674 if (devices & ATOM_DEVICE_DFP3_SUPPORT)
675 DRM_INFO(" DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]); 675 DRM_INFO(" DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
676 if (devices & ATOM_DEVICE_DFP4_SUPPORT) 676 if (devices & ATOM_DEVICE_DFP4_SUPPORT)
677 DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); 677 DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
678 if (devices & ATOM_DEVICE_DFP5_SUPPORT) 678 if (devices & ATOM_DEVICE_DFP5_SUPPORT)
679 DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); 679 DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
680 if (devices & ATOM_DEVICE_DFP6_SUPPORT) 680 if (devices & ATOM_DEVICE_DFP6_SUPPORT)
681 DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]); 681 DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
682 if (devices & ATOM_DEVICE_TV1_SUPPORT) 682 if (devices & ATOM_DEVICE_TV1_SUPPORT)
683 DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); 683 DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
684 if (devices & ATOM_DEVICE_CV_SUPPORT) 684 if (devices & ATOM_DEVICE_CV_SUPPORT)
685 DRM_INFO(" CV: %s\n", encoder_names[radeon_encoder->encoder_id]); 685 DRM_INFO(" CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
686 } 686 }
687 } 687 }
688 i++; 688 i++;
689 } 689 }
690 } 690 }
691 691
692 static bool radeon_setup_enc_conn(struct drm_device *dev) 692 static bool radeon_setup_enc_conn(struct drm_device *dev)
693 { 693 {
694 struct radeon_device *rdev = dev->dev_private; 694 struct radeon_device *rdev = dev->dev_private;
695 struct drm_connector *drm_connector; 695 struct drm_connector *drm_connector;
696 bool ret = false; 696 bool ret = false;
697 697
698 if (rdev->bios) { 698 if (rdev->bios) {
699 if (rdev->is_atom_bios) { 699 if (rdev->is_atom_bios) {
700 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); 700 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
701 if (ret == false) 701 if (ret == false)
702 ret = radeon_get_atom_connector_info_from_object_table(dev); 702 ret = radeon_get_atom_connector_info_from_object_table(dev);
703 } else { 703 } else {
704 ret = radeon_get_legacy_connector_info_from_bios(dev); 704 ret = radeon_get_legacy_connector_info_from_bios(dev);
705 if (ret == false) 705 if (ret == false)
706 ret = radeon_get_legacy_connector_info_from_table(dev); 706 ret = radeon_get_legacy_connector_info_from_table(dev);
707 } 707 }
708 } else { 708 } else {
709 if (!ASIC_IS_AVIVO(rdev)) 709 if (!ASIC_IS_AVIVO(rdev))
710 ret = radeon_get_legacy_connector_info_from_table(dev); 710 ret = radeon_get_legacy_connector_info_from_table(dev);
711 } 711 }
712 if (ret) { 712 if (ret) {
713 radeon_setup_encoder_clones(dev); 713 radeon_setup_encoder_clones(dev);
714 radeon_print_display_setup(dev); 714 radeon_print_display_setup(dev);
715 list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) 715 list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
716 radeon_ddc_dump(drm_connector); 716 radeon_ddc_dump(drm_connector);
717 } 717 }
718 718
719 return ret; 719 return ret;
720 } 720 }
721 721
722 int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) 722 int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
723 { 723 {
724 struct drm_device *dev = radeon_connector->base.dev; 724 struct drm_device *dev = radeon_connector->base.dev;
725 struct radeon_device *rdev = dev->dev_private; 725 struct radeon_device *rdev = dev->dev_private;
726 int ret = 0; 726 int ret = 0;
727 727
728 /* on hw with routers, select right port */ 728 /* on hw with routers, select right port */
729 if (radeon_connector->router.ddc_valid) 729 if (radeon_connector->router.ddc_valid)
730 radeon_router_select_ddc_port(radeon_connector); 730 radeon_router_select_ddc_port(radeon_connector);
731 731
732 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 732 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
733 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { 733 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
734 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 734 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
735 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || 735 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
736 dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) 736 dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
737 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); 737 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
738 } 738 }
739 if (!radeon_connector->ddc_bus) 739 if (!radeon_connector->ddc_bus)
740 return -1; 740 return -1;
741 if (!radeon_connector->edid) { 741 if (!radeon_connector->edid) {
742 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 742 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
743 } 743 }
744 744
745 if (!radeon_connector->edid) { 745 if (!radeon_connector->edid) {
746 if (rdev->is_atom_bios) { 746 if (rdev->is_atom_bios) {
747 /* some laptops provide a hardcoded edid in rom for LCDs */ 747 /* some laptops provide a hardcoded edid in rom for LCDs */
748 if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) || 748 if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
749 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP))) 749 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
750 radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); 750 radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
751 } else 751 } else
752 /* some servers provide a hardcoded edid in rom for KVMs */ 752 /* some servers provide a hardcoded edid in rom for KVMs */
753 radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); 753 radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
754 } 754 }
755 if (radeon_connector->edid) { 755 if (radeon_connector->edid) {
756 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); 756 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
757 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); 757 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
758 return ret; 758 return ret;
759 } 759 }
760 drm_mode_connector_update_edid_property(&radeon_connector->base, NULL); 760 drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
761 return 0; 761 return 0;
762 } 762 }
763 763
764 static int radeon_ddc_dump(struct drm_connector *connector) 764 static int radeon_ddc_dump(struct drm_connector *connector)
765 { 765 {
766 struct edid *edid; 766 struct edid *edid;
767 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 767 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
768 int ret = 0; 768 int ret = 0;
769 769
770 /* on hw with routers, select right port */ 770 /* on hw with routers, select right port */
771 if (radeon_connector->router.ddc_valid) 771 if (radeon_connector->router.ddc_valid)
772 radeon_router_select_ddc_port(radeon_connector); 772 radeon_router_select_ddc_port(radeon_connector);
773 773
774 if (!radeon_connector->ddc_bus) 774 if (!radeon_connector->ddc_bus)
775 return -1; 775 return -1;
776 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); 776 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
777 if (edid) { 777 if (edid) {
778 kfree(edid); 778 kfree(edid);
779 } 779 }
780 return ret; 780 return ret;
781 } 781 }
782 782
783 /* avivo */ 783 /* avivo */
784 static void avivo_get_fb_div(struct radeon_pll *pll, 784 static void avivo_get_fb_div(struct radeon_pll *pll,
785 u32 target_clock, 785 u32 target_clock,
786 u32 post_div, 786 u32 post_div,
787 u32 ref_div, 787 u32 ref_div,
788 u32 *fb_div, 788 u32 *fb_div,
789 u32 *frac_fb_div) 789 u32 *frac_fb_div)
790 { 790 {
791 u32 tmp = post_div * ref_div; 791 u32 tmp = post_div * ref_div;
792 792
793 tmp *= target_clock; 793 tmp *= target_clock;
794 *fb_div = tmp / pll->reference_freq; 794 *fb_div = tmp / pll->reference_freq;
795 *frac_fb_div = tmp % pll->reference_freq; 795 *frac_fb_div = tmp % pll->reference_freq;
796 796
797 if (*fb_div > pll->max_feedback_div) 797 if (*fb_div > pll->max_feedback_div)
798 *fb_div = pll->max_feedback_div; 798 *fb_div = pll->max_feedback_div;
799 else if (*fb_div < pll->min_feedback_div) 799 else if (*fb_div < pll->min_feedback_div)
800 *fb_div = pll->min_feedback_div; 800 *fb_div = pll->min_feedback_div;
801 } 801 }
802 802
803 static u32 avivo_get_post_div(struct radeon_pll *pll, 803 static u32 avivo_get_post_div(struct radeon_pll *pll,
804 u32 target_clock) 804 u32 target_clock)
805 { 805 {
806 u32 vco, post_div, tmp; 806 u32 vco, post_div, tmp;
807 807
808 if (pll->flags & RADEON_PLL_USE_POST_DIV) 808 if (pll->flags & RADEON_PLL_USE_POST_DIV)
809 return pll->post_div; 809 return pll->post_div;
810 810
811 if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) { 811 if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
812 if (pll->flags & RADEON_PLL_IS_LCD) 812 if (pll->flags & RADEON_PLL_IS_LCD)
813 vco = pll->lcd_pll_out_min; 813 vco = pll->lcd_pll_out_min;
814 else 814 else
815 vco = pll->pll_out_min; 815 vco = pll->pll_out_min;
816 } else { 816 } else {
817 if (pll->flags & RADEON_PLL_IS_LCD) 817 if (pll->flags & RADEON_PLL_IS_LCD)
818 vco = pll->lcd_pll_out_max; 818 vco = pll->lcd_pll_out_max;
819 else 819 else
820 vco = pll->pll_out_max; 820 vco = pll->pll_out_max;
821 } 821 }
822 822
823 post_div = vco / target_clock; 823 post_div = vco / target_clock;
824 tmp = vco % target_clock; 824 tmp = vco % target_clock;
825 825
826 if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) { 826 if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
827 if (tmp) 827 if (tmp)
828 post_div++; 828 post_div++;
829 } else { 829 } else {
830 if (!tmp) 830 if (!tmp)
831 post_div--; 831 post_div--;
832 } 832 }
833 833
834 if (post_div > pll->max_post_div) 834 if (post_div > pll->max_post_div)
835 post_div = pll->max_post_div; 835 post_div = pll->max_post_div;
836 else if (post_div < pll->min_post_div) 836 else if (post_div < pll->min_post_div)
837 post_div = pll->min_post_div; 837 post_div = pll->min_post_div;
838 838
839 return post_div; 839 return post_div;
840 } 840 }
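
Numerically, avivo_get_post_div() divides the relevant VCO limit by the target clock and then biases the rounding: with RADEON_PLL_PREFER_MINM_OVER_MAXP it rounds up on any remainder, otherwise it steps down only on an exact division. An illustrative example (units follow the driver's 10 kHz convention; the values are made up):

    /* vco = 600000, target_clock = 27000:
     *   600000 / 27000 = 22, remainder 6000
     *
     * PREFER_MINM_OVER_MAXP set:   remainder != 0 -> post_div = 23
     * flag clear:                  remainder != 0 -> post_div = 22
     * (then clamped to [min_post_div, max_post_div])
     */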
841 841
842 #define MAX_TOLERANCE 10 842 #define MAX_TOLERANCE 10
843 843
844 void radeon_compute_pll_avivo(struct radeon_pll *pll, 844 void radeon_compute_pll_avivo(struct radeon_pll *pll,
845 u32 freq, 845 u32 freq,
846 u32 *dot_clock_p, 846 u32 *dot_clock_p,
847 u32 *fb_div_p, 847 u32 *fb_div_p,
848 u32 *frac_fb_div_p, 848 u32 *frac_fb_div_p,
849 u32 *ref_div_p, 849 u32 *ref_div_p,
850 u32 *post_div_p) 850 u32 *post_div_p)
851 { 851 {
852 u32 target_clock = freq / 10; 852 u32 target_clock = freq / 10;
853 u32 post_div = avivo_get_post_div(pll, target_clock); 853 u32 post_div = avivo_get_post_div(pll, target_clock);
854 u32 ref_div = pll->min_ref_div; 854 u32 ref_div = pll->min_ref_div;
855 u32 fb_div = 0, frac_fb_div = 0, tmp; 855 u32 fb_div = 0, frac_fb_div = 0, tmp;
856 856
857 if (pll->flags & RADEON_PLL_USE_REF_DIV) 857 if (pll->flags & RADEON_PLL_USE_REF_DIV)
858 ref_div = pll->reference_div; 858 ref_div = pll->reference_div;
859 859
860 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { 860 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
861 avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div); 861 avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
862 frac_fb_div = (100 * frac_fb_div) / pll->reference_freq; 862 frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
863 if (frac_fb_div >= 5) { 863 if (frac_fb_div >= 5) {
864 frac_fb_div -= 5; 864 frac_fb_div -= 5;
865 frac_fb_div = frac_fb_div / 10; 865 frac_fb_div = frac_fb_div / 10;
866 frac_fb_div++; 866 frac_fb_div++;
867 } 867 }
868 if (frac_fb_div >= 10) { 868 if (frac_fb_div >= 10) {
869 fb_div++; 869 fb_div++;
870 frac_fb_div = 0; 870 frac_fb_div = 0;
871 } 871 }
872 } else { 872 } else {
873 while (ref_div <= pll->max_ref_div) { 873 while (ref_div <= pll->max_ref_div) {
874 avivo_get_fb_div(pll, target_clock, post_div, ref_div, 874 avivo_get_fb_div(pll, target_clock, post_div, ref_div,
875 &fb_div, &frac_fb_div); 875 &fb_div, &frac_fb_div);
876 if (frac_fb_div >= (pll->reference_freq / 2)) 876 if (frac_fb_div >= (pll->reference_freq / 2))
877 fb_div++; 877 fb_div++;
878 frac_fb_div = 0; 878 frac_fb_div = 0;
879 tmp = (pll->reference_freq * fb_div) / (post_div * ref_div); 879 tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
880 tmp = (tmp * 10000) / target_clock; 880 tmp = (tmp * 10000) / target_clock;
881 881
882 if (tmp > (10000 + MAX_TOLERANCE)) 882 if (tmp > (10000 + MAX_TOLERANCE))
883 ref_div++; 883 ref_div++;
884 else if (tmp >= (10000 - MAX_TOLERANCE)) 884 else if (tmp >= (10000 - MAX_TOLERANCE))
885 break; 885 break;
886 else 886 else
887 ref_div++; 887 ref_div++;
888 } 888 }
889 } 889 }
890 890
891 *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) / 891 *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
892 (ref_div * post_div * 10); 892 (ref_div * post_div * 10);
893 *fb_div_p = fb_div; 893 *fb_div_p = fb_div;
894 *frac_fb_div_p = frac_fb_div; 894 *frac_fb_div_p = frac_fb_div;
895 *ref_div_p = ref_div; 895 *ref_div_p = ref_div;
896 *post_div_p = post_div; 896 *post_div_p = post_div;
897 DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n", 897 DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
898 *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div); 898 *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
899 } 899 }
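
The dot-clock reconstruction at the end of radeon_compute_pll_avivo() is reference_freq * (fb_div + frac_fb_div / 10) / (ref_div * post_div), kept in integer math by scaling both numerator and denominator by 10. A worked example with round numbers (chosen for clean arithmetic, not for real VCO limits):

    /* reference_freq = 2700 (27.00 MHz in 10 kHz units),
     * fb_div = 110, frac_fb_div = 0, ref_div = 10, post_div = 2:
     *
     * dot_clock = (2700 * 110 * 10 + 2700 * 0) / (10 * 2 * 10)
     *           = 2970000 / 200
     *           = 14850         -> 148.50 MHz, the 1080p60 pixel clock
     */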
900 900
901 /* pre-avivo */ 901 /* pre-avivo */
902 static inline uint32_t radeon_div(uint64_t n, uint32_t d) 902 static inline uint32_t radeon_div(uint64_t n, uint32_t d)
903 { 903 {
904 uint64_t mod; 904 uint64_t mod;
905 905
906 n += d / 2; 906 n += d / 2;
907 907
908 mod = do_div(n, d); 908 mod = do_div(n, d);
909 return n; 909 return n;
910 } 910 }
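
radeon_div() is a round-to-nearest 64-by-32 divide: adding d / 2 before the kernel's truncating do_div() converts truncation into rounding. Two quick examples:

    /* radeon_div(7, 2)  == (7 + 1)  / 2 == 4   (plain 7 / 2 == 3)
     * radeon_div(10, 4) == (10 + 2) / 4 == 3   (2.5 rounds up)
     */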
911 911
912 void radeon_compute_pll_legacy(struct radeon_pll *pll, 912 void radeon_compute_pll_legacy(struct radeon_pll *pll,
913 uint64_t freq, 913 uint64_t freq,
914 uint32_t *dot_clock_p, 914 uint32_t *dot_clock_p,
915 uint32_t *fb_div_p, 915 uint32_t *fb_div_p,
916 uint32_t *frac_fb_div_p, 916 uint32_t *frac_fb_div_p,
917 uint32_t *ref_div_p, 917 uint32_t *ref_div_p,
918 uint32_t *post_div_p) 918 uint32_t *post_div_p)
919 { 919 {
920 uint32_t min_ref_div = pll->min_ref_div; 920 uint32_t min_ref_div = pll->min_ref_div;
921 uint32_t max_ref_div = pll->max_ref_div; 921 uint32_t max_ref_div = pll->max_ref_div;
922 uint32_t min_post_div = pll->min_post_div; 922 uint32_t min_post_div = pll->min_post_div;
923 uint32_t max_post_div = pll->max_post_div; 923 uint32_t max_post_div = pll->max_post_div;
924 uint32_t min_fractional_feed_div = 0; 924 uint32_t min_fractional_feed_div = 0;
925 uint32_t max_fractional_feed_div = 0; 925 uint32_t max_fractional_feed_div = 0;
926 uint32_t best_vco = pll->best_vco; 926 uint32_t best_vco = pll->best_vco;
927 uint32_t best_post_div = 1; 927 uint32_t best_post_div = 1;
928 uint32_t best_ref_div = 1; 928 uint32_t best_ref_div = 1;
929 uint32_t best_feedback_div = 1; 929 uint32_t best_feedback_div = 1;
930 uint32_t best_frac_feedback_div = 0; 930 uint32_t best_frac_feedback_div = 0;
931 uint32_t best_freq = -1; 931 uint32_t best_freq = -1;
932 uint32_t best_error = 0xffffffff; 932 uint32_t best_error = 0xffffffff;
933 uint32_t best_vco_diff = 1; 933 uint32_t best_vco_diff = 1;
934 uint32_t post_div; 934 uint32_t post_div;
935 u32 pll_out_min, pll_out_max; 935 u32 pll_out_min, pll_out_max;
936 936
937 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); 937 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
938 freq = freq * 1000; 938 freq = freq * 1000;
939 939
940 if (pll->flags & RADEON_PLL_IS_LCD) { 940 if (pll->flags & RADEON_PLL_IS_LCD) {
941 pll_out_min = pll->lcd_pll_out_min; 941 pll_out_min = pll->lcd_pll_out_min;
942 pll_out_max = pll->lcd_pll_out_max; 942 pll_out_max = pll->lcd_pll_out_max;
943 } else { 943 } else {
944 pll_out_min = pll->pll_out_min; 944 pll_out_min = pll->pll_out_min;
945 pll_out_max = pll->pll_out_max; 945 pll_out_max = pll->pll_out_max;
946 } 946 }
947 947
948 if (pll_out_min > 64800) 948 if (pll_out_min > 64800)
949 pll_out_min = 64800; 949 pll_out_min = 64800;
950 950
951 if (pll->flags & RADEON_PLL_USE_REF_DIV) 951 if (pll->flags & RADEON_PLL_USE_REF_DIV)
952 min_ref_div = max_ref_div = pll->reference_div; 952 min_ref_div = max_ref_div = pll->reference_div;
953 else { 953 else {
954 while (min_ref_div < max_ref_div-1) { 954 while (min_ref_div < max_ref_div-1) {
955 uint32_t mid = (min_ref_div + max_ref_div) / 2; 955 uint32_t mid = (min_ref_div + max_ref_div) / 2;
956 uint32_t pll_in = pll->reference_freq / mid; 956 uint32_t pll_in = pll->reference_freq / mid;
957 if (pll_in < pll->pll_in_min) 957 if (pll_in < pll->pll_in_min)
958 max_ref_div = mid; 958 max_ref_div = mid;
959 else if (pll_in > pll->pll_in_max) 959 else if (pll_in > pll->pll_in_max)
960 min_ref_div = mid; 960 min_ref_div = mid;
961 else 961 else
962 break; 962 break;
963 } 963 }
964 } 964 }
965 965
966 if (pll->flags & RADEON_PLL_USE_POST_DIV) 966 if (pll->flags & RADEON_PLL_USE_POST_DIV)
967 min_post_div = max_post_div = pll->post_div; 967 min_post_div = max_post_div = pll->post_div;
968 968
969 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { 969 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
970 min_fractional_feed_div = pll->min_frac_feedback_div; 970 min_fractional_feed_div = pll->min_frac_feedback_div;
971 max_fractional_feed_div = pll->max_frac_feedback_div; 971 max_fractional_feed_div = pll->max_frac_feedback_div;
972 } 972 }
973 973
974 for (post_div = max_post_div; post_div >= min_post_div; --post_div) { 974 for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
975 uint32_t ref_div; 975 uint32_t ref_div;
976 976
977 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) 977 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
978 continue; 978 continue;
979 979
980 /* legacy radeons only have a few post_divs */ 980 /* legacy radeons only have a few post_divs */
981 if (pll->flags & RADEON_PLL_LEGACY) { 981 if (pll->flags & RADEON_PLL_LEGACY) {
982 if ((post_div == 5) || 982 if ((post_div == 5) ||
983 (post_div == 7) || 983 (post_div == 7) ||
984 (post_div == 9) || 984 (post_div == 9) ||
985 (post_div == 10) || 985 (post_div == 10) ||
986 (post_div == 11) || 986 (post_div == 11) ||
987 (post_div == 13) || 987 (post_div == 13) ||
988 (post_div == 14) || 988 (post_div == 14) ||
989 (post_div == 15)) 989 (post_div == 15))
990 continue; 990 continue;
991 } 991 }
992 992
993 for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) { 993 for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
994 uint32_t feedback_div, current_freq = 0, error, vco_diff; 994 uint32_t feedback_div, current_freq = 0, error, vco_diff;
995 uint32_t pll_in = pll->reference_freq / ref_div; 995 uint32_t pll_in = pll->reference_freq / ref_div;
996 uint32_t min_feed_div = pll->min_feedback_div; 996 uint32_t min_feed_div = pll->min_feedback_div;
997 uint32_t max_feed_div = pll->max_feedback_div + 1; 997 uint32_t max_feed_div = pll->max_feedback_div + 1;
998 998
999 if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max) 999 if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
1000 continue; 1000 continue;
1001 1001
1002 while (min_feed_div < max_feed_div) { 1002 while (min_feed_div < max_feed_div) {
1003 uint32_t vco; 1003 uint32_t vco;
1004 uint32_t min_frac_feed_div = min_fractional_feed_div; 1004 uint32_t min_frac_feed_div = min_fractional_feed_div;
1005 uint32_t max_frac_feed_div = max_fractional_feed_div + 1; 1005 uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
1006 uint32_t frac_feedback_div; 1006 uint32_t frac_feedback_div;
1007 uint64_t tmp; 1007 uint64_t tmp;
1008 1008
1009 feedback_div = (min_feed_div + max_feed_div) / 2; 1009 feedback_div = (min_feed_div + max_feed_div) / 2;
1010 1010
1011 tmp = (uint64_t)pll->reference_freq * feedback_div; 1011 tmp = (uint64_t)pll->reference_freq * feedback_div;
1012 vco = radeon_div(tmp, ref_div); 1012 vco = radeon_div(tmp, ref_div);
1013 1013
1014 if (vco < pll_out_min) { 1014 if (vco < pll_out_min) {
1015 min_feed_div = feedback_div + 1; 1015 min_feed_div = feedback_div + 1;
1016 continue; 1016 continue;
1017 } else if (vco > pll_out_max) { 1017 } else if (vco > pll_out_max) {
1018 max_feed_div = feedback_div; 1018 max_feed_div = feedback_div;
1019 continue; 1019 continue;
1020 } 1020 }
1021 1021
1022 while (min_frac_feed_div < max_frac_feed_div) { 1022 while (min_frac_feed_div < max_frac_feed_div) {
1023 frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2; 1023 frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
1024 tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div; 1024 tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
1025 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; 1025 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
1026 current_freq = radeon_div(tmp, ref_div * post_div); 1026 current_freq = radeon_div(tmp, ref_div * post_div);
1027 1027
1028 if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { 1028 if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
1029 if (freq < current_freq) 1029 if (freq < current_freq)
1030 error = 0xffffffff; 1030 error = 0xffffffff;
1031 else 1031 else
1032 error = freq - current_freq; 1032 error = freq - current_freq;
1033 } else 1033 } else
1034 error = abs(current_freq - freq); 1034 error = abs(current_freq - freq);
1035 vco_diff = abs(vco - best_vco); 1035 vco_diff = abs(vco - best_vco);
1036 1036
1037 if ((best_vco == 0 && error < best_error) || 1037 if ((best_vco == 0 && error < best_error) ||
1038 (best_vco != 0 && 1038 (best_vco != 0 &&
1039 ((best_error > 100 && error < best_error - 100) || 1039 ((best_error > 100 && error < best_error - 100) ||
1040 (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) { 1040 (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
1041 best_post_div = post_div; 1041 best_post_div = post_div;
1042 best_ref_div = ref_div; 1042 best_ref_div = ref_div;
1043 best_feedback_div = feedback_div; 1043 best_feedback_div = feedback_div;
1044 best_frac_feedback_div = frac_feedback_div; 1044 best_frac_feedback_div = frac_feedback_div;
1045 best_freq = current_freq; 1045 best_freq = current_freq;
1046 best_error = error; 1046 best_error = error;
1047 best_vco_diff = vco_diff; 1047 best_vco_diff = vco_diff;
1048 } else if (current_freq == freq) { 1048 } else if (current_freq == freq) {
1049 if (best_freq == -1) { 1049 if (best_freq == -1) {
1050 best_post_div = post_div; 1050 best_post_div = post_div;
1051 best_ref_div = ref_div; 1051 best_ref_div = ref_div;
1052 best_feedback_div = feedback_div; 1052 best_feedback_div = feedback_div;
1053 best_frac_feedback_div = frac_feedback_div; 1053 best_frac_feedback_div = frac_feedback_div;
1054 best_freq = current_freq; 1054 best_freq = current_freq;
1055 best_error = error; 1055 best_error = error;
1056 best_vco_diff = vco_diff; 1056 best_vco_diff = vco_diff;
1057 } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || 1057 } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
1058 ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || 1058 ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
1059 ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || 1059 ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
1060 ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || 1060 ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
1061 ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || 1061 ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
1062 ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { 1062 ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
1063 best_post_div = post_div; 1063 best_post_div = post_div;
1064 best_ref_div = ref_div; 1064 best_ref_div = ref_div;
1065 best_feedback_div = feedback_div; 1065 best_feedback_div = feedback_div;
1066 best_frac_feedback_div = frac_feedback_div; 1066 best_frac_feedback_div = frac_feedback_div;
1067 best_freq = current_freq; 1067 best_freq = current_freq;
1068 best_error = error; 1068 best_error = error;
1069 best_vco_diff = vco_diff; 1069 best_vco_diff = vco_diff;
1070 } 1070 }
1071 } 1071 }
1072 if (current_freq < freq) 1072 if (current_freq < freq)
1073 min_frac_feed_div = frac_feedback_div + 1; 1073 min_frac_feed_div = frac_feedback_div + 1;
1074 else 1074 else
1075 max_frac_feed_div = frac_feedback_div; 1075 max_frac_feed_div = frac_feedback_div;
1076 } 1076 }
1077 if (current_freq < freq) 1077 if (current_freq < freq)
1078 min_feed_div = feedback_div + 1; 1078 min_feed_div = feedback_div + 1;
1079 else 1079 else
1080 max_feed_div = feedback_div; 1080 max_feed_div = feedback_div;
1081 } 1081 }
1082 } 1082 }
1083 } 1083 }
1084 1084
1085 *dot_clock_p = best_freq / 10000; 1085 *dot_clock_p = best_freq / 10000;
1086 *fb_div_p = best_feedback_div; 1086 *fb_div_p = best_feedback_div;
1087 *frac_fb_div_p = best_frac_feedback_div; 1087 *frac_fb_div_p = best_frac_feedback_div;
1088 *ref_div_p = best_ref_div; 1088 *ref_div_p = best_ref_div;
1089 *post_div_p = best_post_div; 1089 *post_div_p = best_post_div;
1090 DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n", 1090 DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
1091 freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div, 1091 (long long)freq,
1092 best_freq / 1000, best_feedback_div, best_frac_feedback_div,
1092 best_ref_div, best_post_div); 1093 best_ref_div, best_post_div);
1093 1094
1094 } 1095 }
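
The acceptance test buried in the nested loops of radeon_compute_pll_legacy() (lines 1037-1040 in the left column) is easier to read pulled out as a predicate. This is a direct restatement of that condition, nothing more: a candidate wins outright while no best_vco is set, and otherwise wins if it beats the best error by more than 100 units, or lands within 100 units while pulling the VCO closer to the requested best_vco:

    static bool candidate_is_better(u32 error, u32 best_error,
                                    u32 vco_diff, u32 best_vco_diff,
                                    u32 best_vco)
    {
            if (best_vco == 0)
                    return error < best_error;
            return (best_error > 100 && error < best_error - 100) ||
                   (abs(error - best_error) < 100 && vco_diff < best_vco_diff);
    }

(It keeps the original's abs() on an unsigned difference, quirks and all.)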
1095 1096
1096 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) 1097 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
1097 { 1098 {
1098 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); 1099 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
1099 1100
1100 if (radeon_fb->obj) { 1101 if (radeon_fb->obj) {
1101 drm_gem_object_unreference_unlocked(radeon_fb->obj); 1102 drm_gem_object_unreference_unlocked(radeon_fb->obj);
1102 } 1103 }
1103 drm_framebuffer_cleanup(fb); 1104 drm_framebuffer_cleanup(fb);
1104 kfree(radeon_fb); 1105 kfree(radeon_fb);
1105 } 1106 }
1106 1107
1107 static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb, 1108 static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
1108 struct drm_file *file_priv, 1109 struct drm_file *file_priv,
1109 unsigned int *handle) 1110 unsigned int *handle)
1110 { 1111 {
1111 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); 1112 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
1112 1113
1113 return drm_gem_handle_create(file_priv, radeon_fb->obj, handle); 1114 return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
1114 } 1115 }
1115 1116
1116 static const struct drm_framebuffer_funcs radeon_fb_funcs = { 1117 static const struct drm_framebuffer_funcs radeon_fb_funcs = {
1117 .destroy = radeon_user_framebuffer_destroy, 1118 .destroy = radeon_user_framebuffer_destroy,
1118 .create_handle = radeon_user_framebuffer_create_handle, 1119 .create_handle = radeon_user_framebuffer_create_handle,
1119 }; 1120 };
1120 1121
1121 void 1122 void
1122 radeon_framebuffer_init(struct drm_device *dev, 1123 radeon_framebuffer_init(struct drm_device *dev,
1123 struct radeon_framebuffer *rfb, 1124 struct radeon_framebuffer *rfb,
1124 struct drm_mode_fb_cmd *mode_cmd, 1125 struct drm_mode_fb_cmd *mode_cmd,
1125 struct drm_gem_object *obj) 1126 struct drm_gem_object *obj)
1126 { 1127 {
1127 rfb->obj = obj; 1128 rfb->obj = obj;
1128 drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs); 1129 drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
1129 drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd); 1130 drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
1130 } 1131 }
1131 1132
1132 static struct drm_framebuffer * 1133 static struct drm_framebuffer *
1133 radeon_user_framebuffer_create(struct drm_device *dev, 1134 radeon_user_framebuffer_create(struct drm_device *dev,
1134 struct drm_file *file_priv, 1135 struct drm_file *file_priv,
1135 struct drm_mode_fb_cmd *mode_cmd) 1136 struct drm_mode_fb_cmd *mode_cmd)
1136 { 1137 {
1137 struct drm_gem_object *obj; 1138 struct drm_gem_object *obj;
1138 struct radeon_framebuffer *radeon_fb; 1139 struct radeon_framebuffer *radeon_fb;
1139 1140
1140 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); 1141 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
1141 if (obj == NULL) { 1142 if (obj == NULL) {
1142 dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, " 1143 dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
1143 "can't create framebuffer\n", mode_cmd->handle); 1144 "can't create framebuffer\n", mode_cmd->handle);
1144 return ERR_PTR(-ENOENT); 1145 return ERR_PTR(-ENOENT);
1145 } 1146 }
1146 1147
1147 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); 1148 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
1148 if (radeon_fb == NULL) 1149 if (radeon_fb == NULL)
1149 return ERR_PTR(-ENOMEM); 1150 return ERR_PTR(-ENOMEM);
1150 1151
1151 radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); 1152 radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
1152 1153
1153 return &radeon_fb->base; 1154 return &radeon_fb->base;
1154 } 1155 }
1155 1156
1156 static void radeon_output_poll_changed(struct drm_device *dev) 1157 static void radeon_output_poll_changed(struct drm_device *dev)
1157 { 1158 {
1158 struct radeon_device *rdev = dev->dev_private; 1159 struct radeon_device *rdev = dev->dev_private;
1159 radeon_fb_output_poll_changed(rdev); 1160 radeon_fb_output_poll_changed(rdev);
1160 } 1161 }
1161 1162
1162 static const struct drm_mode_config_funcs radeon_mode_funcs = { 1163 static const struct drm_mode_config_funcs radeon_mode_funcs = {
1163 .fb_create = radeon_user_framebuffer_create, 1164 .fb_create = radeon_user_framebuffer_create,
1164 .output_poll_changed = radeon_output_poll_changed 1165 .output_poll_changed = radeon_output_poll_changed
1165 }; 1166 };
1166 1167
1167 struct drm_prop_enum_list { 1168 struct drm_prop_enum_list {
1168 int type; 1169 int type;
1169 char *name; 1170 char *name;
1170 }; 1171 };
1171 1172
1172 static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] = 1173 static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
1173 { { 0, "driver" }, 1174 { { 0, "driver" },
1174 { 1, "bios" }, 1175 { 1, "bios" },
1175 }; 1176 };
1176 1177
1177 static struct drm_prop_enum_list radeon_tv_std_enum_list[] = 1178 static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
1178 { { TV_STD_NTSC, "ntsc" }, 1179 { { TV_STD_NTSC, "ntsc" },
1179 { TV_STD_PAL, "pal" }, 1180 { TV_STD_PAL, "pal" },
1180 { TV_STD_PAL_M, "pal-m" }, 1181 { TV_STD_PAL_M, "pal-m" },
1181 { TV_STD_PAL_60, "pal-60" }, 1182 { TV_STD_PAL_60, "pal-60" },
1182 { TV_STD_NTSC_J, "ntsc-j" }, 1183 { TV_STD_NTSC_J, "ntsc-j" },
1183 { TV_STD_SCART_PAL, "scart-pal" }, 1184 { TV_STD_SCART_PAL, "scart-pal" },
1184 { TV_STD_PAL_CN, "pal-cn" }, 1185 { TV_STD_PAL_CN, "pal-cn" },
1185 { TV_STD_SECAM, "secam" }, 1186 { TV_STD_SECAM, "secam" },
1186 }; 1187 };
1187 1188
1188 static struct drm_prop_enum_list radeon_underscan_enum_list[] = 1189 static struct drm_prop_enum_list radeon_underscan_enum_list[] =
1189 { { UNDERSCAN_OFF, "off" }, 1190 { { UNDERSCAN_OFF, "off" },
1190 { UNDERSCAN_ON, "on" }, 1191 { UNDERSCAN_ON, "on" },
1191 { UNDERSCAN_AUTO, "auto" }, 1192 { UNDERSCAN_AUTO, "auto" },
1192 }; 1193 };
1193 1194
1194 static int radeon_modeset_create_props(struct radeon_device *rdev) 1195 static int radeon_modeset_create_props(struct radeon_device *rdev)
1195 { 1196 {
1196 int i, sz; 1197 int i, sz;
1197 1198
1198 if (rdev->is_atom_bios) { 1199 if (rdev->is_atom_bios) {
1199 rdev->mode_info.coherent_mode_property = 1200 rdev->mode_info.coherent_mode_property =
1200 drm_property_create(rdev->ddev, 1201 drm_property_create(rdev->ddev,
1201 DRM_MODE_PROP_RANGE, 1202 DRM_MODE_PROP_RANGE,
1202 "coherent", 2); 1203 "coherent", 2);
1203 if (!rdev->mode_info.coherent_mode_property) 1204 if (!rdev->mode_info.coherent_mode_property)
1204 return -ENOMEM; 1205 return -ENOMEM;
1205 1206
1206 rdev->mode_info.coherent_mode_property->values[0] = 0; 1207 rdev->mode_info.coherent_mode_property->values[0] = 0;
1207 rdev->mode_info.coherent_mode_property->values[1] = 1; 1208 rdev->mode_info.coherent_mode_property->values[1] = 1;
1208 } 1209 }
1209 1210
1210 if (!ASIC_IS_AVIVO(rdev)) { 1211 if (!ASIC_IS_AVIVO(rdev)) {
1211 sz = ARRAY_SIZE(radeon_tmds_pll_enum_list); 1212 sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
1212 rdev->mode_info.tmds_pll_property = 1213 rdev->mode_info.tmds_pll_property =
1213 drm_property_create(rdev->ddev, 1214 drm_property_create(rdev->ddev,
1214 DRM_MODE_PROP_ENUM, 1215 DRM_MODE_PROP_ENUM,
1215 "tmds_pll", sz); 1216 "tmds_pll", sz);
1216 for (i = 0; i < sz; i++) { 1217 for (i = 0; i < sz; i++) {
1217 drm_property_add_enum(rdev->mode_info.tmds_pll_property, 1218 drm_property_add_enum(rdev->mode_info.tmds_pll_property,
1218 i, 1219 i,
1219 radeon_tmds_pll_enum_list[i].type, 1220 radeon_tmds_pll_enum_list[i].type,
1220 radeon_tmds_pll_enum_list[i].name); 1221 radeon_tmds_pll_enum_list[i].name);
1221 } 1222 }
1222 } 1223 }
1223 1224
1224 rdev->mode_info.load_detect_property = 1225 rdev->mode_info.load_detect_property =
1225 drm_property_create(rdev->ddev, 1226 drm_property_create(rdev->ddev,
1226 DRM_MODE_PROP_RANGE, 1227 DRM_MODE_PROP_RANGE,
1227 "load detection", 2); 1228 "load detection", 2);
1228 if (!rdev->mode_info.load_detect_property) 1229 if (!rdev->mode_info.load_detect_property)
1229 return -ENOMEM; 1230 return -ENOMEM;
1230 rdev->mode_info.load_detect_property->values[0] = 0; 1231 rdev->mode_info.load_detect_property->values[0] = 0;
1231 rdev->mode_info.load_detect_property->values[1] = 1; 1232 rdev->mode_info.load_detect_property->values[1] = 1;
1232 1233
1233 drm_mode_create_scaling_mode_property(rdev->ddev); 1234 drm_mode_create_scaling_mode_property(rdev->ddev);
1234 1235
1235 sz = ARRAY_SIZE(radeon_tv_std_enum_list); 1236 sz = ARRAY_SIZE(radeon_tv_std_enum_list);
1236 rdev->mode_info.tv_std_property = 1237 rdev->mode_info.tv_std_property =
1237 drm_property_create(rdev->ddev, 1238 drm_property_create(rdev->ddev,
1238 DRM_MODE_PROP_ENUM, 1239 DRM_MODE_PROP_ENUM,
1239 "tv standard", sz); 1240 "tv standard", sz);
1240 for (i = 0; i < sz; i++) { 1241 for (i = 0; i < sz; i++) {
1241 drm_property_add_enum(rdev->mode_info.tv_std_property, 1242 drm_property_add_enum(rdev->mode_info.tv_std_property,
1242 i, 1243 i,
1243 radeon_tv_std_enum_list[i].type, 1244 radeon_tv_std_enum_list[i].type,
1244 radeon_tv_std_enum_list[i].name); 1245 radeon_tv_std_enum_list[i].name);
1245 } 1246 }
1246 1247
1247 sz = ARRAY_SIZE(radeon_underscan_enum_list); 1248 sz = ARRAY_SIZE(radeon_underscan_enum_list);
1248 rdev->mode_info.underscan_property = 1249 rdev->mode_info.underscan_property =
1249 drm_property_create(rdev->ddev, 1250 drm_property_create(rdev->ddev,
1250 DRM_MODE_PROP_ENUM, 1251 DRM_MODE_PROP_ENUM,
1251 "underscan", sz); 1252 "underscan", sz);
1252 for (i = 0; i < sz; i++) { 1253 for (i = 0; i < sz; i++) {
1253 drm_property_add_enum(rdev->mode_info.underscan_property, 1254 drm_property_add_enum(rdev->mode_info.underscan_property,
1254 i, 1255 i,
1255 radeon_underscan_enum_list[i].type, 1256 radeon_underscan_enum_list[i].type,
1256 radeon_underscan_enum_list[i].name); 1257 radeon_underscan_enum_list[i].name);
1257 } 1258 }
1258 1259
1259 rdev->mode_info.underscan_hborder_property = 1260 rdev->mode_info.underscan_hborder_property =
1260 drm_property_create(rdev->ddev, 1261 drm_property_create(rdev->ddev,
1261 DRM_MODE_PROP_RANGE, 1262 DRM_MODE_PROP_RANGE,
1262 "underscan hborder", 2); 1263 "underscan hborder", 2);
1263 if (!rdev->mode_info.underscan_hborder_property) 1264 if (!rdev->mode_info.underscan_hborder_property)
1264 return -ENOMEM; 1265 return -ENOMEM;
1265 rdev->mode_info.underscan_hborder_property->values[0] = 0; 1266 rdev->mode_info.underscan_hborder_property->values[0] = 0;
1266 rdev->mode_info.underscan_hborder_property->values[1] = 128; 1267 rdev->mode_info.underscan_hborder_property->values[1] = 128;
1267 1268
1268 rdev->mode_info.underscan_vborder_property = 1269 rdev->mode_info.underscan_vborder_property =
1269 drm_property_create(rdev->ddev, 1270 drm_property_create(rdev->ddev,
1270 DRM_MODE_PROP_RANGE, 1271 DRM_MODE_PROP_RANGE,
1271 "underscan vborder", 2); 1272 "underscan vborder", 2);
1272 if (!rdev->mode_info.underscan_vborder_property) 1273 if (!rdev->mode_info.underscan_vborder_property)
1273 return -ENOMEM; 1274 return -ENOMEM;
1274 rdev->mode_info.underscan_vborder_property->values[0] = 0; 1275 rdev->mode_info.underscan_vborder_property->values[0] = 0;
1275 rdev->mode_info.underscan_vborder_property->values[1] = 128; 1276 rdev->mode_info.underscan_vborder_property->values[1] = 128;
1276 1277
1277 return 0; 1278 return 0;
1278 } 1279 }
1279 1280
1280 void radeon_update_display_priority(struct radeon_device *rdev) 1281 void radeon_update_display_priority(struct radeon_device *rdev)
1281 { 1282 {
1282 /* adjustment options for the display watermarks */ 1283 /* adjustment options for the display watermarks */
1283 if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) { 1284 if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
1284 /* set display priority to high for r3xx, rv515 chips 1285 /* set display priority to high for r3xx, rv515 chips
1285 * this avoids flickering due to underflow to the 1286 * this avoids flickering due to underflow to the
1286 * display controllers during heavy acceleration. 1287 * display controllers during heavy acceleration.
1287 * Don't force high on rs4xx igp chips as it seems to 1288 * Don't force high on rs4xx igp chips as it seems to
1288 * affect the sound card. See kernel bug 15982. 1289 * affect the sound card. See kernel bug 15982.
1289 */ 1290 */
1290 if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) && 1291 if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
1291 !(rdev->flags & RADEON_IS_IGP)) 1292 !(rdev->flags & RADEON_IS_IGP))
1292 rdev->disp_priority = 2; 1293 rdev->disp_priority = 2;
1293 else 1294 else
1294 rdev->disp_priority = 0; 1295 rdev->disp_priority = 0;
1295 } else 1296 } else
1296 rdev->disp_priority = radeon_disp_priority; 1297 rdev->disp_priority = radeon_disp_priority;
1297 1298
1298 } 1299 }
1299 1300
1300 int radeon_modeset_init(struct radeon_device *rdev) 1301 int radeon_modeset_init(struct radeon_device *rdev)
1301 { 1302 {
1302 int i; 1303 int i;
1303 int ret; 1304 int ret;
1304 1305
1305 drm_mode_config_init(rdev->ddev); 1306 drm_mode_config_init(rdev->ddev);
1306 rdev->mode_info.mode_config_initialized = true; 1307 rdev->mode_info.mode_config_initialized = true;
1307 1308
1308 rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs; 1309 rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
1309 1310
1310 if (ASIC_IS_DCE5(rdev)) { 1311 if (ASIC_IS_DCE5(rdev)) {
1311 rdev->ddev->mode_config.max_width = 16384; 1312 rdev->ddev->mode_config.max_width = 16384;
1312 rdev->ddev->mode_config.max_height = 16384; 1313 rdev->ddev->mode_config.max_height = 16384;
1313 } else if (ASIC_IS_AVIVO(rdev)) { 1314 } else if (ASIC_IS_AVIVO(rdev)) {
1314 rdev->ddev->mode_config.max_width = 8192; 1315 rdev->ddev->mode_config.max_width = 8192;
1315 rdev->ddev->mode_config.max_height = 8192; 1316 rdev->ddev->mode_config.max_height = 8192;
1316 } else { 1317 } else {
1317 rdev->ddev->mode_config.max_width = 4096; 1318 rdev->ddev->mode_config.max_width = 4096;
1318 rdev->ddev->mode_config.max_height = 4096; 1319 rdev->ddev->mode_config.max_height = 4096;
1319 } 1320 }
1320 1321
1321 rdev->ddev->mode_config.fb_base = rdev->mc.aper_base; 1322 rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
1322 1323
1323 ret = radeon_modeset_create_props(rdev); 1324 ret = radeon_modeset_create_props(rdev);
1324 if (ret) { 1325 if (ret) {
1325 return ret; 1326 return ret;
1326 } 1327 }
1327 1328
1328 /* init i2c buses */ 1329 /* init i2c buses */
1329 radeon_i2c_init(rdev); 1330 radeon_i2c_init(rdev);
1330 1331
1331 /* check combios for a valid hardcoded EDID - Sun servers */ 1332 /* check combios for a valid hardcoded EDID - Sun servers */
1332 if (!rdev->is_atom_bios) { 1333 if (!rdev->is_atom_bios) {
1333 /* check for hardcoded EDID in BIOS */ 1334 /* check for hardcoded EDID in BIOS */
1334 radeon_combios_check_hardcoded_edid(rdev); 1335 radeon_combios_check_hardcoded_edid(rdev);
1335 } 1336 }
1336 1337
1337 /* allocate crtcs */ 1338 /* allocate crtcs */
1338 for (i = 0; i < rdev->num_crtc; i++) { 1339 for (i = 0; i < rdev->num_crtc; i++) {
1339 radeon_crtc_init(rdev->ddev, i); 1340 radeon_crtc_init(rdev->ddev, i);
1340 } 1341 }
1341 1342
1342 /* okay we should have all the bios connectors */ 1343 /* okay we should have all the bios connectors */
1343 ret = radeon_setup_enc_conn(rdev->ddev); 1344 ret = radeon_setup_enc_conn(rdev->ddev);
1344 if (!ret) { 1345 if (!ret) {
1345 return ret; 1346 return ret;
1346 } 1347 }
1347 /* initialize hpd */ 1348 /* initialize hpd */
1348 radeon_hpd_init(rdev); 1349 radeon_hpd_init(rdev);
1349 1350
1350 /* Initialize power management */ 1351 /* Initialize power management */
1351 radeon_pm_init(rdev); 1352 radeon_pm_init(rdev);
1352 1353
1353 radeon_fbdev_init(rdev); 1354 radeon_fbdev_init(rdev);
1354 drm_kms_helper_poll_init(rdev->ddev); 1355 drm_kms_helper_poll_init(rdev->ddev);
1355 1356
1356 return 0; 1357 return 0;
1357 } 1358 }
1358 1359
1359 void radeon_modeset_fini(struct radeon_device *rdev) 1360 void radeon_modeset_fini(struct radeon_device *rdev)
1360 { 1361 {
1361 radeon_fbdev_fini(rdev); 1362 radeon_fbdev_fini(rdev);
1362 kfree(rdev->mode_info.bios_hardcoded_edid); 1363 kfree(rdev->mode_info.bios_hardcoded_edid);
1363 radeon_pm_fini(rdev); 1364 radeon_pm_fini(rdev);
1364 1365
1365 if (rdev->mode_info.mode_config_initialized) { 1366 if (rdev->mode_info.mode_config_initialized) {
1366 drm_kms_helper_poll_fini(rdev->ddev); 1367 drm_kms_helper_poll_fini(rdev->ddev);
1367 radeon_hpd_fini(rdev); 1368 radeon_hpd_fini(rdev);
1368 drm_mode_config_cleanup(rdev->ddev); 1369 drm_mode_config_cleanup(rdev->ddev);
1369 rdev->mode_info.mode_config_initialized = false; 1370 rdev->mode_info.mode_config_initialized = false;
1370 } 1371 }
1371 /* free i2c buses */ 1372 /* free i2c buses */
1372 radeon_i2c_fini(rdev); 1373 radeon_i2c_fini(rdev);
1373 } 1374 }
1374 1375
1375 static bool is_hdtv_mode(struct drm_display_mode *mode) 1376 static bool is_hdtv_mode(struct drm_display_mode *mode)
1376 { 1377 {
1377 /* try and guess if this is a tv or a monitor */ 1378 /* try and guess if this is a tv or a monitor */
1378 if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */ 1379 if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
1379 (mode->vdisplay == 576) || /* 576p */ 1380 (mode->vdisplay == 576) || /* 576p */
1380 (mode->vdisplay == 720) || /* 720p */ 1381 (mode->vdisplay == 720) || /* 720p */
1381 (mode->vdisplay == 1080)) /* 1080p */ 1382 (mode->vdisplay == 1080)) /* 1080p */
1382 return true; 1383 return true;
1383 else 1384 else
1384 return false; 1385 return false;
1385 } 1386 }
1386 1387
1387 bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, 1388 bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1388 struct drm_display_mode *mode, 1389 struct drm_display_mode *mode,
1389 struct drm_display_mode *adjusted_mode) 1390 struct drm_display_mode *adjusted_mode)
1390 { 1391 {
1391 struct drm_device *dev = crtc->dev; 1392 struct drm_device *dev = crtc->dev;
1392 struct radeon_device *rdev = dev->dev_private; 1393 struct radeon_device *rdev = dev->dev_private;
1393 struct drm_encoder *encoder; 1394 struct drm_encoder *encoder;
1394 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1395 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1395 struct radeon_encoder *radeon_encoder; 1396 struct radeon_encoder *radeon_encoder;
1396 struct drm_connector *connector; 1397 struct drm_connector *connector;
1397 struct radeon_connector *radeon_connector; 1398 struct radeon_connector *radeon_connector;
1398 bool first = true; 1399 bool first = true;
1399 u32 src_v = 1, dst_v = 1; 1400 u32 src_v = 1, dst_v = 1;
1400 u32 src_h = 1, dst_h = 1; 1401 u32 src_h = 1, dst_h = 1;
1401 1402
1402 radeon_crtc->h_border = 0; 1403 radeon_crtc->h_border = 0;
1403 radeon_crtc->v_border = 0; 1404 radeon_crtc->v_border = 0;
1404 1405
1405 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 1406 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1406 if (encoder->crtc != crtc) 1407 if (encoder->crtc != crtc)
1407 continue; 1408 continue;
1408 radeon_encoder = to_radeon_encoder(encoder); 1409 radeon_encoder = to_radeon_encoder(encoder);
1409 connector = radeon_get_connector_for_encoder(encoder); 1410 connector = radeon_get_connector_for_encoder(encoder);
1410 radeon_connector = to_radeon_connector(connector); 1411 radeon_connector = to_radeon_connector(connector);
1411 1412
1412 if (first) { 1413 if (first) {
1413 /* set scaling */ 1414 /* set scaling */
1414 if (radeon_encoder->rmx_type == RMX_OFF) 1415 if (radeon_encoder->rmx_type == RMX_OFF)
1415 radeon_crtc->rmx_type = RMX_OFF; 1416 radeon_crtc->rmx_type = RMX_OFF;
1416 else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay || 1417 else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
1417 mode->vdisplay < radeon_encoder->native_mode.vdisplay) 1418 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
1418 radeon_crtc->rmx_type = radeon_encoder->rmx_type; 1419 radeon_crtc->rmx_type = radeon_encoder->rmx_type;
1419 else 1420 else
1420 radeon_crtc->rmx_type = RMX_OFF; 1421 radeon_crtc->rmx_type = RMX_OFF;
1421 /* copy native mode */ 1422 /* copy native mode */
1422 memcpy(&radeon_crtc->native_mode, 1423 memcpy(&radeon_crtc->native_mode,
1423 &radeon_encoder->native_mode, 1424 &radeon_encoder->native_mode,
1424 sizeof(struct drm_display_mode)); 1425 sizeof(struct drm_display_mode));
1425 src_v = crtc->mode.vdisplay; 1426 src_v = crtc->mode.vdisplay;
1426 dst_v = radeon_crtc->native_mode.vdisplay; 1427 dst_v = radeon_crtc->native_mode.vdisplay;
1427 src_h = crtc->mode.hdisplay; 1428 src_h = crtc->mode.hdisplay;
1428 dst_h = radeon_crtc->native_mode.hdisplay; 1429 dst_h = radeon_crtc->native_mode.hdisplay;
1429 1430
1430 /* fix up for overscan on hdmi */ 1431 /* fix up for overscan on hdmi */
1431 if (ASIC_IS_AVIVO(rdev) && 1432 if (ASIC_IS_AVIVO(rdev) &&
1432 (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && 1433 (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
1433 ((radeon_encoder->underscan_type == UNDERSCAN_ON) || 1434 ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
1434 ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && 1435 ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
1435 drm_detect_hdmi_monitor(radeon_connector->edid) && 1436 drm_detect_hdmi_monitor(radeon_connector->edid) &&
1436 is_hdtv_mode(mode)))) { 1437 is_hdtv_mode(mode)))) {
1437 if (radeon_encoder->underscan_hborder != 0) 1438 if (radeon_encoder->underscan_hborder != 0)
1438 radeon_crtc->h_border = radeon_encoder->underscan_hborder; 1439 radeon_crtc->h_border = radeon_encoder->underscan_hborder;
1439 else 1440 else
1440 radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; 1441 radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
1441 if (radeon_encoder->underscan_vborder != 0) 1442 if (radeon_encoder->underscan_vborder != 0)
1442 radeon_crtc->v_border = radeon_encoder->underscan_vborder; 1443 radeon_crtc->v_border = radeon_encoder->underscan_vborder;
1443 else 1444 else
1444 radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; 1445 radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
1445 radeon_crtc->rmx_type = RMX_FULL; 1446 radeon_crtc->rmx_type = RMX_FULL;
1446 src_v = crtc->mode.vdisplay; 1447 src_v = crtc->mode.vdisplay;
1447 dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2); 1448 dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
1448 src_h = crtc->mode.hdisplay; 1449 src_h = crtc->mode.hdisplay;
1449 dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2); 1450 dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2);
1450 } 1451 }
1451 first = false; 1452 first = false;
1452 } else { 1453 } else {
1453 if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) { 1454 if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
1454 /* WARNING: Right now this can't happen but 1455 /* WARNING: Right now this can't happen but
1455 * in the future we need to check that scaling 1456 * in the future we need to check that scaling
1456 * are consistent across different encoder 1457 * are consistent across different encoder
1457 * (ie all encoder can work with the same 1458 * (ie all encoder can work with the same
1458 * scaling). 1459 * scaling).
1459 */ 1460 */
1460 DRM_ERROR("Scaling not consistent across encoder.\n"); 1461 DRM_ERROR("Scaling not consistent across encoder.\n");
1461 return false; 1462 return false;
1462 } 1463 }
1463 } 1464 }
1464 } 1465 }
1465 if (radeon_crtc->rmx_type != RMX_OFF) { 1466 if (radeon_crtc->rmx_type != RMX_OFF) {
1466 fixed20_12 a, b; 1467 fixed20_12 a, b;
1467 a.full = dfixed_const(src_v); 1468 a.full = dfixed_const(src_v);
1468 b.full = dfixed_const(dst_v); 1469 b.full = dfixed_const(dst_v);
1469 radeon_crtc->vsc.full = dfixed_div(a, b); 1470 radeon_crtc->vsc.full = dfixed_div(a, b);
1470 a.full = dfixed_const(src_h); 1471 a.full = dfixed_const(src_h);
1471 b.full = dfixed_const(dst_h); 1472 b.full = dfixed_const(dst_h);
1472 radeon_crtc->hsc.full = dfixed_div(a, b); 1473 radeon_crtc->hsc.full = dfixed_div(a, b);
1473 } else { 1474 } else {
1474 radeon_crtc->vsc.full = dfixed_const(1); 1475 radeon_crtc->vsc.full = dfixed_const(1);
1475 radeon_crtc->hsc.full = dfixed_const(1); 1476 radeon_crtc->hsc.full = dfixed_const(1);
1476 } 1477 }
1477 return true; 1478 return true;
1478 } 1479 }
1479 1480
1480 /* 1481 /*
1481 * Retrieve current video scanout position of crtc on a given gpu. 1482 * Retrieve current video scanout position of crtc on a given gpu.
1482 * 1483 *
1483 * \param dev Device to query. 1484 * \param dev Device to query.
1484 * \param crtc Crtc to query. 1485 * \param crtc Crtc to query.
1485 * \param *vpos Location where vertical scanout position should be stored. 1486 * \param *vpos Location where vertical scanout position should be stored.
1486 * \param *hpos Location where horizontal scanout position should go. 1487 * \param *hpos Location where horizontal scanout position should go.
1487 * 1488 *
1488 * Returns vpos as a positive number while in active scanout area. 1489 * Returns vpos as a positive number while in active scanout area.
1489 * Returns vpos as a negative number inside vblank, counting the number 1490 * Returns vpos as a negative number inside vblank, counting the number
1490 * of scanlines to go until end of vblank, e.g., -1 means "one scanline 1491 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
1491 * until start of active scanout / end of vblank." 1492 * until start of active scanout / end of vblank."
1492 * 1493 *
1493 * \return Flags, or'ed together as follows: 1494 * \return Flags, or'ed together as follows:
1494 * 1495 *
1495 * DRM_SCANOUTPOS_VALID = Query successful. 1496 * DRM_SCANOUTPOS_VALID = Query successful.
1496 * DRM_SCANOUTPOS_INVBL = Inside vblank. 1497 * DRM_SCANOUTPOS_INVBL = Inside vblank.
1497 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of 1498 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
1498 * this flag means that returned position may be offset by a constant but 1499 * this flag means that returned position may be offset by a constant but
1499 * unknown small number of scanlines wrt. real scanout position. 1500 * unknown small number of scanlines wrt. real scanout position.
1500 * 1501 *
1501 */ 1502 */
1502 int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos) 1503 int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
1503 { 1504 {
1504 u32 stat_crtc = 0, vbl = 0, position = 0; 1505 u32 stat_crtc = 0, vbl = 0, position = 0;
1505 int vbl_start, vbl_end, vtotal, ret = 0; 1506 int vbl_start, vbl_end, vtotal, ret = 0;
1506 bool in_vbl = true; 1507 bool in_vbl = true;
1507 1508
1508 struct radeon_device *rdev = dev->dev_private; 1509 struct radeon_device *rdev = dev->dev_private;
1509 1510
1510 if (ASIC_IS_DCE4(rdev)) { 1511 if (ASIC_IS_DCE4(rdev)) {
1511 if (crtc == 0) { 1512 if (crtc == 0) {
1512 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1513 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1513 EVERGREEN_CRTC0_REGISTER_OFFSET); 1514 EVERGREEN_CRTC0_REGISTER_OFFSET);
1514 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1515 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1515 EVERGREEN_CRTC0_REGISTER_OFFSET); 1516 EVERGREEN_CRTC0_REGISTER_OFFSET);
1516 ret |= DRM_SCANOUTPOS_VALID; 1517 ret |= DRM_SCANOUTPOS_VALID;
1517 } 1518 }
1518 if (crtc == 1) { 1519 if (crtc == 1) {
1519 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1520 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1520 EVERGREEN_CRTC1_REGISTER_OFFSET); 1521 EVERGREEN_CRTC1_REGISTER_OFFSET);
1521 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1522 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1522 EVERGREEN_CRTC1_REGISTER_OFFSET); 1523 EVERGREEN_CRTC1_REGISTER_OFFSET);
1523 ret |= DRM_SCANOUTPOS_VALID; 1524 ret |= DRM_SCANOUTPOS_VALID;
1524 } 1525 }
1525 if (crtc == 2) { 1526 if (crtc == 2) {
1526 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1527 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1527 EVERGREEN_CRTC2_REGISTER_OFFSET); 1528 EVERGREEN_CRTC2_REGISTER_OFFSET);
1528 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1529 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1529 EVERGREEN_CRTC2_REGISTER_OFFSET); 1530 EVERGREEN_CRTC2_REGISTER_OFFSET);
1530 ret |= DRM_SCANOUTPOS_VALID; 1531 ret |= DRM_SCANOUTPOS_VALID;
1531 } 1532 }
1532 if (crtc == 3) { 1533 if (crtc == 3) {
1533 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1534 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1534 EVERGREEN_CRTC3_REGISTER_OFFSET); 1535 EVERGREEN_CRTC3_REGISTER_OFFSET);
1535 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1536 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1536 EVERGREEN_CRTC3_REGISTER_OFFSET); 1537 EVERGREEN_CRTC3_REGISTER_OFFSET);
1537 ret |= DRM_SCANOUTPOS_VALID; 1538 ret |= DRM_SCANOUTPOS_VALID;
1538 } 1539 }
1539 if (crtc == 4) { 1540 if (crtc == 4) {
1540 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1541 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1541 EVERGREEN_CRTC4_REGISTER_OFFSET); 1542 EVERGREEN_CRTC4_REGISTER_OFFSET);
1542 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1543 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1543 EVERGREEN_CRTC4_REGISTER_OFFSET); 1544 EVERGREEN_CRTC4_REGISTER_OFFSET);
1544 ret |= DRM_SCANOUTPOS_VALID; 1545 ret |= DRM_SCANOUTPOS_VALID;
1545 } 1546 }
1546 if (crtc == 5) { 1547 if (crtc == 5) {
1547 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1548 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1548 EVERGREEN_CRTC5_REGISTER_OFFSET); 1549 EVERGREEN_CRTC5_REGISTER_OFFSET);
1549 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1550 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1550 EVERGREEN_CRTC5_REGISTER_OFFSET); 1551 EVERGREEN_CRTC5_REGISTER_OFFSET);
1551 ret |= DRM_SCANOUTPOS_VALID; 1552 ret |= DRM_SCANOUTPOS_VALID;
1552 } 1553 }
1553 } else if (ASIC_IS_AVIVO(rdev)) { 1554 } else if (ASIC_IS_AVIVO(rdev)) {
1554 if (crtc == 0) { 1555 if (crtc == 0) {
1555 vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); 1556 vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
1556 position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); 1557 position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
1557 ret |= DRM_SCANOUTPOS_VALID; 1558 ret |= DRM_SCANOUTPOS_VALID;
1558 } 1559 }
1559 if (crtc == 1) { 1560 if (crtc == 1) {
1560 vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); 1561 vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
1561 position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); 1562 position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
1562 ret |= DRM_SCANOUTPOS_VALID; 1563 ret |= DRM_SCANOUTPOS_VALID;
1563 } 1564 }
1564 } else { 1565 } else {
1565 /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ 1566 /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
1566 if (crtc == 0) { 1567 if (crtc == 0) {
1567 /* Assume vbl_end == 0, get vbl_start from 1568 /* Assume vbl_end == 0, get vbl_start from
1568 * upper 16 bits. 1569 * upper 16 bits.
1569 */ 1570 */
1570 vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) & 1571 vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
1571 RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; 1572 RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
1572 /* Only retrieve vpos from upper 16 bits, set hpos == 0. */ 1573 /* Only retrieve vpos from upper 16 bits, set hpos == 0. */
1573 position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; 1574 position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
1574 stat_crtc = RREG32(RADEON_CRTC_STATUS); 1575 stat_crtc = RREG32(RADEON_CRTC_STATUS);
1575 if (!(stat_crtc & 1)) 1576 if (!(stat_crtc & 1))
1576 in_vbl = false; 1577 in_vbl = false;
1577 1578
1578 ret |= DRM_SCANOUTPOS_VALID; 1579 ret |= DRM_SCANOUTPOS_VALID;
1579 } 1580 }
1580 if (crtc == 1) { 1581 if (crtc == 1) {
1581 vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & 1582 vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
1582 RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; 1583 RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
1583 position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; 1584 position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
1584 stat_crtc = RREG32(RADEON_CRTC2_STATUS); 1585 stat_crtc = RREG32(RADEON_CRTC2_STATUS);
1585 if (!(stat_crtc & 1)) 1586 if (!(stat_crtc & 1))
1586 in_vbl = false; 1587 in_vbl = false;
1587 1588
1588 ret |= DRM_SCANOUTPOS_VALID; 1589 ret |= DRM_SCANOUTPOS_VALID;
1589 } 1590 }
1590 } 1591 }
1591 1592
1592 /* Decode into vertical and horizontal scanout position. */ 1593 /* Decode into vertical and horizontal scanout position. */
1593 *vpos = position & 0x1fff; 1594 *vpos = position & 0x1fff;
1594 *hpos = (position >> 16) & 0x1fff; 1595 *hpos = (position >> 16) & 0x1fff;
1595 1596
1596 /* Valid vblank area boundaries from gpu retrieved? */ 1597 /* Valid vblank area boundaries from gpu retrieved? */
1597 if (vbl > 0) { 1598 if (vbl > 0) {
1598 /* Yes: Decode. */ 1599 /* Yes: Decode. */
1599 ret |= DRM_SCANOUTPOS_ACCURATE; 1600 ret |= DRM_SCANOUTPOS_ACCURATE;
1600 vbl_start = vbl & 0x1fff; 1601 vbl_start = vbl & 0x1fff;
1601 vbl_end = (vbl >> 16) & 0x1fff; 1602 vbl_end = (vbl >> 16) & 0x1fff;
1602 } 1603 }
1603 else { 1604 else {
1604 /* No: Fake something reasonable which gives at least ok results. */ 1605 /* No: Fake something reasonable which gives at least ok results. */
1605 vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; 1606 vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
1606 vbl_end = 0; 1607 vbl_end = 0;
1607 } 1608 }
1608 1609
1609 /* Test scanout position against vblank region. */ 1610 /* Test scanout position against vblank region. */
1610 if ((*vpos < vbl_start) && (*vpos >= vbl_end)) 1611 if ((*vpos < vbl_start) && (*vpos >= vbl_end))
1611 in_vbl = false; 1612 in_vbl = false;
1612 1613
1613 /* Check if inside vblank area and apply corrective offsets: 1614 /* Check if inside vblank area and apply corrective offsets:
1614 * vpos will then be >=0 in video scanout area, but negative 1615 * vpos will then be >=0 in video scanout area, but negative
1615 * within vblank area, counting down the number of lines until 1616 * within vblank area, counting down the number of lines until
1616 * start of scanout. 1617 * start of scanout.
1617 */ 1618 */
1618 1619
1619 /* Inside "upper part" of vblank area? Apply corrective offset if so: */ 1620 /* Inside "upper part" of vblank area? Apply corrective offset if so: */
1620 if (in_vbl && (*vpos >= vbl_start)) { 1621 if (in_vbl && (*vpos >= vbl_start)) {
1621 vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; 1622 vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
1622 *vpos = *vpos - vtotal; 1623 *vpos = *vpos - vtotal;
1623 } 1624 }
1624 1625
1625 /* Correct for shifted end of vbl at vbl_end. */ 1626 /* Correct for shifted end of vbl at vbl_end. */
1626 *vpos = *vpos - vbl_end; 1627 *vpos = *vpos - vbl_end;
1627 1628
1628 /* In vblank? */ 1629 /* In vblank? */
1629 if (in_vbl) 1630 if (in_vbl)
1630 ret |= DRM_SCANOUTPOS_INVBL; 1631 ret |= DRM_SCANOUTPOS_INVBL;
1631 1632
1632 return ret; 1633 return ret;
1633 } 1634 }
1634 1635
1 /** 1 /**
2 * \file drmP.h 2 * \file drmP.h
3 * Private header for Direct Rendering Manager 3 * Private header for Direct Rendering Manager
4 * 4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com> 5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com> 6 * \author Gareth Hughes <gareth@valinux.com>
7 */ 7 */
8 8
9 /* 9 /*
10 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 10 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
11 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 11 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
12 * Copyright (c) 2009-2010, Code Aurora Forum. 12 * Copyright (c) 2009-2010, Code Aurora Forum.
13 * All rights reserved. 13 * All rights reserved.
14 * 14 *
15 * Permission is hereby granted, free of charge, to any person obtaining a 15 * Permission is hereby granted, free of charge, to any person obtaining a
16 * copy of this software and associated documentation files (the "Software"), 16 * copy of this software and associated documentation files (the "Software"),
17 * to deal in the Software without restriction, including without limitation 17 * to deal in the Software without restriction, including without limitation
18 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 18 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
19 * and/or sell copies of the Software, and to permit persons to whom the 19 * and/or sell copies of the Software, and to permit persons to whom the
20 * Software is furnished to do so, subject to the following conditions: 20 * Software is furnished to do so, subject to the following conditions:
21 * 21 *
22 * The above copyright notice and this permission notice (including the next 22 * The above copyright notice and this permission notice (including the next
23 * paragraph) shall be included in all copies or substantial portions of the 23 * paragraph) shall be included in all copies or substantial portions of the
24 * Software. 24 * Software.
25 * 25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
29 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 29 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
30 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 30 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
31 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 31 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
32 * OTHER DEALINGS IN THE SOFTWARE. 32 * OTHER DEALINGS IN THE SOFTWARE.
33 */ 33 */
34 34
35 #ifndef _DRM_P_H_ 35 #ifndef _DRM_P_H_
36 #define _DRM_P_H_ 36 #define _DRM_P_H_
37 37
38 #ifdef __KERNEL__ 38 #ifdef __KERNEL__
39 #ifdef __alpha__ 39 #ifdef __alpha__
40 /* add include of current.h so that "current" is defined 40 /* add include of current.h so that "current" is defined
41 * before static inline funcs in wait.h. Doing this so we 41 * before static inline funcs in wait.h. Doing this so we
42 * can build the DRM (part of PI DRI). 4/21/2000 S + B */ 42 * can build the DRM (part of PI DRI). 4/21/2000 S + B */
43 #include <asm/current.h> 43 #include <asm/current.h>
44 #endif /* __alpha__ */ 44 #endif /* __alpha__ */
45 #include <linux/module.h> 45 #include <linux/module.h>
46 #include <linux/kernel.h> 46 #include <linux/kernel.h>
47 #include <linux/miscdevice.h> 47 #include <linux/miscdevice.h>
48 #include <linux/fs.h> 48 #include <linux/fs.h>
49 #include <linux/proc_fs.h> 49 #include <linux/proc_fs.h>
50 #include <linux/init.h> 50 #include <linux/init.h>
51 #include <linux/file.h> 51 #include <linux/file.h>
52 #include <linux/platform_device.h> 52 #include <linux/platform_device.h>
53 #include <linux/pci.h> 53 #include <linux/pci.h>
54 #include <linux/jiffies.h> 54 #include <linux/jiffies.h>
55 #include <linux/dma-mapping.h> 55 #include <linux/dma-mapping.h>
56 #include <linux/mm.h> 56 #include <linux/mm.h>
57 #include <linux/cdev.h> 57 #include <linux/cdev.h>
58 #include <linux/mutex.h> 58 #include <linux/mutex.h>
59 #include <linux/slab.h> 59 #include <linux/slab.h>
60 #if defined(__alpha__) || defined(__powerpc__) 60 #if defined(__alpha__) || defined(__powerpc__)
61 #include <asm/pgtable.h> /* For pte_wrprotect */ 61 #include <asm/pgtable.h> /* For pte_wrprotect */
62 #endif 62 #endif
63 #include <asm/io.h> 63 #include <asm/io.h>
64 #include <asm/mman.h> 64 #include <asm/mman.h>
65 #include <asm/uaccess.h> 65 #include <asm/uaccess.h>
66 #ifdef CONFIG_MTRR 66 #ifdef CONFIG_MTRR
67 #include <asm/mtrr.h> 67 #include <asm/mtrr.h>
68 #endif 68 #endif
69 #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) 69 #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
70 #include <linux/types.h> 70 #include <linux/types.h>
71 #include <linux/agp_backend.h> 71 #include <linux/agp_backend.h>
72 #endif 72 #endif
73 #include <linux/workqueue.h> 73 #include <linux/workqueue.h>
74 #include <linux/poll.h> 74 #include <linux/poll.h>
75 #include <asm/pgalloc.h> 75 #include <asm/pgalloc.h>
76 #include "drm.h" 76 #include "drm.h"
77 77
78 #include <linux/idr.h> 78 #include <linux/idr.h>
79 79
80 #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) 80 #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
81 #define __OS_HAS_MTRR (defined(CONFIG_MTRR)) 81 #define __OS_HAS_MTRR (defined(CONFIG_MTRR))
82 82
83 struct drm_file; 83 struct drm_file;
84 struct drm_device; 84 struct drm_device;
85 85
86 #include "drm_os_linux.h" 86 #include "drm_os_linux.h"
87 #include "drm_hashtab.h" 87 #include "drm_hashtab.h"
88 #include "drm_mm.h" 88 #include "drm_mm.h"
89 89
90 #define DRM_UT_CORE 0x01 90 #define DRM_UT_CORE 0x01
91 #define DRM_UT_DRIVER 0x02 91 #define DRM_UT_DRIVER 0x02
92 #define DRM_UT_KMS 0x04 92 #define DRM_UT_KMS 0x04
93 /* 93 /*
94 * Three debug levels are defined. 94 * Three debug levels are defined.
95 * drm_core, drm_driver, drm_kms 95 * drm_core, drm_driver, drm_kms
96 * drm_core level can be used in the generic drm code. For example: 96 * drm_core level can be used in the generic drm code. For example:
97 * drm_ioctl, drm_mm, drm_memory 97 * drm_ioctl, drm_mm, drm_memory
98 * The macro definition of DRM_DEBUG is used. 98 * The macro definition of DRM_DEBUG is used.
99 * DRM_DEBUG(fmt, args...) 99 * DRM_DEBUG(fmt, args...)
100 * The debug info by using the DRM_DEBUG can be obtained by adding 100 * The debug info by using the DRM_DEBUG can be obtained by adding
101 * the boot option of "drm.debug=1". 101 * the boot option of "drm.debug=1".
102 * 102 *
103 * drm_driver level can be used in the specific drm driver. It is used 103 * drm_driver level can be used in the specific drm driver. It is used
104 * to add the debug info related with the drm driver. For example: 104 * to add the debug info related with the drm driver. For example:
105 * i915_drv, i915_dma, i915_gem, radeon_drv, 105 * i915_drv, i915_dma, i915_gem, radeon_drv,
106 * The macro definition of DRM_DEBUG_DRIVER can be used. 106 * The macro definition of DRM_DEBUG_DRIVER can be used.
107 * DRM_DEBUG_DRIVER(fmt, args...) 107 * DRM_DEBUG_DRIVER(fmt, args...)
108 * The debug info by using the DRM_DEBUG_DRIVER can be obtained by 108 * The debug info by using the DRM_DEBUG_DRIVER can be obtained by
109 * adding the boot option of "drm.debug=0x02" 109 * adding the boot option of "drm.debug=0x02"
110 * 110 *
111 * drm_kms level can be used in the KMS code related with specific drm driver. 111 * drm_kms level can be used in the KMS code related with specific drm driver.
112 * It is used to add the debug info related with KMS mode. For example: 112 * It is used to add the debug info related with KMS mode. For example:
113 * the connector/crtc , 113 * the connector/crtc ,
114 * The macro definition of DRM_DEBUG_KMS can be used. 114 * The macro definition of DRM_DEBUG_KMS can be used.
115 * DRM_DEBUG_KMS(fmt, args...) 115 * DRM_DEBUG_KMS(fmt, args...)
116 * The debug info by using the DRM_DEBUG_KMS can be obtained by 116 * The debug info by using the DRM_DEBUG_KMS can be obtained by
117 * adding the boot option of "drm.debug=0x04" 117 * adding the boot option of "drm.debug=0x04"
118 * 118 *
119 * If we add the boot option of "drm.debug=0x06", we can get the debug info by 119 * If we add the boot option of "drm.debug=0x06", we can get the debug info by
120 * using the DRM_DEBUG_KMS and DRM_DEBUG_DRIVER. 120 * using the DRM_DEBUG_KMS and DRM_DEBUG_DRIVER.
121 * If we add the boot option of "drm.debug=0x05", we can get the debug info by 121 * If we add the boot option of "drm.debug=0x05", we can get the debug info by
122 * using the DRM_DEBUG_KMS and DRM_DEBUG. 122 * using the DRM_DEBUG_KMS and DRM_DEBUG.
123 */ 123 */
124 124
125 extern void drm_ut_debug_printk(unsigned int request_level, 125 extern __attribute__((format (printf, 4, 5)))
126 void drm_ut_debug_printk(unsigned int request_level,
126 const char *prefix, 127 const char *prefix,
127 const char *function_name, 128 const char *function_name,
128 const char *format, ...); 129 const char *format, ...);
129 extern __attribute__((format (printf, 2, 3))) 130 extern __attribute__((format (printf, 2, 3)))
130 int drm_err(const char *func, const char *format, ...); 131 int drm_err(const char *func, const char *format, ...);
131 132
132 /***********************************************************************/ 133 /***********************************************************************/
133 /** \name DRM template customization defaults */ 134 /** \name DRM template customization defaults */
134 /*@{*/ 135 /*@{*/
135 136
136 /* driver capabilities and requirements mask */ 137 /* driver capabilities and requirements mask */
137 #define DRIVER_USE_AGP 0x1 138 #define DRIVER_USE_AGP 0x1
138 #define DRIVER_REQUIRE_AGP 0x2 139 #define DRIVER_REQUIRE_AGP 0x2
139 #define DRIVER_USE_MTRR 0x4 140 #define DRIVER_USE_MTRR 0x4
140 #define DRIVER_PCI_DMA 0x8 141 #define DRIVER_PCI_DMA 0x8
141 #define DRIVER_SG 0x10 142 #define DRIVER_SG 0x10
142 #define DRIVER_HAVE_DMA 0x20 143 #define DRIVER_HAVE_DMA 0x20
143 #define DRIVER_HAVE_IRQ 0x40 144 #define DRIVER_HAVE_IRQ 0x40
144 #define DRIVER_IRQ_SHARED 0x80 145 #define DRIVER_IRQ_SHARED 0x80
145 #define DRIVER_IRQ_VBL 0x100 146 #define DRIVER_IRQ_VBL 0x100
146 #define DRIVER_DMA_QUEUE 0x200 147 #define DRIVER_DMA_QUEUE 0x200
147 #define DRIVER_FB_DMA 0x400 148 #define DRIVER_FB_DMA 0x400
148 #define DRIVER_IRQ_VBL2 0x800 149 #define DRIVER_IRQ_VBL2 0x800
149 #define DRIVER_GEM 0x1000 150 #define DRIVER_GEM 0x1000
150 #define DRIVER_MODESET 0x2000 151 #define DRIVER_MODESET 0x2000
151 152
152 #define DRIVER_BUS_PCI 0x1 153 #define DRIVER_BUS_PCI 0x1
153 #define DRIVER_BUS_PLATFORM 0x2 154 #define DRIVER_BUS_PLATFORM 0x2
154 #define DRIVER_BUS_USB 0x3 155 #define DRIVER_BUS_USB 0x3
155 156
156 /***********************************************************************/ 157 /***********************************************************************/
157 /** \name Begin the DRM... */ 158 /** \name Begin the DRM... */
158 /*@{*/ 159 /*@{*/
159 160
160 #define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then 161 #define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then
161 also include looping detection. */ 162 also include looping detection. */
162 163
163 #define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ 164 #define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
164 #define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */ 165 #define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */
165 #define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */ 166 #define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */
166 #define DRM_LOOPING_LIMIT 5000000 167 #define DRM_LOOPING_LIMIT 5000000
167 #define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */ 168 #define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */
168 #define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */ 169 #define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */
169 170
170 #define DRM_FLAG_DEBUG 0x01 171 #define DRM_FLAG_DEBUG 0x01
171 172
172 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) 173 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
173 #define DRM_MAP_HASH_OFFSET 0x10000000 174 #define DRM_MAP_HASH_OFFSET 0x10000000
174 175
175 /*@}*/ 176 /*@}*/
176 177
177 /***********************************************************************/ 178 /***********************************************************************/
178 /** \name Macros to make printk easier */ 179 /** \name Macros to make printk easier */
179 /*@{*/ 180 /*@{*/
180 181
181 /** 182 /**
182 * Error output. 183 * Error output.
183 * 184 *
184 * \param fmt printf() like format string. 185 * \param fmt printf() like format string.
185 * \param arg arguments 186 * \param arg arguments
186 */ 187 */
187 #define DRM_ERROR(fmt, ...) \ 188 #define DRM_ERROR(fmt, ...) \
188 drm_err(__func__, fmt, ##__VA_ARGS__) 189 drm_err(__func__, fmt, ##__VA_ARGS__)
189 190
190 #define DRM_INFO(fmt, ...) \ 191 #define DRM_INFO(fmt, ...) \
191 printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__) 192 printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
192 193
193 /** 194 /**
194 * Debug output. 195 * Debug output.
195 * 196 *
196 * \param fmt printf() like format string. 197 * \param fmt printf() like format string.
197 * \param arg arguments 198 * \param arg arguments
198 */ 199 */
199 #if DRM_DEBUG_CODE 200 #if DRM_DEBUG_CODE
200 #define DRM_DEBUG(fmt, args...) \ 201 #define DRM_DEBUG(fmt, args...) \
201 do { \ 202 do { \
202 drm_ut_debug_printk(DRM_UT_CORE, DRM_NAME, \ 203 drm_ut_debug_printk(DRM_UT_CORE, DRM_NAME, \
203 __func__, fmt, ##args); \ 204 __func__, fmt, ##args); \
204 } while (0) 205 } while (0)
205 206
206 #define DRM_DEBUG_DRIVER(fmt, args...) \ 207 #define DRM_DEBUG_DRIVER(fmt, args...) \
207 do { \ 208 do { \
208 drm_ut_debug_printk(DRM_UT_DRIVER, DRM_NAME, \ 209 drm_ut_debug_printk(DRM_UT_DRIVER, DRM_NAME, \
209 __func__, fmt, ##args); \ 210 __func__, fmt, ##args); \
210 } while (0) 211 } while (0)
211 #define DRM_DEBUG_KMS(fmt, args...) \ 212 #define DRM_DEBUG_KMS(fmt, args...) \
212 do { \ 213 do { \
213 drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME, \ 214 drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME, \
214 __func__, fmt, ##args); \ 215 __func__, fmt, ##args); \
215 } while (0) 216 } while (0)
216 #define DRM_LOG(fmt, args...) \ 217 #define DRM_LOG(fmt, args...) \
217 do { \ 218 do { \
218 drm_ut_debug_printk(DRM_UT_CORE, NULL, \ 219 drm_ut_debug_printk(DRM_UT_CORE, NULL, \
219 NULL, fmt, ##args); \ 220 NULL, fmt, ##args); \
220 } while (0) 221 } while (0)
221 #define DRM_LOG_KMS(fmt, args...) \ 222 #define DRM_LOG_KMS(fmt, args...) \
222 do { \ 223 do { \
223 drm_ut_debug_printk(DRM_UT_KMS, NULL, \ 224 drm_ut_debug_printk(DRM_UT_KMS, NULL, \
224 NULL, fmt, ##args); \ 225 NULL, fmt, ##args); \
225 } while (0) 226 } while (0)
226 #define DRM_LOG_MODE(fmt, args...) \ 227 #define DRM_LOG_MODE(fmt, args...) \
227 do { \ 228 do { \
228 drm_ut_debug_printk(DRM_UT_MODE, NULL, \ 229 drm_ut_debug_printk(DRM_UT_MODE, NULL, \
229 NULL, fmt, ##args); \ 230 NULL, fmt, ##args); \
230 } while (0) 231 } while (0)
231 #define DRM_LOG_DRIVER(fmt, args...) \ 232 #define DRM_LOG_DRIVER(fmt, args...) \
232 do { \ 233 do { \
233 drm_ut_debug_printk(DRM_UT_DRIVER, NULL, \ 234 drm_ut_debug_printk(DRM_UT_DRIVER, NULL, \
234 NULL, fmt, ##args); \ 235 NULL, fmt, ##args); \
235 } while (0) 236 } while (0)
236 #else 237 #else
237 #define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0) 238 #define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
238 #define DRM_DEBUG_KMS(fmt, args...) do { } while (0) 239 #define DRM_DEBUG_KMS(fmt, args...) do { } while (0)
239 #define DRM_DEBUG(fmt, arg...) do { } while (0) 240 #define DRM_DEBUG(fmt, arg...) do { } while (0)
240 #define DRM_LOG(fmt, arg...) do { } while (0) 241 #define DRM_LOG(fmt, arg...) do { } while (0)
241 #define DRM_LOG_KMS(fmt, args...) do { } while (0) 242 #define DRM_LOG_KMS(fmt, args...) do { } while (0)
242 #define DRM_LOG_MODE(fmt, arg...) do { } while (0) 243 #define DRM_LOG_MODE(fmt, arg...) do { } while (0)
243 #define DRM_LOG_DRIVER(fmt, arg...) do { } while (0) 244 #define DRM_LOG_DRIVER(fmt, arg...) do { } while (0)
244 245
245 #endif 246 #endif
246 247
247 /*@}*/ 248 /*@}*/
248 249
249 /***********************************************************************/ 250 /***********************************************************************/
250 /** \name Internal types and structures */ 251 /** \name Internal types and structures */
251 /*@{*/ 252 /*@{*/
252 253
253 #define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x) 254 #define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
254 255
255 #define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) 256 #define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
256 #define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) 257 #define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
257 258
258 #define DRM_IF_VERSION(maj, min) (maj << 16 | min) 259 #define DRM_IF_VERSION(maj, min) (maj << 16 | min)
259 260
260 /** 261 /**
261 * Test that the hardware lock is held by the caller, returning otherwise. 262 * Test that the hardware lock is held by the caller, returning otherwise.
262 * 263 *
263 * \param dev DRM device. 264 * \param dev DRM device.
264 * \param filp file pointer of the caller. 265 * \param filp file pointer of the caller.
265 */ 266 */
266 #define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ 267 #define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
267 do { \ 268 do { \
268 if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ 269 if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
269 _file_priv->master->lock.file_priv != _file_priv) { \ 270 _file_priv->master->lock.file_priv != _file_priv) { \
270 DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ 271 DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
271 __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ 272 __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
272 _file_priv->master->lock.file_priv, _file_priv); \ 273 _file_priv->master->lock.file_priv, _file_priv); \
273 return -EINVAL; \ 274 return -EINVAL; \
274 } \ 275 } \
275 } while (0) 276 } while (0)
276 277
277 /** 278 /**
278 * Ioctl function type. 279 * Ioctl function type.
279 * 280 *
280 * \param inode device inode. 281 * \param inode device inode.
281 * \param file_priv DRM file private pointer. 282 * \param file_priv DRM file private pointer.
282 * \param cmd command. 283 * \param cmd command.
283 * \param arg argument. 284 * \param arg argument.
284 */ 285 */
285 typedef int drm_ioctl_t(struct drm_device *dev, void *data, 286 typedef int drm_ioctl_t(struct drm_device *dev, void *data,
286 struct drm_file *file_priv); 287 struct drm_file *file_priv);
287 288
288 typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, 289 typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
289 unsigned long arg); 290 unsigned long arg);
290 291
291 #define DRM_IOCTL_NR(n) _IOC_NR(n) 292 #define DRM_IOCTL_NR(n) _IOC_NR(n)
292 #define DRM_MAJOR 226 293 #define DRM_MAJOR 226
293 294
294 #define DRM_AUTH 0x1 295 #define DRM_AUTH 0x1
295 #define DRM_MASTER 0x2 296 #define DRM_MASTER 0x2
296 #define DRM_ROOT_ONLY 0x4 297 #define DRM_ROOT_ONLY 0x4
297 #define DRM_CONTROL_ALLOW 0x8 298 #define DRM_CONTROL_ALLOW 0x8
298 #define DRM_UNLOCKED 0x10 299 #define DRM_UNLOCKED 0x10
299 300
300 struct drm_ioctl_desc { 301 struct drm_ioctl_desc {
301 unsigned int cmd; 302 unsigned int cmd;
302 int flags; 303 int flags;
303 drm_ioctl_t *func; 304 drm_ioctl_t *func;
304 unsigned int cmd_drv; 305 unsigned int cmd_drv;
305 }; 306 };
306 307
307 /** 308 /**
308 * Creates a driver or general drm_ioctl_desc array entry for the given 309 * Creates a driver or general drm_ioctl_desc array entry for the given
309 * ioctl, for use by drm_ioctl(). 310 * ioctl, for use by drm_ioctl().
310 */ 311 */
311 312
312 #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ 313 #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
313 [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl} 314 [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl}
314 315
315 struct drm_magic_entry { 316 struct drm_magic_entry {
316 struct list_head head; 317 struct list_head head;
317 struct drm_hash_item hash_item; 318 struct drm_hash_item hash_item;
318 struct drm_file *priv; 319 struct drm_file *priv;
319 }; 320 };
320 321
321 struct drm_vma_entry { 322 struct drm_vma_entry {
322 struct list_head head; 323 struct list_head head;
323 struct vm_area_struct *vma; 324 struct vm_area_struct *vma;
324 pid_t pid; 325 pid_t pid;
325 }; 326 };
326 327
327 /** 328 /**
328 * DMA buffer. 329 * DMA buffer.
329 */ 330 */
330 struct drm_buf { 331 struct drm_buf {
331 int idx; /**< Index into master buflist */ 332 int idx; /**< Index into master buflist */
332 int total; /**< Buffer size */ 333 int total; /**< Buffer size */
333 int order; /**< log-base-2(total) */ 334 int order; /**< log-base-2(total) */
334 int used; /**< Amount of buffer in use (for DMA) */ 335 int used; /**< Amount of buffer in use (for DMA) */
335 unsigned long offset; /**< Byte offset (used internally) */ 336 unsigned long offset; /**< Byte offset (used internally) */
336 void *address; /**< Address of buffer */ 337 void *address; /**< Address of buffer */
337 unsigned long bus_address; /**< Bus address of buffer */ 338 unsigned long bus_address; /**< Bus address of buffer */
338 struct drm_buf *next; /**< Kernel-only: used for free list */ 339 struct drm_buf *next; /**< Kernel-only: used for free list */
339 __volatile__ int waiting; /**< On kernel DMA queue */ 340 __volatile__ int waiting; /**< On kernel DMA queue */
340 __volatile__ int pending; /**< On hardware DMA queue */ 341 __volatile__ int pending; /**< On hardware DMA queue */
341 wait_queue_head_t dma_wait; /**< Processes waiting */ 342 wait_queue_head_t dma_wait; /**< Processes waiting */
342 struct drm_file *file_priv; /**< Private of holding file descr */ 343 struct drm_file *file_priv; /**< Private of holding file descr */
343 int context; /**< Kernel queue for this buffer */ 344 int context; /**< Kernel queue for this buffer */
344 int while_locked; /**< Dispatch this buffer while locked */ 345 int while_locked; /**< Dispatch this buffer while locked */
345 enum { 346 enum {
346 DRM_LIST_NONE = 0, 347 DRM_LIST_NONE = 0,
347 DRM_LIST_FREE = 1, 348 DRM_LIST_FREE = 1,
348 DRM_LIST_WAIT = 2, 349 DRM_LIST_WAIT = 2,
349 DRM_LIST_PEND = 3, 350 DRM_LIST_PEND = 3,
350 DRM_LIST_PRIO = 4, 351 DRM_LIST_PRIO = 4,
351 DRM_LIST_RECLAIM = 5 352 DRM_LIST_RECLAIM = 5
352 } list; /**< Which list we're on */ 353 } list; /**< Which list we're on */
353 354
354 int dev_priv_size; /**< Size of buffer private storage */ 355 int dev_priv_size; /**< Size of buffer private storage */
355 void *dev_private; /**< Per-buffer private storage */ 356 void *dev_private; /**< Per-buffer private storage */
356 }; 357 };
357 358
358 /** bufs is one longer than it has to be */ 359 /** bufs is one longer than it has to be */
359 struct drm_waitlist { 360 struct drm_waitlist {
360 int count; /**< Number of possible buffers */ 361 int count; /**< Number of possible buffers */
361 struct drm_buf **bufs; /**< List of pointers to buffers */ 362 struct drm_buf **bufs; /**< List of pointers to buffers */
362 struct drm_buf **rp; /**< Read pointer */ 363 struct drm_buf **rp; /**< Read pointer */
363 struct drm_buf **wp; /**< Write pointer */ 364 struct drm_buf **wp; /**< Write pointer */
364 struct drm_buf **end; /**< End pointer */ 365 struct drm_buf **end; /**< End pointer */
365 spinlock_t read_lock; 366 spinlock_t read_lock;
366 spinlock_t write_lock; 367 spinlock_t write_lock;
367 }; 368 };
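The "one longer" comment is the usual ring-buffer trick: reserving one empty slot lets rp == wp unambiguously mean "empty", so a ring for N buffers needs N + 1 slots. A minimal sketch with hypothetical helper names:

static int foo_waitlist_is_empty(const struct drm_waitlist *w)
{
	return w->rp == w->wp;		/* read caught up with write */
}

static int foo_waitlist_is_full(const struct drm_waitlist *w)
{
	struct drm_buf **next = w->wp + 1;

	if (next == w->end)		/* wrap past the last slot */
		next = w->bufs;
	return next == w->rp;		/* full one slot before rp */
}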
368 369
369 struct drm_freelist { 370 struct drm_freelist {
370 int initialized; /**< Freelist in use */ 371 int initialized; /**< Freelist in use */
371 atomic_t count; /**< Number of free buffers */ 372 atomic_t count; /**< Number of free buffers */
372 struct drm_buf *next; /**< End pointer */ 373 struct drm_buf *next; /**< End pointer */
373 374
374 wait_queue_head_t waiting; /**< Processes waiting on free bufs */ 375 wait_queue_head_t waiting; /**< Processes waiting on free bufs */
375 int low_mark; /**< Low water mark */ 376 int low_mark; /**< Low water mark */
376 int high_mark; /**< High water mark */ 377 int high_mark; /**< High water mark */
377 atomic_t wfh; /**< If waiting for high mark */ 378 atomic_t wfh; /**< If waiting for high mark */
378 spinlock_t lock; 379 spinlock_t lock;
379 }; 380 };
380 381
381 typedef struct drm_dma_handle { 382 typedef struct drm_dma_handle {
382 dma_addr_t busaddr; 383 dma_addr_t busaddr;
383 void *vaddr; 384 void *vaddr;
384 size_t size; 385 size_t size;
385 } drm_dma_handle_t; 386 } drm_dma_handle_t;
386 387
387 /** 388 /**
388 * Buffer entry. There is one of these for each buffer size order. 389 * Buffer entry. There is one of these for each buffer size order.
389 */ 390 */
390 struct drm_buf_entry { 391 struct drm_buf_entry {
391 int buf_size; /**< size */ 392 int buf_size; /**< size */
392 int buf_count; /**< number of buffers */ 393 int buf_count; /**< number of buffers */
393 struct drm_buf *buflist; /**< buffer list */ 394 struct drm_buf *buflist; /**< buffer list */
394 int seg_count; 395 int seg_count;
395 int page_order; 396 int page_order;
396 struct drm_dma_handle **seglist; 397 struct drm_dma_handle **seglist;
397 398
398 struct drm_freelist freelist; 399 struct drm_freelist freelist;
399 }; 400 };
400 401
401 /* Event queued up for userspace to read */ 402 /* Event queued up for userspace to read */
402 struct drm_pending_event { 403 struct drm_pending_event {
403 struct drm_event *event; 404 struct drm_event *event;
404 struct list_head link; 405 struct list_head link;
405 struct drm_file *file_priv; 406 struct drm_file *file_priv;
406 pid_t pid; /* pid of requester, no guarantee it's valid by the time 407 pid_t pid; /* pid of requester, no guarantee it's valid by the time
407 we deliver the event, for tracing only */ 408 we deliver the event, for tracing only */
408 void (*destroy)(struct drm_pending_event *event); 409 void (*destroy)(struct drm_pending_event *event);
409 }; 410 };
410 411
411 /** File private data */ 412 /** File private data */
412 struct drm_file { 413 struct drm_file {
413 int authenticated; 414 int authenticated;
414 pid_t pid; 415 pid_t pid;
415 uid_t uid; 416 uid_t uid;
416 drm_magic_t magic; 417 drm_magic_t magic;
417 unsigned long ioctl_count; 418 unsigned long ioctl_count;
418 struct list_head lhead; 419 struct list_head lhead;
419 struct drm_minor *minor; 420 struct drm_minor *minor;
420 unsigned long lock_count; 421 unsigned long lock_count;
421 422
422 /** Mapping of mm object handles to object pointers. */ 423 /** Mapping of mm object handles to object pointers. */
423 struct idr object_idr; 424 struct idr object_idr;
424 /** Lock for synchronization of access to object_idr. */ 425 /** Lock for synchronization of access to object_idr. */
425 spinlock_t table_lock; 426 spinlock_t table_lock;
426 427
427 struct file *filp; 428 struct file *filp;
428 void *driver_priv; 429 void *driver_priv;
429 430
430 int is_master; /* this file private is a master for a minor */ 431 int is_master; /* this file private is a master for a minor */
431 struct drm_master *master; /* master this node is currently associated with 432 struct drm_master *master; /* master this node is currently associated with
432 N.B. not always minor->master */ 433 N.B. not always minor->master */
433 struct list_head fbs; 434 struct list_head fbs;
434 435
435 wait_queue_head_t event_wait; 436 wait_queue_head_t event_wait;
436 struct list_head event_list; 437 struct list_head event_list;
437 int event_space; 438 int event_space;
438 }; 439 };
439 440
440 /** Wait queue */ 441 /** Wait queue */
441 struct drm_queue { 442 struct drm_queue {
442 atomic_t use_count; /**< Outstanding uses (+1) */ 443 atomic_t use_count; /**< Outstanding uses (+1) */
443 atomic_t finalization; /**< Finalization in progress */ 444 atomic_t finalization; /**< Finalization in progress */
444 atomic_t block_count; /**< Count of processes waiting */ 445 atomic_t block_count; /**< Count of processes waiting */
445 atomic_t block_read; /**< Queue blocked for reads */ 446 atomic_t block_read; /**< Queue blocked for reads */
446 wait_queue_head_t read_queue; /**< Processes waiting on block_read */ 447 wait_queue_head_t read_queue; /**< Processes waiting on block_read */
447 atomic_t block_write; /**< Queue blocked for writes */ 448 atomic_t block_write; /**< Queue blocked for writes */
448 wait_queue_head_t write_queue; /**< Processes waiting on block_write */ 449 wait_queue_head_t write_queue; /**< Processes waiting on block_write */
449 atomic_t total_queued; /**< Total queued statistic */ 450 atomic_t total_queued; /**< Total queued statistic */
450 atomic_t total_flushed; /**< Total flushes statistic */ 451 atomic_t total_flushed; /**< Total flushes statistic */
451 atomic_t total_locks; /**< Total locks statistics */ 452 atomic_t total_locks; /**< Total locks statistics */
452 enum drm_ctx_flags flags; /**< Context preserving and 2D-only */ 453 enum drm_ctx_flags flags; /**< Context preserving and 2D-only */
453 struct drm_waitlist waitlist; /**< Pending buffers */ 454 struct drm_waitlist waitlist; /**< Pending buffers */
454 wait_queue_head_t flush_queue; /**< Processes waiting until flush */ 455 wait_queue_head_t flush_queue; /**< Processes waiting until flush */
455 }; 456 };
456 457
457 /** 458 /**
458 * Lock data. 459 * Lock data.
459 */ 460 */
460 struct drm_lock_data { 461 struct drm_lock_data {
461 struct drm_hw_lock *hw_lock; /**< Hardware lock */ 462 struct drm_hw_lock *hw_lock; /**< Hardware lock */
462 /** Private of lock holder's file (NULL=kernel) */ 463 /** Private of lock holder's file (NULL=kernel) */
463 struct drm_file *file_priv; 464 struct drm_file *file_priv;
464 wait_queue_head_t lock_queue; /**< Queue of blocked processes */ 465 wait_queue_head_t lock_queue; /**< Queue of blocked processes */
465 unsigned long lock_time; /**< Time of last lock in jiffies */ 466 unsigned long lock_time; /**< Time of last lock in jiffies */
466 spinlock_t spinlock; 467 spinlock_t spinlock;
467 uint32_t kernel_waiters; 468 uint32_t kernel_waiters;
468 uint32_t user_waiters; 469 uint32_t user_waiters;
469 int idle_has_lock; 470 int idle_has_lock;
470 }; 471 };
471 472
472 /** 473 /**
473 * DMA data. 474 * DMA data.
474 */ 475 */
475 struct drm_device_dma { 476 struct drm_device_dma {
476 477
477 struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ 478 struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
478 int buf_count; /**< total number of buffers */ 479 int buf_count; /**< total number of buffers */
479 struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ 480 struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
480 int seg_count; 481 int seg_count;
481 int page_count; /**< number of pages */ 482 int page_count; /**< number of pages */
482 unsigned long *pagelist; /**< page list */ 483 unsigned long *pagelist; /**< page list */
483 unsigned long byte_count; 484 unsigned long byte_count;
484 enum { 485 enum {
485 _DRM_DMA_USE_AGP = 0x01, 486 _DRM_DMA_USE_AGP = 0x01,
486 _DRM_DMA_USE_SG = 0x02, 487 _DRM_DMA_USE_SG = 0x02,
487 _DRM_DMA_USE_FB = 0x04, 488 _DRM_DMA_USE_FB = 0x04,
488 _DRM_DMA_USE_PCI_RO = 0x08 489 _DRM_DMA_USE_PCI_RO = 0x08
489 } flags; 490 } flags;
490 491
491 }; 492 };
492 493
493 /** 494 /**
494 * AGP memory entry. Stored as a doubly linked list. 495 * AGP memory entry. Stored as a doubly linked list.
495 */ 496 */
496 struct drm_agp_mem { 497 struct drm_agp_mem {
497 unsigned long handle; /**< handle */ 498 unsigned long handle; /**< handle */
498 DRM_AGP_MEM *memory; 499 DRM_AGP_MEM *memory;
499 unsigned long bound; /**< address */ 500 unsigned long bound; /**< address */
500 int pages; 501 int pages;
501 struct list_head head; 502 struct list_head head;
502 }; 503 };
503 504
504 /** 505 /**
505 * AGP data. 506 * AGP data.
506 * 507 *
507 * \sa drm_agp_init() and drm_device::agp. 508 * \sa drm_agp_init() and drm_device::agp.
508 */ 509 */
509 struct drm_agp_head { 510 struct drm_agp_head {
510 DRM_AGP_KERN agp_info; /**< AGP device information */ 511 DRM_AGP_KERN agp_info; /**< AGP device information */
511 struct list_head memory; 512 struct list_head memory;
512 unsigned long mode; /**< AGP mode */ 513 unsigned long mode; /**< AGP mode */
513 struct agp_bridge_data *bridge; 514 struct agp_bridge_data *bridge;
514 int enabled; /**< whether the AGP bus has been enabled */ 515 int enabled; /**< whether the AGP bus has been enabled */
515 int acquired; /**< whether the AGP device has been acquired */ 516 int acquired; /**< whether the AGP device has been acquired */
516 unsigned long base; 517 unsigned long base;
517 int agp_mtrr; 518 int agp_mtrr;
518 int cant_use_aperture; 519 int cant_use_aperture;
519 unsigned long page_mask; 520 unsigned long page_mask;
520 }; 521 };
521 522
522 /** 523 /**
523 * Scatter-gather memory. 524 * Scatter-gather memory.
524 */ 525 */
525 struct drm_sg_mem { 526 struct drm_sg_mem {
526 unsigned long handle; 527 unsigned long handle;
527 void *virtual; 528 void *virtual;
528 int pages; 529 int pages;
529 struct page **pagelist; 530 struct page **pagelist;
530 dma_addr_t *busaddr; 531 dma_addr_t *busaddr;
531 }; 532 };
532 533
533 struct drm_sigdata { 534 struct drm_sigdata {
534 int context; 535 int context;
535 struct drm_hw_lock *lock; 536 struct drm_hw_lock *lock;
536 }; 537 };
537 538
538 539
539 /** 540 /**
540 * Kernel side of a mapping 541 * Kernel side of a mapping
541 */ 542 */
542 struct drm_local_map { 543 struct drm_local_map {
543 resource_size_t offset; /**< Requested physical address (0 for SAREA)*/ 544 resource_size_t offset; /**< Requested physical address (0 for SAREA)*/
544 unsigned long size; /**< Requested physical size (bytes) */ 545 unsigned long size; /**< Requested physical size (bytes) */
545 enum drm_map_type type; /**< Type of memory to map */ 546 enum drm_map_type type; /**< Type of memory to map */
546 enum drm_map_flags flags; /**< Flags */ 547 enum drm_map_flags flags; /**< Flags */
547 void *handle; /**< User-space: "Handle" to pass to mmap() */ 548 void *handle; /**< User-space: "Handle" to pass to mmap() */
548 /**< Kernel-space: kernel-virtual address */ 549 /**< Kernel-space: kernel-virtual address */
549 int mtrr; /**< MTRR slot used */ 550 int mtrr; /**< MTRR slot used */
550 }; 551 };
551 552
552 typedef struct drm_local_map drm_local_map_t; 553 typedef struct drm_local_map drm_local_map_t;
553 554
554 /** 555 /**
555 * Mappings list 556 * Mappings list
556 */ 557 */
557 struct drm_map_list { 558 struct drm_map_list {
558 struct list_head head; /**< list head */ 559 struct list_head head; /**< list head */
559 struct drm_hash_item hash; 560 struct drm_hash_item hash;
560 struct drm_local_map *map; /**< mapping */ 561 struct drm_local_map *map; /**< mapping */
561 uint64_t user_token; 562 uint64_t user_token;
562 struct drm_master *master; 563 struct drm_master *master;
563 struct drm_mm_node *file_offset_node; /**< fake offset */ 564 struct drm_mm_node *file_offset_node; /**< fake offset */
564 }; 565 };
565 566
566 /** 567 /**
567 * Context handle list 568 * Context handle list
568 */ 569 */
569 struct drm_ctx_list { 570 struct drm_ctx_list {
570 struct list_head head; /**< list head */ 571 struct list_head head; /**< list head */
571 drm_context_t handle; /**< context handle */ 572 drm_context_t handle; /**< context handle */
572 struct drm_file *tag; /**< associated fd private data */ 573 struct drm_file *tag; /**< associated fd private data */
573 }; 574 };
574 575
575 /* location of GART table */ 576 /* location of GART table */
576 #define DRM_ATI_GART_MAIN 1 577 #define DRM_ATI_GART_MAIN 1
577 #define DRM_ATI_GART_FB 2 578 #define DRM_ATI_GART_FB 2
578 579
579 #define DRM_ATI_GART_PCI 1 580 #define DRM_ATI_GART_PCI 1
580 #define DRM_ATI_GART_PCIE 2 581 #define DRM_ATI_GART_PCIE 2
581 #define DRM_ATI_GART_IGP 3 582 #define DRM_ATI_GART_IGP 3
582 583
583 struct drm_ati_pcigart_info { 584 struct drm_ati_pcigart_info {
584 int gart_table_location; 585 int gart_table_location;
585 int gart_reg_if; 586 int gart_reg_if;
586 void *addr; 587 void *addr;
587 dma_addr_t bus_addr; 588 dma_addr_t bus_addr;
588 dma_addr_t table_mask; 589 dma_addr_t table_mask;
589 struct drm_dma_handle *table_handle; 590 struct drm_dma_handle *table_handle;
590 struct drm_local_map mapping; 591 struct drm_local_map mapping;
591 int table_size; 592 int table_size;
592 }; 593 };
593 594
594 /** 595 /**
595 * GEM specific mm private for tracking GEM objects 596 * GEM specific mm private for tracking GEM objects
596 */ 597 */
597 struct drm_gem_mm { 598 struct drm_gem_mm {
598 struct drm_mm offset_manager; /**< Offset mgmt for buffer objects */ 599 struct drm_mm offset_manager; /**< Offset mgmt for buffer objects */
599 struct drm_open_hash offset_hash; /**< User token hash table for maps */ 600 struct drm_open_hash offset_hash; /**< User token hash table for maps */
600 }; 601 };
601 602
602 /** 603 /**
603 * This structure defines the drm_mm memory object, which will be used by the 604 * This structure defines the drm_mm memory object, which will be used by the
604 * DRM for its buffer objects. 605 * DRM for its buffer objects.
605 */ 606 */
606 struct drm_gem_object { 607 struct drm_gem_object {
607 /** Reference count of this object */ 608 /** Reference count of this object */
608 struct kref refcount; 609 struct kref refcount;
609 610
610 /** Handle count of this object. Each handle also holds a reference */ 611 /** Handle count of this object. Each handle also holds a reference */
611 atomic_t handle_count; /* number of handles on this object */ 612 atomic_t handle_count; /* number of handles on this object */
612 613
613 /** Related drm device */ 614 /** Related drm device */
614 struct drm_device *dev; 615 struct drm_device *dev;
615 616
616 /** File representing the shmem storage */ 617 /** File representing the shmem storage */
617 struct file *filp; 618 struct file *filp;
618 619
619 /* Mapping info for this object */ 620 /* Mapping info for this object */
620 struct drm_map_list map_list; 621 struct drm_map_list map_list;
621 622
622 /** 623 /**
623 * Size of the object, in bytes. Immutable over the object's 624 * Size of the object, in bytes. Immutable over the object's
624 * lifetime. 625 * lifetime.
625 */ 626 */
626 size_t size; 627 size_t size;
627 628
628 /** 629 /**
629 * Global name for this object, starts at 1. 0 means unnamed. 630 * Global name for this object, starts at 1. 0 means unnamed.
630 * Access is covered by the object_name_lock in the related drm_device 631 * Access is covered by the object_name_lock in the related drm_device
631 */ 632 */
632 int name; 633 int name;
633 634
634 /** 635 /**
635 * Memory domains. These monitor which caches contain read/write data 636 * Memory domains. These monitor which caches contain read/write data
636 * related to the object. When transitioning from one set of domains 637 * related to the object. When transitioning from one set of domains
637 * to another, the driver is called to ensure that caches are suitably 638 * to another, the driver is called to ensure that caches are suitably
638 * flushed and invalidated 639 * flushed and invalidated
639 */ 640 */
640 uint32_t read_domains; 641 uint32_t read_domains;
641 uint32_t write_domain; 642 uint32_t write_domain;
642 643
643 /** 644 /**
644 * While validating an exec operation, the 645 * While validating an exec operation, the
645 * new read/write domain values are computed here. 646 * new read/write domain values are computed here.
646 * They will be transferred to the above values 647 * They will be transferred to the above values
647 * at the point that any cache flushing occurs 648 * at the point that any cache flushing occurs
648 */ 649 */
649 uint32_t pending_read_domains; 650 uint32_t pending_read_domains;
650 uint32_t pending_write_domain; 651 uint32_t pending_write_domain;
651 652
652 void *driver_private; 653 void *driver_private;
653 }; 654 };
654 655
655 #include "drm_crtc.h" 656 #include "drm_crtc.h"
656 657
657 /* per-master structure */ 658 /* per-master structure */
658 struct drm_master { 659 struct drm_master {
659 660
660 struct kref refcount; /* refcount for this master */ 661 struct kref refcount; /* refcount for this master */
661 662
662 struct list_head head; /**< each minor contains a list of masters */ 663 struct list_head head; /**< each minor contains a list of masters */
663 struct drm_minor *minor; /**< link back to minor we are a master for */ 664 struct drm_minor *minor; /**< link back to minor we are a master for */
664 665
665 char *unique; /**< Unique identifier: e.g., busid */ 666 char *unique; /**< Unique identifier: e.g., busid */
666 int unique_len; /**< Length of unique field */ 667 int unique_len; /**< Length of unique field */
667 int unique_size; /**< amount allocated */ 668 int unique_size; /**< amount allocated */
668 669
669 int blocked; /**< Blocked due to VC switch? */ 670 int blocked; /**< Blocked due to VC switch? */
670 671
671 /** \name Authentication */ 672 /** \name Authentication */
672 /*@{ */ 673 /*@{ */
673 struct drm_open_hash magiclist; 674 struct drm_open_hash magiclist;
674 struct list_head magicfree; 675 struct list_head magicfree;
675 /*@} */ 676 /*@} */
676 677
677 struct drm_lock_data lock; /**< Information on hardware lock */ 678 struct drm_lock_data lock; /**< Information on hardware lock */
678 679
679 void *driver_priv; /**< Private structure for driver to use */ 680 void *driver_priv; /**< Private structure for driver to use */
680 }; 681 };
681 682
682 /* Size of ringbuffer for vblank timestamps. Just double-buffer 683 /* Size of ringbuffer for vblank timestamps. Just double-buffer
683 * in initial implementation. 684 * in initial implementation.
684 */ 685 */
685 #define DRM_VBLANKTIME_RBSIZE 2 686 #define DRM_VBLANKTIME_RBSIZE 2
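As a hedged illustration of how this ring size is meant to be used (the accessor name is hypothetical; dev->_vblank_time is the flat per-crtc timestamp array declared further down in struct drm_device):

/* Hypothetical accessor: slot holding the timestamp of vblank number
 * "count" on "crtc", assuming _vblank_time was allocated with
 * num_crtcs * DRM_VBLANKTIME_RBSIZE entries. */
static struct timeval *foo_vblanktimestamp(struct drm_device *dev,
					   int crtc, u32 count)
{
	return &dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE +
				  (count % DRM_VBLANKTIME_RBSIZE)];
}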
686 687
687 /* Flags and return codes for get_vblank_timestamp() driver function. */ 688 /* Flags and return codes for get_vblank_timestamp() driver function. */
688 #define DRM_CALLED_FROM_VBLIRQ 1 689 #define DRM_CALLED_FROM_VBLIRQ 1
689 #define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) 690 #define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
690 #define DRM_VBLANKTIME_INVBL (1 << 1) 691 #define DRM_VBLANKTIME_INVBL (1 << 1)
691 692
692 /* get_scanout_position() return flags */ 693 /* get_scanout_position() return flags */
693 #define DRM_SCANOUTPOS_VALID (1 << 0) 694 #define DRM_SCANOUTPOS_VALID (1 << 0)
694 #define DRM_SCANOUTPOS_INVBL (1 << 1) 695 #define DRM_SCANOUTPOS_INVBL (1 << 1)
695 #define DRM_SCANOUTPOS_ACCURATE (1 << 2) 696 #define DRM_SCANOUTPOS_ACCURATE (1 << 2)
696 697
697 struct drm_bus { 698 struct drm_bus {
698 int bus_type; 699 int bus_type;
699 int (*get_irq)(struct drm_device *dev); 700 int (*get_irq)(struct drm_device *dev);
700 const char *(*get_name)(struct drm_device *dev); 701 const char *(*get_name)(struct drm_device *dev);
701 int (*set_busid)(struct drm_device *dev, struct drm_master *master); 702 int (*set_busid)(struct drm_device *dev, struct drm_master *master);
702 int (*set_unique)(struct drm_device *dev, struct drm_master *master, 703 int (*set_unique)(struct drm_device *dev, struct drm_master *master,
703 struct drm_unique *unique); 704 struct drm_unique *unique);
704 int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p); 705 int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
705 /* hooks that are for PCI */ 706 /* hooks that are for PCI */
706 int (*agp_init)(struct drm_device *dev); 707 int (*agp_init)(struct drm_device *dev);
707 708
708 }; 709 };
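A hedged sketch of a PCI-backed instance of this abstraction; all foo_* names are placeholders and only two hooks are filled in:

static int foo_pci_get_irq(struct drm_device *dev)
{
	return dev->pdev->irq;
}

static const char *foo_pci_get_name(struct drm_device *dev)
{
	return pci_name(dev->pdev);
}

static struct drm_bus foo_pci_bus = {
	.get_irq = foo_pci_get_irq,
	.get_name = foo_pci_get_name,
	/* set_busid, set_unique, irq_by_busid, agp_init as needed */
};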
709 710
710 /** 711 /**
711 * DRM driver structure. This structure represents the common code for 712 * DRM driver structure. This structure represents the common code for
712 * a family of cards. There will be one drm_device for each card present 713 * a family of cards. There will be one drm_device for each card present
713 * in this family. (See the hedged hook sketch after this structure.) 714 * in this family. (See the hedged hook sketch after this structure.)
714 */ 715 */
715 struct drm_driver { 716 struct drm_driver {
716 int (*load) (struct drm_device *, unsigned long flags); 717 int (*load) (struct drm_device *, unsigned long flags);
717 int (*firstopen) (struct drm_device *); 718 int (*firstopen) (struct drm_device *);
718 int (*open) (struct drm_device *, struct drm_file *); 719 int (*open) (struct drm_device *, struct drm_file *);
719 void (*preclose) (struct drm_device *, struct drm_file *file_priv); 720 void (*preclose) (struct drm_device *, struct drm_file *file_priv);
720 void (*postclose) (struct drm_device *, struct drm_file *); 721 void (*postclose) (struct drm_device *, struct drm_file *);
721 void (*lastclose) (struct drm_device *); 722 void (*lastclose) (struct drm_device *);
722 int (*unload) (struct drm_device *); 723 int (*unload) (struct drm_device *);
723 int (*suspend) (struct drm_device *, pm_message_t state); 724 int (*suspend) (struct drm_device *, pm_message_t state);
724 int (*resume) (struct drm_device *); 725 int (*resume) (struct drm_device *);
725 int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); 726 int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
726 int (*dma_quiescent) (struct drm_device *); 727 int (*dma_quiescent) (struct drm_device *);
727 int (*context_dtor) (struct drm_device *dev, int context); 728 int (*context_dtor) (struct drm_device *dev, int context);
728 729
729 /** 730 /**
730 * get_vblank_counter - get raw hardware vblank counter 731 * get_vblank_counter - get raw hardware vblank counter
731 * @dev: DRM device 732 * @dev: DRM device
732 * @crtc: counter to fetch 733 * @crtc: counter to fetch
733 * 734 *
734 * Driver callback for fetching a raw hardware vblank counter 735 * Driver callback for fetching a raw hardware vblank counter
735 * for @crtc. If a device doesn't have a hardware counter, the 736 * for @crtc. If a device doesn't have a hardware counter, the
736 * driver can simply return the value of drm_vblank_count and 737 * driver can simply return the value of drm_vblank_count and
737 * make the enable_vblank() and disable_vblank() hooks into no-ops, 738 * make the enable_vblank() and disable_vblank() hooks into no-ops,
738 * leaving interrupts enabled at all times. 739 * leaving interrupts enabled at all times.
739 * 740 *
740 * Wraparound handling and loss of events due to modesetting are dealt 741 * Wraparound handling and loss of events due to modesetting are dealt
741 * with in the DRM core code. 742 * with in the DRM core code.
742 * 743 *
743 * RETURNS 744 * RETURNS
744 * Raw vblank counter value. 745 * Raw vblank counter value.
745 */ 746 */
746 u32 (*get_vblank_counter) (struct drm_device *dev, int crtc); 747 u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
747 748
748 /** 749 /**
749 * enable_vblank - enable vblank interrupt events 750 * enable_vblank - enable vblank interrupt events
750 * @dev: DRM device 751 * @dev: DRM device
751 * @crtc: which irq to enable 752 * @crtc: which irq to enable
752 * 753 *
753 * Enable vblank interrupts for @crtc. If the device doesn't have 754 * Enable vblank interrupts for @crtc. If the device doesn't have
754 * a hardware vblank counter, this routine should be a no-op, since 755 * a hardware vblank counter, this routine should be a no-op, since
755 * interrupts will have to stay on to keep the count accurate. 756 * interrupts will have to stay on to keep the count accurate.
756 * 757 *
757 * RETURNS 758 * RETURNS
758 * Zero on success, appropriate errno if the given @crtc's vblank 759 * Zero on success, appropriate errno if the given @crtc's vblank
759 * interrupt cannot be enabled. 760 * interrupt cannot be enabled.
760 */ 761 */
761 int (*enable_vblank) (struct drm_device *dev, int crtc); 762 int (*enable_vblank) (struct drm_device *dev, int crtc);
762 763
763 /** 764 /**
764 * disable_vblank - disable vblank interrupt events 765 * disable_vblank - disable vblank interrupt events
765 * @dev: DRM device 766 * @dev: DRM device
766 * @crtc: which irq to enable 767 * @crtc: which irq to enable
767 * 768 *
768 * Disable vblank interrupts for @crtc. If the device doesn't have 769 * Disable vblank interrupts for @crtc. If the device doesn't have
769 * a hardware vblank counter, this routine should be a no-op, since 770 * a hardware vblank counter, this routine should be a no-op, since
770 * interrupts will have to stay on to keep the count accurate. 771 * interrupts will have to stay on to keep the count accurate.
771 */ 772 */
772 void (*disable_vblank) (struct drm_device *dev, int crtc); 773 void (*disable_vblank) (struct drm_device *dev, int crtc);
773 774
774 /** 775 /**
775 * Called by \c drm_device_is_agp. Typically used to determine if a 776 * Called by \c drm_device_is_agp. Typically used to determine if a
776 * card is really attached to AGP or not. 777 * card is really attached to AGP or not.
777 * 778 *
778 * \param dev DRM device handle 779 * \param dev DRM device handle
779 * 780 *
780 * \returns 781 * \returns
781 * One of three values is returned depending on whether or not the 782 * One of three values is returned depending on whether or not the
782 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP 783 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
783 * (return of 1), or may or may not be AGP (return of 2). 784 * (return of 1), or may or may not be AGP (return of 2).
784 */ 785 */
785 int (*device_is_agp) (struct drm_device *dev); 786 int (*device_is_agp) (struct drm_device *dev);
786 787
787 /** 788 /**
788 * Called by vblank timestamping code. 789 * Called by vblank timestamping code.
789 * 790 *
790 * Return the current display scanout position from a crtc. 791 * Return the current display scanout position from a crtc.
791 * 792 *
792 * \param dev DRM device. 793 * \param dev DRM device.
793 * \param crtc Id of the crtc to query. 794 * \param crtc Id of the crtc to query.
794 * \param *vpos Target location for current vertical scanout position. 795 * \param *vpos Target location for current vertical scanout position.
795 * \param *hpos Target location for current horizontal scanout position. 796 * \param *hpos Target location for current horizontal scanout position.
796 * 797 *
797 * Returns vpos as a positive number while in active scanout area. 798 * Returns vpos as a positive number while in active scanout area.
798 * Returns vpos as a negative number inside vblank, counting the number 799 * Returns vpos as a negative number inside vblank, counting the number
799 * of scanlines to go until end of vblank, e.g., -1 means "one scanline 800 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
800 * until start of active scanout / end of vblank." 801 * until start of active scanout / end of vblank."
801 * 802 *
802 * \return Flags, or'ed together as follows: 803 * \return Flags, or'ed together as follows:
803 * 804 *
804 * DRM_SCANOUTPOS_VALID = Query successful. 805 * DRM_SCANOUTPOS_VALID = Query successful.
805 * DRM_SCANOUTPOS_INVBL = Inside vblank. 806 * DRM_SCANOUTPOS_INVBL = Inside vblank.
806 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of 807 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
807 * this flag means that returned position may be offset by a constant 808 * this flag means that returned position may be offset by a constant
808 * but unknown small number of scanlines wrt. real scanout position. 809 * but unknown small number of scanlines wrt. real scanout position.
809 * 810 *
810 */ 811 */
811 int (*get_scanout_position) (struct drm_device *dev, int crtc, 812 int (*get_scanout_position) (struct drm_device *dev, int crtc,
812 int *vpos, int *hpos); 813 int *vpos, int *hpos);
813 814
814 /** 815 /**
815 * Called by \c drm_get_last_vbltimestamp. Should return a precise 816 * Called by \c drm_get_last_vbltimestamp. Should return a precise
816 * timestamp when the most recent VBLANK interval ended or will end. 817 * timestamp when the most recent VBLANK interval ended or will end.
817 * 818 *
818 * Specifically, the timestamp in @vblank_time should correspond as 819 * Specifically, the timestamp in @vblank_time should correspond as
819 * closely as possible to the time when the first video scanline of 820 * closely as possible to the time when the first video scanline of
820 * the video frame after the end of VBLANK will start scanning out, 821 * the video frame after the end of VBLANK will start scanning out,
821 * the time immediately after the end of the VBLANK interval. If the 822 * the time immediately after the end of the VBLANK interval. If the
822 * @crtc is currently inside VBLANK, this will be a time in the future. 823 * @crtc is currently inside VBLANK, this will be a time in the future.
823 * If the @crtc is currently scanning out a frame, this will be the 824 * If the @crtc is currently scanning out a frame, this will be the
824 * past start time of the current scanout. This is meant to adhere 825 * past start time of the current scanout. This is meant to adhere
825 * to the OpenML OML_sync_control extension specification. 826 * to the OpenML OML_sync_control extension specification.
826 * 827 *
827 * \param dev DRM device handle. 828 * \param dev DRM device handle.
828 * \param crtc crtc for which timestamp should be returned. 829 * \param crtc crtc for which timestamp should be returned.
829 * \param *max_error Maximum allowable timestamp error in nanoseconds. 830 * \param *max_error Maximum allowable timestamp error in nanoseconds.
830 * Implementations should strive to provide a timestamp 831 * Implementations should strive to provide a timestamp
831 * with an error of at most *max_error nanoseconds. 832 * with an error of at most *max_error nanoseconds.
832 * On return, holds the true upper bound on the timestamp error. 833 * On return, holds the true upper bound on the timestamp error.
833 * \param *vblank_time Target location for returned vblank timestamp. 834 * \param *vblank_time Target location for returned vblank timestamp.
834 * \param flags 0 = Defaults, no special treatment needed. 835 * \param flags 0 = Defaults, no special treatment needed.
835 * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank 836 * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
836 * irq handler. Some drivers need to apply some workarounds 837 * irq handler. Some drivers need to apply some workarounds
837 * for gpu-specific vblank irq quirks if flag is set. 838 * for gpu-specific vblank irq quirks if flag is set.
838 * 839 *
839 * \returns 840 * \returns
840 * Zero if timestamping isn't supported in the current display mode, or a 841 * Zero if timestamping isn't supported in the current display mode, or a
841 * negative number on failure. A positive status code on success, 842 * negative number on failure. A positive status code on success,
842 * which describes how the vblank_time timestamp was computed. 843 * which describes how the vblank_time timestamp was computed.
843 */ 844 */
844 int (*get_vblank_timestamp) (struct drm_device *dev, int crtc, 845 int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
845 int *max_error, 846 int *max_error,
846 struct timeval *vblank_time, 847 struct timeval *vblank_time,
847 unsigned flags); 848 unsigned flags);
848 849
849 /* these have to be filled in */ 850 /* these have to be filled in */
850 851
851 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); 852 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
852 void (*irq_preinstall) (struct drm_device *dev); 853 void (*irq_preinstall) (struct drm_device *dev);
853 int (*irq_postinstall) (struct drm_device *dev); 854 int (*irq_postinstall) (struct drm_device *dev);
854 void (*irq_uninstall) (struct drm_device *dev); 855 void (*irq_uninstall) (struct drm_device *dev);
855 void (*reclaim_buffers) (struct drm_device *dev, 856 void (*reclaim_buffers) (struct drm_device *dev,
856 struct drm_file * file_priv); 857 struct drm_file * file_priv);
857 void (*reclaim_buffers_locked) (struct drm_device *dev, 858 void (*reclaim_buffers_locked) (struct drm_device *dev,
858 struct drm_file *file_priv); 859 struct drm_file *file_priv);
859 void (*reclaim_buffers_idlelocked) (struct drm_device *dev, 860 void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
860 struct drm_file *file_priv); 861 struct drm_file *file_priv);
861 void (*set_version) (struct drm_device *dev, 862 void (*set_version) (struct drm_device *dev,
862 struct drm_set_version *sv); 863 struct drm_set_version *sv);
863 864
864 /* Master routines */ 865 /* Master routines */
865 int (*master_create)(struct drm_device *dev, struct drm_master *master); 866 int (*master_create)(struct drm_device *dev, struct drm_master *master);
866 void (*master_destroy)(struct drm_device *dev, struct drm_master *master); 867 void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
867 /** 868 /**
868 * master_set is called whenever the minor master is set. 869 * master_set is called whenever the minor master is set.
869 * master_drop is called whenever the minor master is dropped. 870 * master_drop is called whenever the minor master is dropped.
870 */ 871 */
871 872
872 int (*master_set)(struct drm_device *dev, struct drm_file *file_priv, 873 int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
873 bool from_open); 874 bool from_open);
874 void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv, 875 void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
875 bool from_release); 876 bool from_release);
876 877
877 int (*debugfs_init)(struct drm_minor *minor); 878 int (*debugfs_init)(struct drm_minor *minor);
878 void (*debugfs_cleanup)(struct drm_minor *minor); 879 void (*debugfs_cleanup)(struct drm_minor *minor);
879 880
880 /** 881 /**
881 * Driver-specific constructor for drm_gem_objects, to set up 882 * Driver-specific constructor for drm_gem_objects, to set up
882 * obj->driver_private. 883 * obj->driver_private.
883 * 884 *
884 * Returns 0 on success. 885 * Returns 0 on success.
885 */ 886 */
886 int (*gem_init_object) (struct drm_gem_object *obj); 887 int (*gem_init_object) (struct drm_gem_object *obj);
887 void (*gem_free_object) (struct drm_gem_object *obj); 888 void (*gem_free_object) (struct drm_gem_object *obj);
888 889
889 /* vga arb irq handler */ 890 /* vga arb irq handler */
890 void (*vgaarb_irq)(struct drm_device *dev, bool state); 891 void (*vgaarb_irq)(struct drm_device *dev, bool state);
891 892
892 /* dumb alloc support */ 893 /* dumb alloc support */
893 int (*dumb_create)(struct drm_file *file_priv, 894 int (*dumb_create)(struct drm_file *file_priv,
894 struct drm_device *dev, 895 struct drm_device *dev,
895 struct drm_mode_create_dumb *args); 896 struct drm_mode_create_dumb *args);
896 int (*dumb_map_offset)(struct drm_file *file_priv, 897 int (*dumb_map_offset)(struct drm_file *file_priv,
897 struct drm_device *dev, uint32_t handle, 898 struct drm_device *dev, uint32_t handle,
898 uint64_t *offset); 899 uint64_t *offset);
899 int (*dumb_destroy)(struct drm_file *file_priv, 900 int (*dumb_destroy)(struct drm_file *file_priv,
900 struct drm_device *dev, 901 struct drm_device *dev,
901 uint32_t handle); 902 uint32_t handle);
902 903
903 /* Driver private ops for this object */ 904 /* Driver private ops for this object */
904 struct vm_operations_struct *gem_vm_ops; 905 struct vm_operations_struct *gem_vm_ops;
905 906
906 int major; 907 int major;
907 int minor; 908 int minor;
908 int patchlevel; 909 int patchlevel;
909 char *name; 910 char *name;
910 char *desc; 911 char *desc;
911 char *date; 912 char *date;
912 913
913 u32 driver_features; 914 u32 driver_features;
914 int dev_priv_size; 915 int dev_priv_size;
915 struct drm_ioctl_desc *ioctls; 916 struct drm_ioctl_desc *ioctls;
916 int num_ioctls; 917 int num_ioctls;
917 struct file_operations fops; 918 struct file_operations fops;
918 union { 919 union {
919 struct pci_driver *pci; 920 struct pci_driver *pci;
920 struct platform_device *platform_device; 921 struct platform_device *platform_device;
921 struct usb_driver *usb; 922 struct usb_driver *usb;
922 } kdriver; 923 } kdriver;
923 struct drm_bus *bus; 924 struct drm_bus *bus;
924 925
925 /* List of devices hanging off this driver */ 926 /* List of devices hanging off this driver */
926 struct list_head device_list; 927 struct list_head device_list;
927 }; 928 };
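For concreteness, here is the hedged, hypothetical sketch of the vblank-related hooks documented in the structure above. Everything foo_* (private struct, register accessors, FOO_* registers, mode fields, crtc lookup) is a placeholder, and the final hook assumes the core helper drm_calc_vbltimestamp_from_scanoutpos() with the signature drivers of this era pass it:

static u32 foo_get_vblank_counter(struct drm_device *dev, int crtc)
{
	struct foo_priv *priv = dev->dev_private;

	/* Raw hardware frame counter; the core handles wraparound. */
	return foo_read(priv, FOO_FRAME_COUNT(crtc));
}

static int foo_enable_vblank(struct drm_device *dev, int crtc)
{
	struct foo_priv *priv = dev->dev_private;

	foo_write(priv, FOO_INT_MASK(crtc),
		  foo_read(priv, FOO_INT_MASK(crtc)) | FOO_VBLANK_INT);
	return 0;
}

static void foo_disable_vblank(struct drm_device *dev, int crtc)
{
	struct foo_priv *priv = dev->dev_private;

	foo_write(priv, FOO_INT_MASK(crtc),
		  foo_read(priv, FOO_INT_MASK(crtc)) & ~FOO_VBLANK_INT);
}

static int foo_get_scanout_position(struct drm_device *dev, int crtc,
				    int *vpos, int *hpos)
{
	struct foo_priv *priv = dev->dev_private;
	int scanline = foo_read(priv, FOO_SCANLINE(crtc));
	int vdisplay = priv->mode[crtc].vdisplay;	/* active lines */
	int vtotal = priv->mode[crtc].vtotal;		/* total lines  */

	*hpos = 0;			/* no horizontal precision here */
	if (scanline < vdisplay)
		*vpos = scanline;	/* inside active scanout */
	else
		*vpos = scanline - vtotal; /* negative: lines left in vblank,
					    * so -1 means one line to go */

	return DRM_SCANOUTPOS_VALID |
	       (*vpos < 0 ? DRM_SCANOUTPOS_INVBL : 0);
}

static int foo_get_vblank_timestamp(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
{
	struct drm_crtc *refcrtc = foo_lookup_crtc(dev, crtc);

	if (!refcrtc)
		return -EINVAL;
	/* Let the core convert the scanout position into a timestamp. */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     refcrtc);
}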
928 929
929 #define DRM_MINOR_UNASSIGNED 0 930 #define DRM_MINOR_UNASSIGNED 0
930 #define DRM_MINOR_LEGACY 1 931 #define DRM_MINOR_LEGACY 1
931 #define DRM_MINOR_CONTROL 2 932 #define DRM_MINOR_CONTROL 2
932 #define DRM_MINOR_RENDER 3 933 #define DRM_MINOR_RENDER 3
933 934
934 935
935 /** 936 /**
936 * debugfs node list. This structure represents a debugfs file to 937 * debugfs node list. This structure represents a debugfs file to
937 * be created by the drm core 938 * be created by the drm core
938 */ 939 */
939 struct drm_debugfs_list { 940 struct drm_debugfs_list {
940 const char *name; /**< file name */ 941 const char *name; /**< file name */
941 int (*show)(struct seq_file*, void*); /**< show callback */ 942 int (*show)(struct seq_file*, void*); /**< show callback */
942 u32 driver_features; /**< Required driver features for this entry */ 943 u32 driver_features; /**< Required driver features for this entry */
943 }; 944 };
944 945
945 /** 946 /**
946 * debugfs node structure. This structure represents a debugfs file. 947 * debugfs node structure. This structure represents a debugfs file.
947 */ 948 */
948 struct drm_debugfs_node { 949 struct drm_debugfs_node {
949 struct list_head list; 950 struct list_head list;
950 struct drm_minor *minor; 951 struct drm_minor *minor;
951 struct drm_debugfs_list *debugfs_ent; 952 struct drm_debugfs_list *debugfs_ent;
952 struct dentry *dent; 953 struct dentry *dent;
953 }; 954 };
954 955
955 /** 956 /**
956 * Info file list entry. This structure represents a debugfs or proc file to 957 * Info file list entry. This structure represents a debugfs or proc file to
957 * be created by the drm core 958 * be created by the drm core
958 */ 959 */
959 struct drm_info_list { 960 struct drm_info_list {
960 const char *name; /**< file name */ 961 const char *name; /**< file name */
961 int (*show)(struct seq_file*, void*); /**< show callback */ 962 int (*show)(struct seq_file*, void*); /**< show callback */
962 u32 driver_features; /**< Required driver features for this entry */ 963 u32 driver_features; /**< Required driver features for this entry */
963 void *data; 964 void *data;
964 }; 965 };
965 966
966 /** 967 /**
967 * debugfs node structure. This structure represents a debugfs file. 968 * debugfs node structure. This structure represents a debugfs file.
968 */ 969 */
969 struct drm_info_node { 970 struct drm_info_node {
970 struct list_head list; 971 struct list_head list;
971 struct drm_minor *minor; 972 struct drm_minor *minor;
972 struct drm_info_list *info_ent; 973 struct drm_info_list *info_ent;
973 struct dentry *dent; 974 struct dentry *dent;
974 }; 975 };
975 976
976 /** 977 /**
977 * DRM minor structure. This structure represents a drm minor number. 978 * DRM minor structure. This structure represents a drm minor number.
978 */ 979 */
979 struct drm_minor { 980 struct drm_minor {
980 int index; /**< Minor device number */ 981 int index; /**< Minor device number */
981 int type; /**< Control or render */ 982 int type; /**< Control or render */
982 dev_t device; /**< Device number for mknod */ 983 dev_t device; /**< Device number for mknod */
983 struct device kdev; /**< Linux device */ 984 struct device kdev; /**< Linux device */
984 struct drm_device *dev; 985 struct drm_device *dev;
985 986
986 struct proc_dir_entry *proc_root; /**< proc directory entry */ 987 struct proc_dir_entry *proc_root; /**< proc directory entry */
987 struct drm_info_node proc_nodes; 988 struct drm_info_node proc_nodes;
988 struct dentry *debugfs_root; 989 struct dentry *debugfs_root;
989 struct drm_info_node debugfs_nodes; 990 struct drm_info_node debugfs_nodes;
990 991
991 struct drm_master *master; /* currently active master for this node */ 992 struct drm_master *master; /* currently active master for this node */
992 struct list_head master_list; 993 struct list_head master_list;
993 struct drm_mode_group mode_group; 994 struct drm_mode_group mode_group;
994 }; 995 };
995 996
996 struct drm_pending_vblank_event { 997 struct drm_pending_vblank_event {
997 struct drm_pending_event base; 998 struct drm_pending_event base;
998 int pipe; 999 int pipe;
999 struct drm_event_vblank event; 1000 struct drm_event_vblank event;
1000 }; 1001 };
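For illustration, a hedged sketch of how such an event is typically prepared before being queued on dev->vblank_event_list; the helper name is hypothetical, and the kfree cast is the conventional trivial destroy hook for events holding no extra resources:

static struct drm_pending_vblank_event *foo_alloc_vblank_event(
		struct drm_file *file_priv, int pipe)
{
	struct drm_pending_vblank_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return NULL;
	e->pipe = pipe;
	e->event.base.type = DRM_EVENT_VBLANK;
	e->event.base.length = sizeof(e->event);
	e->base.event = &e->event.base;
	e->base.file_priv = file_priv;
	e->base.destroy = (void (*)(struct drm_pending_event *))kfree;
	return e;
}

Callers would typically also charge sizeof(e->event) against file_priv->event_space before queuing, so drm_read() accounting stays balanced.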
1001 1002
1002 /** 1003 /**
1003 * DRM device structure. This structure represents a complete card that 1004 * DRM device structure. This structure represents a complete card that
1004 * may contain multiple heads. 1005 * may contain multiple heads.
1005 */ 1006 */
1006 struct drm_device { 1007 struct drm_device {
1007 struct list_head driver_item; /**< list of devices per driver */ 1008 struct list_head driver_item; /**< list of devices per driver */
1008 char *devname; /**< For /proc/interrupts */ 1009 char *devname; /**< For /proc/interrupts */
1009 int if_version; /**< Highest interface version set */ 1010 int if_version; /**< Highest interface version set */
1010 1011
1011 /** \name Locks */ 1012 /** \name Locks */
1012 /*@{ */ 1013 /*@{ */
1013 spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ 1014 spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */
1014 struct mutex struct_mutex; /**< For others */ 1015 struct mutex struct_mutex; /**< For others */
1015 /*@} */ 1016 /*@} */
1016 1017
1017 /** \name Usage Counters */ 1018 /** \name Usage Counters */
1018 /*@{ */ 1019 /*@{ */
1019 int open_count; /**< Outstanding files open */ 1020 int open_count; /**< Outstanding files open */
1020 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */ 1021 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
1021 atomic_t vma_count; /**< Outstanding vma areas open */ 1022 atomic_t vma_count; /**< Outstanding vma areas open */
1022 int buf_use; /**< Buffers in use -- cannot alloc */ 1023 int buf_use; /**< Buffers in use -- cannot alloc */
1023 atomic_t buf_alloc; /**< Buffer allocation in progress */ 1024 atomic_t buf_alloc; /**< Buffer allocation in progress */
1024 /*@} */ 1025 /*@} */
1025 1026
1026 /** \name Performance counters */ 1027 /** \name Performance counters */
1027 /*@{ */ 1028 /*@{ */
1028 unsigned long counters; 1029 unsigned long counters;
1029 enum drm_stat_type types[15]; 1030 enum drm_stat_type types[15];
1030 atomic_t counts[15]; 1031 atomic_t counts[15];
1031 /*@} */ 1032 /*@} */
1032 1033
1033 struct list_head filelist; 1034 struct list_head filelist;
1034 1035
1035 /** \name Memory management */ 1036 /** \name Memory management */
1036 /*@{ */ 1037 /*@{ */
1037 struct list_head maplist; /**< Linked list of regions */ 1038 struct list_head maplist; /**< Linked list of regions */
1038 int map_count; /**< Number of mappable regions */ 1039 int map_count; /**< Number of mappable regions */
1039 struct drm_open_hash map_hash; /**< User token hash table for maps */ 1040 struct drm_open_hash map_hash; /**< User token hash table for maps */
1040 1041
1041 /** \name Context handle management */ 1042 /** \name Context handle management */
1042 /*@{ */ 1043 /*@{ */
1043 struct list_head ctxlist; /**< Linked list of context handles */ 1044 struct list_head ctxlist; /**< Linked list of context handles */
1044 int ctx_count; /**< Number of context handles */ 1045 int ctx_count; /**< Number of context handles */
1045 struct mutex ctxlist_mutex; /**< For ctxlist */ 1046 struct mutex ctxlist_mutex; /**< For ctxlist */
1046 1047
1047 struct idr ctx_idr; 1048 struct idr ctx_idr;
1048 1049
1049 struct list_head vmalist; /**< List of vmas (for debugging) */ 1050 struct list_head vmalist; /**< List of vmas (for debugging) */
1050 1051
1051 /*@} */ 1052 /*@} */
1052 1053
1053 /** \name DMA queues (contexts) */ 1054 /** \name DMA queues (contexts) */
1054 /*@{ */ 1055 /*@{ */
1055 int queue_count; /**< Number of active DMA queues */ 1056 int queue_count; /**< Number of active DMA queues */
1056 int queue_reserved; /**< Number of reserved DMA queues */ 1057 int queue_reserved; /**< Number of reserved DMA queues */
1057 int queue_slots; /**< Actual length of queuelist */ 1058 int queue_slots; /**< Actual length of queuelist */
1058 struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */ 1059 struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */
1059 struct drm_device_dma *dma; /**< Optional pointer for DMA support */ 1060 struct drm_device_dma *dma; /**< Optional pointer for DMA support */
1060 /*@} */ 1061 /*@} */
1061 1062
1062 /** \name Context support */ 1063 /** \name Context support */
1063 /*@{ */ 1064 /*@{ */
1064 int irq_enabled; /**< True if irq handler is enabled */ 1065 int irq_enabled; /**< True if irq handler is enabled */
1065 __volatile__ long context_flag; /**< Context swapping flag */ 1066 __volatile__ long context_flag; /**< Context swapping flag */
1066 __volatile__ long interrupt_flag; /**< Interruption handler flag */ 1067 __volatile__ long interrupt_flag; /**< Interruption handler flag */
1067 __volatile__ long dma_flag; /**< DMA dispatch flag */ 1068 __volatile__ long dma_flag; /**< DMA dispatch flag */
1068 wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */ 1069 wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
1069 int last_checked; /**< Last context checked for DMA */ 1070 int last_checked; /**< Last context checked for DMA */
1070 int last_context; /**< Last current context */ 1071 int last_context; /**< Last current context */
1071 unsigned long last_switch; /**< jiffies at last context switch */ 1072 unsigned long last_switch; /**< jiffies at last context switch */
1072 /*@} */ 1073 /*@} */
1073 1074
1074 struct work_struct work; 1075 struct work_struct work;
1075 /** \name VBLANK IRQ support */ 1076 /** \name VBLANK IRQ support */
1076 /*@{ */ 1077 /*@{ */
1077 1078
1078 /* 1079 /*
1079 * At load time, disabling the vblank interrupt won't be allowed since 1080 * At load time, disabling the vblank interrupt won't be allowed since
1080 * old clients may not call the modeset ioctl and therefore misbehave. 1081 * old clients may not call the modeset ioctl and therefore misbehave.
1081 * Once the modeset ioctl *has* been called though, we can safely 1082 * Once the modeset ioctl *has* been called though, we can safely
1082 * disable them when unused. 1083 * disable them when unused.
1083 */ 1084 */
1084 int vblank_disable_allowed; 1085 int vblank_disable_allowed;
1085 1086
1086 wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */ 1087 wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
1087 atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ 1088 atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
1088 struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */ 1089 struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
1089 spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ 1090 spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
1090 spinlock_t vbl_lock; 1091 spinlock_t vbl_lock;
1091 atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */ 1092 atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
1092 u32 *last_vblank; /* protected by dev->vbl_lock, used */ 1093 u32 *last_vblank; /* protected by dev->vbl_lock, used */
1093 /* for wraparound handling */ 1094 /* for wraparound handling */
1094 int *vblank_enabled; /* so we don't call enable more than 1095 int *vblank_enabled; /* so we don't call enable more than
1095 once per disable */ 1096 once per disable */
1096 int *vblank_inmodeset; /* Display driver is setting mode */ 1097 int *vblank_inmodeset; /* Display driver is setting mode */
1097 u32 *last_vblank_wait; /* Last vblank seqno waited per CRTC */ 1098 u32 *last_vblank_wait; /* Last vblank seqno waited per CRTC */
1098 struct timer_list vblank_disable_timer; 1099 struct timer_list vblank_disable_timer;
1099 1100
1100 u32 max_vblank_count; /**< size of vblank counter register */ 1101 u32 max_vblank_count; /**< size of vblank counter register */
1101 1102
1102 /** 1103 /**
1103 * List of events 1104 * List of events
1104 */ 1105 */
1105 struct list_head vblank_event_list; 1106 struct list_head vblank_event_list;
1106 spinlock_t event_lock; 1107 spinlock_t event_lock;
1107 1108
1108 /*@} */ 1109 /*@} */
1109 cycles_t ctx_start; 1110 cycles_t ctx_start;
1110 cycles_t lck_start; 1111 cycles_t lck_start;
1111 1112
1112 struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */ 1113 struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
1113 wait_queue_head_t buf_readers; /**< Processes waiting to read */ 1114 wait_queue_head_t buf_readers; /**< Processes waiting to read */
1114 wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */ 1115 wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */
1115 1116
1116 struct drm_agp_head *agp; /**< AGP data */ 1117 struct drm_agp_head *agp; /**< AGP data */
1117 1118
1118 struct device *dev; /**< Device structure */ 1119 struct device *dev; /**< Device structure */
1119 struct pci_dev *pdev; /**< PCI device structure */ 1120 struct pci_dev *pdev; /**< PCI device structure */
1120 int pci_vendor; /**< PCI vendor id */ 1121 int pci_vendor; /**< PCI vendor id */
1121 int pci_device; /**< PCI device id */ 1122 int pci_device; /**< PCI device id */
1122 #ifdef __alpha__ 1123 #ifdef __alpha__
1123 struct pci_controller *hose; 1124 struct pci_controller *hose;
1124 #endif 1125 #endif
1125 1126
1126 struct platform_device *platformdev; /**< Platform device structure */ 1127 struct platform_device *platformdev; /**< Platform device structure */
1127 struct usb_device *usbdev; 1128 struct usb_device *usbdev;
1128 1129
1129 struct drm_sg_mem *sg; /**< Scatter gather memory */ 1130 struct drm_sg_mem *sg; /**< Scatter gather memory */
1130 unsigned int num_crtcs; /**< Number of CRTCs on this device */ 1131 unsigned int num_crtcs; /**< Number of CRTCs on this device */
1131 void *dev_private; /**< device private data */ 1132 void *dev_private; /**< device private data */
1132 void *mm_private; 1133 void *mm_private;
1133 struct address_space *dev_mapping; 1134 struct address_space *dev_mapping;
1134 struct drm_sigdata sigdata; /**< For block_all_signals */ 1135 struct drm_sigdata sigdata; /**< For block_all_signals */
1135 sigset_t sigmask; 1136 sigset_t sigmask;
1136 1137
1137 struct drm_driver *driver; 1138 struct drm_driver *driver;
1138 struct drm_local_map *agp_buffer_map; 1139 struct drm_local_map *agp_buffer_map;
1139 unsigned int agp_buffer_token; 1140 unsigned int agp_buffer_token;
1140 struct drm_minor *control; /**< Control node for card */ 1141 struct drm_minor *control; /**< Control node for card */
1141 struct drm_minor *primary; /**< render type primary screen head */ 1142 struct drm_minor *primary; /**< render type primary screen head */
1142 1143
1143 struct drm_mode_config mode_config; /**< Current mode config */ 1144 struct drm_mode_config mode_config; /**< Current mode config */
1144 1145
1145 /** \name GEM information */ 1146 /** \name GEM information */
1146 /*@{ */ 1147 /*@{ */
1147 spinlock_t object_name_lock; 1148 spinlock_t object_name_lock;
1148 struct idr object_name_idr; 1149 struct idr object_name_idr;
1149 /*@} */ 1150 /*@} */
1150 int switch_power_state; 1151 int switch_power_state;
1151 }; 1152 };
1152 1153
1153 #define DRM_SWITCH_POWER_ON 0 1154 #define DRM_SWITCH_POWER_ON 0
1154 #define DRM_SWITCH_POWER_OFF 1 1155 #define DRM_SWITCH_POWER_OFF 1
1155 #define DRM_SWITCH_POWER_CHANGING 2 1156 #define DRM_SWITCH_POWER_CHANGING 2
1156 1157
1157 static __inline__ int drm_core_check_feature(struct drm_device *dev, 1158 static __inline__ int drm_core_check_feature(struct drm_device *dev,
1158 int feature) 1159 int feature)
1159 { 1160 {
1160 return ((dev->driver->driver_features & feature) ? 1 : 0); 1161 return ((dev->driver->driver_features & feature) ? 1 : 0);
1161 } 1162 }
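Typical usage is a feature guard at the top of a driver path; DRIVER_MODESET below stands in for any of the driver_features bits defined elsewhere in this header, and the ioctl is hypothetical:

static int foo_modeset_only_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	/* Reject the call early if the driver lacks the feature. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;
	/* ... KMS-only work ... */
	return 0;
}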
1162 1163
1163 static inline int drm_dev_to_irq(struct drm_device *dev) 1164 static inline int drm_dev_to_irq(struct drm_device *dev)
1164 { 1165 {
1165 return dev->driver->bus->get_irq(dev); 1166 return dev->driver->bus->get_irq(dev);
1166 } 1167 }
1167 1168
1168 1169
1169 #if __OS_HAS_AGP 1170 #if __OS_HAS_AGP
1170 static inline int drm_core_has_AGP(struct drm_device *dev) 1171 static inline int drm_core_has_AGP(struct drm_device *dev)
1171 { 1172 {
1172 return drm_core_check_feature(dev, DRIVER_USE_AGP); 1173 return drm_core_check_feature(dev, DRIVER_USE_AGP);
1173 } 1174 }
1174 #else 1175 #else
1175 #define drm_core_has_AGP(dev) (0) 1176 #define drm_core_has_AGP(dev) (0)
1176 #endif 1177 #endif
1177 1178
1178 #if __OS_HAS_MTRR 1179 #if __OS_HAS_MTRR
1179 static inline int drm_core_has_MTRR(struct drm_device *dev) 1180 static inline int drm_core_has_MTRR(struct drm_device *dev)
1180 { 1181 {
1181 return drm_core_check_feature(dev, DRIVER_USE_MTRR); 1182 return drm_core_check_feature(dev, DRIVER_USE_MTRR);
1182 } 1183 }
1183 1184
1184 #define DRM_MTRR_WC MTRR_TYPE_WRCOMB 1185 #define DRM_MTRR_WC MTRR_TYPE_WRCOMB
1185 1186
1186 static inline int drm_mtrr_add(unsigned long offset, unsigned long size, 1187 static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
1187 unsigned int flags) 1188 unsigned int flags)
1188 { 1189 {
1189 return mtrr_add(offset, size, flags, 1); 1190 return mtrr_add(offset, size, flags, 1);
1190 } 1191 }
1191 1192
1192 static inline int drm_mtrr_del(int handle, unsigned long offset, 1193 static inline int drm_mtrr_del(int handle, unsigned long offset,
1193 unsigned long size, unsigned int flags) 1194 unsigned long size, unsigned int flags)
1194 { 1195 {
1195 return mtrr_del(handle, offset, size); 1196 return mtrr_del(handle, offset, size);
1196 } 1197 }
1197 1198
1198 #else 1199 #else
1199 #define drm_core_has_MTRR(dev) (0) 1200 #define drm_core_has_MTRR(dev) (0)
1200 1201
1201 #define DRM_MTRR_WC 0 1202 #define DRM_MTRR_WC 0
1202 1203
1203 static inline int drm_mtrr_add(unsigned long offset, unsigned long size, 1204 static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
1204 unsigned int flags) 1205 unsigned int flags)
1205 { 1206 {
1206 return 0; 1207 return 0;
1207 } 1208 }
1208 1209
1209 static inline int drm_mtrr_del(int handle, unsigned long offset, 1210 static inline int drm_mtrr_del(int handle, unsigned long offset,
1210 unsigned long size, unsigned int flags) 1211 unsigned long size, unsigned int flags)
1211 { 1212 {
1212 return 0; 1213 return 0;
1213 } 1214 }
1214 #endif 1215 #endif
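Because both variants above are defined, callers can request write-combining unconditionally and need no #ifdefs of their own. A hedged usage sketch with hypothetical private fields:

static int foo_setup_fb_mtrr(struct drm_device *dev)
{
	struct foo_priv *priv = dev->dev_private;

	/* Mark the framebuffer aperture write-combining; on kernels
	 * without MTRR support this compiles to a no-op returning 0. */
	priv->fb_mtrr = drm_mtrr_add(priv->fb_base, priv->fb_size,
				     DRM_MTRR_WC);
	return 0;	/* MTRR failure is not fatal */
}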
1215 1216
1216 /******************************************************************/ 1217 /******************************************************************/
1217 /** \name Internal function definitions */ 1218 /** \name Internal function definitions */
1218 /*@{*/ 1219 /*@{*/
1219 1220
1220 /* Driver support (drm_drv.h) */ 1221 /* Driver support (drm_drv.h) */
1221 extern long drm_ioctl(struct file *filp, 1222 extern long drm_ioctl(struct file *filp,
1222 unsigned int cmd, unsigned long arg); 1223 unsigned int cmd, unsigned long arg);
1223 extern long drm_compat_ioctl(struct file *filp, 1224 extern long drm_compat_ioctl(struct file *filp,
1224 unsigned int cmd, unsigned long arg); 1225 unsigned int cmd, unsigned long arg);
1225 extern int drm_lastclose(struct drm_device *dev); 1226 extern int drm_lastclose(struct drm_device *dev);
1226 1227
1227 /* Device support (drm_fops.h) */ 1228 /* Device support (drm_fops.h) */
1228 extern struct mutex drm_global_mutex; 1229 extern struct mutex drm_global_mutex;
1229 extern int drm_open(struct inode *inode, struct file *filp); 1230 extern int drm_open(struct inode *inode, struct file *filp);
1230 extern int drm_stub_open(struct inode *inode, struct file *filp); 1231 extern int drm_stub_open(struct inode *inode, struct file *filp);
1231 extern int drm_fasync(int fd, struct file *filp, int on); 1232 extern int drm_fasync(int fd, struct file *filp, int on);
1232 extern ssize_t drm_read(struct file *filp, char __user *buffer, 1233 extern ssize_t drm_read(struct file *filp, char __user *buffer,
1233 size_t count, loff_t *offset); 1234 size_t count, loff_t *offset);
1234 extern int drm_release(struct inode *inode, struct file *filp); 1235 extern int drm_release(struct inode *inode, struct file *filp);
1235 1236
1236 /* Mapping support (drm_vm.h) */ 1237 /* Mapping support (drm_vm.h) */
1237 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); 1238 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
1238 extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); 1239 extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
1239 extern void drm_vm_open_locked(struct vm_area_struct *vma); 1240 extern void drm_vm_open_locked(struct vm_area_struct *vma);
1240 extern void drm_vm_close_locked(struct vm_area_struct *vma); 1241 extern void drm_vm_close_locked(struct vm_area_struct *vma);
1241 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); 1242 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
1242 1243
1243 /* Memory management support (drm_memory.h) */ 1244 /* Memory management support (drm_memory.h) */
1244 #include "drm_memory.h" 1245 #include "drm_memory.h"
1245 extern void drm_mem_init(void); 1246 extern void drm_mem_init(void);
1246 extern int drm_mem_info(char *buf, char **start, off_t offset, 1247 extern int drm_mem_info(char *buf, char **start, off_t offset,
1247 int request, int *eof, void *data); 1248 int request, int *eof, void *data);
1248 extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); 1249 extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
1249 1250
1250 extern void drm_free_agp(DRM_AGP_MEM * handle, int pages); 1251 extern void drm_free_agp(DRM_AGP_MEM * handle, int pages);
1251 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); 1252 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
1252 extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, 1253 extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
1253 struct page **pages, 1254 struct page **pages,
1254 unsigned long num_pages, 1255 unsigned long num_pages,
1255 uint32_t gtt_offset, 1256 uint32_t gtt_offset,
1256 uint32_t type); 1257 uint32_t type);
1257 extern int drm_unbind_agp(DRM_AGP_MEM * handle); 1258 extern int drm_unbind_agp(DRM_AGP_MEM * handle);
1258 1259
1259 /* Misc. IOCTL support (drm_ioctl.h) */ 1260 /* Misc. IOCTL support (drm_ioctl.h) */
1260 extern int drm_irq_by_busid(struct drm_device *dev, void *data, 1261 extern int drm_irq_by_busid(struct drm_device *dev, void *data,
1261 struct drm_file *file_priv); 1262 struct drm_file *file_priv);
1262 extern int drm_getunique(struct drm_device *dev, void *data, 1263 extern int drm_getunique(struct drm_device *dev, void *data,
1263 struct drm_file *file_priv); 1264 struct drm_file *file_priv);
1264 extern int drm_setunique(struct drm_device *dev, void *data, 1265 extern int drm_setunique(struct drm_device *dev, void *data,
1265 struct drm_file *file_priv); 1266 struct drm_file *file_priv);
1266 extern int drm_getmap(struct drm_device *dev, void *data, 1267 extern int drm_getmap(struct drm_device *dev, void *data,
1267 struct drm_file *file_priv); 1268 struct drm_file *file_priv);
1268 extern int drm_getclient(struct drm_device *dev, void *data, 1269 extern int drm_getclient(struct drm_device *dev, void *data,
1269 struct drm_file *file_priv); 1270 struct drm_file *file_priv);
1270 extern int drm_getstats(struct drm_device *dev, void *data, 1271 extern int drm_getstats(struct drm_device *dev, void *data,
1271 struct drm_file *file_priv); 1272 struct drm_file *file_priv);
1272 extern int drm_getcap(struct drm_device *dev, void *data, 1273 extern int drm_getcap(struct drm_device *dev, void *data,
1273 struct drm_file *file_priv); 1274 struct drm_file *file_priv);
1274 extern int drm_setversion(struct drm_device *dev, void *data, 1275 extern int drm_setversion(struct drm_device *dev, void *data,
1275 struct drm_file *file_priv); 1276 struct drm_file *file_priv);
1276 extern int drm_noop(struct drm_device *dev, void *data, 1277 extern int drm_noop(struct drm_device *dev, void *data,
1277 struct drm_file *file_priv); 1278 struct drm_file *file_priv);
1278 1279
1279 /* Context IOCTL support (drm_context.h) */ 1280 /* Context IOCTL support (drm_context.h) */
1280 extern int drm_resctx(struct drm_device *dev, void *data, 1281 extern int drm_resctx(struct drm_device *dev, void *data,
1281 struct drm_file *file_priv); 1282 struct drm_file *file_priv);
1282 extern int drm_addctx(struct drm_device *dev, void *data, 1283 extern int drm_addctx(struct drm_device *dev, void *data,
1283 struct drm_file *file_priv); 1284 struct drm_file *file_priv);
1284 extern int drm_modctx(struct drm_device *dev, void *data, 1285 extern int drm_modctx(struct drm_device *dev, void *data,
1285 struct drm_file *file_priv); 1286 struct drm_file *file_priv);
1286 extern int drm_getctx(struct drm_device *dev, void *data, 1287 extern int drm_getctx(struct drm_device *dev, void *data,
1287 struct drm_file *file_priv); 1288 struct drm_file *file_priv);
1288 extern int drm_switchctx(struct drm_device *dev, void *data, 1289 extern int drm_switchctx(struct drm_device *dev, void *data,
1289 struct drm_file *file_priv); 1290 struct drm_file *file_priv);
1290 extern int drm_newctx(struct drm_device *dev, void *data, 1291 extern int drm_newctx(struct drm_device *dev, void *data,
1291 struct drm_file *file_priv); 1292 struct drm_file *file_priv);
1292 extern int drm_rmctx(struct drm_device *dev, void *data, 1293 extern int drm_rmctx(struct drm_device *dev, void *data,
1293 struct drm_file *file_priv); 1294 struct drm_file *file_priv);
1294 1295
1295 extern int drm_ctxbitmap_init(struct drm_device *dev); 1296 extern int drm_ctxbitmap_init(struct drm_device *dev);
1296 extern void drm_ctxbitmap_cleanup(struct drm_device *dev); 1297 extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
1297 extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); 1298 extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
1298 1299
1299 extern int drm_setsareactx(struct drm_device *dev, void *data, 1300 extern int drm_setsareactx(struct drm_device *dev, void *data,
1300 struct drm_file *file_priv); 1301 struct drm_file *file_priv);
1301 extern int drm_getsareactx(struct drm_device *dev, void *data, 1302 extern int drm_getsareactx(struct drm_device *dev, void *data,
1302 struct drm_file *file_priv); 1303 struct drm_file *file_priv);
1303 1304
1304 /* Authentication IOCTL support (drm_auth.h) */ 1305 /* Authentication IOCTL support (drm_auth.h) */
1305 extern int drm_getmagic(struct drm_device *dev, void *data, 1306 extern int drm_getmagic(struct drm_device *dev, void *data,
1306 struct drm_file *file_priv); 1307 struct drm_file *file_priv);
1307 extern int drm_authmagic(struct drm_device *dev, void *data, 1308 extern int drm_authmagic(struct drm_device *dev, void *data,
1308 struct drm_file *file_priv); 1309 struct drm_file *file_priv);
1309 1310
1310 /* Cache management (drm_cache.c) */ 1311 /* Cache management (drm_cache.c) */
1311 void drm_clflush_pages(struct page *pages[], unsigned long num_pages); 1312 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
1312 1313
1313 /* Locking IOCTL support (drm_lock.h) */ 1314 /* Locking IOCTL support (drm_lock.h) */
1314 extern int drm_lock(struct drm_device *dev, void *data, 1315 extern int drm_lock(struct drm_device *dev, void *data,
1315 struct drm_file *file_priv); 1316 struct drm_file *file_priv);
1316 extern int drm_unlock(struct drm_device *dev, void *data, 1317 extern int drm_unlock(struct drm_device *dev, void *data,
1317 struct drm_file *file_priv); 1318 struct drm_file *file_priv);
1318 extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); 1319 extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
1319 extern void drm_idlelock_take(struct drm_lock_data *lock_data); 1320 extern void drm_idlelock_take(struct drm_lock_data *lock_data);
1320 extern void drm_idlelock_release(struct drm_lock_data *lock_data); 1321 extern void drm_idlelock_release(struct drm_lock_data *lock_data);
1321 1322
1322 /* 1323 /*
1323 * These are exported to drivers so that they can implement fencing using 1324 * These are exported to drivers so that they can implement fencing using
1324 * DMA quiescent + idle. DMA quiescent usually requires the hardware lock. 1325 * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
1325 */ 1326 */
1326 1327
1327 extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv); 1328 extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
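The idlelock pair declared above is the mechanism the comment refers to: a driver can quiesce DMA for fencing without taking the heavyweight hardware lock by bracketing its wait with the idlelock. A rough sketch under those assumptions (my_engine_wait_idle() is illustrative, and the lock_data is assumed to be the current master's lock):

	/* Block new lock holders, idle the engine, then let clients resume. */
	drm_idlelock_take(&file_priv->master->lock);
	my_engine_wait_idle(dev);		/* driver-specific quiescing */
	drm_idlelock_release(&file_priv->master->lock);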
1328 1329
1329 /* Buffer management support (drm_bufs.h) */ 1330 /* Buffer management support (drm_bufs.h) */
1330 extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); 1331 extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
1331 extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request); 1332 extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
1332 extern int drm_addmap(struct drm_device *dev, resource_size_t offset, 1333 extern int drm_addmap(struct drm_device *dev, resource_size_t offset,
1333 unsigned int size, enum drm_map_type type, 1334 unsigned int size, enum drm_map_type type,
1334 enum drm_map_flags flags, struct drm_local_map **map_ptr); 1335 enum drm_map_flags flags, struct drm_local_map **map_ptr);
1335 extern int drm_addmap_ioctl(struct drm_device *dev, void *data, 1336 extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
1336 struct drm_file *file_priv); 1337 struct drm_file *file_priv);
1337 extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map); 1338 extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map);
1338 extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map); 1339 extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map);
1339 extern int drm_rmmap_ioctl(struct drm_device *dev, void *data, 1340 extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
1340 struct drm_file *file_priv); 1341 struct drm_file *file_priv);
1341 extern int drm_addbufs(struct drm_device *dev, void *data, 1342 extern int drm_addbufs(struct drm_device *dev, void *data,
1342 struct drm_file *file_priv); 1343 struct drm_file *file_priv);
1343 extern int drm_infobufs(struct drm_device *dev, void *data, 1344 extern int drm_infobufs(struct drm_device *dev, void *data,
1344 struct drm_file *file_priv); 1345 struct drm_file *file_priv);
1345 extern int drm_markbufs(struct drm_device *dev, void *data, 1346 extern int drm_markbufs(struct drm_device *dev, void *data,
1346 struct drm_file *file_priv); 1347 struct drm_file *file_priv);
1347 extern int drm_freebufs(struct drm_device *dev, void *data, 1348 extern int drm_freebufs(struct drm_device *dev, void *data,
1348 struct drm_file *file_priv); 1349 struct drm_file *file_priv);
1349 extern int drm_mapbufs(struct drm_device *dev, void *data, 1350 extern int drm_mapbufs(struct drm_device *dev, void *data,
1350 struct drm_file *file_priv); 1351 struct drm_file *file_priv);
1351 extern int drm_order(unsigned long size); 1352 extern int drm_order(unsigned long size);
1352 1353
1353 /* DMA support (drm_dma.h) */ 1354 /* DMA support (drm_dma.h) */
1354 extern int drm_dma_setup(struct drm_device *dev); 1355 extern int drm_dma_setup(struct drm_device *dev);
1355 extern void drm_dma_takedown(struct drm_device *dev); 1356 extern void drm_dma_takedown(struct drm_device *dev);
1356 extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); 1357 extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
1357 extern void drm_core_reclaim_buffers(struct drm_device *dev, 1358 extern void drm_core_reclaim_buffers(struct drm_device *dev,
1358 struct drm_file *filp); 1359 struct drm_file *filp);
1359 1360
1360 /* IRQ support (drm_irq.h) */ 1361 /* IRQ support (drm_irq.h) */
1361 extern int drm_control(struct drm_device *dev, void *data, 1362 extern int drm_control(struct drm_device *dev, void *data,
1362 struct drm_file *file_priv); 1363 struct drm_file *file_priv);
1363 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); 1364 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
1364 extern int drm_irq_install(struct drm_device *dev); 1365 extern int drm_irq_install(struct drm_device *dev);
1365 extern int drm_irq_uninstall(struct drm_device *dev); 1366 extern int drm_irq_uninstall(struct drm_device *dev);
1366 extern void drm_driver_irq_preinstall(struct drm_device *dev); 1367 extern void drm_driver_irq_preinstall(struct drm_device *dev);
1367 extern void drm_driver_irq_postinstall(struct drm_device *dev); 1368 extern void drm_driver_irq_postinstall(struct drm_device *dev);
1368 extern void drm_driver_irq_uninstall(struct drm_device *dev); 1369 extern void drm_driver_irq_uninstall(struct drm_device *dev);
1369 1370
1370 extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); 1371 extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
1371 extern int drm_wait_vblank(struct drm_device *dev, void *data, 1372 extern int drm_wait_vblank(struct drm_device *dev, void *data,
1372 struct drm_file *filp); 1373 struct drm_file *filp);
1373 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); 1374 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
1374 extern u32 drm_vblank_count(struct drm_device *dev, int crtc); 1375 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
1375 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, 1376 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
1376 struct timeval *vblanktime); 1377 struct timeval *vblanktime);
1377 extern bool drm_handle_vblank(struct drm_device *dev, int crtc); 1378 extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
1378 extern int drm_vblank_get(struct drm_device *dev, int crtc); 1379 extern int drm_vblank_get(struct drm_device *dev, int crtc);
1379 extern void drm_vblank_put(struct drm_device *dev, int crtc); 1380 extern void drm_vblank_put(struct drm_device *dev, int crtc);
1380 extern void drm_vblank_off(struct drm_device *dev, int crtc); 1381 extern void drm_vblank_off(struct drm_device *dev, int crtc);
1381 extern void drm_vblank_cleanup(struct drm_device *dev); 1382 extern void drm_vblank_cleanup(struct drm_device *dev);
1382 extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, 1383 extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
1383 struct timeval *tvblank, unsigned flags); 1384 struct timeval *tvblank, unsigned flags);
1384 extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 1385 extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
1385 int crtc, int *max_error, 1386 int crtc, int *max_error,
1386 struct timeval *vblank_time, 1387 struct timeval *vblank_time,
1387 unsigned flags, 1388 unsigned flags,
1388 struct drm_crtc *refcrtc); 1389 struct drm_crtc *refcrtc);
1389 extern void drm_calc_timestamping_constants(struct drm_crtc *crtc); 1390 extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
1390 1391
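Note how the vblank API above pairs: drm_vblank_get() enables the vblank interrupt under a reference count and must be balanced by drm_vblank_put(), with the counter only guaranteed to advance in between. A minimal sketch:

	/* Sample the vblank counter with the IRQ guaranteed enabled. */
	if (drm_vblank_get(dev, crtc) == 0) {
		u32 seq = drm_vblank_count(dev, crtc);
		/* ... queue work to complete at or after seq ... */
		drm_vblank_put(dev, crtc);
	}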
1391 /* Modesetting support */ 1392 /* Modesetting support */
1392 extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); 1393 extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
1393 extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); 1394 extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
1394 extern int drm_modeset_ctl(struct drm_device *dev, void *data, 1395 extern int drm_modeset_ctl(struct drm_device *dev, void *data,
1395 struct drm_file *file_priv); 1396 struct drm_file *file_priv);
1396 1397
1397 /* AGP/GART support (drm_agpsupport.h) */ 1398 /* AGP/GART support (drm_agpsupport.h) */
1398 extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); 1399 extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
1399 extern int drm_agp_acquire(struct drm_device *dev); 1400 extern int drm_agp_acquire(struct drm_device *dev);
1400 extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, 1401 extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
1401 struct drm_file *file_priv); 1402 struct drm_file *file_priv);
1402 extern int drm_agp_release(struct drm_device *dev); 1403 extern int drm_agp_release(struct drm_device *dev);
1403 extern int drm_agp_release_ioctl(struct drm_device *dev, void *data, 1404 extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
1404 struct drm_file *file_priv); 1405 struct drm_file *file_priv);
1405 extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); 1406 extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
1406 extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data, 1407 extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
1407 struct drm_file *file_priv); 1408 struct drm_file *file_priv);
1408 extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); 1409 extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
1409 extern int drm_agp_info_ioctl(struct drm_device *dev, void *data, 1410 extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
1410 struct drm_file *file_priv); 1411 struct drm_file *file_priv);
1411 extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); 1412 extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
1412 extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, 1413 extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
1413 struct drm_file *file_priv); 1414 struct drm_file *file_priv);
1414 extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); 1415 extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
1415 extern int drm_agp_free_ioctl(struct drm_device *dev, void *data, 1416 extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
1416 struct drm_file *file_priv); 1417 struct drm_file *file_priv);
1417 extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); 1418 extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
1418 extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, 1419 extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
1419 struct drm_file *file_priv); 1420 struct drm_file *file_priv);
1420 extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); 1421 extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
1421 extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, 1422 extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
1422 struct drm_file *file_priv); 1423 struct drm_file *file_priv);
1423 1424
1424 /* Stub support (drm_stub.h) */ 1425 /* Stub support (drm_stub.h) */
1425 extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, 1426 extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
1426 struct drm_file *file_priv); 1427 struct drm_file *file_priv);
1427 extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data, 1428 extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
1428 struct drm_file *file_priv); 1429 struct drm_file *file_priv);
1429 struct drm_master *drm_master_create(struct drm_minor *minor); 1430 struct drm_master *drm_master_create(struct drm_minor *minor);
1430 extern struct drm_master *drm_master_get(struct drm_master *master); 1431 extern struct drm_master *drm_master_get(struct drm_master *master);
1431 extern void drm_master_put(struct drm_master **master); 1432 extern void drm_master_put(struct drm_master **master);
1432 1433
1433 extern void drm_put_dev(struct drm_device *dev); 1434 extern void drm_put_dev(struct drm_device *dev);
1434 extern int drm_put_minor(struct drm_minor **minor); 1435 extern int drm_put_minor(struct drm_minor **minor);
1435 extern unsigned int drm_debug; 1436 extern unsigned int drm_debug;
1436 1437
1437 extern unsigned int drm_vblank_offdelay; 1438 extern unsigned int drm_vblank_offdelay;
1438 extern unsigned int drm_timestamp_precision; 1439 extern unsigned int drm_timestamp_precision;
1439 1440
1440 extern struct class *drm_class; 1441 extern struct class *drm_class;
1441 extern struct proc_dir_entry *drm_proc_root; 1442 extern struct proc_dir_entry *drm_proc_root;
1442 extern struct dentry *drm_debugfs_root; 1443 extern struct dentry *drm_debugfs_root;
1443 1444
1444 extern struct idr drm_minors_idr; 1445 extern struct idr drm_minors_idr;
1445 1446
1446 extern struct drm_local_map *drm_getsarea(struct drm_device *dev); 1447 extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
1447 1448
1448 /* Proc support (drm_proc.h) */ 1449 /* Proc support (drm_proc.h) */
1449 extern int drm_proc_init(struct drm_minor *minor, int minor_id, 1450 extern int drm_proc_init(struct drm_minor *minor, int minor_id,
1450 struct proc_dir_entry *root); 1451 struct proc_dir_entry *root);
1451 extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root); 1452 extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
1452 1453
1453 /* Debugfs support */ 1454 /* Debugfs support */
1454 #if defined(CONFIG_DEBUG_FS) 1455 #if defined(CONFIG_DEBUG_FS)
1455 extern int drm_debugfs_init(struct drm_minor *minor, int minor_id, 1456 extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
1456 struct dentry *root); 1457 struct dentry *root);
1457 extern int drm_debugfs_create_files(struct drm_info_list *files, int count, 1458 extern int drm_debugfs_create_files(struct drm_info_list *files, int count,
1458 struct dentry *root, struct drm_minor *minor); 1459 struct dentry *root, struct drm_minor *minor);
1459 extern int drm_debugfs_remove_files(struct drm_info_list *files, int count, 1460 extern int drm_debugfs_remove_files(struct drm_info_list *files, int count,
1460 struct drm_minor *minor); 1461 struct drm_minor *minor);
1461 extern int drm_debugfs_cleanup(struct drm_minor *minor); 1462 extern int drm_debugfs_cleanup(struct drm_minor *minor);
1462 #endif 1463 #endif
1463 1464
1464 /* Info file support */ 1465 /* Info file support */
1465 extern int drm_name_info(struct seq_file *m, void *data); 1466 extern int drm_name_info(struct seq_file *m, void *data);
1466 extern int drm_vm_info(struct seq_file *m, void *data); 1467 extern int drm_vm_info(struct seq_file *m, void *data);
1467 extern int drm_queues_info(struct seq_file *m, void *data); 1468 extern int drm_queues_info(struct seq_file *m, void *data);
1468 extern int drm_bufs_info(struct seq_file *m, void *data); 1469 extern int drm_bufs_info(struct seq_file *m, void *data);
1469 extern int drm_vblank_info(struct seq_file *m, void *data); 1470 extern int drm_vblank_info(struct seq_file *m, void *data);
1470 extern int drm_clients_info(struct seq_file *m, void* data); 1471 extern int drm_clients_info(struct seq_file *m, void* data);
1471 extern int drm_gem_name_info(struct seq_file *m, void *data); 1472 extern int drm_gem_name_info(struct seq_file *m, void *data);
1472 1473
1473 #if DRM_DEBUG_CODE 1474 #if DRM_DEBUG_CODE
1474 extern int drm_vma_info(struct seq_file *m, void *data); 1475 extern int drm_vma_info(struct seq_file *m, void *data);
1475 #endif 1476 #endif
1476 1477
1477 /* Scatter Gather Support (drm_scatter.h) */ 1478 /* Scatter Gather Support (drm_scatter.h) */
1478 extern void drm_sg_cleanup(struct drm_sg_mem * entry); 1479 extern void drm_sg_cleanup(struct drm_sg_mem * entry);
1479 extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, 1480 extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
1480 struct drm_file *file_priv); 1481 struct drm_file *file_priv);
1481 extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); 1482 extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
1482 extern int drm_sg_free(struct drm_device *dev, void *data, 1483 extern int drm_sg_free(struct drm_device *dev, void *data,
1483 struct drm_file *file_priv); 1484 struct drm_file *file_priv);
1484 1485
1485 /* ATI PCIGART support (ati_pcigart.h) */ 1486 /* ATI PCIGART support (ati_pcigart.h) */
1486 extern int drm_ati_pcigart_init(struct drm_device *dev, 1487 extern int drm_ati_pcigart_init(struct drm_device *dev,
1487 struct drm_ati_pcigart_info * gart_info); 1488 struct drm_ati_pcigart_info * gart_info);
1488 extern int drm_ati_pcigart_cleanup(struct drm_device *dev, 1489 extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
1489 struct drm_ati_pcigart_info * gart_info); 1490 struct drm_ati_pcigart_info * gart_info);
1490 1491
1491 extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, 1492 extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
1492 size_t align); 1493 size_t align);
1493 extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); 1494 extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1494 extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); 1495 extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1495 1496
1496 /* sysfs support (drm_sysfs.c) */ 1497 /* sysfs support (drm_sysfs.c) */
1497 struct drm_sysfs_class; 1498 struct drm_sysfs_class;
1498 extern struct class *drm_sysfs_create(struct module *owner, char *name); 1499 extern struct class *drm_sysfs_create(struct module *owner, char *name);
1499 extern void drm_sysfs_destroy(void); 1500 extern void drm_sysfs_destroy(void);
1500 extern int drm_sysfs_device_add(struct drm_minor *minor); 1501 extern int drm_sysfs_device_add(struct drm_minor *minor);
1501 extern void drm_sysfs_hotplug_event(struct drm_device *dev); 1502 extern void drm_sysfs_hotplug_event(struct drm_device *dev);
1502 extern void drm_sysfs_device_remove(struct drm_minor *minor); 1503 extern void drm_sysfs_device_remove(struct drm_minor *minor);
1503 extern char *drm_get_connector_status_name(enum drm_connector_status status); 1504 extern char *drm_get_connector_status_name(enum drm_connector_status status);
1504 extern int drm_sysfs_connector_add(struct drm_connector *connector); 1505 extern int drm_sysfs_connector_add(struct drm_connector *connector);
1505 extern void drm_sysfs_connector_remove(struct drm_connector *connector); 1506 extern void drm_sysfs_connector_remove(struct drm_connector *connector);
1506 1507
1507 /* Graphics Execution Manager library functions (drm_gem.c) */ 1508 /* Graphics Execution Manager library functions (drm_gem.c) */
1508 int drm_gem_init(struct drm_device *dev); 1509 int drm_gem_init(struct drm_device *dev);
1509 void drm_gem_destroy(struct drm_device *dev); 1510 void drm_gem_destroy(struct drm_device *dev);
1510 void drm_gem_object_release(struct drm_gem_object *obj); 1511 void drm_gem_object_release(struct drm_gem_object *obj);
1511 void drm_gem_object_free(struct kref *kref); 1512 void drm_gem_object_free(struct kref *kref);
1512 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, 1513 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
1513 size_t size); 1514 size_t size);
1514 int drm_gem_object_init(struct drm_device *dev, 1515 int drm_gem_object_init(struct drm_device *dev,
1515 struct drm_gem_object *obj, size_t size); 1516 struct drm_gem_object *obj, size_t size);
1516 void drm_gem_object_handle_free(struct drm_gem_object *obj); 1517 void drm_gem_object_handle_free(struct drm_gem_object *obj);
1517 void drm_gem_vm_open(struct vm_area_struct *vma); 1518 void drm_gem_vm_open(struct vm_area_struct *vma);
1518 void drm_gem_vm_close(struct vm_area_struct *vma); 1519 void drm_gem_vm_close(struct vm_area_struct *vma);
1519 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 1520 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
1520 1521
1521 #include "drm_global.h" 1522 #include "drm_global.h"
1522 1523
1523 static inline void 1524 static inline void
1524 drm_gem_object_reference(struct drm_gem_object *obj) 1525 drm_gem_object_reference(struct drm_gem_object *obj)
1525 { 1526 {
1526 kref_get(&obj->refcount); 1527 kref_get(&obj->refcount);
1527 } 1528 }
1528 1529
1529 static inline void 1530 static inline void
1530 drm_gem_object_unreference(struct drm_gem_object *obj) 1531 drm_gem_object_unreference(struct drm_gem_object *obj)
1531 { 1532 {
1532 if (obj != NULL) 1533 if (obj != NULL)
1533 kref_put(&obj->refcount, drm_gem_object_free); 1534 kref_put(&obj->refcount, drm_gem_object_free);
1534 } 1535 }
1535 1536
1536 static inline void 1537 static inline void
1537 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) 1538 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
1538 { 1539 {
1539 if (obj != NULL) { 1540 if (obj != NULL) {
1540 struct drm_device *dev = obj->dev; 1541 struct drm_device *dev = obj->dev;
1541 mutex_lock(&dev->struct_mutex); 1542 mutex_lock(&dev->struct_mutex);
1542 kref_put(&obj->refcount, drm_gem_object_free); 1543 kref_put(&obj->refcount, drm_gem_object_free);
1543 mutex_unlock(&dev->struct_mutex); 1544 mutex_unlock(&dev->struct_mutex);
1544 } 1545 }
1545 } 1546 }
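The two unreference helpers differ only in locking: the plain variant assumes the caller already holds dev->struct_mutex (the final kref_put may free the object under that lock), while the _unlocked variant takes and drops the mutex itself. Sketch of the choice:

	/* Already under dev->struct_mutex: */
	drm_gem_object_unreference(obj);

	/* No lock held (e.g. straight-line ioctl code): */
	drm_gem_object_unreference_unlocked(obj);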
1546 1547
1547 int drm_gem_handle_create(struct drm_file *file_priv, 1548 int drm_gem_handle_create(struct drm_file *file_priv,
1548 struct drm_gem_object *obj, 1549 struct drm_gem_object *obj,
1549 u32 *handlep); 1550 u32 *handlep);
1550 int drm_gem_handle_delete(struct drm_file *filp, u32 handle); 1551 int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
1551 1552
1552 static inline void 1553 static inline void
1553 drm_gem_object_handle_reference(struct drm_gem_object *obj) 1554 drm_gem_object_handle_reference(struct drm_gem_object *obj)
1554 { 1555 {
1555 drm_gem_object_reference(obj); 1556 drm_gem_object_reference(obj);
1556 atomic_inc(&obj->handle_count); 1557 atomic_inc(&obj->handle_count);
1557 } 1558 }
1558 1559
1559 static inline void 1560 static inline void
1560 drm_gem_object_handle_unreference(struct drm_gem_object *obj) 1561 drm_gem_object_handle_unreference(struct drm_gem_object *obj)
1561 { 1562 {
1562 if (obj == NULL) 1563 if (obj == NULL)
1563 return; 1564 return;
1564 1565
1565 if (atomic_read(&obj->handle_count) == 0) 1566 if (atomic_read(&obj->handle_count) == 0)
1566 return; 1567 return;
1567 /* 1568 /*
1568 * Must drop handle count first as this may be the last 1569 * Must drop handle count first as this may be the last
1569 * ref, in which case the object would disappear before we 1570 * ref, in which case the object would disappear before we
1570 * checked for a name 1571 * checked for a name
1571 */ 1572 */
1572 if (atomic_dec_and_test(&obj->handle_count)) 1573 if (atomic_dec_and_test(&obj->handle_count))
1573 drm_gem_object_handle_free(obj); 1574 drm_gem_object_handle_free(obj);
1574 drm_gem_object_unreference(obj); 1575 drm_gem_object_unreference(obj);
1575 } 1576 }
1576 1577
1577 static inline void 1578 static inline void
1578 drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) 1579 drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
1579 { 1580 {
1580 if (obj == NULL) 1581 if (obj == NULL)
1581 return; 1582 return;
1582 1583
1583 if (atomic_read(&obj->handle_count) == 0) 1584 if (atomic_read(&obj->handle_count) == 0)
1584 return; 1585 return;
1585 1586
1586 /* 1587 /*
1588 * Must drop handle count first as this may be the last 1589 * Must drop handle count first as this may be the last
1588 * ref, in which case the object would disappear before we 1589 * ref, in which case the object would disappear before we
1589 * checked for a name 1590 * checked for a name
1590 */ 1591 */
1591 1592
1592 if (atomic_dec_and_test(&obj->handle_count)) 1593 if (atomic_dec_and_test(&obj->handle_count))
1593 drm_gem_object_handle_free(obj); 1594 drm_gem_object_handle_free(obj);
1594 drm_gem_object_unreference_unlocked(obj); 1595 drm_gem_object_unreference_unlocked(obj);
1595 } 1596 }
1596 1597
1597 struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, 1598 struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
1598 struct drm_file *filp, 1599 struct drm_file *filp,
1599 u32 handle); 1600 u32 handle);
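drm_gem_object_lookup() returns the object with a reference already taken on the caller's behalf, so every successful lookup must be balanced by an unreference. A common ioctl-handler shape (sketch; the args struct and error code are illustrative):

	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	/* ... operate on obj ... */
	drm_gem_object_unreference_unlocked(obj);
	return 0;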
1600 int drm_gem_close_ioctl(struct drm_device *dev, void *data, 1601 int drm_gem_close_ioctl(struct drm_device *dev, void *data,
1601 struct drm_file *file_priv); 1602 struct drm_file *file_priv);
1602 int drm_gem_flink_ioctl(struct drm_device *dev, void *data, 1603 int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
1603 struct drm_file *file_priv); 1604 struct drm_file *file_priv);
1604 int drm_gem_open_ioctl(struct drm_device *dev, void *data, 1605 int drm_gem_open_ioctl(struct drm_device *dev, void *data,
1605 struct drm_file *file_priv); 1606 struct drm_file *file_priv);
1606 void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); 1607 void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
1607 void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); 1608 void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
1608 1609
1609 extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev); 1610 extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev);
1610 extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); 1611 extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
1611 extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev); 1612 extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
1612 1613
1613 static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev, 1614 static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
1614 unsigned int token) 1615 unsigned int token)
1615 { 1616 {
1616 struct drm_map_list *_entry; 1617 struct drm_map_list *_entry;
1617 list_for_each_entry(_entry, &dev->maplist, head) 1618 list_for_each_entry(_entry, &dev->maplist, head)
1618 if (_entry->user_token == token) 1619 if (_entry->user_token == token)
1619 return _entry->map; 1620 return _entry->map;
1620 return NULL; 1621 return NULL;
1621 } 1622 }
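drm_core_findmap() translates the user-visible token (the fake offset userspace passes to mmap) back into the kernel's drm_local_map; callers are expected to hold dev->struct_mutex while the maplist is walked. Typical use, sketched (sarea_token is illustrative):

	struct drm_local_map *map = drm_core_findmap(dev, sarea_token);
	if (map == NULL)
		return -EINVAL;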
1622 1623
1623 static __inline__ void drm_core_dropmap(struct drm_local_map *map) 1624 static __inline__ void drm_core_dropmap(struct drm_local_map *map)
1624 { 1625 {
1625 } 1626 }
1626 1627
1627 #include "drm_mem_util.h" 1628 #include "drm_mem_util.h"
1628 1629
1629 extern int drm_fill_in_dev(struct drm_device *dev, 1630 extern int drm_fill_in_dev(struct drm_device *dev,
1630 const struct pci_device_id *ent, 1631 const struct pci_device_id *ent,
1631 struct drm_driver *driver); 1632 struct drm_driver *driver);
1632 int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type); 1633 int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
1633 /*@}*/ 1634 /*@}*/
1634 1635
1635 /* PCI section */ 1636 /* PCI section */
1636 static __inline__ int drm_pci_device_is_agp(struct drm_device *dev) 1637 static __inline__ int drm_pci_device_is_agp(struct drm_device *dev)
1637 { 1638 {
1638 if (dev->driver->device_is_agp != NULL) { 1639 if (dev->driver->device_is_agp != NULL) {
1639 int err = (*dev->driver->device_is_agp) (dev); 1640 int err = (*dev->driver->device_is_agp) (dev);
1640 1641
1641 if (err != 2) { 1642 if (err != 2) {
1642 return err; 1643 return err;
1643 } 1644 }
1644 } 1645 }
1645 1646
1646 return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); 1647 return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
1647 } 1648 }
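The device_is_agp hook follows a small three-way convention readable from the code above: a driver returns 0 (not AGP) or 1 (AGP) to override detection, or 2 to defer to the generic PCI capability probe. A hypothetical hook:

	/* Force non-AGP on one quirky device, defer otherwise. */
	static int my_device_is_agp(struct drm_device *dev)
	{
		if (dev->pdev->device == MY_QUIRKY_DEVICE_ID)	/* illustrative ID */
			return 0;	/* never treat as AGP */
		return 2;		/* fall through to pci_find_capability() */
	}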
1648 1649
1649 1650
1650 static __inline__ int drm_pci_device_is_pcie(struct drm_device *dev) 1651 static __inline__ int drm_pci_device_is_pcie(struct drm_device *dev)
1651 { 1652 {
1652 return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP); 1653 return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
1653 } 1654 }
1654 1655
1655 1656
1656 extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); 1657 extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
1657 extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); 1658 extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
1658 extern int drm_get_pci_dev(struct pci_dev *pdev, 1659 extern int drm_get_pci_dev(struct pci_dev *pdev,
1659 const struct pci_device_id *ent, 1660 const struct pci_device_id *ent,
1660 struct drm_driver *driver); 1661 struct drm_driver *driver);
1661 1662
1662 1663
1663 /* platform section */ 1664 /* platform section */
1664 extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); 1665 extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
1665 extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device); 1666 extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device);
1666 1667
1667 extern int drm_get_platform_dev(struct platform_device *pdev, 1668 extern int drm_get_platform_dev(struct platform_device *pdev,
1668 struct drm_driver *driver); 1669 struct drm_driver *driver);
1669 1670
1670 #endif /* __KERNEL__ */ 1671 #endif /* __KERNEL__ */
1671 #endif 1672 #endif
1672 1673