Commit 3d29b842e58fbca2c13a9f458fddbaa535c6e578

Authored by Eugeni Dodonov
Committed by Daniel Vetter
1 parent b2c606fe1d

drm/i915: add a LLC feature flag in device description

LLC (last-level cache) is not SNB/IVB-specific, so we should check for it in
a more generic way.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Signed-off-by: Eugeni Dodonov <eugeni.dodonov@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Showing 6 changed files with 13 additions and 2 deletions
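Only the debugfs hunk is reproduced on this page; the flag itself and the "more generic" check live in the other changed files (the driver headers and the per-chipset device descriptions). A minimal sketch of that plumbing, assuming the flag is a feature bit in struct intel_device_info -- the intel_sandybridge_d_info initializer and the HAS_LLC() caller below are illustrative, not copied from the patch:

	/* i915_drv.h (sketch): describe the capability once, per device */
	struct intel_device_info {
		u8 gen;
		/* ... existing feature bits ... */
		u8 has_llc:1;	/* GPU shares the CPU's last-level cache */
	};

	#define HAS_LLC(dev)	(INTEL_INFO(dev)->has_llc)

	/* i915_drv.c (sketch): SNB/IVB device descriptions opt in */
	static const struct intel_device_info intel_sandybridge_d_info = {
		.gen = 6,
		.has_llc = 1,
		/* ... */
	};

	/* callers then test the capability rather than the generation, e.g. an
	 * "IS_GEN6(dev) || IS_GEN7(dev)" cache-level check becomes: */
	if (HAS_LLC(dev))
		obj->cache_level = I915_CACHE_LLC;

The hunk below is the visible side of this: i915_capabilities() gains a "B(has_llc)" line so the new flag is reported through debugfs alongside the other feature bits.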

drivers/gpu/drm/i915/i915_debugfs.c
1 /* 1 /*
2 * Copyright © 2008 Intel Corporation 2 * Copyright © 2008 Intel Corporation
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation 6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the 8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions: 9 * Software is furnished to do so, subject to the following conditions:
10 * 10 *
11 * The above copyright notice and this permission notice (including the next 11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the 12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software. 13 * Software.
14 * 14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE. 21 * IN THE SOFTWARE.
22 * 22 *
23 * Authors: 23 * Authors:
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com> 25 * Keith Packard <keithp@keithp.com>
26 * 26 *
27 */ 27 */
28 28
29 #include <linux/seq_file.h> 29 #include <linux/seq_file.h>
30 #include <linux/debugfs.h> 30 #include <linux/debugfs.h>
31 #include <linux/slab.h> 31 #include <linux/slab.h>
32 #include <linux/export.h> 32 #include <linux/export.h>
33 #include "drmP.h" 33 #include "drmP.h"
34 #include "drm.h" 34 #include "drm.h"
35 #include "intel_drv.h" 35 #include "intel_drv.h"
36 #include "intel_ringbuffer.h" 36 #include "intel_ringbuffer.h"
37 #include "i915_drm.h" 37 #include "i915_drm.h"
38 #include "i915_drv.h" 38 #include "i915_drv.h"
39 39
40 #define DRM_I915_RING_DEBUG 1 40 #define DRM_I915_RING_DEBUG 1
41 41
42 42
43 #if defined(CONFIG_DEBUG_FS) 43 #if defined(CONFIG_DEBUG_FS)
44 44
45 enum { 45 enum {
46 ACTIVE_LIST, 46 ACTIVE_LIST,
47 FLUSHING_LIST, 47 FLUSHING_LIST,
48 INACTIVE_LIST, 48 INACTIVE_LIST,
49 PINNED_LIST, 49 PINNED_LIST,
50 DEFERRED_FREE_LIST, 50 DEFERRED_FREE_LIST,
51 }; 51 };
52 52
53 static const char *yesno(int v) 53 static const char *yesno(int v)
54 { 54 {
55 return v ? "yes" : "no"; 55 return v ? "yes" : "no";
56 } 56 }
57 57
58 static int i915_capabilities(struct seq_file *m, void *data) 58 static int i915_capabilities(struct seq_file *m, void *data)
59 { 59 {
60 struct drm_info_node *node = (struct drm_info_node *) m->private; 60 struct drm_info_node *node = (struct drm_info_node *) m->private;
61 struct drm_device *dev = node->minor->dev; 61 struct drm_device *dev = node->minor->dev;
62 const struct intel_device_info *info = INTEL_INFO(dev); 62 const struct intel_device_info *info = INTEL_INFO(dev);
63 63
64 seq_printf(m, "gen: %d\n", info->gen); 64 seq_printf(m, "gen: %d\n", info->gen);
65 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 65 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
66 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 66 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
67 B(is_mobile); 67 B(is_mobile);
68 B(is_i85x); 68 B(is_i85x);
69 B(is_i915g); 69 B(is_i915g);
70 B(is_i945gm); 70 B(is_i945gm);
71 B(is_g33); 71 B(is_g33);
72 B(need_gfx_hws); 72 B(need_gfx_hws);
73 B(is_g4x); 73 B(is_g4x);
74 B(is_pineview); 74 B(is_pineview);
75 B(is_broadwater); 75 B(is_broadwater);
76 B(is_crestline); 76 B(is_crestline);
77 B(has_fbc); 77 B(has_fbc);
78 B(has_pipe_cxsr); 78 B(has_pipe_cxsr);
79 B(has_hotplug); 79 B(has_hotplug);
80 B(cursor_needs_physical); 80 B(cursor_needs_physical);
81 B(has_overlay); 81 B(has_overlay);
82 B(overlay_needs_physical); 82 B(overlay_needs_physical);
83 B(supports_tv); 83 B(supports_tv);
84 B(has_bsd_ring); 84 B(has_bsd_ring);
85 B(has_blt_ring); 85 B(has_blt_ring);
86 B(has_llc);
86 #undef B 87 #undef B
87 88
88 return 0; 89 return 0;
89 } 90 }
90 91
91 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 92 static const char *get_pin_flag(struct drm_i915_gem_object *obj)
92 { 93 {
93 if (obj->user_pin_count > 0) 94 if (obj->user_pin_count > 0)
94 return "P"; 95 return "P";
95 else if (obj->pin_count > 0) 96 else if (obj->pin_count > 0)
96 return "p"; 97 return "p";
97 else 98 else
98 return " "; 99 return " ";
99 } 100 }
100 101
101 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 102 static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
102 { 103 {
103 switch (obj->tiling_mode) { 104 switch (obj->tiling_mode) {
104 default: 105 default:
105 case I915_TILING_NONE: return " "; 106 case I915_TILING_NONE: return " ";
106 case I915_TILING_X: return "X"; 107 case I915_TILING_X: return "X";
107 case I915_TILING_Y: return "Y"; 108 case I915_TILING_Y: return "Y";
108 } 109 }
109 } 110 }
110 111
111 static const char *cache_level_str(int type) 112 static const char *cache_level_str(int type)
112 { 113 {
113 switch (type) { 114 switch (type) {
114 case I915_CACHE_NONE: return " uncached"; 115 case I915_CACHE_NONE: return " uncached";
115 case I915_CACHE_LLC: return " snooped (LLC)"; 116 case I915_CACHE_LLC: return " snooped (LLC)";
116 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; 117 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
117 default: return ""; 118 default: return "";
118 } 119 }
119 } 120 }
120 121
121 static void 122 static void
122 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 123 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
123 { 124 {
124 seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s", 125 seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
125 &obj->base, 126 &obj->base,
126 get_pin_flag(obj), 127 get_pin_flag(obj),
127 get_tiling_flag(obj), 128 get_tiling_flag(obj),
128 obj->base.size, 129 obj->base.size,
129 obj->base.read_domains, 130 obj->base.read_domains,
130 obj->base.write_domain, 131 obj->base.write_domain,
131 obj->last_rendering_seqno, 132 obj->last_rendering_seqno,
132 obj->last_fenced_seqno, 133 obj->last_fenced_seqno,
133 cache_level_str(obj->cache_level), 134 cache_level_str(obj->cache_level),
134 obj->dirty ? " dirty" : "", 135 obj->dirty ? " dirty" : "",
135 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 136 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
136 if (obj->base.name) 137 if (obj->base.name)
137 seq_printf(m, " (name: %d)", obj->base.name); 138 seq_printf(m, " (name: %d)", obj->base.name);
138 if (obj->fence_reg != I915_FENCE_REG_NONE) 139 if (obj->fence_reg != I915_FENCE_REG_NONE)
139 seq_printf(m, " (fence: %d)", obj->fence_reg); 140 seq_printf(m, " (fence: %d)", obj->fence_reg);
140 if (obj->gtt_space != NULL) 141 if (obj->gtt_space != NULL)
141 seq_printf(m, " (gtt offset: %08x, size: %08x)", 142 seq_printf(m, " (gtt offset: %08x, size: %08x)",
142 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 143 obj->gtt_offset, (unsigned int)obj->gtt_space->size);
143 if (obj->pin_mappable || obj->fault_mappable) { 144 if (obj->pin_mappable || obj->fault_mappable) {
144 char s[3], *t = s; 145 char s[3], *t = s;
145 if (obj->pin_mappable) 146 if (obj->pin_mappable)
146 *t++ = 'p'; 147 *t++ = 'p';
147 if (obj->fault_mappable) 148 if (obj->fault_mappable)
148 *t++ = 'f'; 149 *t++ = 'f';
149 *t = '\0'; 150 *t = '\0';
150 seq_printf(m, " (%s mappable)", s); 151 seq_printf(m, " (%s mappable)", s);
151 } 152 }
152 if (obj->ring != NULL) 153 if (obj->ring != NULL)
153 seq_printf(m, " (%s)", obj->ring->name); 154 seq_printf(m, " (%s)", obj->ring->name);
154 } 155 }
155 156
156 static int i915_gem_object_list_info(struct seq_file *m, void *data) 157 static int i915_gem_object_list_info(struct seq_file *m, void *data)
157 { 158 {
158 struct drm_info_node *node = (struct drm_info_node *) m->private; 159 struct drm_info_node *node = (struct drm_info_node *) m->private;
159 uintptr_t list = (uintptr_t) node->info_ent->data; 160 uintptr_t list = (uintptr_t) node->info_ent->data;
160 struct list_head *head; 161 struct list_head *head;
161 struct drm_device *dev = node->minor->dev; 162 struct drm_device *dev = node->minor->dev;
162 drm_i915_private_t *dev_priv = dev->dev_private; 163 drm_i915_private_t *dev_priv = dev->dev_private;
163 struct drm_i915_gem_object *obj; 164 struct drm_i915_gem_object *obj;
164 size_t total_obj_size, total_gtt_size; 165 size_t total_obj_size, total_gtt_size;
165 int count, ret; 166 int count, ret;
166 167
167 ret = mutex_lock_interruptible(&dev->struct_mutex); 168 ret = mutex_lock_interruptible(&dev->struct_mutex);
168 if (ret) 169 if (ret)
169 return ret; 170 return ret;
170 171
171 switch (list) { 172 switch (list) {
172 case ACTIVE_LIST: 173 case ACTIVE_LIST:
173 seq_printf(m, "Active:\n"); 174 seq_printf(m, "Active:\n");
174 head = &dev_priv->mm.active_list; 175 head = &dev_priv->mm.active_list;
175 break; 176 break;
176 case INACTIVE_LIST: 177 case INACTIVE_LIST:
177 seq_printf(m, "Inactive:\n"); 178 seq_printf(m, "Inactive:\n");
178 head = &dev_priv->mm.inactive_list; 179 head = &dev_priv->mm.inactive_list;
179 break; 180 break;
180 case PINNED_LIST: 181 case PINNED_LIST:
181 seq_printf(m, "Pinned:\n"); 182 seq_printf(m, "Pinned:\n");
182 head = &dev_priv->mm.pinned_list; 183 head = &dev_priv->mm.pinned_list;
183 break; 184 break;
184 case FLUSHING_LIST: 185 case FLUSHING_LIST:
185 seq_printf(m, "Flushing:\n"); 186 seq_printf(m, "Flushing:\n");
186 head = &dev_priv->mm.flushing_list; 187 head = &dev_priv->mm.flushing_list;
187 break; 188 break;
188 case DEFERRED_FREE_LIST: 189 case DEFERRED_FREE_LIST:
189 seq_printf(m, "Deferred free:\n"); 190 seq_printf(m, "Deferred free:\n");
190 head = &dev_priv->mm.deferred_free_list; 191 head = &dev_priv->mm.deferred_free_list;
191 break; 192 break;
192 default: 193 default:
193 mutex_unlock(&dev->struct_mutex); 194 mutex_unlock(&dev->struct_mutex);
194 return -EINVAL; 195 return -EINVAL;
195 } 196 }
196 197
197 total_obj_size = total_gtt_size = count = 0; 198 total_obj_size = total_gtt_size = count = 0;
198 list_for_each_entry(obj, head, mm_list) { 199 list_for_each_entry(obj, head, mm_list) {
199 seq_printf(m, " "); 200 seq_printf(m, " ");
200 describe_obj(m, obj); 201 describe_obj(m, obj);
201 seq_printf(m, "\n"); 202 seq_printf(m, "\n");
202 total_obj_size += obj->base.size; 203 total_obj_size += obj->base.size;
203 total_gtt_size += obj->gtt_space->size; 204 total_gtt_size += obj->gtt_space->size;
204 count++; 205 count++;
205 } 206 }
206 mutex_unlock(&dev->struct_mutex); 207 mutex_unlock(&dev->struct_mutex);
207 208
208 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 209 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
209 count, total_obj_size, total_gtt_size); 210 count, total_obj_size, total_gtt_size);
210 return 0; 211 return 0;
211 } 212 }
212 213
213 #define count_objects(list, member) do { \ 214 #define count_objects(list, member) do { \
214 list_for_each_entry(obj, list, member) { \ 215 list_for_each_entry(obj, list, member) { \
215 size += obj->gtt_space->size; \ 216 size += obj->gtt_space->size; \
216 ++count; \ 217 ++count; \
217 if (obj->map_and_fenceable) { \ 218 if (obj->map_and_fenceable) { \
218 mappable_size += obj->gtt_space->size; \ 219 mappable_size += obj->gtt_space->size; \
219 ++mappable_count; \ 220 ++mappable_count; \
220 } \ 221 } \
221 } \ 222 } \
222 } while (0) 223 } while (0)
223 224
224 static int i915_gem_object_info(struct seq_file *m, void* data) 225 static int i915_gem_object_info(struct seq_file *m, void* data)
225 { 226 {
226 struct drm_info_node *node = (struct drm_info_node *) m->private; 227 struct drm_info_node *node = (struct drm_info_node *) m->private;
227 struct drm_device *dev = node->minor->dev; 228 struct drm_device *dev = node->minor->dev;
228 struct drm_i915_private *dev_priv = dev->dev_private; 229 struct drm_i915_private *dev_priv = dev->dev_private;
229 u32 count, mappable_count; 230 u32 count, mappable_count;
230 size_t size, mappable_size; 231 size_t size, mappable_size;
231 struct drm_i915_gem_object *obj; 232 struct drm_i915_gem_object *obj;
232 int ret; 233 int ret;
233 234
234 ret = mutex_lock_interruptible(&dev->struct_mutex); 235 ret = mutex_lock_interruptible(&dev->struct_mutex);
235 if (ret) 236 if (ret)
236 return ret; 237 return ret;
237 238
238 seq_printf(m, "%u objects, %zu bytes\n", 239 seq_printf(m, "%u objects, %zu bytes\n",
239 dev_priv->mm.object_count, 240 dev_priv->mm.object_count,
240 dev_priv->mm.object_memory); 241 dev_priv->mm.object_memory);
241 242
242 size = count = mappable_size = mappable_count = 0; 243 size = count = mappable_size = mappable_count = 0;
243 count_objects(&dev_priv->mm.gtt_list, gtt_list); 244 count_objects(&dev_priv->mm.gtt_list, gtt_list);
244 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 245 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
245 count, mappable_count, size, mappable_size); 246 count, mappable_count, size, mappable_size);
246 247
247 size = count = mappable_size = mappable_count = 0; 248 size = count = mappable_size = mappable_count = 0;
248 count_objects(&dev_priv->mm.active_list, mm_list); 249 count_objects(&dev_priv->mm.active_list, mm_list);
249 count_objects(&dev_priv->mm.flushing_list, mm_list); 250 count_objects(&dev_priv->mm.flushing_list, mm_list);
250 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 251 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
251 count, mappable_count, size, mappable_size); 252 count, mappable_count, size, mappable_size);
252 253
253 size = count = mappable_size = mappable_count = 0; 254 size = count = mappable_size = mappable_count = 0;
254 count_objects(&dev_priv->mm.pinned_list, mm_list); 255 count_objects(&dev_priv->mm.pinned_list, mm_list);
255 seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n", 256 seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
256 count, mappable_count, size, mappable_size); 257 count, mappable_count, size, mappable_size);
257 258
258 size = count = mappable_size = mappable_count = 0; 259 size = count = mappable_size = mappable_count = 0;
259 count_objects(&dev_priv->mm.inactive_list, mm_list); 260 count_objects(&dev_priv->mm.inactive_list, mm_list);
260 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 261 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
261 count, mappable_count, size, mappable_size); 262 count, mappable_count, size, mappable_size);
262 263
263 size = count = mappable_size = mappable_count = 0; 264 size = count = mappable_size = mappable_count = 0;
264 count_objects(&dev_priv->mm.deferred_free_list, mm_list); 265 count_objects(&dev_priv->mm.deferred_free_list, mm_list);
265 seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n", 266 seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
266 count, mappable_count, size, mappable_size); 267 count, mappable_count, size, mappable_size);
267 268
268 size = count = mappable_size = mappable_count = 0; 269 size = count = mappable_size = mappable_count = 0;
269 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 270 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
270 if (obj->fault_mappable) { 271 if (obj->fault_mappable) {
271 size += obj->gtt_space->size; 272 size += obj->gtt_space->size;
272 ++count; 273 ++count;
273 } 274 }
274 if (obj->pin_mappable) { 275 if (obj->pin_mappable) {
275 mappable_size += obj->gtt_space->size; 276 mappable_size += obj->gtt_space->size;
276 ++mappable_count; 277 ++mappable_count;
277 } 278 }
278 } 279 }
279 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 280 seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
280 mappable_count, mappable_size); 281 mappable_count, mappable_size);
281 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 282 seq_printf(m, "%u fault mappable objects, %zu bytes\n",
282 count, size); 283 count, size);
283 284
284 seq_printf(m, "%zu [%zu] gtt total\n", 285 seq_printf(m, "%zu [%zu] gtt total\n",
285 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); 286 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
286 287
287 mutex_unlock(&dev->struct_mutex); 288 mutex_unlock(&dev->struct_mutex);
288 289
289 return 0; 290 return 0;
290 } 291 }
291 292
292 static int i915_gem_gtt_info(struct seq_file *m, void* data) 293 static int i915_gem_gtt_info(struct seq_file *m, void* data)
293 { 294 {
294 struct drm_info_node *node = (struct drm_info_node *) m->private; 295 struct drm_info_node *node = (struct drm_info_node *) m->private;
295 struct drm_device *dev = node->minor->dev; 296 struct drm_device *dev = node->minor->dev;
296 struct drm_i915_private *dev_priv = dev->dev_private; 297 struct drm_i915_private *dev_priv = dev->dev_private;
297 struct drm_i915_gem_object *obj; 298 struct drm_i915_gem_object *obj;
298 size_t total_obj_size, total_gtt_size; 299 size_t total_obj_size, total_gtt_size;
299 int count, ret; 300 int count, ret;
300 301
301 ret = mutex_lock_interruptible(&dev->struct_mutex); 302 ret = mutex_lock_interruptible(&dev->struct_mutex);
302 if (ret) 303 if (ret)
303 return ret; 304 return ret;
304 305
305 total_obj_size = total_gtt_size = count = 0; 306 total_obj_size = total_gtt_size = count = 0;
306 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 307 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
307 seq_printf(m, " "); 308 seq_printf(m, " ");
308 describe_obj(m, obj); 309 describe_obj(m, obj);
309 seq_printf(m, "\n"); 310 seq_printf(m, "\n");
310 total_obj_size += obj->base.size; 311 total_obj_size += obj->base.size;
311 total_gtt_size += obj->gtt_space->size; 312 total_gtt_size += obj->gtt_space->size;
312 count++; 313 count++;
313 } 314 }
314 315
315 mutex_unlock(&dev->struct_mutex); 316 mutex_unlock(&dev->struct_mutex);
316 317
317 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 318 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
318 count, total_obj_size, total_gtt_size); 319 count, total_obj_size, total_gtt_size);
319 320
320 return 0; 321 return 0;
321 } 322 }
322 323
323 324
324 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 325 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
325 { 326 {
326 struct drm_info_node *node = (struct drm_info_node *) m->private; 327 struct drm_info_node *node = (struct drm_info_node *) m->private;
327 struct drm_device *dev = node->minor->dev; 328 struct drm_device *dev = node->minor->dev;
328 unsigned long flags; 329 unsigned long flags;
329 struct intel_crtc *crtc; 330 struct intel_crtc *crtc;
330 331
331 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 332 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
332 const char pipe = pipe_name(crtc->pipe); 333 const char pipe = pipe_name(crtc->pipe);
333 const char plane = plane_name(crtc->plane); 334 const char plane = plane_name(crtc->plane);
334 struct intel_unpin_work *work; 335 struct intel_unpin_work *work;
335 336
336 spin_lock_irqsave(&dev->event_lock, flags); 337 spin_lock_irqsave(&dev->event_lock, flags);
337 work = crtc->unpin_work; 338 work = crtc->unpin_work;
338 if (work == NULL) { 339 if (work == NULL) {
339 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 340 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
340 pipe, plane); 341 pipe, plane);
341 } else { 342 } else {
342 if (!work->pending) { 343 if (!work->pending) {
343 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 344 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
344 pipe, plane); 345 pipe, plane);
345 } else { 346 } else {
346 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 347 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
347 pipe, plane); 348 pipe, plane);
348 } 349 }
349 if (work->enable_stall_check) 350 if (work->enable_stall_check)
350 seq_printf(m, "Stall check enabled, "); 351 seq_printf(m, "Stall check enabled, ");
351 else 352 else
352 seq_printf(m, "Stall check waiting for page flip ioctl, "); 353 seq_printf(m, "Stall check waiting for page flip ioctl, ");
353 seq_printf(m, "%d prepares\n", work->pending); 354 seq_printf(m, "%d prepares\n", work->pending);
354 355
355 if (work->old_fb_obj) { 356 if (work->old_fb_obj) {
356 struct drm_i915_gem_object *obj = work->old_fb_obj; 357 struct drm_i915_gem_object *obj = work->old_fb_obj;
357 if (obj) 358 if (obj)
358 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 359 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
359 } 360 }
360 if (work->pending_flip_obj) { 361 if (work->pending_flip_obj) {
361 struct drm_i915_gem_object *obj = work->pending_flip_obj; 362 struct drm_i915_gem_object *obj = work->pending_flip_obj;
362 if (obj) 363 if (obj)
363 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 364 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
364 } 365 }
365 } 366 }
366 spin_unlock_irqrestore(&dev->event_lock, flags); 367 spin_unlock_irqrestore(&dev->event_lock, flags);
367 } 368 }
368 369
369 return 0; 370 return 0;
370 } 371 }
371 372
372 static int i915_gem_request_info(struct seq_file *m, void *data) 373 static int i915_gem_request_info(struct seq_file *m, void *data)
373 { 374 {
374 struct drm_info_node *node = (struct drm_info_node *) m->private; 375 struct drm_info_node *node = (struct drm_info_node *) m->private;
375 struct drm_device *dev = node->minor->dev; 376 struct drm_device *dev = node->minor->dev;
376 drm_i915_private_t *dev_priv = dev->dev_private; 377 drm_i915_private_t *dev_priv = dev->dev_private;
377 struct drm_i915_gem_request *gem_request; 378 struct drm_i915_gem_request *gem_request;
378 int ret, count; 379 int ret, count;
379 380
380 ret = mutex_lock_interruptible(&dev->struct_mutex); 381 ret = mutex_lock_interruptible(&dev->struct_mutex);
381 if (ret) 382 if (ret)
382 return ret; 383 return ret;
383 384
384 count = 0; 385 count = 0;
385 if (!list_empty(&dev_priv->ring[RCS].request_list)) { 386 if (!list_empty(&dev_priv->ring[RCS].request_list)) {
386 seq_printf(m, "Render requests:\n"); 387 seq_printf(m, "Render requests:\n");
387 list_for_each_entry(gem_request, 388 list_for_each_entry(gem_request,
388 &dev_priv->ring[RCS].request_list, 389 &dev_priv->ring[RCS].request_list,
389 list) { 390 list) {
390 seq_printf(m, " %d @ %d\n", 391 seq_printf(m, " %d @ %d\n",
391 gem_request->seqno, 392 gem_request->seqno,
392 (int) (jiffies - gem_request->emitted_jiffies)); 393 (int) (jiffies - gem_request->emitted_jiffies));
393 } 394 }
394 count++; 395 count++;
395 } 396 }
396 if (!list_empty(&dev_priv->ring[VCS].request_list)) { 397 if (!list_empty(&dev_priv->ring[VCS].request_list)) {
397 seq_printf(m, "BSD requests:\n"); 398 seq_printf(m, "BSD requests:\n");
398 list_for_each_entry(gem_request, 399 list_for_each_entry(gem_request,
399 &dev_priv->ring[VCS].request_list, 400 &dev_priv->ring[VCS].request_list,
400 list) { 401 list) {
401 seq_printf(m, " %d @ %d\n", 402 seq_printf(m, " %d @ %d\n",
402 gem_request->seqno, 403 gem_request->seqno,
403 (int) (jiffies - gem_request->emitted_jiffies)); 404 (int) (jiffies - gem_request->emitted_jiffies));
404 } 405 }
405 count++; 406 count++;
406 } 407 }
407 if (!list_empty(&dev_priv->ring[BCS].request_list)) { 408 if (!list_empty(&dev_priv->ring[BCS].request_list)) {
408 seq_printf(m, "BLT requests:\n"); 409 seq_printf(m, "BLT requests:\n");
409 list_for_each_entry(gem_request, 410 list_for_each_entry(gem_request,
410 &dev_priv->ring[BCS].request_list, 411 &dev_priv->ring[BCS].request_list,
411 list) { 412 list) {
412 seq_printf(m, " %d @ %d\n", 413 seq_printf(m, " %d @ %d\n",
413 gem_request->seqno, 414 gem_request->seqno,
414 (int) (jiffies - gem_request->emitted_jiffies)); 415 (int) (jiffies - gem_request->emitted_jiffies));
415 } 416 }
416 count++; 417 count++;
417 } 418 }
418 mutex_unlock(&dev->struct_mutex); 419 mutex_unlock(&dev->struct_mutex);
419 420
420 if (count == 0) 421 if (count == 0)
421 seq_printf(m, "No requests\n"); 422 seq_printf(m, "No requests\n");
422 423
423 return 0; 424 return 0;
424 } 425 }
425 426
426 static void i915_ring_seqno_info(struct seq_file *m, 427 static void i915_ring_seqno_info(struct seq_file *m,
427 struct intel_ring_buffer *ring) 428 struct intel_ring_buffer *ring)
428 { 429 {
429 if (ring->get_seqno) { 430 if (ring->get_seqno) {
430 seq_printf(m, "Current sequence (%s): %d\n", 431 seq_printf(m, "Current sequence (%s): %d\n",
431 ring->name, ring->get_seqno(ring)); 432 ring->name, ring->get_seqno(ring));
432 seq_printf(m, "Waiter sequence (%s): %d\n", 433 seq_printf(m, "Waiter sequence (%s): %d\n",
433 ring->name, ring->waiting_seqno); 434 ring->name, ring->waiting_seqno);
434 seq_printf(m, "IRQ sequence (%s): %d\n", 435 seq_printf(m, "IRQ sequence (%s): %d\n",
435 ring->name, ring->irq_seqno); 436 ring->name, ring->irq_seqno);
436 } 437 }
437 } 438 }
438 439
439 static int i915_gem_seqno_info(struct seq_file *m, void *data) 440 static int i915_gem_seqno_info(struct seq_file *m, void *data)
440 { 441 {
441 struct drm_info_node *node = (struct drm_info_node *) m->private; 442 struct drm_info_node *node = (struct drm_info_node *) m->private;
442 struct drm_device *dev = node->minor->dev; 443 struct drm_device *dev = node->minor->dev;
443 drm_i915_private_t *dev_priv = dev->dev_private; 444 drm_i915_private_t *dev_priv = dev->dev_private;
444 int ret, i; 445 int ret, i;
445 446
446 ret = mutex_lock_interruptible(&dev->struct_mutex); 447 ret = mutex_lock_interruptible(&dev->struct_mutex);
447 if (ret) 448 if (ret)
448 return ret; 449 return ret;
449 450
450 for (i = 0; i < I915_NUM_RINGS; i++) 451 for (i = 0; i < I915_NUM_RINGS; i++)
451 i915_ring_seqno_info(m, &dev_priv->ring[i]); 452 i915_ring_seqno_info(m, &dev_priv->ring[i]);
452 453
453 mutex_unlock(&dev->struct_mutex); 454 mutex_unlock(&dev->struct_mutex);
454 455
455 return 0; 456 return 0;
456 } 457 }
457 458
458 459
459 static int i915_interrupt_info(struct seq_file *m, void *data) 460 static int i915_interrupt_info(struct seq_file *m, void *data)
460 { 461 {
461 struct drm_info_node *node = (struct drm_info_node *) m->private; 462 struct drm_info_node *node = (struct drm_info_node *) m->private;
462 struct drm_device *dev = node->minor->dev; 463 struct drm_device *dev = node->minor->dev;
463 drm_i915_private_t *dev_priv = dev->dev_private; 464 drm_i915_private_t *dev_priv = dev->dev_private;
464 int ret, i, pipe; 465 int ret, i, pipe;
465 466
466 ret = mutex_lock_interruptible(&dev->struct_mutex); 467 ret = mutex_lock_interruptible(&dev->struct_mutex);
467 if (ret) 468 if (ret)
468 return ret; 469 return ret;
469 470
470 if (!HAS_PCH_SPLIT(dev)) { 471 if (!HAS_PCH_SPLIT(dev)) {
471 seq_printf(m, "Interrupt enable: %08x\n", 472 seq_printf(m, "Interrupt enable: %08x\n",
472 I915_READ(IER)); 473 I915_READ(IER));
473 seq_printf(m, "Interrupt identity: %08x\n", 474 seq_printf(m, "Interrupt identity: %08x\n",
474 I915_READ(IIR)); 475 I915_READ(IIR));
475 seq_printf(m, "Interrupt mask: %08x\n", 476 seq_printf(m, "Interrupt mask: %08x\n",
476 I915_READ(IMR)); 477 I915_READ(IMR));
477 for_each_pipe(pipe) 478 for_each_pipe(pipe)
478 seq_printf(m, "Pipe %c stat: %08x\n", 479 seq_printf(m, "Pipe %c stat: %08x\n",
479 pipe_name(pipe), 480 pipe_name(pipe),
480 I915_READ(PIPESTAT(pipe))); 481 I915_READ(PIPESTAT(pipe)));
481 } else { 482 } else {
482 seq_printf(m, "North Display Interrupt enable: %08x\n", 483 seq_printf(m, "North Display Interrupt enable: %08x\n",
483 I915_READ(DEIER)); 484 I915_READ(DEIER));
484 seq_printf(m, "North Display Interrupt identity: %08x\n", 485 seq_printf(m, "North Display Interrupt identity: %08x\n",
485 I915_READ(DEIIR)); 486 I915_READ(DEIIR));
486 seq_printf(m, "North Display Interrupt mask: %08x\n", 487 seq_printf(m, "North Display Interrupt mask: %08x\n",
487 I915_READ(DEIMR)); 488 I915_READ(DEIMR));
488 seq_printf(m, "South Display Interrupt enable: %08x\n", 489 seq_printf(m, "South Display Interrupt enable: %08x\n",
489 I915_READ(SDEIER)); 490 I915_READ(SDEIER));
490 seq_printf(m, "South Display Interrupt identity: %08x\n", 491 seq_printf(m, "South Display Interrupt identity: %08x\n",
491 I915_READ(SDEIIR)); 492 I915_READ(SDEIIR));
492 seq_printf(m, "South Display Interrupt mask: %08x\n", 493 seq_printf(m, "South Display Interrupt mask: %08x\n",
493 I915_READ(SDEIMR)); 494 I915_READ(SDEIMR));
494 seq_printf(m, "Graphics Interrupt enable: %08x\n", 495 seq_printf(m, "Graphics Interrupt enable: %08x\n",
495 I915_READ(GTIER)); 496 I915_READ(GTIER));
496 seq_printf(m, "Graphics Interrupt identity: %08x\n", 497 seq_printf(m, "Graphics Interrupt identity: %08x\n",
497 I915_READ(GTIIR)); 498 I915_READ(GTIIR));
498 seq_printf(m, "Graphics Interrupt mask: %08x\n", 499 seq_printf(m, "Graphics Interrupt mask: %08x\n",
499 I915_READ(GTIMR)); 500 I915_READ(GTIMR));
500 } 501 }
501 seq_printf(m, "Interrupts received: %d\n", 502 seq_printf(m, "Interrupts received: %d\n",
502 atomic_read(&dev_priv->irq_received)); 503 atomic_read(&dev_priv->irq_received));
503 for (i = 0; i < I915_NUM_RINGS; i++) { 504 for (i = 0; i < I915_NUM_RINGS; i++) {
504 if (IS_GEN6(dev) || IS_GEN7(dev)) { 505 if (IS_GEN6(dev) || IS_GEN7(dev)) {
505 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", 506 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
506 dev_priv->ring[i].name, 507 dev_priv->ring[i].name,
507 I915_READ_IMR(&dev_priv->ring[i])); 508 I915_READ_IMR(&dev_priv->ring[i]));
508 } 509 }
509 i915_ring_seqno_info(m, &dev_priv->ring[i]); 510 i915_ring_seqno_info(m, &dev_priv->ring[i]);
510 } 511 }
511 mutex_unlock(&dev->struct_mutex); 512 mutex_unlock(&dev->struct_mutex);
512 513
513 return 0; 514 return 0;
514 } 515 }
515 516
516 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 517 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
517 { 518 {
518 struct drm_info_node *node = (struct drm_info_node *) m->private; 519 struct drm_info_node *node = (struct drm_info_node *) m->private;
519 struct drm_device *dev = node->minor->dev; 520 struct drm_device *dev = node->minor->dev;
520 drm_i915_private_t *dev_priv = dev->dev_private; 521 drm_i915_private_t *dev_priv = dev->dev_private;
521 int i, ret; 522 int i, ret;
522 523
523 ret = mutex_lock_interruptible(&dev->struct_mutex); 524 ret = mutex_lock_interruptible(&dev->struct_mutex);
524 if (ret) 525 if (ret)
525 return ret; 526 return ret;
526 527
527 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 528 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
528 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 529 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
529 for (i = 0; i < dev_priv->num_fence_regs; i++) { 530 for (i = 0; i < dev_priv->num_fence_regs; i++) {
530 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 531 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
531 532
532 seq_printf(m, "Fenced object[%2d] = ", i); 533 seq_printf(m, "Fenced object[%2d] = ", i);
533 if (obj == NULL) 534 if (obj == NULL)
534 seq_printf(m, "unused"); 535 seq_printf(m, "unused");
535 else 536 else
536 describe_obj(m, obj); 537 describe_obj(m, obj);
537 seq_printf(m, "\n"); 538 seq_printf(m, "\n");
538 } 539 }
539 540
540 mutex_unlock(&dev->struct_mutex); 541 mutex_unlock(&dev->struct_mutex);
541 return 0; 542 return 0;
542 } 543 }
543 544
544 static int i915_hws_info(struct seq_file *m, void *data) 545 static int i915_hws_info(struct seq_file *m, void *data)
545 { 546 {
546 struct drm_info_node *node = (struct drm_info_node *) m->private; 547 struct drm_info_node *node = (struct drm_info_node *) m->private;
547 struct drm_device *dev = node->minor->dev; 548 struct drm_device *dev = node->minor->dev;
548 drm_i915_private_t *dev_priv = dev->dev_private; 549 drm_i915_private_t *dev_priv = dev->dev_private;
549 struct intel_ring_buffer *ring; 550 struct intel_ring_buffer *ring;
550 const volatile u32 __iomem *hws; 551 const volatile u32 __iomem *hws;
551 int i; 552 int i;
552 553
553 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 554 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
554 hws = (volatile u32 __iomem *)ring->status_page.page_addr; 555 hws = (volatile u32 __iomem *)ring->status_page.page_addr;
555 if (hws == NULL) 556 if (hws == NULL)
556 return 0; 557 return 0;
557 558
558 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 559 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
559 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 560 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
560 i * 4, 561 i * 4,
561 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 562 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
562 } 563 }
563 return 0; 564 return 0;
564 } 565 }
565 566
566 static void i915_dump_object(struct seq_file *m, 567 static void i915_dump_object(struct seq_file *m,
567 struct io_mapping *mapping, 568 struct io_mapping *mapping,
568 struct drm_i915_gem_object *obj) 569 struct drm_i915_gem_object *obj)
569 { 570 {
570 int page, page_count, i; 571 int page, page_count, i;
571 572
572 page_count = obj->base.size / PAGE_SIZE; 573 page_count = obj->base.size / PAGE_SIZE;
573 for (page = 0; page < page_count; page++) { 574 for (page = 0; page < page_count; page++) {
574 u32 *mem = io_mapping_map_wc(mapping, 575 u32 *mem = io_mapping_map_wc(mapping,
575 obj->gtt_offset + page * PAGE_SIZE); 576 obj->gtt_offset + page * PAGE_SIZE);
576 for (i = 0; i < PAGE_SIZE; i += 4) 577 for (i = 0; i < PAGE_SIZE; i += 4)
577 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); 578 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
578 io_mapping_unmap(mem); 579 io_mapping_unmap(mem);
579 } 580 }
580 } 581 }
581 582
582 static int i915_batchbuffer_info(struct seq_file *m, void *data) 583 static int i915_batchbuffer_info(struct seq_file *m, void *data)
583 { 584 {
584 struct drm_info_node *node = (struct drm_info_node *) m->private; 585 struct drm_info_node *node = (struct drm_info_node *) m->private;
585 struct drm_device *dev = node->minor->dev; 586 struct drm_device *dev = node->minor->dev;
586 drm_i915_private_t *dev_priv = dev->dev_private; 587 drm_i915_private_t *dev_priv = dev->dev_private;
587 struct drm_i915_gem_object *obj; 588 struct drm_i915_gem_object *obj;
588 int ret; 589 int ret;
589 590
590 ret = mutex_lock_interruptible(&dev->struct_mutex); 591 ret = mutex_lock_interruptible(&dev->struct_mutex);
591 if (ret) 592 if (ret)
592 return ret; 593 return ret;
593 594
594 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 595 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
595 if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) { 596 if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
596 seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); 597 seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
597 i915_dump_object(m, dev_priv->mm.gtt_mapping, obj); 598 i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
598 } 599 }
599 } 600 }
600 601
601 mutex_unlock(&dev->struct_mutex); 602 mutex_unlock(&dev->struct_mutex);
602 return 0; 603 return 0;
603 } 604 }
604 605
605 static int i915_ringbuffer_data(struct seq_file *m, void *data) 606 static int i915_ringbuffer_data(struct seq_file *m, void *data)
606 { 607 {
607 struct drm_info_node *node = (struct drm_info_node *) m->private; 608 struct drm_info_node *node = (struct drm_info_node *) m->private;
608 struct drm_device *dev = node->minor->dev; 609 struct drm_device *dev = node->minor->dev;
609 drm_i915_private_t *dev_priv = dev->dev_private; 610 drm_i915_private_t *dev_priv = dev->dev_private;
610 struct intel_ring_buffer *ring; 611 struct intel_ring_buffer *ring;
611 int ret; 612 int ret;
612 613
613 ret = mutex_lock_interruptible(&dev->struct_mutex); 614 ret = mutex_lock_interruptible(&dev->struct_mutex);
614 if (ret) 615 if (ret)
615 return ret; 616 return ret;
616 617
617 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 618 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
618 if (!ring->obj) { 619 if (!ring->obj) {
619 seq_printf(m, "No ringbuffer setup\n"); 620 seq_printf(m, "No ringbuffer setup\n");
620 } else { 621 } else {
621 const u8 __iomem *virt = ring->virtual_start; 622 const u8 __iomem *virt = ring->virtual_start;
622 uint32_t off; 623 uint32_t off;
623 624
624 for (off = 0; off < ring->size; off += 4) { 625 for (off = 0; off < ring->size; off += 4) {
625 uint32_t *ptr = (uint32_t *)(virt + off); 626 uint32_t *ptr = (uint32_t *)(virt + off);
626 seq_printf(m, "%08x : %08x\n", off, *ptr); 627 seq_printf(m, "%08x : %08x\n", off, *ptr);
627 } 628 }
628 } 629 }
629 mutex_unlock(&dev->struct_mutex); 630 mutex_unlock(&dev->struct_mutex);
630 631
631 return 0; 632 return 0;
632 } 633 }
633 634
634 static int i915_ringbuffer_info(struct seq_file *m, void *data) 635 static int i915_ringbuffer_info(struct seq_file *m, void *data)
635 { 636 {
636 struct drm_info_node *node = (struct drm_info_node *) m->private; 637 struct drm_info_node *node = (struct drm_info_node *) m->private;
637 struct drm_device *dev = node->minor->dev; 638 struct drm_device *dev = node->minor->dev;
638 drm_i915_private_t *dev_priv = dev->dev_private; 639 drm_i915_private_t *dev_priv = dev->dev_private;
639 struct intel_ring_buffer *ring; 640 struct intel_ring_buffer *ring;
640 int ret; 641 int ret;
641 642
642 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 643 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
643 if (ring->size == 0) 644 if (ring->size == 0)
644 return 0; 645 return 0;
645 646
646 ret = mutex_lock_interruptible(&dev->struct_mutex); 647 ret = mutex_lock_interruptible(&dev->struct_mutex);
647 if (ret) 648 if (ret)
648 return ret; 649 return ret;
649 650
650 seq_printf(m, "Ring %s:\n", ring->name); 651 seq_printf(m, "Ring %s:\n", ring->name);
651 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); 652 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
652 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); 653 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
653 seq_printf(m, " Size : %08x\n", ring->size); 654 seq_printf(m, " Size : %08x\n", ring->size);
654 seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring)); 655 seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
655 seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring)); 656 seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
656 if (IS_GEN6(dev)) { 657 if (IS_GEN6(dev)) {
657 seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring)); 658 seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
658 seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring)); 659 seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
659 } 660 }
660 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); 661 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
661 seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); 662 seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
662 663
663 mutex_unlock(&dev->struct_mutex); 664 mutex_unlock(&dev->struct_mutex);
664 665
665 return 0; 666 return 0;
666 } 667 }
667 668
668 static const char *ring_str(int ring) 669 static const char *ring_str(int ring)
669 { 670 {
670 switch (ring) { 671 switch (ring) {
671 case RING_RENDER: return " render"; 672 case RING_RENDER: return " render";
672 case RING_BSD: return " bsd"; 673 case RING_BSD: return " bsd";
673 case RING_BLT: return " blt"; 674 case RING_BLT: return " blt";
674 default: return ""; 675 default: return "";
675 } 676 }
676 } 677 }
677 678
678 static const char *pin_flag(int pinned) 679 static const char *pin_flag(int pinned)
679 { 680 {
680 if (pinned > 0) 681 if (pinned > 0)
681 return " P"; 682 return " P";
682 else if (pinned < 0) 683 else if (pinned < 0)
683 return " p"; 684 return " p";
684 else 685 else
685 return ""; 686 return "";
686 } 687 }
687 688
688 static const char *tiling_flag(int tiling) 689 static const char *tiling_flag(int tiling)
689 { 690 {
690 switch (tiling) { 691 switch (tiling) {
691 default: 692 default:
692 case I915_TILING_NONE: return ""; 693 case I915_TILING_NONE: return "";
693 case I915_TILING_X: return " X"; 694 case I915_TILING_X: return " X";
694 case I915_TILING_Y: return " Y"; 695 case I915_TILING_Y: return " Y";
695 } 696 }
696 } 697 }
697 698
698 static const char *dirty_flag(int dirty) 699 static const char *dirty_flag(int dirty)
699 { 700 {
700 return dirty ? " dirty" : ""; 701 return dirty ? " dirty" : "";
701 } 702 }
702 703
703 static const char *purgeable_flag(int purgeable) 704 static const char *purgeable_flag(int purgeable)
704 { 705 {
705 return purgeable ? " purgeable" : ""; 706 return purgeable ? " purgeable" : "";
706 } 707 }
707 708
708 static void print_error_buffers(struct seq_file *m, 709 static void print_error_buffers(struct seq_file *m,
709 const char *name, 710 const char *name,
710 struct drm_i915_error_buffer *err, 711 struct drm_i915_error_buffer *err,
711 int count) 712 int count)
712 { 713 {
713 seq_printf(m, "%s [%d]:\n", name, count); 714 seq_printf(m, "%s [%d]:\n", name, count);
714 715
715 while (count--) { 716 while (count--) {
716 seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s", 717 seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s",
717 err->gtt_offset, 718 err->gtt_offset,
718 err->size, 719 err->size,
719 err->read_domains, 720 err->read_domains,
720 err->write_domain, 721 err->write_domain,
721 err->seqno, 722 err->seqno,
722 pin_flag(err->pinned), 723 pin_flag(err->pinned),
723 tiling_flag(err->tiling), 724 tiling_flag(err->tiling),
724 dirty_flag(err->dirty), 725 dirty_flag(err->dirty),
725 purgeable_flag(err->purgeable), 726 purgeable_flag(err->purgeable),
726 ring_str(err->ring), 727 ring_str(err->ring),
727 cache_level_str(err->cache_level)); 728 cache_level_str(err->cache_level));
728 729
729 if (err->name) 730 if (err->name)
730 seq_printf(m, " (name: %d)", err->name); 731 seq_printf(m, " (name: %d)", err->name);
731 if (err->fence_reg != I915_FENCE_REG_NONE) 732 if (err->fence_reg != I915_FENCE_REG_NONE)
732 seq_printf(m, " (fence: %d)", err->fence_reg); 733 seq_printf(m, " (fence: %d)", err->fence_reg);
733 734
734 seq_printf(m, "\n"); 735 seq_printf(m, "\n");
735 err++; 736 err++;
736 } 737 }
737 } 738 }
738 739
739 static int i915_error_state(struct seq_file *m, void *unused) 740 static int i915_error_state(struct seq_file *m, void *unused)
740 { 741 {
741 struct drm_info_node *node = (struct drm_info_node *) m->private; 742 struct drm_info_node *node = (struct drm_info_node *) m->private;
742 struct drm_device *dev = node->minor->dev; 743 struct drm_device *dev = node->minor->dev;
743 drm_i915_private_t *dev_priv = dev->dev_private; 744 drm_i915_private_t *dev_priv = dev->dev_private;
744 struct drm_i915_error_state *error; 745 struct drm_i915_error_state *error;
745 unsigned long flags; 746 unsigned long flags;
746 int i, page, offset, elt; 747 int i, page, offset, elt;
747 748
748 spin_lock_irqsave(&dev_priv->error_lock, flags); 749 spin_lock_irqsave(&dev_priv->error_lock, flags);
749 if (!dev_priv->first_error) { 750 if (!dev_priv->first_error) {
750 seq_printf(m, "no error state collected\n"); 751 seq_printf(m, "no error state collected\n");
751 goto out; 752 goto out;
752 } 753 }
753 754
754 error = dev_priv->first_error; 755 error = dev_priv->first_error;
755 756
756 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 757 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
757 error->time.tv_usec); 758 error->time.tv_usec);
758 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 759 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
759 seq_printf(m, "EIR: 0x%08x\n", error->eir); 760 seq_printf(m, "EIR: 0x%08x\n", error->eir);
760 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 761 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
761 if (INTEL_INFO(dev)->gen >= 6) { 762 if (INTEL_INFO(dev)->gen >= 6) {
762 seq_printf(m, "ERROR: 0x%08x\n", error->error); 763 seq_printf(m, "ERROR: 0x%08x\n", error->error);
763 seq_printf(m, "Blitter command stream:\n"); 764 seq_printf(m, "Blitter command stream:\n");
764 seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd); 765 seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
765 seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); 766 seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
766 seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); 767 seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
767 seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); 768 seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
768 seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); 769 seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
769 seq_printf(m, "Video (BSD) command stream:\n"); 770 seq_printf(m, "Video (BSD) command stream:\n");
770 seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd); 771 seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
771 seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir); 772 seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
772 seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); 773 seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
773 seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone); 774 seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
774 seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno); 775 seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
775 } 776 }
776 seq_printf(m, "Render command stream:\n"); 777 seq_printf(m, "Render command stream:\n");
777 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); 778 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
778 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); 779 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
779 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); 780 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
780 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); 781 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
781 if (INTEL_INFO(dev)->gen >= 4) { 782 if (INTEL_INFO(dev)->gen >= 4) {
782 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); 783 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
783 seq_printf(m, " INSTPS: 0x%08x\n", error->instps); 784 seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
784 } 785 }
785 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); 786 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
786 seq_printf(m, " seqno: 0x%08x\n", error->seqno); 787 seq_printf(m, " seqno: 0x%08x\n", error->seqno);
787 788
788 for (i = 0; i < dev_priv->num_fence_regs; i++) 789 for (i = 0; i < dev_priv->num_fence_regs; i++)
789 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 790 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
790 791
791 if (error->active_bo) 792 if (error->active_bo)
792 print_error_buffers(m, "Active", 793 print_error_buffers(m, "Active",
793 error->active_bo, 794 error->active_bo,
794 error->active_bo_count); 795 error->active_bo_count);
795 796
796 if (error->pinned_bo) 797 if (error->pinned_bo)
797 print_error_buffers(m, "Pinned", 798 print_error_buffers(m, "Pinned",
798 error->pinned_bo, 799 error->pinned_bo,
799 error->pinned_bo_count); 800 error->pinned_bo_count);
800 801
801 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { 802 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
802 if (error->batchbuffer[i]) { 803 if (error->batchbuffer[i]) {
803 struct drm_i915_error_object *obj = error->batchbuffer[i]; 804 struct drm_i915_error_object *obj = error->batchbuffer[i];
804 805
805 seq_printf(m, "%s --- gtt_offset = 0x%08x\n", 806 seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
806 dev_priv->ring[i].name, 807 dev_priv->ring[i].name,
807 obj->gtt_offset); 808 obj->gtt_offset);
808 offset = 0; 809 offset = 0;
809 for (page = 0; page < obj->page_count; page++) { 810 for (page = 0; page < obj->page_count; page++) {
810 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 811 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
811 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); 812 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
812 offset += 4; 813 offset += 4;
813 } 814 }
814 } 815 }
815 } 816 }
816 } 817 }
817 818
818 for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) { 819 for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
819 if (error->ringbuffer[i]) { 820 if (error->ringbuffer[i]) {
820 struct drm_i915_error_object *obj = error->ringbuffer[i]; 821 struct drm_i915_error_object *obj = error->ringbuffer[i];
821 seq_printf(m, "%s --- ringbuffer = 0x%08x\n", 822 seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
822 dev_priv->ring[i].name, 823 dev_priv->ring[i].name,
823 obj->gtt_offset); 824 obj->gtt_offset);
824 offset = 0; 825 offset = 0;
825 for (page = 0; page < obj->page_count; page++) { 826 for (page = 0; page < obj->page_count; page++) {
826 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 827 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
827 seq_printf(m, "%08x : %08x\n", 828 seq_printf(m, "%08x : %08x\n",
828 offset, 829 offset,
829 obj->pages[page][elt]); 830 obj->pages[page][elt]);
830 offset += 4; 831 offset += 4;
831 } 832 }
832 } 833 }
833 } 834 }
834 } 835 }
835 836
836 if (error->overlay) 837 if (error->overlay)
837 intel_overlay_print_error_state(m, error->overlay); 838 intel_overlay_print_error_state(m, error->overlay);
838 839
839 if (error->display) 840 if (error->display)
840 intel_display_print_error_state(m, dev, error->display); 841 intel_display_print_error_state(m, dev, error->display);
841 842
842 out: 843 out:
843 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 844 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
844 845
845 return 0; 846 return 0;
846 } 847 }
847 848
848 static int i915_rstdby_delays(struct seq_file *m, void *unused) 849 static int i915_rstdby_delays(struct seq_file *m, void *unused)
849 { 850 {
850 struct drm_info_node *node = (struct drm_info_node *) m->private; 851 struct drm_info_node *node = (struct drm_info_node *) m->private;
851 struct drm_device *dev = node->minor->dev; 852 struct drm_device *dev = node->minor->dev;
852 drm_i915_private_t *dev_priv = dev->dev_private; 853 drm_i915_private_t *dev_priv = dev->dev_private;
853 u16 crstanddelay; 854 u16 crstanddelay;
854 int ret; 855 int ret;
855 856
856 ret = mutex_lock_interruptible(&dev->struct_mutex); 857 ret = mutex_lock_interruptible(&dev->struct_mutex);
857 if (ret) 858 if (ret)
858 return ret; 859 return ret;
859 860
860 crstanddelay = I915_READ16(CRSTANDVID); 861 crstanddelay = I915_READ16(CRSTANDVID);
861 862
862 mutex_unlock(&dev->struct_mutex); 863 mutex_unlock(&dev->struct_mutex);
863 864
864 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 865 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
865 866
866 return 0; 867 return 0;
867 } 868 }
868 869
869 static int i915_cur_delayinfo(struct seq_file *m, void *unused) 870 static int i915_cur_delayinfo(struct seq_file *m, void *unused)
870 { 871 {
871 struct drm_info_node *node = (struct drm_info_node *) m->private; 872 struct drm_info_node *node = (struct drm_info_node *) m->private;
872 struct drm_device *dev = node->minor->dev; 873 struct drm_device *dev = node->minor->dev;
873 drm_i915_private_t *dev_priv = dev->dev_private; 874 drm_i915_private_t *dev_priv = dev->dev_private;
874 int ret; 875 int ret;
875 876
876 if (IS_GEN5(dev)) { 877 if (IS_GEN5(dev)) {
877 u16 rgvswctl = I915_READ16(MEMSWCTL); 878 u16 rgvswctl = I915_READ16(MEMSWCTL);
878 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 879 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
879 880
880 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 881 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
881 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 882 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
882 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 883 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
883 MEMSTAT_VID_SHIFT); 884 MEMSTAT_VID_SHIFT);
884 seq_printf(m, "Current P-state: %d\n", 885 seq_printf(m, "Current P-state: %d\n",
885 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 886 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
886 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 887 } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
887 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 888 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
888 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 889 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
889 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 890 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
890 u32 rpstat; 891 u32 rpstat;
891 u32 rpupei, rpcurup, rpprevup; 892 u32 rpupei, rpcurup, rpprevup;
892 u32 rpdownei, rpcurdown, rpprevdown; 893 u32 rpdownei, rpcurdown, rpprevdown;
893 int max_freq; 894 int max_freq;
894 895
895 /* RPSTAT1 is in the GT power well */ 896 /* RPSTAT1 is in the GT power well */
896 ret = mutex_lock_interruptible(&dev->struct_mutex); 897 ret = mutex_lock_interruptible(&dev->struct_mutex);
897 if (ret) 898 if (ret)
898 return ret; 899 return ret;
899 900
900 gen6_gt_force_wake_get(dev_priv); 901 gen6_gt_force_wake_get(dev_priv);
901 902
902 rpstat = I915_READ(GEN6_RPSTAT1); 903 rpstat = I915_READ(GEN6_RPSTAT1);
903 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 904 rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
904 rpcurup = I915_READ(GEN6_RP_CUR_UP); 905 rpcurup = I915_READ(GEN6_RP_CUR_UP);
905 rpprevup = I915_READ(GEN6_RP_PREV_UP); 906 rpprevup = I915_READ(GEN6_RP_PREV_UP);
906 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 907 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
907 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 908 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
908 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 909 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
909 910
910 gen6_gt_force_wake_put(dev_priv); 911 gen6_gt_force_wake_put(dev_priv);
911 mutex_unlock(&dev->struct_mutex); 912 mutex_unlock(&dev->struct_mutex);
912 913
913 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 914 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
914 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 915 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
915 seq_printf(m, "Render p-state ratio: %d\n", 916 seq_printf(m, "Render p-state ratio: %d\n",
916 (gt_perf_status & 0xff00) >> 8); 917 (gt_perf_status & 0xff00) >> 8);
917 seq_printf(m, "Render p-state VID: %d\n", 918 seq_printf(m, "Render p-state VID: %d\n",
918 gt_perf_status & 0xff); 919 gt_perf_status & 0xff);
919 seq_printf(m, "Render p-state limit: %d\n", 920 seq_printf(m, "Render p-state limit: %d\n",
920 rp_state_limits & 0xff); 921 rp_state_limits & 0xff);
921 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> 922 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
922 GEN6_CAGF_SHIFT) * 50); 923 GEN6_CAGF_SHIFT) * 50);
923 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 924 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
924 GEN6_CURICONT_MASK); 925 GEN6_CURICONT_MASK);
925 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 926 seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
926 GEN6_CURBSYTAVG_MASK); 927 GEN6_CURBSYTAVG_MASK);
927 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 928 seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
928 GEN6_CURBSYTAVG_MASK); 929 GEN6_CURBSYTAVG_MASK);
929 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 930 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
930 GEN6_CURIAVG_MASK); 931 GEN6_CURIAVG_MASK);
931 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 932 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
932 GEN6_CURBSYTAVG_MASK); 933 GEN6_CURBSYTAVG_MASK);
933 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 934 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
934 GEN6_CURBSYTAVG_MASK); 935 GEN6_CURBSYTAVG_MASK);
935 936
936 max_freq = (rp_state_cap & 0xff0000) >> 16; 937 max_freq = (rp_state_cap & 0xff0000) >> 16;
937 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 938 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
938 max_freq * 50); 939 max_freq * 50);
939 940
940 max_freq = (rp_state_cap & 0xff00) >> 8; 941 max_freq = (rp_state_cap & 0xff00) >> 8;
941 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 942 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
942 max_freq * 50); 943 max_freq * 50);
943 944
944 max_freq = rp_state_cap & 0xff; 945 max_freq = rp_state_cap & 0xff;
945 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 946 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
946 max_freq * 50); 947 max_freq * 50);
947 } else { 948 } else {
948 seq_printf(m, "no P-state info available\n"); 949 seq_printf(m, "no P-state info available\n");
949 } 950 }
950 951
951 return 0; 952 return 0;
952 } 953 }
953 954
954 static int i915_delayfreq_table(struct seq_file *m, void *unused) 955 static int i915_delayfreq_table(struct seq_file *m, void *unused)
955 { 956 {
956 struct drm_info_node *node = (struct drm_info_node *) m->private; 957 struct drm_info_node *node = (struct drm_info_node *) m->private;
957 struct drm_device *dev = node->minor->dev; 958 struct drm_device *dev = node->minor->dev;
958 drm_i915_private_t *dev_priv = dev->dev_private; 959 drm_i915_private_t *dev_priv = dev->dev_private;
959 u32 delayfreq; 960 u32 delayfreq;
960 int ret, i; 961 int ret, i;
961 962
962 ret = mutex_lock_interruptible(&dev->struct_mutex); 963 ret = mutex_lock_interruptible(&dev->struct_mutex);
963 if (ret) 964 if (ret)
964 return ret; 965 return ret;
965 966
966 for (i = 0; i < 16; i++) { 967 for (i = 0; i < 16; i++) {
967 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 968 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
968 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, 969 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
969 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 970 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
970 } 971 }
971 972
972 mutex_unlock(&dev->struct_mutex); 973 mutex_unlock(&dev->struct_mutex);
973 974
974 return 0; 975 return 0;
975 } 976 }
976 977
977 static inline int MAP_TO_MV(int map) 978 static inline int MAP_TO_MV(int map)
978 { 979 {
979 return 1250 - (map * 25); 980 return 1250 - (map * 25);
980 } 981 }
981 982
982 static int i915_inttoext_table(struct seq_file *m, void *unused) 983 static int i915_inttoext_table(struct seq_file *m, void *unused)
983 { 984 {
984 struct drm_info_node *node = (struct drm_info_node *) m->private; 985 struct drm_info_node *node = (struct drm_info_node *) m->private;
985 struct drm_device *dev = node->minor->dev; 986 struct drm_device *dev = node->minor->dev;
986 drm_i915_private_t *dev_priv = dev->dev_private; 987 drm_i915_private_t *dev_priv = dev->dev_private;
987 u32 inttoext; 988 u32 inttoext;
988 int ret, i; 989 int ret, i;
989 990
990 ret = mutex_lock_interruptible(&dev->struct_mutex); 991 ret = mutex_lock_interruptible(&dev->struct_mutex);
991 if (ret) 992 if (ret)
992 return ret; 993 return ret;
993 994
994 for (i = 1; i <= 32; i++) { 995 for (i = 1; i <= 32; i++) {
995 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 996 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
996 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 997 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
997 } 998 }
998 999
999 mutex_unlock(&dev->struct_mutex); 1000 mutex_unlock(&dev->struct_mutex);
1000 1001
1001 return 0; 1002 return 0;
1002 } 1003 }
1003 1004
1004 static int ironlake_drpc_info(struct seq_file *m) 1005 static int ironlake_drpc_info(struct seq_file *m)
1005 { 1006 {
1006 struct drm_info_node *node = (struct drm_info_node *) m->private; 1007 struct drm_info_node *node = (struct drm_info_node *) m->private;
1007 struct drm_device *dev = node->minor->dev; 1008 struct drm_device *dev = node->minor->dev;
1008 drm_i915_private_t *dev_priv = dev->dev_private; 1009 drm_i915_private_t *dev_priv = dev->dev_private;
1009 u32 rgvmodectl, rstdbyctl; 1010 u32 rgvmodectl, rstdbyctl;
1010 u16 crstandvid; 1011 u16 crstandvid;
1011 int ret; 1012 int ret;
1012 1013
1013 ret = mutex_lock_interruptible(&dev->struct_mutex); 1014 ret = mutex_lock_interruptible(&dev->struct_mutex);
1014 if (ret) 1015 if (ret)
1015 return ret; 1016 return ret;
1016 1017
1017 rgvmodectl = I915_READ(MEMMODECTL); 1018 rgvmodectl = I915_READ(MEMMODECTL);
1018 rstdbyctl = I915_READ(RSTDBYCTL); 1019 rstdbyctl = I915_READ(RSTDBYCTL);
1019 crstandvid = I915_READ16(CRSTANDVID); 1020 crstandvid = I915_READ16(CRSTANDVID);
1020 1021
1021 mutex_unlock(&dev->struct_mutex); 1022 mutex_unlock(&dev->struct_mutex);
1022 1023
1023 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1024 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
1024 "yes" : "no"); 1025 "yes" : "no");
1025 seq_printf(m, "Boost freq: %d\n", 1026 seq_printf(m, "Boost freq: %d\n",
1026 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1027 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1027 MEMMODE_BOOST_FREQ_SHIFT); 1028 MEMMODE_BOOST_FREQ_SHIFT);
1028 seq_printf(m, "HW control enabled: %s\n", 1029 seq_printf(m, "HW control enabled: %s\n",
1029 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 1030 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
1030 seq_printf(m, "SW control enabled: %s\n", 1031 seq_printf(m, "SW control enabled: %s\n",
1031 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 1032 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
1032 seq_printf(m, "Gated voltage change: %s\n", 1033 seq_printf(m, "Gated voltage change: %s\n",
1033 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 1034 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
1034 seq_printf(m, "Starting frequency: P%d\n", 1035 seq_printf(m, "Starting frequency: P%d\n",
1035 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1036 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1036 seq_printf(m, "Max P-state: P%d\n", 1037 seq_printf(m, "Max P-state: P%d\n",
1037 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1038 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1038 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1039 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1039 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1040 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1040 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1041 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1041 seq_printf(m, "Render standby enabled: %s\n", 1042 seq_printf(m, "Render standby enabled: %s\n",
1042 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1043 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
1043 seq_printf(m, "Current RS state: "); 1044 seq_printf(m, "Current RS state: ");
1044 switch (rstdbyctl & RSX_STATUS_MASK) { 1045 switch (rstdbyctl & RSX_STATUS_MASK) {
1045 case RSX_STATUS_ON: 1046 case RSX_STATUS_ON:
1046 seq_printf(m, "on\n"); 1047 seq_printf(m, "on\n");
1047 break; 1048 break;
1048 case RSX_STATUS_RC1: 1049 case RSX_STATUS_RC1:
1049 seq_printf(m, "RC1\n"); 1050 seq_printf(m, "RC1\n");
1050 break; 1051 break;
1051 case RSX_STATUS_RC1E: 1052 case RSX_STATUS_RC1E:
1052 seq_printf(m, "RC1E\n"); 1053 seq_printf(m, "RC1E\n");
1053 break; 1054 break;
1054 case RSX_STATUS_RS1: 1055 case RSX_STATUS_RS1:
1055 seq_printf(m, "RS1\n"); 1056 seq_printf(m, "RS1\n");
1056 break; 1057 break;
1057 case RSX_STATUS_RS2: 1058 case RSX_STATUS_RS2:
1058 seq_printf(m, "RS2 (RC6)\n"); 1059 seq_printf(m, "RS2 (RC6)\n");
1059 break; 1060 break;
1060 case RSX_STATUS_RS3: 1061 case RSX_STATUS_RS3:
1061 seq_printf(m, "RC3 (RC6+)\n"); 1062 seq_printf(m, "RC3 (RC6+)\n");
1062 break; 1063 break;
1063 default: 1064 default:
1064 seq_printf(m, "unknown\n"); 1065 seq_printf(m, "unknown\n");
1065 break; 1066 break;
1066 } 1067 }
1067 1068
1068 return 0; 1069 return 0;
1069 } 1070 }
1070 1071
1071 static int gen6_drpc_info(struct seq_file *m) 1072 static int gen6_drpc_info(struct seq_file *m)
1072 { 1073 {
1073 1074
1074 struct drm_info_node *node = (struct drm_info_node *) m->private; 1075 struct drm_info_node *node = (struct drm_info_node *) m->private;
1075 struct drm_device *dev = node->minor->dev; 1076 struct drm_device *dev = node->minor->dev;
1076 struct drm_i915_private *dev_priv = dev->dev_private; 1077 struct drm_i915_private *dev_priv = dev->dev_private;
1077 u32 rpmodectl1, gt_core_status, rcctl1; 1078 u32 rpmodectl1, gt_core_status, rcctl1;
1078 	int count = 0, ret; 1079 	int count = 0, ret;
1079 1080
1080 1081
1081 ret = mutex_lock_interruptible(&dev->struct_mutex); 1082 ret = mutex_lock_interruptible(&dev->struct_mutex);
1082 if (ret) 1083 if (ret)
1083 return ret; 1084 return ret;
1084 1085
1085 if (atomic_read(&dev_priv->forcewake_count)) { 1086 if (atomic_read(&dev_priv->forcewake_count)) {
1086 seq_printf(m, "RC information inaccurate because userspace " 1087 seq_printf(m, "RC information inaccurate because userspace "
1087 "holds a reference \n"); 1088 "holds a reference \n");
1088 } else { 1089 } else {
1089 /* NB: we cannot use forcewake, else we read the wrong values */ 1090 /* NB: we cannot use forcewake, else we read the wrong values */
1090 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1091 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1091 udelay(10); 1092 udelay(10);
1092 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1093 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1093 } 1094 }
1094 1095
1095 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1096 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
1096 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); 1097 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
1097 1098
1098 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1099 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1099 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1100 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1100 mutex_unlock(&dev->struct_mutex); 1101 mutex_unlock(&dev->struct_mutex);
1101 1102
1102 seq_printf(m, "Video Turbo Mode: %s\n", 1103 seq_printf(m, "Video Turbo Mode: %s\n",
1103 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1104 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1104 seq_printf(m, "HW control enabled: %s\n", 1105 seq_printf(m, "HW control enabled: %s\n",
1105 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1106 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1106 seq_printf(m, "SW control enabled: %s\n", 1107 seq_printf(m, "SW control enabled: %s\n",
1107 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1108 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1108 GEN6_RP_MEDIA_SW_MODE)); 1109 GEN6_RP_MEDIA_SW_MODE));
1109 seq_printf(m, "RC6 Enabled: %s\n", 1110 seq_printf(m, "RC6 Enabled: %s\n",
1110 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1111 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1111 seq_printf(m, "RC6 Enabled: %s\n", 1112 seq_printf(m, "RC6 Enabled: %s\n",
1112 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1113 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1113 seq_printf(m, "Deep RC6 Enabled: %s\n", 1114 seq_printf(m, "Deep RC6 Enabled: %s\n",
1114 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1115 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1115 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1116 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1116 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1117 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1117 seq_printf(m, "Current RC state: "); 1118 seq_printf(m, "Current RC state: ");
1118 switch (gt_core_status & GEN6_RCn_MASK) { 1119 switch (gt_core_status & GEN6_RCn_MASK) {
1119 case GEN6_RC0: 1120 case GEN6_RC0:
1120 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1121 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1121 seq_printf(m, "Core Power Down\n"); 1122 seq_printf(m, "Core Power Down\n");
1122 else 1123 else
1123 seq_printf(m, "on\n"); 1124 seq_printf(m, "on\n");
1124 break; 1125 break;
1125 case GEN6_RC3: 1126 case GEN6_RC3:
1126 seq_printf(m, "RC3\n"); 1127 seq_printf(m, "RC3\n");
1127 break; 1128 break;
1128 case GEN6_RC6: 1129 case GEN6_RC6:
1129 seq_printf(m, "RC6\n"); 1130 seq_printf(m, "RC6\n");
1130 break; 1131 break;
1131 case GEN6_RC7: 1132 case GEN6_RC7:
1132 seq_printf(m, "RC7\n"); 1133 seq_printf(m, "RC7\n");
1133 break; 1134 break;
1134 default: 1135 default:
1135 seq_printf(m, "Unknown\n"); 1136 seq_printf(m, "Unknown\n");
1136 break; 1137 break;
1137 } 1138 }
1138 1139
1139 seq_printf(m, "Core Power Down: %s\n", 1140 seq_printf(m, "Core Power Down: %s\n",
1140 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1141 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1141 return 0; 1142 return 0;
1142 } 1143 }
1143 1144
1144 static int i915_drpc_info(struct seq_file *m, void *unused) 1145 static int i915_drpc_info(struct seq_file *m, void *unused)
1145 { 1146 {
1146 struct drm_info_node *node = (struct drm_info_node *) m->private; 1147 struct drm_info_node *node = (struct drm_info_node *) m->private;
1147 struct drm_device *dev = node->minor->dev; 1148 struct drm_device *dev = node->minor->dev;
1148 1149
1149 if (IS_GEN6(dev) || IS_GEN7(dev)) 1150 if (IS_GEN6(dev) || IS_GEN7(dev))
1150 return gen6_drpc_info(m); 1151 return gen6_drpc_info(m);
1151 else 1152 else
1152 return ironlake_drpc_info(m); 1153 return ironlake_drpc_info(m);
1153 } 1154 }
1154 1155
1155 static int i915_fbc_status(struct seq_file *m, void *unused) 1156 static int i915_fbc_status(struct seq_file *m, void *unused)
1156 { 1157 {
1157 struct drm_info_node *node = (struct drm_info_node *) m->private; 1158 struct drm_info_node *node = (struct drm_info_node *) m->private;
1158 struct drm_device *dev = node->minor->dev; 1159 struct drm_device *dev = node->minor->dev;
1159 drm_i915_private_t *dev_priv = dev->dev_private; 1160 drm_i915_private_t *dev_priv = dev->dev_private;
1160 1161
1161 if (!I915_HAS_FBC(dev)) { 1162 if (!I915_HAS_FBC(dev)) {
1162 seq_printf(m, "FBC unsupported on this chipset\n"); 1163 seq_printf(m, "FBC unsupported on this chipset\n");
1163 return 0; 1164 return 0;
1164 } 1165 }
1165 1166
1166 if (intel_fbc_enabled(dev)) { 1167 if (intel_fbc_enabled(dev)) {
1167 seq_printf(m, "FBC enabled\n"); 1168 seq_printf(m, "FBC enabled\n");
1168 } else { 1169 } else {
1169 seq_printf(m, "FBC disabled: "); 1170 seq_printf(m, "FBC disabled: ");
1170 switch (dev_priv->no_fbc_reason) { 1171 switch (dev_priv->no_fbc_reason) {
1171 case FBC_NO_OUTPUT: 1172 case FBC_NO_OUTPUT:
1172 seq_printf(m, "no outputs"); 1173 seq_printf(m, "no outputs");
1173 break; 1174 break;
1174 case FBC_STOLEN_TOO_SMALL: 1175 case FBC_STOLEN_TOO_SMALL:
1175 seq_printf(m, "not enough stolen memory"); 1176 seq_printf(m, "not enough stolen memory");
1176 break; 1177 break;
1177 case FBC_UNSUPPORTED_MODE: 1178 case FBC_UNSUPPORTED_MODE:
1178 seq_printf(m, "mode not supported"); 1179 seq_printf(m, "mode not supported");
1179 break; 1180 break;
1180 case FBC_MODE_TOO_LARGE: 1181 case FBC_MODE_TOO_LARGE:
1181 seq_printf(m, "mode too large"); 1182 seq_printf(m, "mode too large");
1182 break; 1183 break;
1183 case FBC_BAD_PLANE: 1184 case FBC_BAD_PLANE:
1184 seq_printf(m, "FBC unsupported on plane"); 1185 seq_printf(m, "FBC unsupported on plane");
1185 break; 1186 break;
1186 case FBC_NOT_TILED: 1187 case FBC_NOT_TILED:
1187 seq_printf(m, "scanout buffer not tiled"); 1188 seq_printf(m, "scanout buffer not tiled");
1188 break; 1189 break;
1189 case FBC_MULTIPLE_PIPES: 1190 case FBC_MULTIPLE_PIPES:
1190 seq_printf(m, "multiple pipes are enabled"); 1191 seq_printf(m, "multiple pipes are enabled");
1191 break; 1192 break;
1192 case FBC_MODULE_PARAM: 1193 case FBC_MODULE_PARAM:
1193 seq_printf(m, "disabled per module param (default off)"); 1194 seq_printf(m, "disabled per module param (default off)");
1194 break; 1195 break;
1195 default: 1196 default:
1196 seq_printf(m, "unknown reason"); 1197 seq_printf(m, "unknown reason");
1197 } 1198 }
1198 seq_printf(m, "\n"); 1199 seq_printf(m, "\n");
1199 } 1200 }
1200 return 0; 1201 return 0;
1201 } 1202 }
1202 1203
1203 static int i915_sr_status(struct seq_file *m, void *unused) 1204 static int i915_sr_status(struct seq_file *m, void *unused)
1204 { 1205 {
1205 struct drm_info_node *node = (struct drm_info_node *) m->private; 1206 struct drm_info_node *node = (struct drm_info_node *) m->private;
1206 struct drm_device *dev = node->minor->dev; 1207 struct drm_device *dev = node->minor->dev;
1207 drm_i915_private_t *dev_priv = dev->dev_private; 1208 drm_i915_private_t *dev_priv = dev->dev_private;
1208 bool sr_enabled = false; 1209 bool sr_enabled = false;
1209 1210
1210 if (HAS_PCH_SPLIT(dev)) 1211 if (HAS_PCH_SPLIT(dev))
1211 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1212 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1212 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1213 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
1213 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1214 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1214 else if (IS_I915GM(dev)) 1215 else if (IS_I915GM(dev))
1215 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1216 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1216 else if (IS_PINEVIEW(dev)) 1217 else if (IS_PINEVIEW(dev))
1217 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1218 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1218 1219
1219 seq_printf(m, "self-refresh: %s\n", 1220 seq_printf(m, "self-refresh: %s\n",
1220 sr_enabled ? "enabled" : "disabled"); 1221 sr_enabled ? "enabled" : "disabled");
1221 1222
1222 return 0; 1223 return 0;
1223 } 1224 }
1224 1225
1225 static int i915_emon_status(struct seq_file *m, void *unused) 1226 static int i915_emon_status(struct seq_file *m, void *unused)
1226 { 1227 {
1227 struct drm_info_node *node = (struct drm_info_node *) m->private; 1228 struct drm_info_node *node = (struct drm_info_node *) m->private;
1228 struct drm_device *dev = node->minor->dev; 1229 struct drm_device *dev = node->minor->dev;
1229 drm_i915_private_t *dev_priv = dev->dev_private; 1230 drm_i915_private_t *dev_priv = dev->dev_private;
1230 unsigned long temp, chipset, gfx; 1231 unsigned long temp, chipset, gfx;
1231 int ret; 1232 int ret;
1232 1233
1233 ret = mutex_lock_interruptible(&dev->struct_mutex); 1234 ret = mutex_lock_interruptible(&dev->struct_mutex);
1234 if (ret) 1235 if (ret)
1235 return ret; 1236 return ret;
1236 1237
1237 temp = i915_mch_val(dev_priv); 1238 temp = i915_mch_val(dev_priv);
1238 chipset = i915_chipset_val(dev_priv); 1239 chipset = i915_chipset_val(dev_priv);
1239 gfx = i915_gfx_val(dev_priv); 1240 gfx = i915_gfx_val(dev_priv);
1240 mutex_unlock(&dev->struct_mutex); 1241 mutex_unlock(&dev->struct_mutex);
1241 1242
1242 seq_printf(m, "GMCH temp: %ld\n", temp); 1243 seq_printf(m, "GMCH temp: %ld\n", temp);
1243 seq_printf(m, "Chipset power: %ld\n", chipset); 1244 seq_printf(m, "Chipset power: %ld\n", chipset);
1244 seq_printf(m, "GFX power: %ld\n", gfx); 1245 seq_printf(m, "GFX power: %ld\n", gfx);
1245 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1246 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1246 1247
1247 return 0; 1248 return 0;
1248 } 1249 }
1249 1250
1250 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1251 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1251 { 1252 {
1252 struct drm_info_node *node = (struct drm_info_node *) m->private; 1253 struct drm_info_node *node = (struct drm_info_node *) m->private;
1253 struct drm_device *dev = node->minor->dev; 1254 struct drm_device *dev = node->minor->dev;
1254 drm_i915_private_t *dev_priv = dev->dev_private; 1255 drm_i915_private_t *dev_priv = dev->dev_private;
1255 int ret; 1256 int ret;
1256 int gpu_freq, ia_freq; 1257 int gpu_freq, ia_freq;
1257 1258
1258 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1259 if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
1259 seq_printf(m, "unsupported on this chipset\n"); 1260 seq_printf(m, "unsupported on this chipset\n");
1260 return 0; 1261 return 0;
1261 } 1262 }
1262 1263
1263 ret = mutex_lock_interruptible(&dev->struct_mutex); 1264 ret = mutex_lock_interruptible(&dev->struct_mutex);
1264 if (ret) 1265 if (ret)
1265 return ret; 1266 return ret;
1266 1267
1267 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); 1268 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
1268 1269
1269 for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; 1270 for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
1270 gpu_freq++) { 1271 gpu_freq++) {
1271 I915_WRITE(GEN6_PCODE_DATA, gpu_freq); 1272 I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
1272 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | 1273 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
1273 GEN6_PCODE_READ_MIN_FREQ_TABLE); 1274 GEN6_PCODE_READ_MIN_FREQ_TABLE);
1274 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & 1275 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
1275 GEN6_PCODE_READY) == 0, 10)) { 1276 GEN6_PCODE_READY) == 0, 10)) {
1276 DRM_ERROR("pcode read of freq table timed out\n"); 1277 DRM_ERROR("pcode read of freq table timed out\n");
1277 continue; 1278 continue;
1278 } 1279 }
1279 ia_freq = I915_READ(GEN6_PCODE_DATA); 1280 ia_freq = I915_READ(GEN6_PCODE_DATA);
1280 seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100); 1281 seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
1281 } 1282 }
1282 1283
1283 mutex_unlock(&dev->struct_mutex); 1284 mutex_unlock(&dev->struct_mutex);
1284 1285
1285 return 0; 1286 return 0;
1286 } 1287 }
1287 1288
1288 static int i915_gfxec(struct seq_file *m, void *unused) 1289 static int i915_gfxec(struct seq_file *m, void *unused)
1289 { 1290 {
1290 struct drm_info_node *node = (struct drm_info_node *) m->private; 1291 struct drm_info_node *node = (struct drm_info_node *) m->private;
1291 struct drm_device *dev = node->minor->dev; 1292 struct drm_device *dev = node->minor->dev;
1292 drm_i915_private_t *dev_priv = dev->dev_private; 1293 drm_i915_private_t *dev_priv = dev->dev_private;
1293 int ret; 1294 int ret;
1294 1295
1295 ret = mutex_lock_interruptible(&dev->struct_mutex); 1296 ret = mutex_lock_interruptible(&dev->struct_mutex);
1296 if (ret) 1297 if (ret)
1297 return ret; 1298 return ret;
1298 1299
1299 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1300 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1300 1301
1301 mutex_unlock(&dev->struct_mutex); 1302 mutex_unlock(&dev->struct_mutex);
1302 1303
1303 return 0; 1304 return 0;
1304 } 1305 }
1305 1306
1306 static int i915_opregion(struct seq_file *m, void *unused) 1307 static int i915_opregion(struct seq_file *m, void *unused)
1307 { 1308 {
1308 struct drm_info_node *node = (struct drm_info_node *) m->private; 1309 struct drm_info_node *node = (struct drm_info_node *) m->private;
1309 struct drm_device *dev = node->minor->dev; 1310 struct drm_device *dev = node->minor->dev;
1310 drm_i915_private_t *dev_priv = dev->dev_private; 1311 drm_i915_private_t *dev_priv = dev->dev_private;
1311 struct intel_opregion *opregion = &dev_priv->opregion; 1312 struct intel_opregion *opregion = &dev_priv->opregion;
1312 int ret; 1313 int ret;
1313 1314
1314 ret = mutex_lock_interruptible(&dev->struct_mutex); 1315 ret = mutex_lock_interruptible(&dev->struct_mutex);
1315 if (ret) 1316 if (ret)
1316 return ret; 1317 return ret;
1317 1318
1318 if (opregion->header) 1319 if (opregion->header)
1319 seq_write(m, opregion->header, OPREGION_SIZE); 1320 seq_write(m, opregion->header, OPREGION_SIZE);
1320 1321
1321 mutex_unlock(&dev->struct_mutex); 1322 mutex_unlock(&dev->struct_mutex);
1322 1323
1323 return 0; 1324 return 0;
1324 } 1325 }
1325 1326
1326 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1327 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1327 { 1328 {
1328 struct drm_info_node *node = (struct drm_info_node *) m->private; 1329 struct drm_info_node *node = (struct drm_info_node *) m->private;
1329 struct drm_device *dev = node->minor->dev; 1330 struct drm_device *dev = node->minor->dev;
1330 drm_i915_private_t *dev_priv = dev->dev_private; 1331 drm_i915_private_t *dev_priv = dev->dev_private;
1331 struct intel_fbdev *ifbdev; 1332 struct intel_fbdev *ifbdev;
1332 struct intel_framebuffer *fb; 1333 struct intel_framebuffer *fb;
1333 int ret; 1334 int ret;
1334 1335
1335 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1336 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1336 if (ret) 1337 if (ret)
1337 return ret; 1338 return ret;
1338 1339
1339 ifbdev = dev_priv->fbdev; 1340 ifbdev = dev_priv->fbdev;
1340 fb = to_intel_framebuffer(ifbdev->helper.fb); 1341 fb = to_intel_framebuffer(ifbdev->helper.fb);
1341 1342
1342 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ", 1343 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
1343 fb->base.width, 1344 fb->base.width,
1344 fb->base.height, 1345 fb->base.height,
1345 fb->base.depth, 1346 fb->base.depth,
1346 fb->base.bits_per_pixel); 1347 fb->base.bits_per_pixel);
1347 describe_obj(m, fb->obj); 1348 describe_obj(m, fb->obj);
1348 seq_printf(m, "\n"); 1349 seq_printf(m, "\n");
1349 1350
1350 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1351 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1351 if (&fb->base == ifbdev->helper.fb) 1352 if (&fb->base == ifbdev->helper.fb)
1352 continue; 1353 continue;
1353 1354
1354 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ", 1355 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
1355 fb->base.width, 1356 fb->base.width,
1356 fb->base.height, 1357 fb->base.height,
1357 fb->base.depth, 1358 fb->base.depth,
1358 fb->base.bits_per_pixel); 1359 fb->base.bits_per_pixel);
1359 describe_obj(m, fb->obj); 1360 describe_obj(m, fb->obj);
1360 seq_printf(m, "\n"); 1361 seq_printf(m, "\n");
1361 } 1362 }
1362 1363
1363 mutex_unlock(&dev->mode_config.mutex); 1364 mutex_unlock(&dev->mode_config.mutex);
1364 1365
1365 return 0; 1366 return 0;
1366 } 1367 }
1367 1368
1368 static int i915_context_status(struct seq_file *m, void *unused) 1369 static int i915_context_status(struct seq_file *m, void *unused)
1369 { 1370 {
1370 struct drm_info_node *node = (struct drm_info_node *) m->private; 1371 struct drm_info_node *node = (struct drm_info_node *) m->private;
1371 struct drm_device *dev = node->minor->dev; 1372 struct drm_device *dev = node->minor->dev;
1372 drm_i915_private_t *dev_priv = dev->dev_private; 1373 drm_i915_private_t *dev_priv = dev->dev_private;
1373 int ret; 1374 int ret;
1374 1375
1375 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1376 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1376 if (ret) 1377 if (ret)
1377 return ret; 1378 return ret;
1378 1379
1379 if (dev_priv->pwrctx) { 1380 if (dev_priv->pwrctx) {
1380 seq_printf(m, "power context "); 1381 seq_printf(m, "power context ");
1381 describe_obj(m, dev_priv->pwrctx); 1382 describe_obj(m, dev_priv->pwrctx);
1382 seq_printf(m, "\n"); 1383 seq_printf(m, "\n");
1383 } 1384 }
1384 1385
1385 if (dev_priv->renderctx) { 1386 if (dev_priv->renderctx) {
1386 seq_printf(m, "render context "); 1387 seq_printf(m, "render context ");
1387 describe_obj(m, dev_priv->renderctx); 1388 describe_obj(m, dev_priv->renderctx);
1388 seq_printf(m, "\n"); 1389 seq_printf(m, "\n");
1389 } 1390 }
1390 1391
1391 mutex_unlock(&dev->mode_config.mutex); 1392 mutex_unlock(&dev->mode_config.mutex);
1392 1393
1393 return 0; 1394 return 0;
1394 } 1395 }
1395 1396
1396 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) 1397 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1397 { 1398 {
1398 struct drm_info_node *node = (struct drm_info_node *) m->private; 1399 struct drm_info_node *node = (struct drm_info_node *) m->private;
1399 struct drm_device *dev = node->minor->dev; 1400 struct drm_device *dev = node->minor->dev;
1400 struct drm_i915_private *dev_priv = dev->dev_private; 1401 struct drm_i915_private *dev_priv = dev->dev_private;
1401 1402
1402 seq_printf(m, "forcewake count = %d\n", 1403 seq_printf(m, "forcewake count = %d\n",
1403 atomic_read(&dev_priv->forcewake_count)); 1404 atomic_read(&dev_priv->forcewake_count));
1404 1405
1405 return 0; 1406 return 0;
1406 } 1407 }
1407 1408
1408 static int 1409 static int
1409 i915_wedged_open(struct inode *inode, 1410 i915_wedged_open(struct inode *inode,
1410 struct file *filp) 1411 struct file *filp)
1411 { 1412 {
1412 filp->private_data = inode->i_private; 1413 filp->private_data = inode->i_private;
1413 return 0; 1414 return 0;
1414 } 1415 }
1415 1416
1416 static ssize_t 1417 static ssize_t
1417 i915_wedged_read(struct file *filp, 1418 i915_wedged_read(struct file *filp,
1418 char __user *ubuf, 1419 char __user *ubuf,
1419 size_t max, 1420 size_t max,
1420 loff_t *ppos) 1421 loff_t *ppos)
1421 { 1422 {
1422 struct drm_device *dev = filp->private_data; 1423 struct drm_device *dev = filp->private_data;
1423 drm_i915_private_t *dev_priv = dev->dev_private; 1424 drm_i915_private_t *dev_priv = dev->dev_private;
1424 char buf[80]; 1425 char buf[80];
1425 int len; 1426 int len;
1426 1427
1427 len = snprintf(buf, sizeof(buf), 1428 len = snprintf(buf, sizeof(buf),
1428 "wedged : %d\n", 1429 "wedged : %d\n",
1429 atomic_read(&dev_priv->mm.wedged)); 1430 atomic_read(&dev_priv->mm.wedged));
1430 1431
1431 if (len > sizeof(buf)) 1432 if (len > sizeof(buf))
1432 len = sizeof(buf); 1433 len = sizeof(buf);
1433 1434
1434 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1435 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1435 } 1436 }
1436 1437
1437 static ssize_t 1438 static ssize_t
1438 i915_wedged_write(struct file *filp, 1439 i915_wedged_write(struct file *filp,
1439 const char __user *ubuf, 1440 const char __user *ubuf,
1440 size_t cnt, 1441 size_t cnt,
1441 loff_t *ppos) 1442 loff_t *ppos)
1442 { 1443 {
1443 struct drm_device *dev = filp->private_data; 1444 struct drm_device *dev = filp->private_data;
1444 char buf[20]; 1445 char buf[20];
1445 int val = 1; 1446 int val = 1;
1446 1447
1447 if (cnt > 0) { 1448 if (cnt > 0) {
1448 if (cnt > sizeof(buf) - 1) 1449 if (cnt > sizeof(buf) - 1)
1449 return -EINVAL; 1450 return -EINVAL;
1450 1451
1451 if (copy_from_user(buf, ubuf, cnt)) 1452 if (copy_from_user(buf, ubuf, cnt))
1452 return -EFAULT; 1453 return -EFAULT;
1453 buf[cnt] = 0; 1454 buf[cnt] = 0;
1454 1455
1455 val = simple_strtoul(buf, NULL, 0); 1456 val = simple_strtoul(buf, NULL, 0);
1456 } 1457 }
1457 1458
1458 DRM_INFO("Manually setting wedged to %d\n", val); 1459 DRM_INFO("Manually setting wedged to %d\n", val);
1459 i915_handle_error(dev, val); 1460 i915_handle_error(dev, val);
1460 1461
1461 return cnt; 1462 return cnt;
1462 } 1463 }
1463 1464
1464 static const struct file_operations i915_wedged_fops = { 1465 static const struct file_operations i915_wedged_fops = {
1465 .owner = THIS_MODULE, 1466 .owner = THIS_MODULE,
1466 .open = i915_wedged_open, 1467 .open = i915_wedged_open,
1467 .read = i915_wedged_read, 1468 .read = i915_wedged_read,
1468 .write = i915_wedged_write, 1469 .write = i915_wedged_write,
1469 .llseek = default_llseek, 1470 .llseek = default_llseek,
1470 }; 1471 };
1471 1472
1472 static int 1473 static int
1473 i915_max_freq_open(struct inode *inode, 1474 i915_max_freq_open(struct inode *inode,
1474 struct file *filp) 1475 struct file *filp)
1475 { 1476 {
1476 filp->private_data = inode->i_private; 1477 filp->private_data = inode->i_private;
1477 return 0; 1478 return 0;
1478 } 1479 }
1479 1480
1480 static ssize_t 1481 static ssize_t
1481 i915_max_freq_read(struct file *filp, 1482 i915_max_freq_read(struct file *filp,
1482 char __user *ubuf, 1483 char __user *ubuf,
1483 size_t max, 1484 size_t max,
1484 loff_t *ppos) 1485 loff_t *ppos)
1485 { 1486 {
1486 struct drm_device *dev = filp->private_data; 1487 struct drm_device *dev = filp->private_data;
1487 drm_i915_private_t *dev_priv = dev->dev_private; 1488 drm_i915_private_t *dev_priv = dev->dev_private;
1488 char buf[80]; 1489 char buf[80];
1489 int len; 1490 int len;
1490 1491
1491 len = snprintf(buf, sizeof(buf), 1492 len = snprintf(buf, sizeof(buf),
1492 "max freq: %d\n", dev_priv->max_delay * 50); 1493 "max freq: %d\n", dev_priv->max_delay * 50);
1493 1494
1494 if (len > sizeof(buf)) 1495 if (len > sizeof(buf))
1495 len = sizeof(buf); 1496 len = sizeof(buf);
1496 1497
1497 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1498 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1498 } 1499 }
1499 1500
1500 static ssize_t 1501 static ssize_t
1501 i915_max_freq_write(struct file *filp, 1502 i915_max_freq_write(struct file *filp,
1502 const char __user *ubuf, 1503 const char __user *ubuf,
1503 size_t cnt, 1504 size_t cnt,
1504 loff_t *ppos) 1505 loff_t *ppos)
1505 { 1506 {
1506 struct drm_device *dev = filp->private_data; 1507 struct drm_device *dev = filp->private_data;
1507 struct drm_i915_private *dev_priv = dev->dev_private; 1508 struct drm_i915_private *dev_priv = dev->dev_private;
1508 char buf[20]; 1509 char buf[20];
1509 int val = 1; 1510 int val = 1;
1510 1511
1511 if (cnt > 0) { 1512 if (cnt > 0) {
1512 if (cnt > sizeof(buf) - 1) 1513 if (cnt > sizeof(buf) - 1)
1513 return -EINVAL; 1514 return -EINVAL;
1514 1515
1515 if (copy_from_user(buf, ubuf, cnt)) 1516 if (copy_from_user(buf, ubuf, cnt))
1516 return -EFAULT; 1517 return -EFAULT;
1517 buf[cnt] = 0; 1518 buf[cnt] = 0;
1518 1519
1519 val = simple_strtoul(buf, NULL, 0); 1520 val = simple_strtoul(buf, NULL, 0);
1520 } 1521 }
1521 1522
1522 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); 1523 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
1523 1524
1524 /* 1525 /*
1525 * Turbo will still be enabled, but won't go above the set value. 1526 * Turbo will still be enabled, but won't go above the set value.
1526 */ 1527 */
1527 dev_priv->max_delay = val / 50; 1528 dev_priv->max_delay = val / 50;
1528 1529
1529 gen6_set_rps(dev, val / 50); 1530 gen6_set_rps(dev, val / 50);
1530 1531
1531 return cnt; 1532 return cnt;
1532 } 1533 }
1533 1534
1534 static const struct file_operations i915_max_freq_fops = { 1535 static const struct file_operations i915_max_freq_fops = {
1535 .owner = THIS_MODULE, 1536 .owner = THIS_MODULE,
1536 .open = i915_max_freq_open, 1537 .open = i915_max_freq_open,
1537 .read = i915_max_freq_read, 1538 .read = i915_max_freq_read,
1538 .write = i915_max_freq_write, 1539 .write = i915_max_freq_write,
1539 .llseek = default_llseek, 1540 .llseek = default_llseek,
1540 }; 1541 };
1541 1542
1542 static int 1543 static int
1543 i915_cache_sharing_open(struct inode *inode, 1544 i915_cache_sharing_open(struct inode *inode,
1544 struct file *filp) 1545 struct file *filp)
1545 { 1546 {
1546 filp->private_data = inode->i_private; 1547 filp->private_data = inode->i_private;
1547 return 0; 1548 return 0;
1548 } 1549 }
1549 1550
1550 static ssize_t 1551 static ssize_t
1551 i915_cache_sharing_read(struct file *filp, 1552 i915_cache_sharing_read(struct file *filp,
1552 char __user *ubuf, 1553 char __user *ubuf,
1553 size_t max, 1554 size_t max,
1554 loff_t *ppos) 1555 loff_t *ppos)
1555 { 1556 {
1556 struct drm_device *dev = filp->private_data; 1557 struct drm_device *dev = filp->private_data;
1557 drm_i915_private_t *dev_priv = dev->dev_private; 1558 drm_i915_private_t *dev_priv = dev->dev_private;
1558 char buf[80]; 1559 char buf[80];
1559 u32 snpcr; 1560 u32 snpcr;
1560 int len; 1561 int len;
1561 1562
1562 mutex_lock(&dev_priv->dev->struct_mutex); 1563 mutex_lock(&dev_priv->dev->struct_mutex);
1563 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1564 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1564 mutex_unlock(&dev_priv->dev->struct_mutex); 1565 mutex_unlock(&dev_priv->dev->struct_mutex);
1565 1566
1566 len = snprintf(buf, sizeof(buf), 1567 len = snprintf(buf, sizeof(buf),
1567 "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >> 1568 "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
1568 GEN6_MBC_SNPCR_SHIFT); 1569 GEN6_MBC_SNPCR_SHIFT);
1569 1570
1570 if (len > sizeof(buf)) 1571 if (len > sizeof(buf))
1571 len = sizeof(buf); 1572 len = sizeof(buf);
1572 1573
1573 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1574 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1574 } 1575 }
1575 1576
1576 static ssize_t 1577 static ssize_t
1577 i915_cache_sharing_write(struct file *filp, 1578 i915_cache_sharing_write(struct file *filp,
1578 const char __user *ubuf, 1579 const char __user *ubuf,
1579 size_t cnt, 1580 size_t cnt,
1580 loff_t *ppos) 1581 loff_t *ppos)
1581 { 1582 {
1582 struct drm_device *dev = filp->private_data; 1583 struct drm_device *dev = filp->private_data;
1583 struct drm_i915_private *dev_priv = dev->dev_private; 1584 struct drm_i915_private *dev_priv = dev->dev_private;
1584 char buf[20]; 1585 char buf[20];
1585 u32 snpcr; 1586 u32 snpcr;
1586 int val = 1; 1587 int val = 1;
1587 1588
1588 if (cnt > 0) { 1589 if (cnt > 0) {
1589 if (cnt > sizeof(buf) - 1) 1590 if (cnt > sizeof(buf) - 1)
1590 return -EINVAL; 1591 return -EINVAL;
1591 1592
1592 if (copy_from_user(buf, ubuf, cnt)) 1593 if (copy_from_user(buf, ubuf, cnt))
1593 return -EFAULT; 1594 return -EFAULT;
1594 buf[cnt] = 0; 1595 buf[cnt] = 0;
1595 1596
1596 val = simple_strtoul(buf, NULL, 0); 1597 val = simple_strtoul(buf, NULL, 0);
1597 } 1598 }
1598 1599
1599 if (val < 0 || val > 3) 1600 if (val < 0 || val > 3)
1600 return -EINVAL; 1601 return -EINVAL;
1601 1602
1602 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val); 1603 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);
1603 1604
1604 /* Update the cache sharing policy here as well */ 1605 /* Update the cache sharing policy here as well */
1605 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1606 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1606 snpcr &= ~GEN6_MBC_SNPCR_MASK; 1607 snpcr &= ~GEN6_MBC_SNPCR_MASK;
1607 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 1608 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
1608 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 1609 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
1609 1610
1610 return cnt; 1611 return cnt;
1611 } 1612 }
1612 1613
1613 static const struct file_operations i915_cache_sharing_fops = { 1614 static const struct file_operations i915_cache_sharing_fops = {
1614 .owner = THIS_MODULE, 1615 .owner = THIS_MODULE,
1615 .open = i915_cache_sharing_open, 1616 .open = i915_cache_sharing_open,
1616 .read = i915_cache_sharing_read, 1617 .read = i915_cache_sharing_read,
1617 .write = i915_cache_sharing_write, 1618 .write = i915_cache_sharing_write,
1618 .llseek = default_llseek, 1619 .llseek = default_llseek,
1619 }; 1620 };
1620 1621
1621 /* As the drm_debugfs_init() routines are called before dev->dev_private is 1622 /* As the drm_debugfs_init() routines are called before dev->dev_private is
1622 * allocated we need to hook into the minor for release. */ 1623 * allocated we need to hook into the minor for release. */
1623 static int 1624 static int
1624 drm_add_fake_info_node(struct drm_minor *minor, 1625 drm_add_fake_info_node(struct drm_minor *minor,
1625 struct dentry *ent, 1626 struct dentry *ent,
1626 const void *key) 1627 const void *key)
1627 { 1628 {
1628 struct drm_info_node *node; 1629 struct drm_info_node *node;
1629 1630
1630 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); 1631 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
1631 if (node == NULL) { 1632 if (node == NULL) {
1632 debugfs_remove(ent); 1633 debugfs_remove(ent);
1633 return -ENOMEM; 1634 return -ENOMEM;
1634 } 1635 }
1635 1636
1636 node->minor = minor; 1637 node->minor = minor;
1637 node->dent = ent; 1638 node->dent = ent;
1638 node->info_ent = (void *) key; 1639 node->info_ent = (void *) key;
1639 1640
1640 mutex_lock(&minor->debugfs_lock); 1641 mutex_lock(&minor->debugfs_lock);
1641 list_add(&node->list, &minor->debugfs_list); 1642 list_add(&node->list, &minor->debugfs_list);
1642 mutex_unlock(&minor->debugfs_lock); 1643 mutex_unlock(&minor->debugfs_lock);
1643 1644
1644 return 0; 1645 return 0;
1645 } 1646 }
1646 1647
1647 static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) 1648 static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
1648 { 1649 {
1649 struct drm_device *dev = minor->dev; 1650 struct drm_device *dev = minor->dev;
1650 struct dentry *ent; 1651 struct dentry *ent;
1651 1652
1652 ent = debugfs_create_file("i915_wedged", 1653 ent = debugfs_create_file("i915_wedged",
1653 S_IRUGO | S_IWUSR, 1654 S_IRUGO | S_IWUSR,
1654 root, dev, 1655 root, dev,
1655 &i915_wedged_fops); 1656 &i915_wedged_fops);
1656 if (IS_ERR(ent)) 1657 if (IS_ERR(ent))
1657 return PTR_ERR(ent); 1658 return PTR_ERR(ent);
1658 1659
1659 return drm_add_fake_info_node(minor, ent, &i915_wedged_fops); 1660 return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
1660 } 1661 }
1661 1662
1662 static int i915_forcewake_open(struct inode *inode, struct file *file) 1663 static int i915_forcewake_open(struct inode *inode, struct file *file)
1663 { 1664 {
1664 struct drm_device *dev = inode->i_private; 1665 struct drm_device *dev = inode->i_private;
1665 struct drm_i915_private *dev_priv = dev->dev_private; 1666 struct drm_i915_private *dev_priv = dev->dev_private;
1666 int ret; 1667 int ret;
1667 1668
1668 if (!IS_GEN6(dev)) 1669 if (!IS_GEN6(dev))
1669 return 0; 1670 return 0;
1670 1671
1671 ret = mutex_lock_interruptible(&dev->struct_mutex); 1672 ret = mutex_lock_interruptible(&dev->struct_mutex);
1672 if (ret) 1673 if (ret)
1673 return ret; 1674 return ret;
1674 gen6_gt_force_wake_get(dev_priv); 1675 gen6_gt_force_wake_get(dev_priv);
1675 mutex_unlock(&dev->struct_mutex); 1676 mutex_unlock(&dev->struct_mutex);
1676 1677
1677 return 0; 1678 return 0;
1678 } 1679 }
1679 1680
1680 int i915_forcewake_release(struct inode *inode, struct file *file) 1681 int i915_forcewake_release(struct inode *inode, struct file *file)
1681 { 1682 {
1682 struct drm_device *dev = inode->i_private; 1683 struct drm_device *dev = inode->i_private;
1683 struct drm_i915_private *dev_priv = dev->dev_private; 1684 struct drm_i915_private *dev_priv = dev->dev_private;
1684 1685
1685 if (!IS_GEN6(dev)) 1686 if (!IS_GEN6(dev))
1686 return 0; 1687 return 0;
1687 1688
1688 /* 1689 /*
1689 * It's bad that we can potentially hang userspace if struct_mutex gets 1690 * It's bad that we can potentially hang userspace if struct_mutex gets
1690 * forever stuck. However, if we cannot acquire this lock it means that 1691 * forever stuck. However, if we cannot acquire this lock it means that
1691 	 * almost certainly the driver has hung and is not unloadable. Therefore 1692 	 * almost certainly the driver has hung and is not unloadable. Therefore
1692 	 * hanging here is probably a minor inconvenience not to be seen by 1693 	 * hanging here is probably a minor inconvenience not to be seen by
1693 * almost every user. 1694 * almost every user.
1694 */ 1695 */
1695 mutex_lock(&dev->struct_mutex); 1696 mutex_lock(&dev->struct_mutex);
1696 gen6_gt_force_wake_put(dev_priv); 1697 gen6_gt_force_wake_put(dev_priv);
1697 mutex_unlock(&dev->struct_mutex); 1698 mutex_unlock(&dev->struct_mutex);
1698 1699
1699 return 0; 1700 return 0;
1700 } 1701 }
1701 1702
1702 static const struct file_operations i915_forcewake_fops = { 1703 static const struct file_operations i915_forcewake_fops = {
1703 .owner = THIS_MODULE, 1704 .owner = THIS_MODULE,
1704 .open = i915_forcewake_open, 1705 .open = i915_forcewake_open,
1705 .release = i915_forcewake_release, 1706 .release = i915_forcewake_release,
1706 }; 1707 };
1707 1708
1708 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 1709 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
1709 { 1710 {
1710 struct drm_device *dev = minor->dev; 1711 struct drm_device *dev = minor->dev;
1711 struct dentry *ent; 1712 struct dentry *ent;
1712 1713
1713 ent = debugfs_create_file("i915_forcewake_user", 1714 ent = debugfs_create_file("i915_forcewake_user",
1714 S_IRUSR, 1715 S_IRUSR,
1715 root, dev, 1716 root, dev,
1716 &i915_forcewake_fops); 1717 &i915_forcewake_fops);
1717 if (IS_ERR(ent)) 1718 if (IS_ERR(ent))
1718 return PTR_ERR(ent); 1719 return PTR_ERR(ent);
1719 1720
1720 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 1721 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
1721 } 1722 }
1722 1723
1723 static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor) 1724 static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
1724 { 1725 {
1725 struct drm_device *dev = minor->dev; 1726 struct drm_device *dev = minor->dev;
1726 struct dentry *ent; 1727 struct dentry *ent;
1727 1728
1728 ent = debugfs_create_file("i915_max_freq", 1729 ent = debugfs_create_file("i915_max_freq",
1729 S_IRUGO | S_IWUSR, 1730 S_IRUGO | S_IWUSR,
1730 root, dev, 1731 root, dev,
1731 &i915_max_freq_fops); 1732 &i915_max_freq_fops);
1732 if (IS_ERR(ent)) 1733 if (IS_ERR(ent))
1733 return PTR_ERR(ent); 1734 return PTR_ERR(ent);
1734 1735
1735 return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops); 1736 return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
1736 } 1737 }
1737 1738
1738 static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor) 1739 static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
1739 { 1740 {
1740 struct drm_device *dev = minor->dev; 1741 struct drm_device *dev = minor->dev;
1741 struct dentry *ent; 1742 struct dentry *ent;
1742 1743
1743 ent = debugfs_create_file("i915_cache_sharing", 1744 ent = debugfs_create_file("i915_cache_sharing",
1744 S_IRUGO | S_IWUSR, 1745 S_IRUGO | S_IWUSR,
1745 root, dev, 1746 root, dev,
1746 &i915_cache_sharing_fops); 1747 &i915_cache_sharing_fops);
1747 if (IS_ERR(ent)) 1748 if (IS_ERR(ent))
1748 return PTR_ERR(ent); 1749 return PTR_ERR(ent);
1749 1750
1750 return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops); 1751 return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
1751 } 1752 }
1752 1753
1753 static struct drm_info_list i915_debugfs_list[] = { 1754 static struct drm_info_list i915_debugfs_list[] = {
1754 {"i915_capabilities", i915_capabilities, 0}, 1755 {"i915_capabilities", i915_capabilities, 0},
1755 {"i915_gem_objects", i915_gem_object_info, 0}, 1756 {"i915_gem_objects", i915_gem_object_info, 0},
1756 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 1757 {"i915_gem_gtt", i915_gem_gtt_info, 0},
1757 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 1758 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
1758 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, 1759 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
1759 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 1760 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
1760 {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST}, 1761 {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
1761 {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST}, 1762 {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
1762 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 1763 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
1763 {"i915_gem_request", i915_gem_request_info, 0}, 1764 {"i915_gem_request", i915_gem_request_info, 0},
1764 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 1765 {"i915_gem_seqno", i915_gem_seqno_info, 0},
1765 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 1766 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
1766 {"i915_gem_interrupt", i915_interrupt_info, 0}, 1767 {"i915_gem_interrupt", i915_interrupt_info, 0},
1767 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 1768 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
1768 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 1769 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
1769 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 1770 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
1770 {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS}, 1771 {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
1771 {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS}, 1772 {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
1772 {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS}, 1773 {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
1773 {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS}, 1774 {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
1774 {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS}, 1775 {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
1775 {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS}, 1776 {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
1776 {"i915_batchbuffers", i915_batchbuffer_info, 0}, 1777 {"i915_batchbuffers", i915_batchbuffer_info, 0},
1777 {"i915_error_state", i915_error_state, 0}, 1778 {"i915_error_state", i915_error_state, 0},
1778 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 1779 {"i915_rstdby_delays", i915_rstdby_delays, 0},
1779 {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 1780 {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
1780 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 1781 {"i915_delayfreq_table", i915_delayfreq_table, 0},
1781 {"i915_inttoext_table", i915_inttoext_table, 0}, 1782 {"i915_inttoext_table", i915_inttoext_table, 0},
1782 {"i915_drpc_info", i915_drpc_info, 0}, 1783 {"i915_drpc_info", i915_drpc_info, 0},
1783 {"i915_emon_status", i915_emon_status, 0}, 1784 {"i915_emon_status", i915_emon_status, 0},
1784 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 1785 {"i915_ring_freq_table", i915_ring_freq_table, 0},
1785 {"i915_gfxec", i915_gfxec, 0}, 1786 {"i915_gfxec", i915_gfxec, 0},
1786 {"i915_fbc_status", i915_fbc_status, 0}, 1787 {"i915_fbc_status", i915_fbc_status, 0},
1787 {"i915_sr_status", i915_sr_status, 0}, 1788 {"i915_sr_status", i915_sr_status, 0},
1788 {"i915_opregion", i915_opregion, 0}, 1789 {"i915_opregion", i915_opregion, 0},
1789 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 1790 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
1790 {"i915_context_status", i915_context_status, 0}, 1791 {"i915_context_status", i915_context_status, 0},
1791 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 1792 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
1792 }; 1793 };
1793 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 1794 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
1794 1795
1795 int i915_debugfs_init(struct drm_minor *minor) 1796 int i915_debugfs_init(struct drm_minor *minor)
1796 { 1797 {
1797 int ret; 1798 int ret;
1798 1799
1799 ret = i915_wedged_create(minor->debugfs_root, minor); 1800 ret = i915_wedged_create(minor->debugfs_root, minor);
1800 if (ret) 1801 if (ret)
1801 return ret; 1802 return ret;
1802 1803
1803 ret = i915_forcewake_create(minor->debugfs_root, minor); 1804 ret = i915_forcewake_create(minor->debugfs_root, minor);
1804 if (ret) 1805 if (ret)
1805 return ret; 1806 return ret;
1806 ret = i915_max_freq_create(minor->debugfs_root, minor); 1807 ret = i915_max_freq_create(minor->debugfs_root, minor);
1807 if (ret) 1808 if (ret)
1808 return ret; 1809 return ret;
1809 ret = i915_cache_sharing_create(minor->debugfs_root, minor); 1810 ret = i915_cache_sharing_create(minor->debugfs_root, minor);
1810 if (ret) 1811 if (ret)
1811 return ret; 1812 return ret;
1812 1813
1813 return drm_debugfs_create_files(i915_debugfs_list, 1814 return drm_debugfs_create_files(i915_debugfs_list,
1814 I915_DEBUGFS_ENTRIES, 1815 I915_DEBUGFS_ENTRIES,
1815 minor->debugfs_root, minor); 1816 minor->debugfs_root, minor);
1816 } 1817 }
1817 1818
1818 void i915_debugfs_cleanup(struct drm_minor *minor) 1819 void i915_debugfs_cleanup(struct drm_minor *minor)
1819 { 1820 {
1820 drm_debugfs_remove_files(i915_debugfs_list, 1821 drm_debugfs_remove_files(i915_debugfs_list,
1821 I915_DEBUGFS_ENTRIES, minor); 1822 I915_DEBUGFS_ENTRIES, minor);
1822 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 1823 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
1823 1, minor); 1824 1, minor);
1824 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, 1825 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
1825 1, minor); 1826 1, minor);
1826 drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, 1827 drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
1827 1, minor); 1828 1, minor);
1828 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, 1829 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1829 1, minor); 1830 1, minor);
1830 } 1831 }
1831 1832
1832 #endif /* CONFIG_DEBUG_FS */ 1833 #endif /* CONFIG_DEBUG_FS */
1833 1834
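For reference, a minimal sketch of the create-and-hook pattern that the debugfs entries above (i915_wedged, i915_forcewake_user, i915_max_freq, i915_cache_sharing) all follow. It is not part of this commit; the "i915_example" name and its file_operations are hypothetical placeholders, while debugfs_create_file() and drm_add_fake_info_node() are the helpers already defined and used in this file:

	/* Hypothetical entry; i915_example_fops would be defined like the
	 * other file_operations structs earlier in this file. */
	static int i915_example_create(struct dentry *root, struct drm_minor *minor)
	{
		struct drm_device *dev = minor->dev;
		struct dentry *ent;

		/* Create the node under the DRM minor's debugfs root. */
		ent = debugfs_create_file("i915_example",
					  S_IRUGO | S_IWUSR,
					  root, dev,
					  &i915_example_fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);

		/* Hook into the minor so the node is torn down with it. */
		return drm_add_fake_info_node(minor, ent, &i915_example_fops);
	}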
drivers/gpu/drm/i915/i915_dma.c
1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*- 1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2 */ 2 */
3 /* 3 /*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved. 5 * All Rights Reserved.
6 * 6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a 7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the 8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including 9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish, 10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to 11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to 12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions: 13 * the following conditions:
14 * 14 *
15 * The above copyright notice and this permission notice (including the 15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions 16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software. 17 * of the Software.
18 * 18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 * 26 *
27 */ 27 */
28 28
29 #include "drmP.h" 29 #include "drmP.h"
30 #include "drm.h" 30 #include "drm.h"
31 #include "drm_crtc_helper.h" 31 #include "drm_crtc_helper.h"
32 #include "drm_fb_helper.h" 32 #include "drm_fb_helper.h"
33 #include "intel_drv.h" 33 #include "intel_drv.h"
34 #include "i915_drm.h" 34 #include "i915_drm.h"
35 #include "i915_drv.h" 35 #include "i915_drv.h"
36 #include "i915_trace.h" 36 #include "i915_trace.h"
37 #include "../../../platform/x86/intel_ips.h" 37 #include "../../../platform/x86/intel_ips.h"
38 #include <linux/pci.h> 38 #include <linux/pci.h>
39 #include <linux/vgaarb.h> 39 #include <linux/vgaarb.h>
40 #include <linux/acpi.h> 40 #include <linux/acpi.h>
41 #include <linux/pnp.h> 41 #include <linux/pnp.h>
42 #include <linux/vga_switcheroo.h> 42 #include <linux/vga_switcheroo.h>
43 #include <linux/slab.h> 43 #include <linux/slab.h>
44 #include <linux/module.h> 44 #include <linux/module.h>
45 #include <acpi/video.h> 45 #include <acpi/video.h>
46 46
47 static void i915_write_hws_pga(struct drm_device *dev) 47 static void i915_write_hws_pga(struct drm_device *dev)
48 { 48 {
49 drm_i915_private_t *dev_priv = dev->dev_private; 49 drm_i915_private_t *dev_priv = dev->dev_private;
50 u32 addr; 50 u32 addr;
51 51
52 addr = dev_priv->status_page_dmah->busaddr; 52 addr = dev_priv->status_page_dmah->busaddr;
53 if (INTEL_INFO(dev)->gen >= 4) 53 if (INTEL_INFO(dev)->gen >= 4)
54 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; 54 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
55 I915_WRITE(HWS_PGA, addr); 55 I915_WRITE(HWS_PGA, addr);
56 } 56 }
57 57
58 /** 58 /**
59 * Sets up the hardware status page for devices that need a physical address 59 * Sets up the hardware status page for devices that need a physical address
60 * in the register. 60 * in the register.
61 */ 61 */
62 static int i915_init_phys_hws(struct drm_device *dev) 62 static int i915_init_phys_hws(struct drm_device *dev)
63 { 63 {
64 drm_i915_private_t *dev_priv = dev->dev_private; 64 drm_i915_private_t *dev_priv = dev->dev_private;
65 65
66 /* Program Hardware Status Page */ 66 /* Program Hardware Status Page */
67 dev_priv->status_page_dmah = 67 dev_priv->status_page_dmah =
68 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); 68 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
69 69
70 if (!dev_priv->status_page_dmah) { 70 if (!dev_priv->status_page_dmah) {
71 DRM_ERROR("Can not allocate hardware status page\n"); 71 DRM_ERROR("Can not allocate hardware status page\n");
72 return -ENOMEM; 72 return -ENOMEM;
73 } 73 }
74 74
75 memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr, 75 memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
76 0, PAGE_SIZE); 76 0, PAGE_SIZE);
77 77
78 i915_write_hws_pga(dev); 78 i915_write_hws_pga(dev);
79 79
80 DRM_DEBUG_DRIVER("Enabled hardware status page\n"); 80 DRM_DEBUG_DRIVER("Enabled hardware status page\n");
81 return 0; 81 return 0;
82 } 82 }
83 83
84 /** 84 /**
85 * Frees the hardware status page, whether it's a physical address or a virtual 85 * Frees the hardware status page, whether it's a physical address or a virtual
86 * address set up by the X Server. 86 * address set up by the X Server.
87 */ 87 */
88 static void i915_free_hws(struct drm_device *dev) 88 static void i915_free_hws(struct drm_device *dev)
89 { 89 {
90 drm_i915_private_t *dev_priv = dev->dev_private; 90 drm_i915_private_t *dev_priv = dev->dev_private;
91 struct intel_ring_buffer *ring = LP_RING(dev_priv); 91 struct intel_ring_buffer *ring = LP_RING(dev_priv);
92 92
93 if (dev_priv->status_page_dmah) { 93 if (dev_priv->status_page_dmah) {
94 drm_pci_free(dev, dev_priv->status_page_dmah); 94 drm_pci_free(dev, dev_priv->status_page_dmah);
95 dev_priv->status_page_dmah = NULL; 95 dev_priv->status_page_dmah = NULL;
96 } 96 }
97 97
98 if (ring->status_page.gfx_addr) { 98 if (ring->status_page.gfx_addr) {
99 ring->status_page.gfx_addr = 0; 99 ring->status_page.gfx_addr = 0;
100 drm_core_ioremapfree(&dev_priv->hws_map, dev); 100 drm_core_ioremapfree(&dev_priv->hws_map, dev);
101 } 101 }
102 102
103 /* Need to rewrite hardware status page */ 103 /* Need to rewrite hardware status page */
104 I915_WRITE(HWS_PGA, 0x1ffff000); 104 I915_WRITE(HWS_PGA, 0x1ffff000);
105 } 105 }
106 106
107 void i915_kernel_lost_context(struct drm_device * dev) 107 void i915_kernel_lost_context(struct drm_device * dev)
108 { 108 {
109 drm_i915_private_t *dev_priv = dev->dev_private; 109 drm_i915_private_t *dev_priv = dev->dev_private;
110 struct drm_i915_master_private *master_priv; 110 struct drm_i915_master_private *master_priv;
111 struct intel_ring_buffer *ring = LP_RING(dev_priv); 111 struct intel_ring_buffer *ring = LP_RING(dev_priv);
112 112
113 /* 113 /*
114 * We should never lose context on the ring with modesetting 114 * We should never lose context on the ring with modesetting
115 * as we don't expose it to userspace 115 * as we don't expose it to userspace
116 */ 116 */
117 if (drm_core_check_feature(dev, DRIVER_MODESET)) 117 if (drm_core_check_feature(dev, DRIVER_MODESET))
118 return; 118 return;
119 119
120 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; 120 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
121 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 121 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
122 ring->space = ring->head - (ring->tail + 8); 122 ring->space = ring->head - (ring->tail + 8);
123 if (ring->space < 0) 123 if (ring->space < 0)
124 ring->space += ring->size; 124 ring->space += ring->size;
125 125
126 if (!dev->primary->master) 126 if (!dev->primary->master)
127 return; 127 return;
128 128
129 master_priv = dev->primary->master->driver_priv; 129 master_priv = dev->primary->master->driver_priv;
130 if (ring->head == ring->tail && master_priv->sarea_priv) 130 if (ring->head == ring->tail && master_priv->sarea_priv)
131 master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; 131 master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
132 } 132 }
133 133
134 static int i915_dma_cleanup(struct drm_device * dev) 134 static int i915_dma_cleanup(struct drm_device * dev)
135 { 135 {
136 drm_i915_private_t *dev_priv = dev->dev_private; 136 drm_i915_private_t *dev_priv = dev->dev_private;
137 int i; 137 int i;
138 138
139 /* Make sure interrupts are disabled here because the uninstall ioctl 139 /* Make sure interrupts are disabled here because the uninstall ioctl
140 * may not have been called from userspace and after dev_private 140 * may not have been called from userspace and after dev_private
141 * is freed, it's too late. 141 * is freed, it's too late.
142 */ 142 */
143 if (dev->irq_enabled) 143 if (dev->irq_enabled)
144 drm_irq_uninstall(dev); 144 drm_irq_uninstall(dev);
145 145
146 mutex_lock(&dev->struct_mutex); 146 mutex_lock(&dev->struct_mutex);
147 for (i = 0; i < I915_NUM_RINGS; i++) 147 for (i = 0; i < I915_NUM_RINGS; i++)
148 intel_cleanup_ring_buffer(&dev_priv->ring[i]); 148 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
149 mutex_unlock(&dev->struct_mutex); 149 mutex_unlock(&dev->struct_mutex);
150 150
151 /* Clear the HWS virtual address at teardown */ 151 /* Clear the HWS virtual address at teardown */
152 if (I915_NEED_GFX_HWS(dev)) 152 if (I915_NEED_GFX_HWS(dev))
153 i915_free_hws(dev); 153 i915_free_hws(dev);
154 154
155 return 0; 155 return 0;
156 } 156 }
157 157
158 static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) 158 static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
159 { 159 {
160 drm_i915_private_t *dev_priv = dev->dev_private; 160 drm_i915_private_t *dev_priv = dev->dev_private;
161 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 161 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
162 int ret; 162 int ret;
163 163
164 master_priv->sarea = drm_getsarea(dev); 164 master_priv->sarea = drm_getsarea(dev);
165 if (master_priv->sarea) { 165 if (master_priv->sarea) {
166 master_priv->sarea_priv = (drm_i915_sarea_t *) 166 master_priv->sarea_priv = (drm_i915_sarea_t *)
167 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); 167 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
168 } else { 168 } else {
169 DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n"); 169 DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
170 } 170 }
171 171
172 if (init->ring_size != 0) { 172 if (init->ring_size != 0) {
173 if (LP_RING(dev_priv)->obj != NULL) { 173 if (LP_RING(dev_priv)->obj != NULL) {
174 i915_dma_cleanup(dev); 174 i915_dma_cleanup(dev);
175 DRM_ERROR("Client tried to initialize ringbuffer in " 175 DRM_ERROR("Client tried to initialize ringbuffer in "
176 "GEM mode\n"); 176 "GEM mode\n");
177 return -EINVAL; 177 return -EINVAL;
178 } 178 }
179 179
180 ret = intel_render_ring_init_dri(dev, 180 ret = intel_render_ring_init_dri(dev,
181 init->ring_start, 181 init->ring_start,
182 init->ring_size); 182 init->ring_size);
183 if (ret) { 183 if (ret) {
184 i915_dma_cleanup(dev); 184 i915_dma_cleanup(dev);
185 return ret; 185 return ret;
186 } 186 }
187 } 187 }
188 188
189 dev_priv->cpp = init->cpp; 189 dev_priv->cpp = init->cpp;
190 dev_priv->back_offset = init->back_offset; 190 dev_priv->back_offset = init->back_offset;
191 dev_priv->front_offset = init->front_offset; 191 dev_priv->front_offset = init->front_offset;
192 dev_priv->current_page = 0; 192 dev_priv->current_page = 0;
193 if (master_priv->sarea_priv) 193 if (master_priv->sarea_priv)
194 master_priv->sarea_priv->pf_current_page = 0; 194 master_priv->sarea_priv->pf_current_page = 0;
195 195
196 /* Allow hardware batchbuffers unless told otherwise. 196 /* Allow hardware batchbuffers unless told otherwise.
197 */ 197 */
198 dev_priv->allow_batchbuffer = 1; 198 dev_priv->allow_batchbuffer = 1;
199 199
200 return 0; 200 return 0;
201 } 201 }
202 202
203 static int i915_dma_resume(struct drm_device * dev) 203 static int i915_dma_resume(struct drm_device * dev)
204 { 204 {
205 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 205 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
206 struct intel_ring_buffer *ring = LP_RING(dev_priv); 206 struct intel_ring_buffer *ring = LP_RING(dev_priv);
207 207
208 DRM_DEBUG_DRIVER("%s\n", __func__); 208 DRM_DEBUG_DRIVER("%s\n", __func__);
209 209
210 if (ring->map.handle == NULL) { 210 if (ring->map.handle == NULL) {
211 DRM_ERROR("can not ioremap virtual address for" 211 DRM_ERROR("can not ioremap virtual address for"
212 " ring buffer\n"); 212 " ring buffer\n");
213 return -ENOMEM; 213 return -ENOMEM;
214 } 214 }
215 215
216 /* Program Hardware Status Page */ 216 /* Program Hardware Status Page */
217 if (!ring->status_page.page_addr) { 217 if (!ring->status_page.page_addr) {
218 DRM_ERROR("Can not find hardware status page\n"); 218 DRM_ERROR("Can not find hardware status page\n");
219 return -EINVAL; 219 return -EINVAL;
220 } 220 }
221 DRM_DEBUG_DRIVER("hw status page @ %p\n", 221 DRM_DEBUG_DRIVER("hw status page @ %p\n",
222 ring->status_page.page_addr); 222 ring->status_page.page_addr);
223 if (ring->status_page.gfx_addr != 0) 223 if (ring->status_page.gfx_addr != 0)
224 intel_ring_setup_status_page(ring); 224 intel_ring_setup_status_page(ring);
225 else 225 else
226 i915_write_hws_pga(dev); 226 i915_write_hws_pga(dev);
227 227
228 DRM_DEBUG_DRIVER("Enabled hardware status page\n"); 228 DRM_DEBUG_DRIVER("Enabled hardware status page\n");
229 229
230 return 0; 230 return 0;
231 } 231 }
232 232
233 static int i915_dma_init(struct drm_device *dev, void *data, 233 static int i915_dma_init(struct drm_device *dev, void *data,
234 struct drm_file *file_priv) 234 struct drm_file *file_priv)
235 { 235 {
236 drm_i915_init_t *init = data; 236 drm_i915_init_t *init = data;
237 int retcode = 0; 237 int retcode = 0;
238 238
239 switch (init->func) { 239 switch (init->func) {
240 case I915_INIT_DMA: 240 case I915_INIT_DMA:
241 retcode = i915_initialize(dev, init); 241 retcode = i915_initialize(dev, init);
242 break; 242 break;
243 case I915_CLEANUP_DMA: 243 case I915_CLEANUP_DMA:
244 retcode = i915_dma_cleanup(dev); 244 retcode = i915_dma_cleanup(dev);
245 break; 245 break;
246 case I915_RESUME_DMA: 246 case I915_RESUME_DMA:
247 retcode = i915_dma_resume(dev); 247 retcode = i915_dma_resume(dev);
248 break; 248 break;
249 default: 249 default:
250 retcode = -EINVAL; 250 retcode = -EINVAL;
251 break; 251 break;
252 } 252 }
253 253
254 return retcode; 254 return retcode;
255 } 255 }
256 256
257 /* Implement basically the same security restrictions as hardware does 257 /* Implement basically the same security restrictions as hardware does
258 * for MI_BATCH_NON_SECURE. These can be made stricter at any time. 258 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
259 * 259 *
260 * Most of the calculations below involve calculating the size of a 260 * Most of the calculations below involve calculating the size of a
261 * particular instruction. It's important to get the size right as 261 * particular instruction. It's important to get the size right as
262 * that tells us where the next instruction to check is. Any illegal 262 * that tells us where the next instruction to check is. Any illegal
263 * instruction detected will be given a size of zero, which is a 263 * instruction detected will be given a size of zero, which is a
264 * signal to abort the rest of the buffer. 264 * signal to abort the rest of the buffer.
265 */ 265 */
266 static int validate_cmd(int cmd) 266 static int validate_cmd(int cmd)
267 { 267 {
268 switch (((cmd >> 29) & 0x7)) { 268 switch (((cmd >> 29) & 0x7)) {
269 case 0x0: 269 case 0x0:
270 switch ((cmd >> 23) & 0x3f) { 270 switch ((cmd >> 23) & 0x3f) {
271 case 0x0: 271 case 0x0:
272 return 1; /* MI_NOOP */ 272 return 1; /* MI_NOOP */
273 case 0x4: 273 case 0x4:
274 return 1; /* MI_FLUSH */ 274 return 1; /* MI_FLUSH */
275 default: 275 default:
276 return 0; /* disallow everything else */ 276 return 0; /* disallow everything else */
277 } 277 }
278 break; 278 break;
279 case 0x1: 279 case 0x1:
280 return 0; /* reserved */ 280 return 0; /* reserved */
281 case 0x2: 281 case 0x2:
282 return (cmd & 0xff) + 2; /* 2d commands */ 282 return (cmd & 0xff) + 2; /* 2d commands */
283 case 0x3: 283 case 0x3:
284 if (((cmd >> 24) & 0x1f) <= 0x18) 284 if (((cmd >> 24) & 0x1f) <= 0x18)
285 return 1; 285 return 1;
286 286
287 switch ((cmd >> 24) & 0x1f) { 287 switch ((cmd >> 24) & 0x1f) {
288 case 0x1c: 288 case 0x1c:
289 return 1; 289 return 1;
290 case 0x1d: 290 case 0x1d:
291 switch ((cmd >> 16) & 0xff) { 291 switch ((cmd >> 16) & 0xff) {
292 case 0x3: 292 case 0x3:
293 return (cmd & 0x1f) + 2; 293 return (cmd & 0x1f) + 2;
294 case 0x4: 294 case 0x4:
295 return (cmd & 0xf) + 2; 295 return (cmd & 0xf) + 2;
296 default: 296 default:
297 return (cmd & 0xffff) + 2; 297 return (cmd & 0xffff) + 2;
298 } 298 }
299 case 0x1e: 299 case 0x1e:
300 if (cmd & (1 << 23)) 300 if (cmd & (1 << 23))
301 return (cmd & 0xffff) + 1; 301 return (cmd & 0xffff) + 1;
302 else 302 else
303 return 1; 303 return 1;
304 case 0x1f: 304 case 0x1f:
305 if ((cmd & (1 << 23)) == 0) /* inline vertices */ 305 if ((cmd & (1 << 23)) == 0) /* inline vertices */
306 return (cmd & 0x1ffff) + 2; 306 return (cmd & 0x1ffff) + 2;
307 else if (cmd & (1 << 17)) /* indirect random */ 307 else if (cmd & (1 << 17)) /* indirect random */
308 if ((cmd & 0xffff) == 0) 308 if ((cmd & 0xffff) == 0)
309 return 0; /* unknown length, too hard */ 309 return 0; /* unknown length, too hard */
310 else 310 else
311 return (((cmd & 0xffff) + 1) / 2) + 1; 311 return (((cmd & 0xffff) + 1) / 2) + 1;
312 else 312 else
313 return 2; /* indirect sequential */ 313 return 2; /* indirect sequential */
314 default: 314 default:
315 return 0; 315 return 0;
316 } 316 }
317 default: 317 default:
318 return 0; 318 return 0;
319 } 319 }
320 320
321 return 0; 321 return 0;
322 } 322 }
323 323
324 static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) 324 static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
325 { 325 {
326 drm_i915_private_t *dev_priv = dev->dev_private; 326 drm_i915_private_t *dev_priv = dev->dev_private;
327 int i, ret; 327 int i, ret;
328 328
329 if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8) 329 if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
330 return -EINVAL; 330 return -EINVAL;
331 331
332 for (i = 0; i < dwords;) { 332 for (i = 0; i < dwords;) {
333 int sz = validate_cmd(buffer[i]); 333 int sz = validate_cmd(buffer[i]);
334 if (sz == 0 || i + sz > dwords) 334 if (sz == 0 || i + sz > dwords)
335 return -EINVAL; 335 return -EINVAL;
336 i += sz; 336 i += sz;
337 } 337 }
338 338
339 ret = BEGIN_LP_RING((dwords+1)&~1); 339 ret = BEGIN_LP_RING((dwords+1)&~1);
340 if (ret) 340 if (ret)
341 return ret; 341 return ret;
342 342
343 for (i = 0; i < dwords; i++) 343 for (i = 0; i < dwords; i++)
344 OUT_RING(buffer[i]); 344 OUT_RING(buffer[i]);
345 if (dwords & 1) 345 if (dwords & 1)
346 OUT_RING(0); 346 OUT_RING(0);
347 347
348 ADVANCE_LP_RING(); 348 ADVANCE_LP_RING();
349 349
350 return 0; 350 return 0;
351 } 351 }
352 352
353 int 353 int
354 i915_emit_box(struct drm_device *dev, 354 i915_emit_box(struct drm_device *dev,
355 struct drm_clip_rect *box, 355 struct drm_clip_rect *box,
356 int DR1, int DR4) 356 int DR1, int DR4)
357 { 357 {
358 struct drm_i915_private *dev_priv = dev->dev_private; 358 struct drm_i915_private *dev_priv = dev->dev_private;
359 int ret; 359 int ret;
360 360
361 if (box->y2 <= box->y1 || box->x2 <= box->x1 || 361 if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
362 box->y2 <= 0 || box->x2 <= 0) { 362 box->y2 <= 0 || box->x2 <= 0) {
363 DRM_ERROR("Bad box %d,%d..%d,%d\n", 363 DRM_ERROR("Bad box %d,%d..%d,%d\n",
364 box->x1, box->y1, box->x2, box->y2); 364 box->x1, box->y1, box->x2, box->y2);
365 return -EINVAL; 365 return -EINVAL;
366 } 366 }
367 367
368 if (INTEL_INFO(dev)->gen >= 4) { 368 if (INTEL_INFO(dev)->gen >= 4) {
369 ret = BEGIN_LP_RING(4); 369 ret = BEGIN_LP_RING(4);
370 if (ret) 370 if (ret)
371 return ret; 371 return ret;
372 372
373 OUT_RING(GFX_OP_DRAWRECT_INFO_I965); 373 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
374 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); 374 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
375 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); 375 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
376 OUT_RING(DR4); 376 OUT_RING(DR4);
377 } else { 377 } else {
378 ret = BEGIN_LP_RING(6); 378 ret = BEGIN_LP_RING(6);
379 if (ret) 379 if (ret)
380 return ret; 380 return ret;
381 381
382 OUT_RING(GFX_OP_DRAWRECT_INFO); 382 OUT_RING(GFX_OP_DRAWRECT_INFO);
383 OUT_RING(DR1); 383 OUT_RING(DR1);
384 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); 384 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
385 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); 385 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
386 OUT_RING(DR4); 386 OUT_RING(DR4);
387 OUT_RING(0); 387 OUT_RING(0);
388 } 388 }
389 ADVANCE_LP_RING(); 389 ADVANCE_LP_RING();
390 390
391 return 0; 391 return 0;
392 } 392 }
393 393
394 /* XXX: Emitting the counter should really be moved to part of the IRQ 394 /* XXX: Emitting the counter should really be moved to part of the IRQ
395 * emit. For now, do it in both places: 395 * emit. For now, do it in both places:
396 */ 396 */
397 397
398 static void i915_emit_breadcrumb(struct drm_device *dev) 398 static void i915_emit_breadcrumb(struct drm_device *dev)
399 { 399 {
400 drm_i915_private_t *dev_priv = dev->dev_private; 400 drm_i915_private_t *dev_priv = dev->dev_private;
401 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 401 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
402 402
403 dev_priv->counter++; 403 dev_priv->counter++;
404 if (dev_priv->counter > 0x7FFFFFFFUL) 404 if (dev_priv->counter > 0x7FFFFFFFUL)
405 dev_priv->counter = 0; 405 dev_priv->counter = 0;
406 if (master_priv->sarea_priv) 406 if (master_priv->sarea_priv)
407 master_priv->sarea_priv->last_enqueue = dev_priv->counter; 407 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
408 408
409 if (BEGIN_LP_RING(4) == 0) { 409 if (BEGIN_LP_RING(4) == 0) {
410 OUT_RING(MI_STORE_DWORD_INDEX); 410 OUT_RING(MI_STORE_DWORD_INDEX);
411 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 411 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
412 OUT_RING(dev_priv->counter); 412 OUT_RING(dev_priv->counter);
413 OUT_RING(0); 413 OUT_RING(0);
414 ADVANCE_LP_RING(); 414 ADVANCE_LP_RING();
415 } 415 }
416 } 416 }
417 417
418 static int i915_dispatch_cmdbuffer(struct drm_device * dev, 418 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
419 drm_i915_cmdbuffer_t *cmd, 419 drm_i915_cmdbuffer_t *cmd,
420 struct drm_clip_rect *cliprects, 420 struct drm_clip_rect *cliprects,
421 void *cmdbuf) 421 void *cmdbuf)
422 { 422 {
423 int nbox = cmd->num_cliprects; 423 int nbox = cmd->num_cliprects;
424 int i = 0, count, ret; 424 int i = 0, count, ret;
425 425
426 if (cmd->sz & 0x3) { 426 if (cmd->sz & 0x3) {
427 DRM_ERROR("alignment"); 427 DRM_ERROR("alignment");
428 return -EINVAL; 428 return -EINVAL;
429 } 429 }
430 430
431 i915_kernel_lost_context(dev); 431 i915_kernel_lost_context(dev);
432 432
433 count = nbox ? nbox : 1; 433 count = nbox ? nbox : 1;
434 434
435 for (i = 0; i < count; i++) { 435 for (i = 0; i < count; i++) {
436 if (i < nbox) { 436 if (i < nbox) {
437 ret = i915_emit_box(dev, &cliprects[i], 437 ret = i915_emit_box(dev, &cliprects[i],
438 cmd->DR1, cmd->DR4); 438 cmd->DR1, cmd->DR4);
439 if (ret) 439 if (ret)
440 return ret; 440 return ret;
441 } 441 }
442 442
443 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4); 443 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
444 if (ret) 444 if (ret)
445 return ret; 445 return ret;
446 } 446 }
447 447
448 i915_emit_breadcrumb(dev); 448 i915_emit_breadcrumb(dev);
449 return 0; 449 return 0;
450 } 450 }
451 451
452 static int i915_dispatch_batchbuffer(struct drm_device * dev, 452 static int i915_dispatch_batchbuffer(struct drm_device * dev,
453 drm_i915_batchbuffer_t * batch, 453 drm_i915_batchbuffer_t * batch,
454 struct drm_clip_rect *cliprects) 454 struct drm_clip_rect *cliprects)
455 { 455 {
456 struct drm_i915_private *dev_priv = dev->dev_private; 456 struct drm_i915_private *dev_priv = dev->dev_private;
457 int nbox = batch->num_cliprects; 457 int nbox = batch->num_cliprects;
458 int i, count, ret; 458 int i, count, ret;
459 459
460 if ((batch->start | batch->used) & 0x7) { 460 if ((batch->start | batch->used) & 0x7) {
461 DRM_ERROR("alignment"); 461 DRM_ERROR("alignment");
462 return -EINVAL; 462 return -EINVAL;
463 } 463 }
464 464
465 i915_kernel_lost_context(dev); 465 i915_kernel_lost_context(dev);
466 466
467 count = nbox ? nbox : 1; 467 count = nbox ? nbox : 1;
468 for (i = 0; i < count; i++) { 468 for (i = 0; i < count; i++) {
469 if (i < nbox) { 469 if (i < nbox) {
470 ret = i915_emit_box(dev, &cliprects[i], 470 ret = i915_emit_box(dev, &cliprects[i],
471 batch->DR1, batch->DR4); 471 batch->DR1, batch->DR4);
472 if (ret) 472 if (ret)
473 return ret; 473 return ret;
474 } 474 }
475 475
476 if (!IS_I830(dev) && !IS_845G(dev)) { 476 if (!IS_I830(dev) && !IS_845G(dev)) {
477 ret = BEGIN_LP_RING(2); 477 ret = BEGIN_LP_RING(2);
478 if (ret) 478 if (ret)
479 return ret; 479 return ret;
480 480
481 if (INTEL_INFO(dev)->gen >= 4) { 481 if (INTEL_INFO(dev)->gen >= 4) {
482 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); 482 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
483 OUT_RING(batch->start); 483 OUT_RING(batch->start);
484 } else { 484 } else {
485 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); 485 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
486 OUT_RING(batch->start | MI_BATCH_NON_SECURE); 486 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
487 } 487 }
488 } else { 488 } else {
489 ret = BEGIN_LP_RING(4); 489 ret = BEGIN_LP_RING(4);
490 if (ret) 490 if (ret)
491 return ret; 491 return ret;
492 492
493 OUT_RING(MI_BATCH_BUFFER); 493 OUT_RING(MI_BATCH_BUFFER);
494 OUT_RING(batch->start | MI_BATCH_NON_SECURE); 494 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
495 OUT_RING(batch->start + batch->used - 4); 495 OUT_RING(batch->start + batch->used - 4);
496 OUT_RING(0); 496 OUT_RING(0);
497 } 497 }
498 ADVANCE_LP_RING(); 498 ADVANCE_LP_RING();
499 } 499 }
500 500
501 501
502 if (IS_G4X(dev) || IS_GEN5(dev)) { 502 if (IS_G4X(dev) || IS_GEN5(dev)) {
503 if (BEGIN_LP_RING(2) == 0) { 503 if (BEGIN_LP_RING(2) == 0) {
504 OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); 504 OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
505 OUT_RING(MI_NOOP); 505 OUT_RING(MI_NOOP);
506 ADVANCE_LP_RING(); 506 ADVANCE_LP_RING();
507 } 507 }
508 } 508 }
509 509
510 i915_emit_breadcrumb(dev); 510 i915_emit_breadcrumb(dev);
511 return 0; 511 return 0;
512 } 512 }
513 513
514 static int i915_dispatch_flip(struct drm_device * dev) 514 static int i915_dispatch_flip(struct drm_device * dev)
515 { 515 {
516 drm_i915_private_t *dev_priv = dev->dev_private; 516 drm_i915_private_t *dev_priv = dev->dev_private;
517 struct drm_i915_master_private *master_priv = 517 struct drm_i915_master_private *master_priv =
518 dev->primary->master->driver_priv; 518 dev->primary->master->driver_priv;
519 int ret; 519 int ret;
520 520
521 if (!master_priv->sarea_priv) 521 if (!master_priv->sarea_priv)
522 return -EINVAL; 522 return -EINVAL;
523 523
524 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", 524 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
525 __func__, 525 __func__,
526 dev_priv->current_page, 526 dev_priv->current_page,
527 master_priv->sarea_priv->pf_current_page); 527 master_priv->sarea_priv->pf_current_page);
528 528
529 i915_kernel_lost_context(dev); 529 i915_kernel_lost_context(dev);
530 530
531 ret = BEGIN_LP_RING(10); 531 ret = BEGIN_LP_RING(10);
532 if (ret) 532 if (ret)
533 return ret; 533 return ret;
534 534
535 OUT_RING(MI_FLUSH | MI_READ_FLUSH); 535 OUT_RING(MI_FLUSH | MI_READ_FLUSH);
536 OUT_RING(0); 536 OUT_RING(0);
537 537
538 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); 538 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
539 OUT_RING(0); 539 OUT_RING(0);
540 if (dev_priv->current_page == 0) { 540 if (dev_priv->current_page == 0) {
541 OUT_RING(dev_priv->back_offset); 541 OUT_RING(dev_priv->back_offset);
542 dev_priv->current_page = 1; 542 dev_priv->current_page = 1;
543 } else { 543 } else {
544 OUT_RING(dev_priv->front_offset); 544 OUT_RING(dev_priv->front_offset);
545 dev_priv->current_page = 0; 545 dev_priv->current_page = 0;
546 } 546 }
547 OUT_RING(0); 547 OUT_RING(0);
548 548
549 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); 549 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
550 OUT_RING(0); 550 OUT_RING(0);
551 551
552 ADVANCE_LP_RING(); 552 ADVANCE_LP_RING();
553 553
554 master_priv->sarea_priv->last_enqueue = dev_priv->counter++; 554 master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
555 555
556 if (BEGIN_LP_RING(4) == 0) { 556 if (BEGIN_LP_RING(4) == 0) {
557 OUT_RING(MI_STORE_DWORD_INDEX); 557 OUT_RING(MI_STORE_DWORD_INDEX);
558 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 558 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
559 OUT_RING(dev_priv->counter); 559 OUT_RING(dev_priv->counter);
560 OUT_RING(0); 560 OUT_RING(0);
561 ADVANCE_LP_RING(); 561 ADVANCE_LP_RING();
562 } 562 }
563 563
564 master_priv->sarea_priv->pf_current_page = dev_priv->current_page; 564 master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
565 return 0; 565 return 0;
566 } 566 }
567 567
568 static int i915_quiescent(struct drm_device *dev) 568 static int i915_quiescent(struct drm_device *dev)
569 { 569 {
570 struct intel_ring_buffer *ring = LP_RING(dev->dev_private); 570 struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
571 571
572 i915_kernel_lost_context(dev); 572 i915_kernel_lost_context(dev);
573 return intel_wait_ring_idle(ring); 573 return intel_wait_ring_idle(ring);
574 } 574 }
575 575
576 static int i915_flush_ioctl(struct drm_device *dev, void *data, 576 static int i915_flush_ioctl(struct drm_device *dev, void *data,
577 struct drm_file *file_priv) 577 struct drm_file *file_priv)
578 { 578 {
579 int ret; 579 int ret;
580 580
581 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 581 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
582 582
583 mutex_lock(&dev->struct_mutex); 583 mutex_lock(&dev->struct_mutex);
584 ret = i915_quiescent(dev); 584 ret = i915_quiescent(dev);
585 mutex_unlock(&dev->struct_mutex); 585 mutex_unlock(&dev->struct_mutex);
586 586
587 return ret; 587 return ret;
588 } 588 }
589 589
590 static int i915_batchbuffer(struct drm_device *dev, void *data, 590 static int i915_batchbuffer(struct drm_device *dev, void *data,
591 struct drm_file *file_priv) 591 struct drm_file *file_priv)
592 { 592 {
593 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 593 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
594 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 594 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
595 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 595 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
596 master_priv->sarea_priv; 596 master_priv->sarea_priv;
597 drm_i915_batchbuffer_t *batch = data; 597 drm_i915_batchbuffer_t *batch = data;
598 int ret; 598 int ret;
599 struct drm_clip_rect *cliprects = NULL; 599 struct drm_clip_rect *cliprects = NULL;
600 600
601 if (!dev_priv->allow_batchbuffer) { 601 if (!dev_priv->allow_batchbuffer) {
602 DRM_ERROR("Batchbuffer ioctl disabled\n"); 602 DRM_ERROR("Batchbuffer ioctl disabled\n");
603 return -EINVAL; 603 return -EINVAL;
604 } 604 }
605 605
606 DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n", 606 DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
607 batch->start, batch->used, batch->num_cliprects); 607 batch->start, batch->used, batch->num_cliprects);
608 608
609 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 609 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
610 610
611 if (batch->num_cliprects < 0) 611 if (batch->num_cliprects < 0)
612 return -EINVAL; 612 return -EINVAL;
613 613
614 if (batch->num_cliprects) { 614 if (batch->num_cliprects) {
615 cliprects = kcalloc(batch->num_cliprects, 615 cliprects = kcalloc(batch->num_cliprects,
616 sizeof(struct drm_clip_rect), 616 sizeof(struct drm_clip_rect),
617 GFP_KERNEL); 617 GFP_KERNEL);
618 if (cliprects == NULL) 618 if (cliprects == NULL)
619 return -ENOMEM; 619 return -ENOMEM;
620 620
621 ret = copy_from_user(cliprects, batch->cliprects, 621 ret = copy_from_user(cliprects, batch->cliprects,
622 batch->num_cliprects * 622 batch->num_cliprects *
623 sizeof(struct drm_clip_rect)); 623 sizeof(struct drm_clip_rect));
624 if (ret != 0) { 624 if (ret != 0) {
625 ret = -EFAULT; 625 ret = -EFAULT;
626 goto fail_free; 626 goto fail_free;
627 } 627 }
628 } 628 }
629 629
630 mutex_lock(&dev->struct_mutex); 630 mutex_lock(&dev->struct_mutex);
631 ret = i915_dispatch_batchbuffer(dev, batch, cliprects); 631 ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
632 mutex_unlock(&dev->struct_mutex); 632 mutex_unlock(&dev->struct_mutex);
633 633
634 if (sarea_priv) 634 if (sarea_priv)
635 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 635 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
636 636
637 fail_free: 637 fail_free:
638 kfree(cliprects); 638 kfree(cliprects);
639 639
640 return ret; 640 return ret;
641 } 641 }
642 642
643 static int i915_cmdbuffer(struct drm_device *dev, void *data, 643 static int i915_cmdbuffer(struct drm_device *dev, void *data,
644 struct drm_file *file_priv) 644 struct drm_file *file_priv)
645 { 645 {
646 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 646 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
647 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 647 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
648 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 648 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
649 master_priv->sarea_priv; 649 master_priv->sarea_priv;
650 drm_i915_cmdbuffer_t *cmdbuf = data; 650 drm_i915_cmdbuffer_t *cmdbuf = data;
651 struct drm_clip_rect *cliprects = NULL; 651 struct drm_clip_rect *cliprects = NULL;
652 void *batch_data; 652 void *batch_data;
653 int ret; 653 int ret;
654 654
655 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 655 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
656 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); 656 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
657 657
658 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 658 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
659 659
660 if (cmdbuf->num_cliprects < 0) 660 if (cmdbuf->num_cliprects < 0)
661 return -EINVAL; 661 return -EINVAL;
662 662
663 batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL); 663 batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
664 if (batch_data == NULL) 664 if (batch_data == NULL)
665 return -ENOMEM; 665 return -ENOMEM;
666 666
667 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); 667 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
668 if (ret != 0) { 668 if (ret != 0) {
669 ret = -EFAULT; 669 ret = -EFAULT;
670 goto fail_batch_free; 670 goto fail_batch_free;
671 } 671 }
672 672
673 if (cmdbuf->num_cliprects) { 673 if (cmdbuf->num_cliprects) {
674 cliprects = kcalloc(cmdbuf->num_cliprects, 674 cliprects = kcalloc(cmdbuf->num_cliprects,
675 sizeof(struct drm_clip_rect), GFP_KERNEL); 675 sizeof(struct drm_clip_rect), GFP_KERNEL);
676 if (cliprects == NULL) { 676 if (cliprects == NULL) {
677 ret = -ENOMEM; 677 ret = -ENOMEM;
678 goto fail_batch_free; 678 goto fail_batch_free;
679 } 679 }
680 680
681 ret = copy_from_user(cliprects, cmdbuf->cliprects, 681 ret = copy_from_user(cliprects, cmdbuf->cliprects,
682 cmdbuf->num_cliprects * 682 cmdbuf->num_cliprects *
683 sizeof(struct drm_clip_rect)); 683 sizeof(struct drm_clip_rect));
684 if (ret != 0) { 684 if (ret != 0) {
685 ret = -EFAULT; 685 ret = -EFAULT;
686 goto fail_clip_free; 686 goto fail_clip_free;
687 } 687 }
688 } 688 }
689 689
690 mutex_lock(&dev->struct_mutex); 690 mutex_lock(&dev->struct_mutex);
691 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data); 691 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
692 mutex_unlock(&dev->struct_mutex); 692 mutex_unlock(&dev->struct_mutex);
693 if (ret) { 693 if (ret) {
694 DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); 694 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
695 goto fail_clip_free; 695 goto fail_clip_free;
696 } 696 }
697 697
698 if (sarea_priv) 698 if (sarea_priv)
699 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 699 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
700 700
701 fail_clip_free: 701 fail_clip_free:
702 kfree(cliprects); 702 kfree(cliprects);
703 fail_batch_free: 703 fail_batch_free:
704 kfree(batch_data); 704 kfree(batch_data);
705 705
706 return ret; 706 return ret;
707 } 707 }
708 708
709 static int i915_flip_bufs(struct drm_device *dev, void *data, 709 static int i915_flip_bufs(struct drm_device *dev, void *data,
710 struct drm_file *file_priv) 710 struct drm_file *file_priv)
711 { 711 {
712 int ret; 712 int ret;
713 713
714 DRM_DEBUG_DRIVER("%s\n", __func__); 714 DRM_DEBUG_DRIVER("%s\n", __func__);
715 715
716 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 716 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
717 717
718 mutex_lock(&dev->struct_mutex); 718 mutex_lock(&dev->struct_mutex);
719 ret = i915_dispatch_flip(dev); 719 ret = i915_dispatch_flip(dev);
720 mutex_unlock(&dev->struct_mutex); 720 mutex_unlock(&dev->struct_mutex);
721 721
722 return ret; 722 return ret;
723 } 723 }
724 724
725 static int i915_getparam(struct drm_device *dev, void *data, 725 static int i915_getparam(struct drm_device *dev, void *data,
726 struct drm_file *file_priv) 726 struct drm_file *file_priv)
727 { 727 {
728 drm_i915_private_t *dev_priv = dev->dev_private; 728 drm_i915_private_t *dev_priv = dev->dev_private;
729 drm_i915_getparam_t *param = data; 729 drm_i915_getparam_t *param = data;
730 int value; 730 int value;
731 731
732 if (!dev_priv) { 732 if (!dev_priv) {
733 DRM_ERROR("called with no initialization\n"); 733 DRM_ERROR("called with no initialization\n");
734 return -EINVAL; 734 return -EINVAL;
735 } 735 }
736 736
737 switch (param->param) { 737 switch (param->param) {
738 case I915_PARAM_IRQ_ACTIVE: 738 case I915_PARAM_IRQ_ACTIVE:
739 value = dev->pdev->irq ? 1 : 0; 739 value = dev->pdev->irq ? 1 : 0;
740 break; 740 break;
741 case I915_PARAM_ALLOW_BATCHBUFFER: 741 case I915_PARAM_ALLOW_BATCHBUFFER:
742 value = dev_priv->allow_batchbuffer ? 1 : 0; 742 value = dev_priv->allow_batchbuffer ? 1 : 0;
743 break; 743 break;
744 case I915_PARAM_LAST_DISPATCH: 744 case I915_PARAM_LAST_DISPATCH:
745 value = READ_BREADCRUMB(dev_priv); 745 value = READ_BREADCRUMB(dev_priv);
746 break; 746 break;
747 case I915_PARAM_CHIPSET_ID: 747 case I915_PARAM_CHIPSET_ID:
748 value = dev->pci_device; 748 value = dev->pci_device;
749 break; 749 break;
750 case I915_PARAM_HAS_GEM: 750 case I915_PARAM_HAS_GEM:
751 value = dev_priv->has_gem; 751 value = dev_priv->has_gem;
752 break; 752 break;
753 case I915_PARAM_NUM_FENCES_AVAIL: 753 case I915_PARAM_NUM_FENCES_AVAIL:
754 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; 754 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
755 break; 755 break;
756 case I915_PARAM_HAS_OVERLAY: 756 case I915_PARAM_HAS_OVERLAY:
757 value = dev_priv->overlay ? 1 : 0; 757 value = dev_priv->overlay ? 1 : 0;
758 break; 758 break;
759 case I915_PARAM_HAS_PAGEFLIPPING: 759 case I915_PARAM_HAS_PAGEFLIPPING:
760 value = 1; 760 value = 1;
761 break; 761 break;
762 case I915_PARAM_HAS_EXECBUF2: 762 case I915_PARAM_HAS_EXECBUF2:
763 /* depends on GEM */ 763 /* depends on GEM */
764 value = dev_priv->has_gem; 764 value = dev_priv->has_gem;
765 break; 765 break;
766 case I915_PARAM_HAS_BSD: 766 case I915_PARAM_HAS_BSD:
767 value = HAS_BSD(dev); 767 value = HAS_BSD(dev);
768 break; 768 break;
769 case I915_PARAM_HAS_BLT: 769 case I915_PARAM_HAS_BLT:
770 value = HAS_BLT(dev); 770 value = HAS_BLT(dev);
771 break; 771 break;
772 case I915_PARAM_HAS_RELAXED_FENCING: 772 case I915_PARAM_HAS_RELAXED_FENCING:
773 value = 1; 773 value = 1;
774 break; 774 break;
775 case I915_PARAM_HAS_COHERENT_RINGS: 775 case I915_PARAM_HAS_COHERENT_RINGS:
776 value = 1; 776 value = 1;
777 break; 777 break;
778 case I915_PARAM_HAS_EXEC_CONSTANTS: 778 case I915_PARAM_HAS_EXEC_CONSTANTS:
779 value = INTEL_INFO(dev)->gen >= 4; 779 value = INTEL_INFO(dev)->gen >= 4;
780 break; 780 break;
781 case I915_PARAM_HAS_RELAXED_DELTA: 781 case I915_PARAM_HAS_RELAXED_DELTA:
782 value = 1; 782 value = 1;
783 break; 783 break;
784 case I915_PARAM_HAS_GEN7_SOL_RESET: 784 case I915_PARAM_HAS_GEN7_SOL_RESET:
785 value = 1; 785 value = 1;
786 break; 786 break;
787 case I915_PARAM_HAS_LLC:
788 value = HAS_LLC(dev);
789 break;
787 default: 790 default:
788 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 791 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
789 param->param); 792 param->param);
790 return -EINVAL; 793 return -EINVAL;
791 } 794 }
792 795
793 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { 796 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
794 DRM_ERROR("DRM_COPY_TO_USER failed\n"); 797 DRM_ERROR("DRM_COPY_TO_USER failed\n");
795 return -EFAULT; 798 return -EFAULT;
796 } 799 }
797 800
798 return 0; 801 return 0;
799 } 802 }
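
   Note (not part of the patch): the new I915_PARAM_HAS_LLC case above is the only functional change in this file; it exposes the driver's LLC capability through the existing GETPARAM ioctl so userspace can pick cache-coherent buffer handling paths. Below is a minimal, illustrative userspace sketch of how a client might probe it, assuming libdrm's drmIoctl() and an i915_drm.h that already carries the I915_PARAM_HAS_LLC definition introduced alongside this change; treating an ioctl failure as "no LLC" keeps it working on older kernels that reject the unknown parameter with -EINVAL.

   /* Hypothetical userspace probe for the new parameter; not from this commit. */
   #include <stdio.h>
   #include <fcntl.h>
   #include <xf86drm.h>      /* drmIoctl(), from libdrm */
   #include <i915_drm.h>     /* drm_i915_getparam_t, I915_PARAM_HAS_LLC */

   static int i915_has_llc(int fd)
   {
   	drm_i915_getparam_t gp;
   	int value = 0;

   	gp.param = I915_PARAM_HAS_LLC;
   	gp.value = &value;

   	/* Older kernels without this patch return an error here;
   	 * report "no LLC" in that case instead of failing. */
   	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
   		return 0;

   	return value;
   }

   int main(void)
   {
   	int fd = open("/dev/dri/card0", O_RDWR);

   	if (fd < 0)
   		return 1;

   	printf("LLC: %s\n", i915_has_llc(fd) ? "yes" : "no");
   	return 0;
   }
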
800 803
801 static int i915_setparam(struct drm_device *dev, void *data, 804 static int i915_setparam(struct drm_device *dev, void *data,
802 struct drm_file *file_priv) 805 struct drm_file *file_priv)
803 { 806 {
804 drm_i915_private_t *dev_priv = dev->dev_private; 807 drm_i915_private_t *dev_priv = dev->dev_private;
805 drm_i915_setparam_t *param = data; 808 drm_i915_setparam_t *param = data;
806 809
807 if (!dev_priv) { 810 if (!dev_priv) {
808 DRM_ERROR("called with no initialization\n"); 811 DRM_ERROR("called with no initialization\n");
809 return -EINVAL; 812 return -EINVAL;
810 } 813 }
811 814
812 switch (param->param) { 815 switch (param->param) {
813 case I915_SETPARAM_USE_MI_BATCHBUFFER_START: 816 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
814 break; 817 break;
815 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: 818 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
816 dev_priv->tex_lru_log_granularity = param->value; 819 dev_priv->tex_lru_log_granularity = param->value;
817 break; 820 break;
818 case I915_SETPARAM_ALLOW_BATCHBUFFER: 821 case I915_SETPARAM_ALLOW_BATCHBUFFER:
819 dev_priv->allow_batchbuffer = param->value; 822 dev_priv->allow_batchbuffer = param->value;
820 break; 823 break;
821 case I915_SETPARAM_NUM_USED_FENCES: 824 case I915_SETPARAM_NUM_USED_FENCES:
822 if (param->value > dev_priv->num_fence_regs || 825 if (param->value > dev_priv->num_fence_regs ||
823 param->value < 0) 826 param->value < 0)
824 return -EINVAL; 827 return -EINVAL;
825 /* Userspace can use first N regs */ 828 /* Userspace can use first N regs */
826 dev_priv->fence_reg_start = param->value; 829 dev_priv->fence_reg_start = param->value;
827 break; 830 break;
828 default: 831 default:
829 DRM_DEBUG_DRIVER("unknown parameter %d\n", 832 DRM_DEBUG_DRIVER("unknown parameter %d\n",
830 param->param); 833 param->param);
831 return -EINVAL; 834 return -EINVAL;
832 } 835 }
833 836
834 return 0; 837 return 0;
835 } 838 }
836 839
837 static int i915_set_status_page(struct drm_device *dev, void *data, 840 static int i915_set_status_page(struct drm_device *dev, void *data,
838 struct drm_file *file_priv) 841 struct drm_file *file_priv)
839 { 842 {
840 drm_i915_private_t *dev_priv = dev->dev_private; 843 drm_i915_private_t *dev_priv = dev->dev_private;
841 drm_i915_hws_addr_t *hws = data; 844 drm_i915_hws_addr_t *hws = data;
842 struct intel_ring_buffer *ring = LP_RING(dev_priv); 845 struct intel_ring_buffer *ring = LP_RING(dev_priv);
843 846
844 if (!I915_NEED_GFX_HWS(dev)) 847 if (!I915_NEED_GFX_HWS(dev))
845 return -EINVAL; 848 return -EINVAL;
846 849
847 if (!dev_priv) { 850 if (!dev_priv) {
848 DRM_ERROR("called with no initialization\n"); 851 DRM_ERROR("called with no initialization\n");
849 return -EINVAL; 852 return -EINVAL;
850 } 853 }
851 854
852 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 855 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
853 WARN(1, "tried to set status page when mode setting active\n"); 856 WARN(1, "tried to set status page when mode setting active\n");
854 return 0; 857 return 0;
855 } 858 }
856 859
857 DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); 860 DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
858 861
859 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); 862 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
860 863
861 dev_priv->hws_map.offset = dev->agp->base + hws->addr; 864 dev_priv->hws_map.offset = dev->agp->base + hws->addr;
862 dev_priv->hws_map.size = 4*1024; 865 dev_priv->hws_map.size = 4*1024;
863 dev_priv->hws_map.type = 0; 866 dev_priv->hws_map.type = 0;
864 dev_priv->hws_map.flags = 0; 867 dev_priv->hws_map.flags = 0;
865 dev_priv->hws_map.mtrr = 0; 868 dev_priv->hws_map.mtrr = 0;
866 869
867 drm_core_ioremap_wc(&dev_priv->hws_map, dev); 870 drm_core_ioremap_wc(&dev_priv->hws_map, dev);
868 if (dev_priv->hws_map.handle == NULL) { 871 if (dev_priv->hws_map.handle == NULL) {
869 i915_dma_cleanup(dev); 872 i915_dma_cleanup(dev);
870 ring->status_page.gfx_addr = 0; 873 ring->status_page.gfx_addr = 0;
871 DRM_ERROR("can not ioremap virtual address for" 874 DRM_ERROR("can not ioremap virtual address for"
872 " G33 hw status page\n"); 875 " G33 hw status page\n");
873 return -ENOMEM; 876 return -ENOMEM;
874 } 877 }
875 ring->status_page.page_addr = 878 ring->status_page.page_addr =
876 (void __force __iomem *)dev_priv->hws_map.handle; 879 (void __force __iomem *)dev_priv->hws_map.handle;
877 memset_io(ring->status_page.page_addr, 0, PAGE_SIZE); 880 memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
878 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); 881 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
879 882
880 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", 883 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
881 ring->status_page.gfx_addr); 884 ring->status_page.gfx_addr);
882 DRM_DEBUG_DRIVER("load hws at %p\n", 885 DRM_DEBUG_DRIVER("load hws at %p\n",
883 ring->status_page.page_addr); 886 ring->status_page.page_addr);
884 return 0; 887 return 0;
885 } 888 }
886 889
887 static int i915_get_bridge_dev(struct drm_device *dev) 890 static int i915_get_bridge_dev(struct drm_device *dev)
888 { 891 {
889 struct drm_i915_private *dev_priv = dev->dev_private; 892 struct drm_i915_private *dev_priv = dev->dev_private;
890 893
891 dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); 894 dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
892 if (!dev_priv->bridge_dev) { 895 if (!dev_priv->bridge_dev) {
893 DRM_ERROR("bridge device not found\n"); 896 DRM_ERROR("bridge device not found\n");
894 return -1; 897 return -1;
895 } 898 }
896 return 0; 899 return 0;
897 } 900 }
898 901
899 #define MCHBAR_I915 0x44 902 #define MCHBAR_I915 0x44
900 #define MCHBAR_I965 0x48 903 #define MCHBAR_I965 0x48
901 #define MCHBAR_SIZE (4*4096) 904 #define MCHBAR_SIZE (4*4096)
902 905
903 #define DEVEN_REG 0x54 906 #define DEVEN_REG 0x54
904 #define DEVEN_MCHBAR_EN (1 << 28) 907 #define DEVEN_MCHBAR_EN (1 << 28)
905 908
906 /* Allocate space for the MCH regs if needed, return nonzero on error */ 909 /* Allocate space for the MCH regs if needed, return nonzero on error */
907 static int 910 static int
908 intel_alloc_mchbar_resource(struct drm_device *dev) 911 intel_alloc_mchbar_resource(struct drm_device *dev)
909 { 912 {
910 drm_i915_private_t *dev_priv = dev->dev_private; 913 drm_i915_private_t *dev_priv = dev->dev_private;
911 int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; 914 int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
912 u32 temp_lo, temp_hi = 0; 915 u32 temp_lo, temp_hi = 0;
913 u64 mchbar_addr; 916 u64 mchbar_addr;
914 int ret; 917 int ret;
915 918
916 if (INTEL_INFO(dev)->gen >= 4) 919 if (INTEL_INFO(dev)->gen >= 4)
917 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); 920 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
918 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); 921 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
919 mchbar_addr = ((u64)temp_hi << 32) | temp_lo; 922 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
920 923
921 /* If ACPI doesn't have it, assume we need to allocate it ourselves */ 924 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
922 #ifdef CONFIG_PNP 925 #ifdef CONFIG_PNP
923 if (mchbar_addr && 926 if (mchbar_addr &&
924 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) 927 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
925 return 0; 928 return 0;
926 #endif 929 #endif
927 930
928 /* Get some space for it */ 931 /* Get some space for it */
929 dev_priv->mch_res.name = "i915 MCHBAR"; 932 dev_priv->mch_res.name = "i915 MCHBAR";
930 dev_priv->mch_res.flags = IORESOURCE_MEM; 933 dev_priv->mch_res.flags = IORESOURCE_MEM;
931 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, 934 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
932 &dev_priv->mch_res, 935 &dev_priv->mch_res,
933 MCHBAR_SIZE, MCHBAR_SIZE, 936 MCHBAR_SIZE, MCHBAR_SIZE,
934 PCIBIOS_MIN_MEM, 937 PCIBIOS_MIN_MEM,
935 0, pcibios_align_resource, 938 0, pcibios_align_resource,
936 dev_priv->bridge_dev); 939 dev_priv->bridge_dev);
937 if (ret) { 940 if (ret) {
938 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); 941 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
939 dev_priv->mch_res.start = 0; 942 dev_priv->mch_res.start = 0;
940 return ret; 943 return ret;
941 } 944 }
942 945
943 if (INTEL_INFO(dev)->gen >= 4) 946 if (INTEL_INFO(dev)->gen >= 4)
944 pci_write_config_dword(dev_priv->bridge_dev, reg + 4, 947 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
945 upper_32_bits(dev_priv->mch_res.start)); 948 upper_32_bits(dev_priv->mch_res.start));
946 949
947 pci_write_config_dword(dev_priv->bridge_dev, reg, 950 pci_write_config_dword(dev_priv->bridge_dev, reg,
948 lower_32_bits(dev_priv->mch_res.start)); 951 lower_32_bits(dev_priv->mch_res.start));
949 return 0; 952 return 0;
950 } 953 }
951 954
952 /* Setup MCHBAR if possible, return true if we should disable it again */ 955 /* Setup MCHBAR if possible, return true if we should disable it again */
953 static void 956 static void
954 intel_setup_mchbar(struct drm_device *dev) 957 intel_setup_mchbar(struct drm_device *dev)
955 { 958 {
956 drm_i915_private_t *dev_priv = dev->dev_private; 959 drm_i915_private_t *dev_priv = dev->dev_private;
957 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; 960 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
958 u32 temp; 961 u32 temp;
959 bool enabled; 962 bool enabled;
960 963
961 dev_priv->mchbar_need_disable = false; 964 dev_priv->mchbar_need_disable = false;
962 965
963 if (IS_I915G(dev) || IS_I915GM(dev)) { 966 if (IS_I915G(dev) || IS_I915GM(dev)) {
964 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); 967 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
965 enabled = !!(temp & DEVEN_MCHBAR_EN); 968 enabled = !!(temp & DEVEN_MCHBAR_EN);
966 } else { 969 } else {
967 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); 970 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
968 enabled = temp & 1; 971 enabled = temp & 1;
969 } 972 }
970 973
971 /* If it's already enabled, don't have to do anything */ 974 /* If it's already enabled, don't have to do anything */
972 if (enabled) 975 if (enabled)
973 return; 976 return;
974 977
975 if (intel_alloc_mchbar_resource(dev)) 978 if (intel_alloc_mchbar_resource(dev))
976 return; 979 return;
977 980
978 dev_priv->mchbar_need_disable = true; 981 dev_priv->mchbar_need_disable = true;
979 982
980 /* Space is allocated or reserved, so enable it. */ 983 /* Space is allocated or reserved, so enable it. */
981 if (IS_I915G(dev) || IS_I915GM(dev)) { 984 if (IS_I915G(dev) || IS_I915GM(dev)) {
982 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, 985 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
983 temp | DEVEN_MCHBAR_EN); 986 temp | DEVEN_MCHBAR_EN);
984 } else { 987 } else {
985 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); 988 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
986 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); 989 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
987 } 990 }
988 } 991 }
989 992
990 static void 993 static void
991 intel_teardown_mchbar(struct drm_device *dev) 994 intel_teardown_mchbar(struct drm_device *dev)
992 { 995 {
993 drm_i915_private_t *dev_priv = dev->dev_private; 996 drm_i915_private_t *dev_priv = dev->dev_private;
994 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; 997 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
995 u32 temp; 998 u32 temp;
996 999
997 if (dev_priv->mchbar_need_disable) { 1000 if (dev_priv->mchbar_need_disable) {
998 if (IS_I915G(dev) || IS_I915GM(dev)) { 1001 if (IS_I915G(dev) || IS_I915GM(dev)) {
999 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); 1002 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
1000 temp &= ~DEVEN_MCHBAR_EN; 1003 temp &= ~DEVEN_MCHBAR_EN;
1001 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); 1004 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
1002 } else { 1005 } else {
1003 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); 1006 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1004 temp &= ~1; 1007 temp &= ~1;
1005 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); 1008 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
1006 } 1009 }
1007 } 1010 }
1008 1011
1009 if (dev_priv->mch_res.start) 1012 if (dev_priv->mch_res.start)
1010 release_resource(&dev_priv->mch_res); 1013 release_resource(&dev_priv->mch_res);
1011 } 1014 }
1012 1015
1013 #define PTE_ADDRESS_MASK 0xfffff000 1016 #define PTE_ADDRESS_MASK 0xfffff000
1014 #define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ 1017 #define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
1015 #define PTE_MAPPING_TYPE_UNCACHED (0 << 1) 1018 #define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
1016 #define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */ 1019 #define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
1017 #define PTE_MAPPING_TYPE_CACHED (3 << 1) 1020 #define PTE_MAPPING_TYPE_CACHED (3 << 1)
1018 #define PTE_MAPPING_TYPE_MASK (3 << 1) 1021 #define PTE_MAPPING_TYPE_MASK (3 << 1)
1019 #define PTE_VALID (1 << 0) 1022 #define PTE_VALID (1 << 0)
1020 1023
1021 /** 1024 /**
1022 * i915_stolen_to_phys - take an offset into stolen memory and turn it into 1025 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
1023 * a physical one 1026 * a physical one
1024 * @dev: drm device 1027 * @dev: drm device
1025 * @offset: address to translate 1028 * @offset: address to translate
1026 * 1029 *
1027 * Some chip functions require allocations from stolen space and need the 1030 * Some chip functions require allocations from stolen space and need the
1028 * physical address of the memory in question. 1031 * physical address of the memory in question.
1029 */ 1032 */
1030 static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset) 1033 static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
1031 { 1034 {
1032 struct drm_i915_private *dev_priv = dev->dev_private; 1035 struct drm_i915_private *dev_priv = dev->dev_private;
1033 struct pci_dev *pdev = dev_priv->bridge_dev; 1036 struct pci_dev *pdev = dev_priv->bridge_dev;
1034 u32 base; 1037 u32 base;
1035 1038
1036 #if 0 1039 #if 0
1037 /* On the machines I have tested the Graphics Base of Stolen Memory 1040 /* On the machines I have tested the Graphics Base of Stolen Memory
1038 * is unreliable, so compute the base by subtracting the stolen memory 1041 * is unreliable, so compute the base by subtracting the stolen memory
1039 * from the Top of Low Usable DRAM which is where the BIOS places 1042 * from the Top of Low Usable DRAM which is where the BIOS places
1040 * the graphics stolen memory. 1043 * the graphics stolen memory.
1041 */ 1044 */
1042 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { 1045 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
1043 /* top 32bits are reserved = 0 */ 1046 /* top 32bits are reserved = 0 */
1044 pci_read_config_dword(pdev, 0xA4, &base); 1047 pci_read_config_dword(pdev, 0xA4, &base);
1045 } else { 1048 } else {
1046 /* XXX presume 8xx is the same as i915 */ 1049 /* XXX presume 8xx is the same as i915 */
1047 pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base); 1050 pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
1048 } 1051 }
1049 #else 1052 #else
1050 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { 1053 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
1051 u16 val; 1054 u16 val;
1052 pci_read_config_word(pdev, 0xb0, &val); 1055 pci_read_config_word(pdev, 0xb0, &val);
1053 base = val >> 4 << 20; 1056 base = val >> 4 << 20;
1054 } else { 1057 } else {
1055 u8 val; 1058 u8 val;
1056 pci_read_config_byte(pdev, 0x9c, &val); 1059 pci_read_config_byte(pdev, 0x9c, &val);
1057 base = val >> 3 << 27; 1060 base = val >> 3 << 27;
1058 } 1061 }
1059 base -= dev_priv->mm.gtt->stolen_size; 1062 base -= dev_priv->mm.gtt->stolen_size;
1060 #endif 1063 #endif
1061 1064
1062 return base + offset; 1065 return base + offset;
1063 } 1066 }
1064 1067
1065 static void i915_warn_stolen(struct drm_device *dev) 1068 static void i915_warn_stolen(struct drm_device *dev)
1066 { 1069 {
1067 DRM_ERROR("not enough stolen space for compressed buffer, disabling\n"); 1070 DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
1068 DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); 1071 DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
1069 } 1072 }
1070 1073
1071 static void i915_setup_compression(struct drm_device *dev, int size) 1074 static void i915_setup_compression(struct drm_device *dev, int size)
1072 { 1075 {
1073 struct drm_i915_private *dev_priv = dev->dev_private; 1076 struct drm_i915_private *dev_priv = dev->dev_private;
1074 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); 1077 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
1075 unsigned long cfb_base; 1078 unsigned long cfb_base;
1076 unsigned long ll_base = 0; 1079 unsigned long ll_base = 0;
1077 1080
1078 /* Just in case the BIOS is doing something questionable. */ 1081 /* Just in case the BIOS is doing something questionable. */
1079 intel_disable_fbc(dev); 1082 intel_disable_fbc(dev);
1080 1083
1081 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); 1084 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
1082 if (compressed_fb) 1085 if (compressed_fb)
1083 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); 1086 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
1084 if (!compressed_fb) 1087 if (!compressed_fb)
1085 goto err; 1088 goto err;
1086 1089
1087 cfb_base = i915_stolen_to_phys(dev, compressed_fb->start); 1090 cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
1088 if (!cfb_base) 1091 if (!cfb_base)
1089 goto err_fb; 1092 goto err_fb;
1090 1093
1091 if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) { 1094 if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
1092 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, 1095 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
1093 4096, 4096, 0); 1096 4096, 4096, 0);
1094 if (compressed_llb) 1097 if (compressed_llb)
1095 compressed_llb = drm_mm_get_block(compressed_llb, 1098 compressed_llb = drm_mm_get_block(compressed_llb,
1096 4096, 4096); 1099 4096, 4096);
1097 if (!compressed_llb) 1100 if (!compressed_llb)
1098 goto err_fb; 1101 goto err_fb;
1099 1102
1100 ll_base = i915_stolen_to_phys(dev, compressed_llb->start); 1103 ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
1101 if (!ll_base) 1104 if (!ll_base)
1102 goto err_llb; 1105 goto err_llb;
1103 } 1106 }
1104 1107
1105 dev_priv->cfb_size = size; 1108 dev_priv->cfb_size = size;
1106 1109
1107 dev_priv->compressed_fb = compressed_fb; 1110 dev_priv->compressed_fb = compressed_fb;
1108 if (HAS_PCH_SPLIT(dev)) 1111 if (HAS_PCH_SPLIT(dev))
1109 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); 1112 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
1110 else if (IS_GM45(dev)) { 1113 else if (IS_GM45(dev)) {
1111 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 1114 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
1112 } else { 1115 } else {
1113 I915_WRITE(FBC_CFB_BASE, cfb_base); 1116 I915_WRITE(FBC_CFB_BASE, cfb_base);
1114 I915_WRITE(FBC_LL_BASE, ll_base); 1117 I915_WRITE(FBC_LL_BASE, ll_base);
1115 dev_priv->compressed_llb = compressed_llb; 1118 dev_priv->compressed_llb = compressed_llb;
1116 } 1119 }
1117 1120
1118 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", 1121 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
1119 cfb_base, ll_base, size >> 20); 1122 cfb_base, ll_base, size >> 20);
1120 return; 1123 return;
1121 1124
1122 err_llb: 1125 err_llb:
1123 drm_mm_put_block(compressed_llb); 1126 drm_mm_put_block(compressed_llb);
1124 err_fb: 1127 err_fb:
1125 drm_mm_put_block(compressed_fb); 1128 drm_mm_put_block(compressed_fb);
1126 err: 1129 err:
1127 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 1130 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1128 i915_warn_stolen(dev); 1131 i915_warn_stolen(dev);
1129 } 1132 }
1130 1133
1131 static void i915_cleanup_compression(struct drm_device *dev) 1134 static void i915_cleanup_compression(struct drm_device *dev)
1132 { 1135 {
1133 struct drm_i915_private *dev_priv = dev->dev_private; 1136 struct drm_i915_private *dev_priv = dev->dev_private;
1134 1137
1135 drm_mm_put_block(dev_priv->compressed_fb); 1138 drm_mm_put_block(dev_priv->compressed_fb);
1136 if (dev_priv->compressed_llb) 1139 if (dev_priv->compressed_llb)
1137 drm_mm_put_block(dev_priv->compressed_llb); 1140 drm_mm_put_block(dev_priv->compressed_llb);
1138 } 1141 }
1139 1142
1140 /* true = enable decode, false = disable decode */ 1143 /* true = enable decode, false = disable decode */
1141 static unsigned int i915_vga_set_decode(void *cookie, bool state) 1144 static unsigned int i915_vga_set_decode(void *cookie, bool state)
1142 { 1145 {
1143 struct drm_device *dev = cookie; 1146 struct drm_device *dev = cookie;
1144 1147
1145 intel_modeset_vga_set_state(dev, state); 1148 intel_modeset_vga_set_state(dev, state);
1146 if (state) 1149 if (state)
1147 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 1150 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1148 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 1151 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1149 else 1152 else
1150 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 1153 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1151 } 1154 }
1152 1155
1153 static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) 1156 static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1154 { 1157 {
1155 struct drm_device *dev = pci_get_drvdata(pdev); 1158 struct drm_device *dev = pci_get_drvdata(pdev);
1156 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 1159 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
1157 if (state == VGA_SWITCHEROO_ON) { 1160 if (state == VGA_SWITCHEROO_ON) {
1158 printk(KERN_INFO "i915: switched on\n"); 1161 printk(KERN_INFO "i915: switched on\n");
1159 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1162 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1160 /* i915 resume handler doesn't set to D0 */ 1163 /* i915 resume handler doesn't set to D0 */
1161 pci_set_power_state(dev->pdev, PCI_D0); 1164 pci_set_power_state(dev->pdev, PCI_D0);
1162 i915_resume(dev); 1165 i915_resume(dev);
1163 dev->switch_power_state = DRM_SWITCH_POWER_ON; 1166 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1164 } else { 1167 } else {
1165 printk(KERN_ERR "i915: switched off\n"); 1168 printk(KERN_ERR "i915: switched off\n");
1166 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1169 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1167 i915_suspend(dev, pmm); 1170 i915_suspend(dev, pmm);
1168 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 1171 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1169 } 1172 }
1170 } 1173 }
1171 1174
1172 static bool i915_switcheroo_can_switch(struct pci_dev *pdev) 1175 static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1173 { 1176 {
1174 struct drm_device *dev = pci_get_drvdata(pdev); 1177 struct drm_device *dev = pci_get_drvdata(pdev);
1175 bool can_switch; 1178 bool can_switch;
1176 1179
1177 spin_lock(&dev->count_lock); 1180 spin_lock(&dev->count_lock);
1178 can_switch = (dev->open_count == 0); 1181 can_switch = (dev->open_count == 0);
1179 spin_unlock(&dev->count_lock); 1182 spin_unlock(&dev->count_lock);
1180 return can_switch; 1183 return can_switch;
1181 } 1184 }
1182 1185
1183 static int i915_load_gem_init(struct drm_device *dev) 1186 static int i915_load_gem_init(struct drm_device *dev)
1184 { 1187 {
1185 struct drm_i915_private *dev_priv = dev->dev_private; 1188 struct drm_i915_private *dev_priv = dev->dev_private;
1186 unsigned long prealloc_size, gtt_size, mappable_size; 1189 unsigned long prealloc_size, gtt_size, mappable_size;
1187 int ret; 1190 int ret;
1188 1191
1189 prealloc_size = dev_priv->mm.gtt->stolen_size; 1192 prealloc_size = dev_priv->mm.gtt->stolen_size;
1190 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; 1193 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
1191 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 1194 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1192 1195
1193 /* Basic memrange allocator for stolen space */ 1196 /* Basic memrange allocator for stolen space */
1194 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); 1197 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
1195 1198
1196 /* Let GEM Manage all of the aperture. 1199 /* Let GEM Manage all of the aperture.
1197 * 1200 *
1198 * However, leave one page at the end still bound to the scratch page. 1201 * However, leave one page at the end still bound to the scratch page.
1199 * There are a number of places where the hardware apparently 1202 * There are a number of places where the hardware apparently
1200 * prefetches past the end of the object, and we've seen multiple 1203 * prefetches past the end of the object, and we've seen multiple
1201 * hangs with the GPU head pointer stuck in a batchbuffer bound 1204 * hangs with the GPU head pointer stuck in a batchbuffer bound
1202 * at the last page of the aperture. One page should be enough to 1205 * at the last page of the aperture. One page should be enough to
1203 * keep any prefetching inside of the aperture. 1206 * keep any prefetching inside of the aperture.
1204 */ 1207 */
1205 i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE); 1208 i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
1206 1209
1207 mutex_lock(&dev->struct_mutex); 1210 mutex_lock(&dev->struct_mutex);
1208 ret = i915_gem_init_ringbuffer(dev); 1211 ret = i915_gem_init_ringbuffer(dev);
1209 mutex_unlock(&dev->struct_mutex); 1212 mutex_unlock(&dev->struct_mutex);
1210 if (ret) 1213 if (ret)
1211 return ret; 1214 return ret;
1212 1215
1213 /* Try to set up FBC with a reasonable compressed buffer size */ 1216 /* Try to set up FBC with a reasonable compressed buffer size */
1214 if (I915_HAS_FBC(dev) && i915_powersave) { 1217 if (I915_HAS_FBC(dev) && i915_powersave) {
1215 int cfb_size; 1218 int cfb_size;
1216 1219
1217 /* Leave 1M for line length buffer & misc. */ 1220 /* Leave 1M for line length buffer & misc. */
1218 1221
1219 /* Try to get a 32M buffer... */ 1222 /* Try to get a 32M buffer... */
1220 if (prealloc_size > (36*1024*1024)) 1223 if (prealloc_size > (36*1024*1024))
1221 cfb_size = 32*1024*1024; 1224 cfb_size = 32*1024*1024;
1222 else /* fall back to 7/8 of the stolen space */ 1225 else /* fall back to 7/8 of the stolen space */
1223 cfb_size = prealloc_size * 7 / 8; 1226 cfb_size = prealloc_size * 7 / 8;
1224 i915_setup_compression(dev, cfb_size); 1227 i915_setup_compression(dev, cfb_size);
1225 } 1228 }
1226 1229
1227 /* Allow hardware batchbuffers unless told otherwise. */ 1230 /* Allow hardware batchbuffers unless told otherwise. */
1228 dev_priv->allow_batchbuffer = 1; 1231 dev_priv->allow_batchbuffer = 1;
1229 return 0; 1232 return 0;
1230 } 1233 }
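
Editor's note, not part of this commit: i915_load_gem_init() sizes the FBC compressed buffer with a simple rule, 32 MiB when stolen memory exceeds 36 MiB and 7/8 of stolen memory otherwise. A minimal sketch of that rule, using an invented stolen size:

#include <stdio.h>

/* Illustration only: the compressed-buffer sizing rule from
 * i915_load_gem_init() above, with a made-up stolen size. */
int main(void)
{
	unsigned long prealloc_size = 16UL * 1024 * 1024;	/* hypothetical 16 MiB stolen */
	int cfb_size;

	if (prealloc_size > 36UL * 1024 * 1024)
		cfb_size = 32 * 1024 * 1024;		/* enough room: take a full 32 MiB */
	else
		cfb_size = prealloc_size * 7 / 8;	/* otherwise 7/8 of stolen, 14 MiB here */

	printf("cfb_size = %d MiB\n", cfb_size >> 20);
	return 0;
}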
1231 1234
1232 static int i915_load_modeset_init(struct drm_device *dev) 1235 static int i915_load_modeset_init(struct drm_device *dev)
1233 { 1236 {
1234 struct drm_i915_private *dev_priv = dev->dev_private; 1237 struct drm_i915_private *dev_priv = dev->dev_private;
1235 int ret; 1238 int ret;
1236 1239
1237 ret = intel_parse_bios(dev); 1240 ret = intel_parse_bios(dev);
1238 if (ret) 1241 if (ret)
1239 DRM_INFO("failed to find VBIOS tables\n"); 1242 DRM_INFO("failed to find VBIOS tables\n");
1240 1243
1241 /* If we have > 1 VGA cards, then we need to arbitrate access 1244 /* If we have > 1 VGA cards, then we need to arbitrate access
1242 * to the common VGA resources. 1245 * to the common VGA resources.
1243 * 1246 *
1244 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), 1247 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
1245 * then we do not take part in VGA arbitration and the 1248 * then we do not take part in VGA arbitration and the
1246 * vga_client_register() fails with -ENODEV. 1249 * vga_client_register() fails with -ENODEV.
1247 */ 1250 */
1248 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); 1251 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
1249 if (ret && ret != -ENODEV) 1252 if (ret && ret != -ENODEV)
1250 goto out; 1253 goto out;
1251 1254
1252 intel_register_dsm_handler(); 1255 intel_register_dsm_handler();
1253 1256
1254 ret = vga_switcheroo_register_client(dev->pdev, 1257 ret = vga_switcheroo_register_client(dev->pdev,
1255 i915_switcheroo_set_state, 1258 i915_switcheroo_set_state,
1256 NULL, 1259 NULL,
1257 i915_switcheroo_can_switch); 1260 i915_switcheroo_can_switch);
1258 if (ret) 1261 if (ret)
1259 goto cleanup_vga_client; 1262 goto cleanup_vga_client;
1260 1263
1261 /* IIR "flip pending" bit means done if this bit is set */ 1264 /* IIR "flip pending" bit means done if this bit is set */
1262 if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE)) 1265 if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
1263 dev_priv->flip_pending_is_done = true; 1266 dev_priv->flip_pending_is_done = true;
1264 1267
1265 intel_modeset_init(dev); 1268 intel_modeset_init(dev);
1266 1269
1267 ret = i915_load_gem_init(dev); 1270 ret = i915_load_gem_init(dev);
1268 if (ret) 1271 if (ret)
1269 goto cleanup_vga_switcheroo; 1272 goto cleanup_vga_switcheroo;
1270 1273
1271 intel_modeset_gem_init(dev); 1274 intel_modeset_gem_init(dev);
1272 1275
1273 ret = drm_irq_install(dev); 1276 ret = drm_irq_install(dev);
1274 if (ret) 1277 if (ret)
1275 goto cleanup_gem; 1278 goto cleanup_gem;
1276 1279
1277 /* Always safe in the mode setting case. */ 1280 /* Always safe in the mode setting case. */
1278 /* FIXME: do pre/post-mode set stuff in core KMS code */ 1281 /* FIXME: do pre/post-mode set stuff in core KMS code */
1279 dev->vblank_disable_allowed = 1; 1282 dev->vblank_disable_allowed = 1;
1280 1283
1281 ret = intel_fbdev_init(dev); 1284 ret = intel_fbdev_init(dev);
1282 if (ret) 1285 if (ret)
1283 goto cleanup_irq; 1286 goto cleanup_irq;
1284 1287
1285 drm_kms_helper_poll_init(dev); 1288 drm_kms_helper_poll_init(dev);
1286 1289
1287 /* We're off and running w/KMS */ 1290 /* We're off and running w/KMS */
1288 dev_priv->mm.suspended = 0; 1291 dev_priv->mm.suspended = 0;
1289 1292
1290 return 0; 1293 return 0;
1291 1294
1292 cleanup_irq: 1295 cleanup_irq:
1293 drm_irq_uninstall(dev); 1296 drm_irq_uninstall(dev);
1294 cleanup_gem: 1297 cleanup_gem:
1295 mutex_lock(&dev->struct_mutex); 1298 mutex_lock(&dev->struct_mutex);
1296 i915_gem_cleanup_ringbuffer(dev); 1299 i915_gem_cleanup_ringbuffer(dev);
1297 mutex_unlock(&dev->struct_mutex); 1300 mutex_unlock(&dev->struct_mutex);
1298 cleanup_vga_switcheroo: 1301 cleanup_vga_switcheroo:
1299 vga_switcheroo_unregister_client(dev->pdev); 1302 vga_switcheroo_unregister_client(dev->pdev);
1300 cleanup_vga_client: 1303 cleanup_vga_client:
1301 vga_client_register(dev->pdev, NULL, NULL, NULL); 1304 vga_client_register(dev->pdev, NULL, NULL, NULL);
1302 out: 1305 out:
1303 return ret; 1306 return ret;
1304 } 1307 }
1305 1308
1306 int i915_master_create(struct drm_device *dev, struct drm_master *master) 1309 int i915_master_create(struct drm_device *dev, struct drm_master *master)
1307 { 1310 {
1308 struct drm_i915_master_private *master_priv; 1311 struct drm_i915_master_private *master_priv;
1309 1312
1310 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); 1313 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
1311 if (!master_priv) 1314 if (!master_priv)
1312 return -ENOMEM; 1315 return -ENOMEM;
1313 1316
1314 master->driver_priv = master_priv; 1317 master->driver_priv = master_priv;
1315 return 0; 1318 return 0;
1316 } 1319 }
1317 1320
1318 void i915_master_destroy(struct drm_device *dev, struct drm_master *master) 1321 void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1319 { 1322 {
1320 struct drm_i915_master_private *master_priv = master->driver_priv; 1323 struct drm_i915_master_private *master_priv = master->driver_priv;
1321 1324
1322 if (!master_priv) 1325 if (!master_priv)
1323 return; 1326 return;
1324 1327
1325 kfree(master_priv); 1328 kfree(master_priv);
1326 1329
1327 master->driver_priv = NULL; 1330 master->driver_priv = NULL;
1328 } 1331 }
1329 1332
1330 static void i915_pineview_get_mem_freq(struct drm_device *dev) 1333 static void i915_pineview_get_mem_freq(struct drm_device *dev)
1331 { 1334 {
1332 drm_i915_private_t *dev_priv = dev->dev_private; 1335 drm_i915_private_t *dev_priv = dev->dev_private;
1333 u32 tmp; 1336 u32 tmp;
1334 1337
1335 tmp = I915_READ(CLKCFG); 1338 tmp = I915_READ(CLKCFG);
1336 1339
1337 switch (tmp & CLKCFG_FSB_MASK) { 1340 switch (tmp & CLKCFG_FSB_MASK) {
1338 case CLKCFG_FSB_533: 1341 case CLKCFG_FSB_533:
1339 dev_priv->fsb_freq = 533; /* 133*4 */ 1342 dev_priv->fsb_freq = 533; /* 133*4 */
1340 break; 1343 break;
1341 case CLKCFG_FSB_800: 1344 case CLKCFG_FSB_800:
1342 dev_priv->fsb_freq = 800; /* 200*4 */ 1345 dev_priv->fsb_freq = 800; /* 200*4 */
1343 break; 1346 break;
1344 case CLKCFG_FSB_667: 1347 case CLKCFG_FSB_667:
1345 dev_priv->fsb_freq = 667; /* 167*4 */ 1348 dev_priv->fsb_freq = 667; /* 167*4 */
1346 break; 1349 break;
1347 case CLKCFG_FSB_400: 1350 case CLKCFG_FSB_400:
1348 dev_priv->fsb_freq = 400; /* 100*4 */ 1351 dev_priv->fsb_freq = 400; /* 100*4 */
1349 break; 1352 break;
1350 } 1353 }
1351 1354
1352 switch (tmp & CLKCFG_MEM_MASK) { 1355 switch (tmp & CLKCFG_MEM_MASK) {
1353 case CLKCFG_MEM_533: 1356 case CLKCFG_MEM_533:
1354 dev_priv->mem_freq = 533; 1357 dev_priv->mem_freq = 533;
1355 break; 1358 break;
1356 case CLKCFG_MEM_667: 1359 case CLKCFG_MEM_667:
1357 dev_priv->mem_freq = 667; 1360 dev_priv->mem_freq = 667;
1358 break; 1361 break;
1359 case CLKCFG_MEM_800: 1362 case CLKCFG_MEM_800:
1360 dev_priv->mem_freq = 800; 1363 dev_priv->mem_freq = 800;
1361 break; 1364 break;
1362 } 1365 }
1363 1366
1364 /* detect pineview DDR3 setting */ 1367 /* detect pineview DDR3 setting */
1365 tmp = I915_READ(CSHRDDR3CTL); 1368 tmp = I915_READ(CSHRDDR3CTL);
1366 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; 1369 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
1367 } 1370 }
1368 1371
1369 static void i915_ironlake_get_mem_freq(struct drm_device *dev) 1372 static void i915_ironlake_get_mem_freq(struct drm_device *dev)
1370 { 1373 {
1371 drm_i915_private_t *dev_priv = dev->dev_private; 1374 drm_i915_private_t *dev_priv = dev->dev_private;
1372 u16 ddrpll, csipll; 1375 u16 ddrpll, csipll;
1373 1376
1374 ddrpll = I915_READ16(DDRMPLL1); 1377 ddrpll = I915_READ16(DDRMPLL1);
1375 csipll = I915_READ16(CSIPLL0); 1378 csipll = I915_READ16(CSIPLL0);
1376 1379
1377 switch (ddrpll & 0xff) { 1380 switch (ddrpll & 0xff) {
1378 case 0xc: 1381 case 0xc:
1379 dev_priv->mem_freq = 800; 1382 dev_priv->mem_freq = 800;
1380 break; 1383 break;
1381 case 0x10: 1384 case 0x10:
1382 dev_priv->mem_freq = 1066; 1385 dev_priv->mem_freq = 1066;
1383 break; 1386 break;
1384 case 0x14: 1387 case 0x14:
1385 dev_priv->mem_freq = 1333; 1388 dev_priv->mem_freq = 1333;
1386 break; 1389 break;
1387 case 0x18: 1390 case 0x18:
1388 dev_priv->mem_freq = 1600; 1391 dev_priv->mem_freq = 1600;
1389 break; 1392 break;
1390 default: 1393 default:
1391 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", 1394 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
1392 ddrpll & 0xff); 1395 ddrpll & 0xff);
1393 dev_priv->mem_freq = 0; 1396 dev_priv->mem_freq = 0;
1394 break; 1397 break;
1395 } 1398 }
1396 1399
1397 dev_priv->r_t = dev_priv->mem_freq; 1400 dev_priv->r_t = dev_priv->mem_freq;
1398 1401
1399 switch (csipll & 0x3ff) { 1402 switch (csipll & 0x3ff) {
1400 case 0x00c: 1403 case 0x00c:
1401 dev_priv->fsb_freq = 3200; 1404 dev_priv->fsb_freq = 3200;
1402 break; 1405 break;
1403 case 0x00e: 1406 case 0x00e:
1404 dev_priv->fsb_freq = 3733; 1407 dev_priv->fsb_freq = 3733;
1405 break; 1408 break;
1406 case 0x010: 1409 case 0x010:
1407 dev_priv->fsb_freq = 4266; 1410 dev_priv->fsb_freq = 4266;
1408 break; 1411 break;
1409 case 0x012: 1412 case 0x012:
1410 dev_priv->fsb_freq = 4800; 1413 dev_priv->fsb_freq = 4800;
1411 break; 1414 break;
1412 case 0x014: 1415 case 0x014:
1413 dev_priv->fsb_freq = 5333; 1416 dev_priv->fsb_freq = 5333;
1414 break; 1417 break;
1415 case 0x016: 1418 case 0x016:
1416 dev_priv->fsb_freq = 5866; 1419 dev_priv->fsb_freq = 5866;
1417 break; 1420 break;
1418 case 0x018: 1421 case 0x018:
1419 dev_priv->fsb_freq = 6400; 1422 dev_priv->fsb_freq = 6400;
1420 break; 1423 break;
1421 default: 1424 default:
1422 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", 1425 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
1423 csipll & 0x3ff); 1426 csipll & 0x3ff);
1424 dev_priv->fsb_freq = 0; 1427 dev_priv->fsb_freq = 0;
1425 break; 1428 break;
1426 } 1429 }
1427 1430
1428 if (dev_priv->fsb_freq == 3200) { 1431 if (dev_priv->fsb_freq == 3200) {
1429 dev_priv->c_m = 0; 1432 dev_priv->c_m = 0;
1430 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { 1433 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
1431 dev_priv->c_m = 1; 1434 dev_priv->c_m = 1;
1432 } else { 1435 } else {
1433 dev_priv->c_m = 2; 1436 dev_priv->c_m = 2;
1434 } 1437 }
1435 } 1438 }
1436 1439
1437 static const struct cparams { 1440 static const struct cparams {
1438 u16 i; 1441 u16 i;
1439 u16 t; 1442 u16 t;
1440 u16 m; 1443 u16 m;
1441 u16 c; 1444 u16 c;
1442 } cparams[] = { 1445 } cparams[] = {
1443 { 1, 1333, 301, 28664 }, 1446 { 1, 1333, 301, 28664 },
1444 { 1, 1066, 294, 24460 }, 1447 { 1, 1066, 294, 24460 },
1445 { 1, 800, 294, 25192 }, 1448 { 1, 800, 294, 25192 },
1446 { 0, 1333, 276, 27605 }, 1449 { 0, 1333, 276, 27605 },
1447 { 0, 1066, 276, 27605 }, 1450 { 0, 1066, 276, 27605 },
1448 { 0, 800, 231, 23784 }, 1451 { 0, 800, 231, 23784 },
1449 }; 1452 };
1450 1453
1451 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 1454 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1452 { 1455 {
1453 u64 total_count, diff, ret; 1456 u64 total_count, diff, ret;
1454 u32 count1, count2, count3, m = 0, c = 0; 1457 u32 count1, count2, count3, m = 0, c = 0;
1455 unsigned long now = jiffies_to_msecs(jiffies), diff1; 1458 unsigned long now = jiffies_to_msecs(jiffies), diff1;
1456 int i; 1459 int i;
1457 1460
1458 diff1 = now - dev_priv->last_time1; 1461 diff1 = now - dev_priv->last_time1;
1459 1462
1460 /* Prevent division-by-zero if we are asking too fast. 1463 /* Prevent division-by-zero if we are asking too fast.
1461 * Also, we don't get interesting results if we are polling 1464 * Also, we don't get interesting results if we are polling
1462 * faster than once in 10ms, so just return the saved value 1465 * faster than once in 10ms, so just return the saved value
1463 * in such cases. 1466 * in such cases.
1464 */ 1467 */
1465 if (diff1 <= 10) 1468 if (diff1 <= 10)
1466 return dev_priv->chipset_power; 1469 return dev_priv->chipset_power;
1467 1470
1468 count1 = I915_READ(DMIEC); 1471 count1 = I915_READ(DMIEC);
1469 count2 = I915_READ(DDREC); 1472 count2 = I915_READ(DDREC);
1470 count3 = I915_READ(CSIEC); 1473 count3 = I915_READ(CSIEC);
1471 1474
1472 total_count = count1 + count2 + count3; 1475 total_count = count1 + count2 + count3;
1473 1476
1474 /* FIXME: handle per-counter overflow */ 1477 /* FIXME: handle per-counter overflow */
1475 if (total_count < dev_priv->last_count1) { 1478 if (total_count < dev_priv->last_count1) {
1476 diff = ~0UL - dev_priv->last_count1; 1479 diff = ~0UL - dev_priv->last_count1;
1477 diff += total_count; 1480 diff += total_count;
1478 } else { 1481 } else {
1479 diff = total_count - dev_priv->last_count1; 1482 diff = total_count - dev_priv->last_count1;
1480 } 1483 }
1481 1484
1482 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 1485 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
1483 if (cparams[i].i == dev_priv->c_m && 1486 if (cparams[i].i == dev_priv->c_m &&
1484 cparams[i].t == dev_priv->r_t) { 1487 cparams[i].t == dev_priv->r_t) {
1485 m = cparams[i].m; 1488 m = cparams[i].m;
1486 c = cparams[i].c; 1489 c = cparams[i].c;
1487 break; 1490 break;
1488 } 1491 }
1489 } 1492 }
1490 1493
1491 diff = div_u64(diff, diff1); 1494 diff = div_u64(diff, diff1);
1492 ret = ((m * diff) + c); 1495 ret = ((m * diff) + c);
1493 ret = div_u64(ret, 10); 1496 ret = div_u64(ret, 10);
1494 1497
1495 dev_priv->last_count1 = total_count; 1498 dev_priv->last_count1 = total_count;
1496 dev_priv->last_time1 = now; 1499 dev_priv->last_time1 = now;
1497 1500
1498 dev_priv->chipset_power = ret; 1501 dev_priv->chipset_power = ret;
1499 1502
1500 return ret; 1503 return ret;
1501 } 1504 }
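
Editor's note, not part of this commit: i915_chipset_val() turns the summed event-counter delta into a power estimate using the (m, c) pair picked from the cparams table above, keyed on c_m and r_t. A minimal sketch of that arithmetic with invented counter numbers:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: the final computation in i915_chipset_val(), using the
 * cparams entry for c_m == 1, r_t == 1066.  The counter delta and elapsed
 * time are made-up sample values. */
int main(void)
{
	uint64_t diff  = 240000;	/* hypothetical DMIEC + DDREC + CSIEC delta */
	uint64_t diff1 = 100;		/* hypothetical elapsed time in ms */
	uint32_t m = 294, c = 24460;	/* from cparams[] above */

	uint64_t ret = (m * (diff / diff1) + c) / 10;
	printf("chipset power value = %llu\n", (unsigned long long)ret);	/* 73006 here */
	return 0;
}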
1502 1505
1503 unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 1506 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
1504 { 1507 {
1505 unsigned long m, x, b; 1508 unsigned long m, x, b;
1506 u32 tsfs; 1509 u32 tsfs;
1507 1510
1508 tsfs = I915_READ(TSFS); 1511 tsfs = I915_READ(TSFS);
1509 1512
1510 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); 1513 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
1511 x = I915_READ8(TR1); 1514 x = I915_READ8(TR1);
1512 1515
1513 b = tsfs & TSFS_INTR_MASK; 1516 b = tsfs & TSFS_INTR_MASK;
1514 1517
1515 return ((m * x) / 127) - b; 1518 return ((m * x) / 127) - b;
1516 } 1519 }
1517 1520
1518 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 1521 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
1519 { 1522 {
1520 static const struct v_table { 1523 static const struct v_table {
1521 u16 vd; /* in .1 mil */ 1524 u16 vd; /* in .1 mil */
1522 u16 vm; /* in .1 mil */ 1525 u16 vm; /* in .1 mil */
1523 } v_table[] = { 1526 } v_table[] = {
1524 { 0, 0, }, 1527 { 0, 0, },
1525 { 375, 0, }, 1528 { 375, 0, },
1526 { 500, 0, }, 1529 { 500, 0, },
1527 { 625, 0, }, 1530 { 625, 0, },
1528 { 750, 0, }, 1531 { 750, 0, },
1529 { 875, 0, }, 1532 { 875, 0, },
1530 { 1000, 0, }, 1533 { 1000, 0, },
1531 { 1125, 0, }, 1534 { 1125, 0, },
1532 { 4125, 3000, }, 1535 { 4125, 3000, },
1533 { 4125, 3000, }, 1536 { 4125, 3000, },
1534 { 4125, 3000, }, 1537 { 4125, 3000, },
1535 { 4125, 3000, }, 1538 { 4125, 3000, },
1536 { 4125, 3000, }, 1539 { 4125, 3000, },
1537 { 4125, 3000, }, 1540 { 4125, 3000, },
1538 { 4125, 3000, }, 1541 { 4125, 3000, },
1539 { 4125, 3000, }, 1542 { 4125, 3000, },
1540 { 4125, 3000, }, 1543 { 4125, 3000, },
1541 { 4125, 3000, }, 1544 { 4125, 3000, },
1542 { 4125, 3000, }, 1545 { 4125, 3000, },
1543 { 4125, 3000, }, 1546 { 4125, 3000, },
1544 { 4125, 3000, }, 1547 { 4125, 3000, },
1545 { 4125, 3000, }, 1548 { 4125, 3000, },
1546 { 4125, 3000, }, 1549 { 4125, 3000, },
1547 { 4125, 3000, }, 1550 { 4125, 3000, },
1548 { 4125, 3000, }, 1551 { 4125, 3000, },
1549 { 4125, 3000, }, 1552 { 4125, 3000, },
1550 { 4125, 3000, }, 1553 { 4125, 3000, },
1551 { 4125, 3000, }, 1554 { 4125, 3000, },
1552 { 4125, 3000, }, 1555 { 4125, 3000, },
1553 { 4125, 3000, }, 1556 { 4125, 3000, },
1554 { 4125, 3000, }, 1557 { 4125, 3000, },
1555 { 4125, 3000, }, 1558 { 4125, 3000, },
1556 { 4250, 3125, }, 1559 { 4250, 3125, },
1557 { 4375, 3250, }, 1560 { 4375, 3250, },
1558 { 4500, 3375, }, 1561 { 4500, 3375, },
1559 { 4625, 3500, }, 1562 { 4625, 3500, },
1560 { 4750, 3625, }, 1563 { 4750, 3625, },
1561 { 4875, 3750, }, 1564 { 4875, 3750, },
1562 { 5000, 3875, }, 1565 { 5000, 3875, },
1563 { 5125, 4000, }, 1566 { 5125, 4000, },
1564 { 5250, 4125, }, 1567 { 5250, 4125, },
1565 { 5375, 4250, }, 1568 { 5375, 4250, },
1566 { 5500, 4375, }, 1569 { 5500, 4375, },
1567 { 5625, 4500, }, 1570 { 5625, 4500, },
1568 { 5750, 4625, }, 1571 { 5750, 4625, },
1569 { 5875, 4750, }, 1572 { 5875, 4750, },
1570 { 6000, 4875, }, 1573 { 6000, 4875, },
1571 { 6125, 5000, }, 1574 { 6125, 5000, },
1572 { 6250, 5125, }, 1575 { 6250, 5125, },
1573 { 6375, 5250, }, 1576 { 6375, 5250, },
1574 { 6500, 5375, }, 1577 { 6500, 5375, },
1575 { 6625, 5500, }, 1578 { 6625, 5500, },
1576 { 6750, 5625, }, 1579 { 6750, 5625, },
1577 { 6875, 5750, }, 1580 { 6875, 5750, },
1578 { 7000, 5875, }, 1581 { 7000, 5875, },
1579 { 7125, 6000, }, 1582 { 7125, 6000, },
1580 { 7250, 6125, }, 1583 { 7250, 6125, },
1581 { 7375, 6250, }, 1584 { 7375, 6250, },
1582 { 7500, 6375, }, 1585 { 7500, 6375, },
1583 { 7625, 6500, }, 1586 { 7625, 6500, },
1584 { 7750, 6625, }, 1587 { 7750, 6625, },
1585 { 7875, 6750, }, 1588 { 7875, 6750, },
1586 { 8000, 6875, }, 1589 { 8000, 6875, },
1587 { 8125, 7000, }, 1590 { 8125, 7000, },
1588 { 8250, 7125, }, 1591 { 8250, 7125, },
1589 { 8375, 7250, }, 1592 { 8375, 7250, },
1590 { 8500, 7375, }, 1593 { 8500, 7375, },
1591 { 8625, 7500, }, 1594 { 8625, 7500, },
1592 { 8750, 7625, }, 1595 { 8750, 7625, },
1593 { 8875, 7750, }, 1596 { 8875, 7750, },
1594 { 9000, 7875, }, 1597 { 9000, 7875, },
1595 { 9125, 8000, }, 1598 { 9125, 8000, },
1596 { 9250, 8125, }, 1599 { 9250, 8125, },
1597 { 9375, 8250, }, 1600 { 9375, 8250, },
1598 { 9500, 8375, }, 1601 { 9500, 8375, },
1599 { 9625, 8500, }, 1602 { 9625, 8500, },
1600 { 9750, 8625, }, 1603 { 9750, 8625, },
1601 { 9875, 8750, }, 1604 { 9875, 8750, },
1602 { 10000, 8875, }, 1605 { 10000, 8875, },
1603 { 10125, 9000, }, 1606 { 10125, 9000, },
1604 { 10250, 9125, }, 1607 { 10250, 9125, },
1605 { 10375, 9250, }, 1608 { 10375, 9250, },
1606 { 10500, 9375, }, 1609 { 10500, 9375, },
1607 { 10625, 9500, }, 1610 { 10625, 9500, },
1608 { 10750, 9625, }, 1611 { 10750, 9625, },
1609 { 10875, 9750, }, 1612 { 10875, 9750, },
1610 { 11000, 9875, }, 1613 { 11000, 9875, },
1611 { 11125, 10000, }, 1614 { 11125, 10000, },
1612 { 11250, 10125, }, 1615 { 11250, 10125, },
1613 { 11375, 10250, }, 1616 { 11375, 10250, },
1614 { 11500, 10375, }, 1617 { 11500, 10375, },
1615 { 11625, 10500, }, 1618 { 11625, 10500, },
1616 { 11750, 10625, }, 1619 { 11750, 10625, },
1617 { 11875, 10750, }, 1620 { 11875, 10750, },
1618 { 12000, 10875, }, 1621 { 12000, 10875, },
1619 { 12125, 11000, }, 1622 { 12125, 11000, },
1620 { 12250, 11125, }, 1623 { 12250, 11125, },
1621 { 12375, 11250, }, 1624 { 12375, 11250, },
1622 { 12500, 11375, }, 1625 { 12500, 11375, },
1623 { 12625, 11500, }, 1626 { 12625, 11500, },
1624 { 12750, 11625, }, 1627 { 12750, 11625, },
1625 { 12875, 11750, }, 1628 { 12875, 11750, },
1626 { 13000, 11875, }, 1629 { 13000, 11875, },
1627 { 13125, 12000, }, 1630 { 13125, 12000, },
1628 { 13250, 12125, }, 1631 { 13250, 12125, },
1629 { 13375, 12250, }, 1632 { 13375, 12250, },
1630 { 13500, 12375, }, 1633 { 13500, 12375, },
1631 { 13625, 12500, }, 1634 { 13625, 12500, },
1632 { 13750, 12625, }, 1635 { 13750, 12625, },
1633 { 13875, 12750, }, 1636 { 13875, 12750, },
1634 { 14000, 12875, }, 1637 { 14000, 12875, },
1635 { 14125, 13000, }, 1638 { 14125, 13000, },
1636 { 14250, 13125, }, 1639 { 14250, 13125, },
1637 { 14375, 13250, }, 1640 { 14375, 13250, },
1638 { 14500, 13375, }, 1641 { 14500, 13375, },
1639 { 14625, 13500, }, 1642 { 14625, 13500, },
1640 { 14750, 13625, }, 1643 { 14750, 13625, },
1641 { 14875, 13750, }, 1644 { 14875, 13750, },
1642 { 15000, 13875, }, 1645 { 15000, 13875, },
1643 { 15125, 14000, }, 1646 { 15125, 14000, },
1644 { 15250, 14125, }, 1647 { 15250, 14125, },
1645 { 15375, 14250, }, 1648 { 15375, 14250, },
1646 { 15500, 14375, }, 1649 { 15500, 14375, },
1647 { 15625, 14500, }, 1650 { 15625, 14500, },
1648 { 15750, 14625, }, 1651 { 15750, 14625, },
1649 { 15875, 14750, }, 1652 { 15875, 14750, },
1650 { 16000, 14875, }, 1653 { 16000, 14875, },
1651 { 16125, 15000, }, 1654 { 16125, 15000, },
1652 }; 1655 };
1653 if (dev_priv->info->is_mobile) 1656 if (dev_priv->info->is_mobile)
1654 return v_table[pxvid].vm; 1657 return v_table[pxvid].vm;
1655 else 1658 else
1656 return v_table[pxvid].vd; 1659 return v_table[pxvid].vd;
1657 } 1660 }
1658 1661
1659 void i915_update_gfx_val(struct drm_i915_private *dev_priv) 1662 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
1660 { 1663 {
1661 struct timespec now, diff1; 1664 struct timespec now, diff1;
1662 u64 diff; 1665 u64 diff;
1663 unsigned long diffms; 1666 unsigned long diffms;
1664 u32 count; 1667 u32 count;
1665 1668
1666 getrawmonotonic(&now); 1669 getrawmonotonic(&now);
1667 diff1 = timespec_sub(now, dev_priv->last_time2); 1670 diff1 = timespec_sub(now, dev_priv->last_time2);
1668 1671
1669 /* Don't divide by 0 */ 1672 /* Don't divide by 0 */
1670 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; 1673 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
1671 if (!diffms) 1674 if (!diffms)
1672 return; 1675 return;
1673 1676
1674 count = I915_READ(GFXEC); 1677 count = I915_READ(GFXEC);
1675 1678
1676 if (count < dev_priv->last_count2) { 1679 if (count < dev_priv->last_count2) {
1677 diff = ~0UL - dev_priv->last_count2; 1680 diff = ~0UL - dev_priv->last_count2;
1678 diff += count; 1681 diff += count;
1679 } else { 1682 } else {
1680 diff = count - dev_priv->last_count2; 1683 diff = count - dev_priv->last_count2;
1681 } 1684 }
1682 1685
1683 dev_priv->last_count2 = count; 1686 dev_priv->last_count2 = count;
1684 dev_priv->last_time2 = now; 1687 dev_priv->last_time2 = now;
1685 1688
1686 /* More magic constants... */ 1689 /* More magic constants... */
1687 diff = diff * 1181; 1690 diff = diff * 1181;
1688 diff = div_u64(diff, diffms * 10); 1691 diff = div_u64(diff, diffms * 10);
1689 dev_priv->gfx_power = diff; 1692 dev_priv->gfx_power = diff;
1690 } 1693 }
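
Editor's note, not part of this commit: i915_update_gfx_val() converts the GFXEC counter delta into dev_priv->gfx_power by scaling with the magic constant 1181 and the elapsed time. A minimal sketch of that scaling with invented numbers:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: the scaling step at the end of i915_update_gfx_val().
 * The counter delta and elapsed time are made-up sample values. */
int main(void)
{
	uint64_t diff   = 50000;	/* hypothetical GFXEC delta since the last sample */
	uint64_t diffms = 16;		/* hypothetical elapsed wall time in ms */

	uint64_t gfx_power = diff * 1181 / (diffms * 10);
	printf("gfx_power = %llu\n", (unsigned long long)gfx_power);	/* 369062 here */
	return 0;
}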
1691 1694
1692 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 1695 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
1693 { 1696 {
1694 unsigned long t, corr, state1, corr2, state2; 1697 unsigned long t, corr, state1, corr2, state2;
1695 u32 pxvid, ext_v; 1698 u32 pxvid, ext_v;
1696 1699
1697 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); 1700 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
1698 pxvid = (pxvid >> 24) & 0x7f; 1701 pxvid = (pxvid >> 24) & 0x7f;
1699 ext_v = pvid_to_extvid(dev_priv, pxvid); 1702 ext_v = pvid_to_extvid(dev_priv, pxvid);
1700 1703
1701 state1 = ext_v; 1704 state1 = ext_v;
1702 1705
1703 t = i915_mch_val(dev_priv); 1706 t = i915_mch_val(dev_priv);
1704 1707
1705 /* Revel in the empirically derived constants */ 1708 /* Revel in the empirically derived constants */
1706 1709
1707 /* Correction factor in 1/100000 units */ 1710 /* Correction factor in 1/100000 units */
1708 if (t > 80) 1711 if (t > 80)
1709 corr = ((t * 2349) + 135940); 1712 corr = ((t * 2349) + 135940);
1710 else if (t >= 50) 1713 else if (t >= 50)
1711 corr = ((t * 964) + 29317); 1714 corr = ((t * 964) + 29317);
1712 else /* < 50 */ 1715 else /* < 50 */
1713 corr = ((t * 301) + 1004); 1716 corr = ((t * 301) + 1004);
1714 1717
1715 corr = corr * ((150142 * state1) / 10000 - 78642); 1718 corr = corr * ((150142 * state1) / 10000 - 78642);
1716 corr /= 100000; 1719 corr /= 100000;
1717 corr2 = (corr * dev_priv->corr); 1720 corr2 = (corr * dev_priv->corr);
1718 1721
1719 state2 = (corr2 * state1) / 10000; 1722 state2 = (corr2 * state1) / 10000;
1720 state2 /= 100; /* convert to mW */ 1723 state2 /= 100; /* convert to mW */
1721 1724
1722 i915_update_gfx_val(dev_priv); 1725 i915_update_gfx_val(dev_priv);
1723 1726
1724 return dev_priv->gfx_power + state2; 1727 return dev_priv->gfx_power + state2;
1725 } 1728 }
1726 1729
1727 /* Global for IPS driver to get at the current i915 device */ 1730 /* Global for IPS driver to get at the current i915 device */
1728 static struct drm_i915_private *i915_mch_dev; 1731 static struct drm_i915_private *i915_mch_dev;
1729 /* 1732 /*
1730 * Lock protecting IPS related data structures 1733 * Lock protecting IPS related data structures
1731 * - i915_mch_dev 1734 * - i915_mch_dev
1732 * - dev_priv->max_delay 1735 * - dev_priv->max_delay
1733 * - dev_priv->min_delay 1736 * - dev_priv->min_delay
1734 * - dev_priv->fmax 1737 * - dev_priv->fmax
1735 * - dev_priv->gpu_busy 1738 * - dev_priv->gpu_busy
1736 */ 1739 */
1737 static DEFINE_SPINLOCK(mchdev_lock); 1740 static DEFINE_SPINLOCK(mchdev_lock);
1738 1741
1739 /** 1742 /**
1740 * i915_read_mch_val - return value for IPS use 1743 * i915_read_mch_val - return value for IPS use
1741 * 1744 *
1742 * Calculate and return a value for the IPS driver to use when deciding whether 1745 * Calculate and return a value for the IPS driver to use when deciding whether
1743 * we have thermal and power headroom to increase CPU or GPU power budget. 1746 * we have thermal and power headroom to increase CPU or GPU power budget.
1744 */ 1747 */
1745 unsigned long i915_read_mch_val(void) 1748 unsigned long i915_read_mch_val(void)
1746 { 1749 {
1747 struct drm_i915_private *dev_priv; 1750 struct drm_i915_private *dev_priv;
1748 unsigned long chipset_val, graphics_val, ret = 0; 1751 unsigned long chipset_val, graphics_val, ret = 0;
1749 1752
1750 spin_lock(&mchdev_lock); 1753 spin_lock(&mchdev_lock);
1751 if (!i915_mch_dev) 1754 if (!i915_mch_dev)
1752 goto out_unlock; 1755 goto out_unlock;
1753 dev_priv = i915_mch_dev; 1756 dev_priv = i915_mch_dev;
1754 1757
1755 chipset_val = i915_chipset_val(dev_priv); 1758 chipset_val = i915_chipset_val(dev_priv);
1756 graphics_val = i915_gfx_val(dev_priv); 1759 graphics_val = i915_gfx_val(dev_priv);
1757 1760
1758 ret = chipset_val + graphics_val; 1761 ret = chipset_val + graphics_val;
1759 1762
1760 out_unlock: 1763 out_unlock:
1761 spin_unlock(&mchdev_lock); 1764 spin_unlock(&mchdev_lock);
1762 1765
1763 return ret; 1766 return ret;
1764 } 1767 }
1765 EXPORT_SYMBOL_GPL(i915_read_mch_val); 1768 EXPORT_SYMBOL_GPL(i915_read_mch_val);
1766 1769
1767 /** 1770 /**
1768 * i915_gpu_raise - raise GPU frequency limit 1771 * i915_gpu_raise - raise GPU frequency limit
1769 * 1772 *
1770 * Raise the limit; IPS indicates we have thermal headroom. 1773 * Raise the limit; IPS indicates we have thermal headroom.
1771 */ 1774 */
1772 bool i915_gpu_raise(void) 1775 bool i915_gpu_raise(void)
1773 { 1776 {
1774 struct drm_i915_private *dev_priv; 1777 struct drm_i915_private *dev_priv;
1775 bool ret = true; 1778 bool ret = true;
1776 1779
1777 spin_lock(&mchdev_lock); 1780 spin_lock(&mchdev_lock);
1778 if (!i915_mch_dev) { 1781 if (!i915_mch_dev) {
1779 ret = false; 1782 ret = false;
1780 goto out_unlock; 1783 goto out_unlock;
1781 } 1784 }
1782 dev_priv = i915_mch_dev; 1785 dev_priv = i915_mch_dev;
1783 1786
1784 if (dev_priv->max_delay > dev_priv->fmax) 1787 if (dev_priv->max_delay > dev_priv->fmax)
1785 dev_priv->max_delay--; 1788 dev_priv->max_delay--;
1786 1789
1787 out_unlock: 1790 out_unlock:
1788 spin_unlock(&mchdev_lock); 1791 spin_unlock(&mchdev_lock);
1789 1792
1790 return ret; 1793 return ret;
1791 } 1794 }
1792 EXPORT_SYMBOL_GPL(i915_gpu_raise); 1795 EXPORT_SYMBOL_GPL(i915_gpu_raise);
1793 1796
1794 /** 1797 /**
1795 * i915_gpu_lower - lower GPU frequency limit 1798 * i915_gpu_lower - lower GPU frequency limit
1796 * 1799 *
1797 * IPS indicates we're close to a thermal limit, so throttle back the GPU 1800 * IPS indicates we're close to a thermal limit, so throttle back the GPU
1798 * frequency maximum. 1801 * frequency maximum.
1799 */ 1802 */
1800 bool i915_gpu_lower(void) 1803 bool i915_gpu_lower(void)
1801 { 1804 {
1802 struct drm_i915_private *dev_priv; 1805 struct drm_i915_private *dev_priv;
1803 bool ret = true; 1806 bool ret = true;
1804 1807
1805 spin_lock(&mchdev_lock); 1808 spin_lock(&mchdev_lock);
1806 if (!i915_mch_dev) { 1809 if (!i915_mch_dev) {
1807 ret = false; 1810 ret = false;
1808 goto out_unlock; 1811 goto out_unlock;
1809 } 1812 }
1810 dev_priv = i915_mch_dev; 1813 dev_priv = i915_mch_dev;
1811 1814
1812 if (dev_priv->max_delay < dev_priv->min_delay) 1815 if (dev_priv->max_delay < dev_priv->min_delay)
1813 dev_priv->max_delay++; 1816 dev_priv->max_delay++;
1814 1817
1815 out_unlock: 1818 out_unlock:
1816 spin_unlock(&mchdev_lock); 1819 spin_unlock(&mchdev_lock);
1817 1820
1818 return ret; 1821 return ret;
1819 } 1822 }
1820 EXPORT_SYMBOL_GPL(i915_gpu_lower); 1823 EXPORT_SYMBOL_GPL(i915_gpu_lower);
1821 1824
1822 /** 1825 /**
1823 * i915_gpu_busy - indicate GPU busyness to IPS 1826 * i915_gpu_busy - indicate GPU busyness to IPS
1824 * 1827 *
1825 * Tell the IPS driver whether or not the GPU is busy. 1828 * Tell the IPS driver whether or not the GPU is busy.
1826 */ 1829 */
1827 bool i915_gpu_busy(void) 1830 bool i915_gpu_busy(void)
1828 { 1831 {
1829 struct drm_i915_private *dev_priv; 1832 struct drm_i915_private *dev_priv;
1830 bool ret = false; 1833 bool ret = false;
1831 1834
1832 spin_lock(&mchdev_lock); 1835 spin_lock(&mchdev_lock);
1833 if (!i915_mch_dev) 1836 if (!i915_mch_dev)
1834 goto out_unlock; 1837 goto out_unlock;
1835 dev_priv = i915_mch_dev; 1838 dev_priv = i915_mch_dev;
1836 1839
1837 ret = dev_priv->busy; 1840 ret = dev_priv->busy;
1838 1841
1839 out_unlock: 1842 out_unlock:
1840 spin_unlock(&mchdev_lock); 1843 spin_unlock(&mchdev_lock);
1841 1844
1842 return ret; 1845 return ret;
1843 } 1846 }
1844 EXPORT_SYMBOL_GPL(i915_gpu_busy); 1847 EXPORT_SYMBOL_GPL(i915_gpu_busy);
1845 1848
1846 /** 1849 /**
1847 * i915_gpu_turbo_disable - disable graphics turbo 1850 * i915_gpu_turbo_disable - disable graphics turbo
1848 * 1851 *
1849 * Disable graphics turbo by resetting the max frequency and setting the 1852 * Disable graphics turbo by resetting the max frequency and setting the
1850 * current frequency to the default. 1853 * current frequency to the default.
1851 */ 1854 */
1852 bool i915_gpu_turbo_disable(void) 1855 bool i915_gpu_turbo_disable(void)
1853 { 1856 {
1854 struct drm_i915_private *dev_priv; 1857 struct drm_i915_private *dev_priv;
1855 bool ret = true; 1858 bool ret = true;
1856 1859
1857 spin_lock(&mchdev_lock); 1860 spin_lock(&mchdev_lock);
1858 if (!i915_mch_dev) { 1861 if (!i915_mch_dev) {
1859 ret = false; 1862 ret = false;
1860 goto out_unlock; 1863 goto out_unlock;
1861 } 1864 }
1862 dev_priv = i915_mch_dev; 1865 dev_priv = i915_mch_dev;
1863 1866
1864 dev_priv->max_delay = dev_priv->fstart; 1867 dev_priv->max_delay = dev_priv->fstart;
1865 1868
1866 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) 1869 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
1867 ret = false; 1870 ret = false;
1868 1871
1869 out_unlock: 1872 out_unlock:
1870 spin_unlock(&mchdev_lock); 1873 spin_unlock(&mchdev_lock);
1871 1874
1872 return ret; 1875 return ret;
1873 } 1876 }
1874 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); 1877 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
1875 1878
1876 /** 1879 /**
1877 * Tells the intel_ips driver that the i915 driver is now loaded, if 1880 * Tells the intel_ips driver that the i915 driver is now loaded, if
1878 * IPS got loaded first. 1881 * IPS got loaded first.
1879 * 1882 *
1880 * This awkward dance is so that neither module has to depend on the 1883 * This awkward dance is so that neither module has to depend on the
1881 * other in order for IPS to do the appropriate communication of 1884 * other in order for IPS to do the appropriate communication of
1882 * GPU turbo limits to i915. 1885 * GPU turbo limits to i915.
1883 */ 1886 */
1884 static void 1887 static void
1885 ips_ping_for_i915_load(void) 1888 ips_ping_for_i915_load(void)
1886 { 1889 {
1887 void (*link)(void); 1890 void (*link)(void);
1888 1891
1889 link = symbol_get(ips_link_to_i915_driver); 1892 link = symbol_get(ips_link_to_i915_driver);
1890 if (link) { 1893 if (link) {
1891 link(); 1894 link();
1892 symbol_put(ips_link_to_i915_driver); 1895 symbol_put(ips_link_to_i915_driver);
1893 } 1896 }
1894 } 1897 }
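
Editor's note, not part of this commit: ips_ping_for_i915_load() above shows the i915 side of the loose coupling; the sketch below shows, under the same symbol_get()/symbol_put() pattern, how a consumer such as intel_ips could call the hooks exported earlier (i915_read_mch_val() and friends) without a hard module dependency. A kernel build context is assumed, the function name is hypothetical, and all IPS policy code is omitted.

#include <linux/module.h>

extern unsigned long i915_read_mch_val(void);	/* exported earlier in this file */

/* Sketch only: query the i915 power estimate if (and only if) i915 is loaded. */
static unsigned long sample_query_i915_budget(void)
{
	unsigned long (*read_mch)(void);
	unsigned long val = 0;

	read_mch = symbol_get(i915_read_mch_val);
	if (read_mch) {
		val = read_mch();		/* chipset + graphics power estimate */
		symbol_put(i915_read_mch_val);
	}
	return val;				/* 0 means i915 was not available */
}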
1895 1898
1896 /** 1899 /**
1897 * i915_driver_load - setup chip and create an initial config 1900 * i915_driver_load - setup chip and create an initial config
1898 * @dev: DRM device 1901 * @dev: DRM device
1899 * @flags: startup flags 1902 * @flags: startup flags
1900 * 1903 *
1901 * The driver load routine has to do several things: 1904 * The driver load routine has to do several things:
1902 * - drive output discovery via intel_modeset_init() 1905 * - drive output discovery via intel_modeset_init()
1903 * - initialize the memory manager 1906 * - initialize the memory manager
1904 * - allocate initial config memory 1907 * - allocate initial config memory
1905 * - setup the DRM framebuffer with the allocated memory 1908 * - setup the DRM framebuffer with the allocated memory
1906 */ 1909 */
1907 int i915_driver_load(struct drm_device *dev, unsigned long flags) 1910 int i915_driver_load(struct drm_device *dev, unsigned long flags)
1908 { 1911 {
1909 struct drm_i915_private *dev_priv; 1912 struct drm_i915_private *dev_priv;
1910 int ret = 0, mmio_bar; 1913 int ret = 0, mmio_bar;
1911 uint32_t agp_size; 1914 uint32_t agp_size;
1912 1915
1913 /* i915 has 4 more counters */ 1916 /* i915 has 4 more counters */
1914 dev->counters += 4; 1917 dev->counters += 4;
1915 dev->types[6] = _DRM_STAT_IRQ; 1918 dev->types[6] = _DRM_STAT_IRQ;
1916 dev->types[7] = _DRM_STAT_PRIMARY; 1919 dev->types[7] = _DRM_STAT_PRIMARY;
1917 dev->types[8] = _DRM_STAT_SECONDARY; 1920 dev->types[8] = _DRM_STAT_SECONDARY;
1918 dev->types[9] = _DRM_STAT_DMA; 1921 dev->types[9] = _DRM_STAT_DMA;
1919 1922
1920 dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); 1923 dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
1921 if (dev_priv == NULL) 1924 if (dev_priv == NULL)
1922 return -ENOMEM; 1925 return -ENOMEM;
1923 1926
1924 dev->dev_private = (void *)dev_priv; 1927 dev->dev_private = (void *)dev_priv;
1925 dev_priv->dev = dev; 1928 dev_priv->dev = dev;
1926 dev_priv->info = (struct intel_device_info *) flags; 1929 dev_priv->info = (struct intel_device_info *) flags;
1927 1930
1928 if (i915_get_bridge_dev(dev)) { 1931 if (i915_get_bridge_dev(dev)) {
1929 ret = -EIO; 1932 ret = -EIO;
1930 goto free_priv; 1933 goto free_priv;
1931 } 1934 }
1932 1935
1933 /* overlay on gen2 is broken and can't address above 1G */ 1936 /* overlay on gen2 is broken and can't address above 1G */
1934 if (IS_GEN2(dev)) 1937 if (IS_GEN2(dev))
1935 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); 1938 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1936 1939
1937 /* 965GM sometimes incorrectly writes to hardware status page (HWS) 1940 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1938 * using 32bit addressing, overwriting memory if HWS is located 1941 * using 32bit addressing, overwriting memory if HWS is located
1939 * above 4GB. 1942 * above 4GB.
1940 * 1943 *
1941 * The documentation also mentions an issue with undefined 1944 * The documentation also mentions an issue with undefined
1942 * behaviour if any general state is accessed within a page above 4GB, 1945 * behaviour if any general state is accessed within a page above 4GB,
1943 * which also needs to be handled carefully. 1946 * which also needs to be handled carefully.
1944 */ 1947 */
1945 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 1948 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1946 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); 1949 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1947 1950
1948 mmio_bar = IS_GEN2(dev) ? 1 : 0; 1951 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1949 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); 1952 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
1950 if (!dev_priv->regs) { 1953 if (!dev_priv->regs) {
1951 DRM_ERROR("failed to map registers\n"); 1954 DRM_ERROR("failed to map registers\n");
1952 ret = -EIO; 1955 ret = -EIO;
1953 goto put_bridge; 1956 goto put_bridge;
1954 } 1957 }
1955 1958
1956 dev_priv->mm.gtt = intel_gtt_get(); 1959 dev_priv->mm.gtt = intel_gtt_get();
1957 if (!dev_priv->mm.gtt) { 1960 if (!dev_priv->mm.gtt) {
1958 DRM_ERROR("Failed to initialize GTT\n"); 1961 DRM_ERROR("Failed to initialize GTT\n");
1959 ret = -ENODEV; 1962 ret = -ENODEV;
1960 goto out_rmmap; 1963 goto out_rmmap;
1961 } 1964 }
1962 1965
1963 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 1966 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1964 1967
1965 dev_priv->mm.gtt_mapping = 1968 dev_priv->mm.gtt_mapping =
1966 io_mapping_create_wc(dev->agp->base, agp_size); 1969 io_mapping_create_wc(dev->agp->base, agp_size);
1967 if (dev_priv->mm.gtt_mapping == NULL) { 1970 if (dev_priv->mm.gtt_mapping == NULL) {
1968 ret = -EIO; 1971 ret = -EIO;
1969 goto out_rmmap; 1972 goto out_rmmap;
1970 } 1973 }
1971 1974
1972 /* Set up a WC MTRR for non-PAT systems. This is more common than 1975 /* Set up a WC MTRR for non-PAT systems. This is more common than
1973 * one would think, because the kernel disables PAT on first 1976 * one would think, because the kernel disables PAT on first
1974 * generation Core chips because WC PAT gets overridden by a UC 1977 * generation Core chips because WC PAT gets overridden by a UC
1975 * MTRR if present. Even if a UC MTRR isn't present. 1978 * MTRR if present. Even if a UC MTRR isn't present.
1976 */ 1979 */
1977 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, 1980 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
1978 agp_size, 1981 agp_size,
1979 MTRR_TYPE_WRCOMB, 1); 1982 MTRR_TYPE_WRCOMB, 1);
1980 if (dev_priv->mm.gtt_mtrr < 0) { 1983 if (dev_priv->mm.gtt_mtrr < 0) {
1981 DRM_INFO("MTRR allocation failed. Graphics " 1984 DRM_INFO("MTRR allocation failed. Graphics "
1982 "performance may suffer.\n"); 1985 "performance may suffer.\n");
1983 } 1986 }
1984 1987
1985 /* The i915 workqueue is primarily used for batched retirement of 1988 /* The i915 workqueue is primarily used for batched retirement of
1986 * requests (and thus managing bo) once the task has been completed 1989 * requests (and thus managing bo) once the task has been completed
1987 * by the GPU. i915_gem_retire_requests() is called directly when we 1990 * by the GPU. i915_gem_retire_requests() is called directly when we
1988 * need high-priority retirement, such as waiting for an explicit 1991 * need high-priority retirement, such as waiting for an explicit
1989 * bo. 1992 * bo.
1990 * 1993 *
1991 * It is also used for periodic low-priority events, such as 1994 * It is also used for periodic low-priority events, such as
1992 * idle-timers and recording error state. 1995 * idle-timers and recording error state.
1993 * 1996 *
1994 * All tasks on the workqueue are expected to acquire the dev mutex 1997 * All tasks on the workqueue are expected to acquire the dev mutex
1995 * so there is no point in running more than one instance of the 1998 * so there is no point in running more than one instance of the
1996 * workqueue at any time: max_active = 1 and NON_REENTRANT. 1999 * workqueue at any time: max_active = 1 and NON_REENTRANT.
1997 */ 2000 */
1998 dev_priv->wq = alloc_workqueue("i915", 2001 dev_priv->wq = alloc_workqueue("i915",
1999 WQ_UNBOUND | WQ_NON_REENTRANT, 2002 WQ_UNBOUND | WQ_NON_REENTRANT,
2000 1); 2003 1);
2001 if (dev_priv->wq == NULL) { 2004 if (dev_priv->wq == NULL) {
2002 DRM_ERROR("Failed to create our workqueue.\n"); 2005 DRM_ERROR("Failed to create our workqueue.\n");
2003 ret = -ENOMEM; 2006 ret = -ENOMEM;
2004 goto out_mtrrfree; 2007 goto out_mtrrfree;
2005 } 2008 }
2006 2009
2007 /* enable GEM by default */ 2010 /* enable GEM by default */
2008 dev_priv->has_gem = 1; 2011 dev_priv->has_gem = 1;
2009 2012
2010 intel_irq_init(dev); 2013 intel_irq_init(dev);
2011 2014
2012 /* Try to make sure MCHBAR is enabled before poking at it */ 2015 /* Try to make sure MCHBAR is enabled before poking at it */
2013 intel_setup_mchbar(dev); 2016 intel_setup_mchbar(dev);
2014 intel_setup_gmbus(dev); 2017 intel_setup_gmbus(dev);
2015 intel_opregion_setup(dev); 2018 intel_opregion_setup(dev);
2016 2019
2017 /* Make sure the bios did its job and set up vital registers */ 2020 /* Make sure the bios did its job and set up vital registers */
2018 intel_setup_bios(dev); 2021 intel_setup_bios(dev);
2019 2022
2020 i915_gem_load(dev); 2023 i915_gem_load(dev);
2021 2024
2022 /* Init HWS */ 2025 /* Init HWS */
2023 if (!I915_NEED_GFX_HWS(dev)) { 2026 if (!I915_NEED_GFX_HWS(dev)) {
2024 ret = i915_init_phys_hws(dev); 2027 ret = i915_init_phys_hws(dev);
2025 if (ret) 2028 if (ret)
2026 goto out_gem_unload; 2029 goto out_gem_unload;
2027 } 2030 }
2028 2031
2029 if (IS_PINEVIEW(dev)) 2032 if (IS_PINEVIEW(dev))
2030 i915_pineview_get_mem_freq(dev); 2033 i915_pineview_get_mem_freq(dev);
2031 else if (IS_GEN5(dev)) 2034 else if (IS_GEN5(dev))
2032 i915_ironlake_get_mem_freq(dev); 2035 i915_ironlake_get_mem_freq(dev);
2033 2036
2034 /* On the 945G/GM, the chipset reports the MSI capability on the 2037 /* On the 945G/GM, the chipset reports the MSI capability on the
2035 * integrated graphics even though the support isn't actually there 2038 * integrated graphics even though the support isn't actually there
2036 * according to the published specs. It doesn't appear to function 2039 * according to the published specs. It doesn't appear to function
2037 * correctly in testing on 945G. 2040 * correctly in testing on 945G.
2038 * This may be a side effect of MSI having been made available for PEG 2041 * This may be a side effect of MSI having been made available for PEG
2039 * and the registers being closely associated. 2042 * and the registers being closely associated.
2040 * 2043 *
2041 * According to chipset errata, on the 965GM, MSI interrupts may 2044 * According to chipset errata, on the 965GM, MSI interrupts may
2042 * be lost or delayed, but we use them anyway to avoid 2045 * be lost or delayed, but we use them anyway to avoid
2043 * stuck interrupts on some machines. 2046 * stuck interrupts on some machines.
2044 */ 2047 */
2045 if (!IS_I945G(dev) && !IS_I945GM(dev)) 2048 if (!IS_I945G(dev) && !IS_I945GM(dev))
2046 pci_enable_msi(dev->pdev); 2049 pci_enable_msi(dev->pdev);
2047 2050
2048 spin_lock_init(&dev_priv->irq_lock); 2051 spin_lock_init(&dev_priv->irq_lock);
2049 spin_lock_init(&dev_priv->error_lock); 2052 spin_lock_init(&dev_priv->error_lock);
2050 spin_lock_init(&dev_priv->rps_lock); 2053 spin_lock_init(&dev_priv->rps_lock);
2051 2054
2052 if (IS_IVYBRIDGE(dev)) 2055 if (IS_IVYBRIDGE(dev))
2053 dev_priv->num_pipe = 3; 2056 dev_priv->num_pipe = 3;
2054 else if (IS_MOBILE(dev) || !IS_GEN2(dev)) 2057 else if (IS_MOBILE(dev) || !IS_GEN2(dev))
2055 dev_priv->num_pipe = 2; 2058 dev_priv->num_pipe = 2;
2056 else 2059 else
2057 dev_priv->num_pipe = 1; 2060 dev_priv->num_pipe = 1;
2058 2061
2059 ret = drm_vblank_init(dev, dev_priv->num_pipe); 2062 ret = drm_vblank_init(dev, dev_priv->num_pipe);
2060 if (ret) 2063 if (ret)
2061 goto out_gem_unload; 2064 goto out_gem_unload;
2062 2065
2063 /* Start out suspended */ 2066 /* Start out suspended */
2064 dev_priv->mm.suspended = 1; 2067 dev_priv->mm.suspended = 1;
2065 2068
2066 intel_detect_pch(dev); 2069 intel_detect_pch(dev);
2067 2070
2068 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2071 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2069 ret = i915_load_modeset_init(dev); 2072 ret = i915_load_modeset_init(dev);
2070 if (ret < 0) { 2073 if (ret < 0) {
2071 DRM_ERROR("failed to init modeset\n"); 2074 DRM_ERROR("failed to init modeset\n");
2072 goto out_gem_unload; 2075 goto out_gem_unload;
2073 } 2076 }
2074 } 2077 }
2075 2078
2076 /* Must be done after probing outputs */ 2079 /* Must be done after probing outputs */
2077 intel_opregion_init(dev); 2080 intel_opregion_init(dev);
2078 acpi_video_register(); 2081 acpi_video_register();
2079 2082
2080 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 2083 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
2081 (unsigned long) dev); 2084 (unsigned long) dev);
2082 2085
2083 spin_lock(&mchdev_lock); 2086 spin_lock(&mchdev_lock);
2084 i915_mch_dev = dev_priv; 2087 i915_mch_dev = dev_priv;
2085 dev_priv->mchdev_lock = &mchdev_lock; 2088 dev_priv->mchdev_lock = &mchdev_lock;
2086 spin_unlock(&mchdev_lock); 2089 spin_unlock(&mchdev_lock);
2087 2090
2088 ips_ping_for_i915_load(); 2091 ips_ping_for_i915_load();
2089 2092
2090 return 0; 2093 return 0;
2091 2094
2092 out_gem_unload: 2095 out_gem_unload:
2093 if (dev_priv->mm.inactive_shrinker.shrink) 2096 if (dev_priv->mm.inactive_shrinker.shrink)
2094 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 2097 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
2095 2098
2096 if (dev->pdev->msi_enabled) 2099 if (dev->pdev->msi_enabled)
2097 pci_disable_msi(dev->pdev); 2100 pci_disable_msi(dev->pdev);
2098 2101
2099 intel_teardown_gmbus(dev); 2102 intel_teardown_gmbus(dev);
2100 intel_teardown_mchbar(dev); 2103 intel_teardown_mchbar(dev);
2101 destroy_workqueue(dev_priv->wq); 2104 destroy_workqueue(dev_priv->wq);
2102 out_mtrrfree: 2105 out_mtrrfree:
2103 if (dev_priv->mm.gtt_mtrr >= 0) { 2106 if (dev_priv->mm.gtt_mtrr >= 0) {
2104 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, 2107 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
2105 dev->agp->agp_info.aper_size * 1024 * 1024); 2108 dev->agp->agp_info.aper_size * 1024 * 1024);
2106 dev_priv->mm.gtt_mtrr = -1; 2109 dev_priv->mm.gtt_mtrr = -1;
2107 } 2110 }
2108 io_mapping_free(dev_priv->mm.gtt_mapping); 2111 io_mapping_free(dev_priv->mm.gtt_mapping);
2109 out_rmmap: 2112 out_rmmap:
2110 pci_iounmap(dev->pdev, dev_priv->regs); 2113 pci_iounmap(dev->pdev, dev_priv->regs);
2111 put_bridge: 2114 put_bridge:
2112 pci_dev_put(dev_priv->bridge_dev); 2115 pci_dev_put(dev_priv->bridge_dev);
2113 free_priv: 2116 free_priv:
2114 kfree(dev_priv); 2117 kfree(dev_priv);
2115 return ret; 2118 return ret;
2116 } 2119 }
2117 2120
2118 int i915_driver_unload(struct drm_device *dev) 2121 int i915_driver_unload(struct drm_device *dev)
2119 { 2122 {
2120 struct drm_i915_private *dev_priv = dev->dev_private; 2123 struct drm_i915_private *dev_priv = dev->dev_private;
2121 int ret; 2124 int ret;
2122 2125
2123 spin_lock(&mchdev_lock); 2126 spin_lock(&mchdev_lock);
2124 i915_mch_dev = NULL; 2127 i915_mch_dev = NULL;
2125 spin_unlock(&mchdev_lock); 2128 spin_unlock(&mchdev_lock);
2126 2129
2127 if (dev_priv->mm.inactive_shrinker.shrink) 2130 if (dev_priv->mm.inactive_shrinker.shrink)
2128 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 2131 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
2129 2132
2130 mutex_lock(&dev->struct_mutex); 2133 mutex_lock(&dev->struct_mutex);
2131 ret = i915_gpu_idle(dev); 2134 ret = i915_gpu_idle(dev);
2132 if (ret) 2135 if (ret)
2133 DRM_ERROR("failed to idle hardware: %d\n", ret); 2136 DRM_ERROR("failed to idle hardware: %d\n", ret);
2134 mutex_unlock(&dev->struct_mutex); 2137 mutex_unlock(&dev->struct_mutex);
2135 2138
2136 /* Cancel the retire work handler, which should be idle now. */ 2139 /* Cancel the retire work handler, which should be idle now. */
2137 cancel_delayed_work_sync(&dev_priv->mm.retire_work); 2140 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
2138 2141
2139 io_mapping_free(dev_priv->mm.gtt_mapping); 2142 io_mapping_free(dev_priv->mm.gtt_mapping);
2140 if (dev_priv->mm.gtt_mtrr >= 0) { 2143 if (dev_priv->mm.gtt_mtrr >= 0) {
2141 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, 2144 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
2142 dev->agp->agp_info.aper_size * 1024 * 1024); 2145 dev->agp->agp_info.aper_size * 1024 * 1024);
2143 dev_priv->mm.gtt_mtrr = -1; 2146 dev_priv->mm.gtt_mtrr = -1;
2144 } 2147 }
2145 2148
2146 acpi_video_unregister(); 2149 acpi_video_unregister();
2147 2150
2148 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2151 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2149 intel_fbdev_fini(dev); 2152 intel_fbdev_fini(dev);
2150 intel_modeset_cleanup(dev); 2153 intel_modeset_cleanup(dev);
2151 2154
2152 /* 2155 /*
2153 * free the memory space allocated for the child device 2156 * free the memory space allocated for the child device
2154 * config parsed from VBT 2157 * config parsed from VBT
2155 */ 2158 */
2156 if (dev_priv->child_dev && dev_priv->child_dev_num) { 2159 if (dev_priv->child_dev && dev_priv->child_dev_num) {
2157 kfree(dev_priv->child_dev); 2160 kfree(dev_priv->child_dev);
2158 dev_priv->child_dev = NULL; 2161 dev_priv->child_dev = NULL;
2159 dev_priv->child_dev_num = 0; 2162 dev_priv->child_dev_num = 0;
2160 } 2163 }
2161 2164
2162 vga_switcheroo_unregister_client(dev->pdev); 2165 vga_switcheroo_unregister_client(dev->pdev);
2163 vga_client_register(dev->pdev, NULL, NULL, NULL); 2166 vga_client_register(dev->pdev, NULL, NULL, NULL);
2164 } 2167 }
2165 2168
2166 /* Free error state after interrupts are fully disabled. */ 2169 /* Free error state after interrupts are fully disabled. */
2167 del_timer_sync(&dev_priv->hangcheck_timer); 2170 del_timer_sync(&dev_priv->hangcheck_timer);
2168 cancel_work_sync(&dev_priv->error_work); 2171 cancel_work_sync(&dev_priv->error_work);
2169 i915_destroy_error_state(dev); 2172 i915_destroy_error_state(dev);
2170 2173
2171 if (dev->pdev->msi_enabled) 2174 if (dev->pdev->msi_enabled)
2172 pci_disable_msi(dev->pdev); 2175 pci_disable_msi(dev->pdev);
2173 2176
2174 intel_opregion_fini(dev); 2177 intel_opregion_fini(dev);
2175 2178
2176 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2179 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2177 /* Flush any outstanding unpin_work. */ 2180 /* Flush any outstanding unpin_work. */
2178 flush_workqueue(dev_priv->wq); 2181 flush_workqueue(dev_priv->wq);
2179 2182
2180 mutex_lock(&dev->struct_mutex); 2183 mutex_lock(&dev->struct_mutex);
2181 i915_gem_free_all_phys_object(dev); 2184 i915_gem_free_all_phys_object(dev);
2182 i915_gem_cleanup_ringbuffer(dev); 2185 i915_gem_cleanup_ringbuffer(dev);
2183 mutex_unlock(&dev->struct_mutex); 2186 mutex_unlock(&dev->struct_mutex);
2184 if (I915_HAS_FBC(dev) && i915_powersave) 2187 if (I915_HAS_FBC(dev) && i915_powersave)
2185 i915_cleanup_compression(dev); 2188 i915_cleanup_compression(dev);
2186 drm_mm_takedown(&dev_priv->mm.stolen); 2189 drm_mm_takedown(&dev_priv->mm.stolen);
2187 2190
2188 intel_cleanup_overlay(dev); 2191 intel_cleanup_overlay(dev);
2189 2192
2190 if (!I915_NEED_GFX_HWS(dev)) 2193 if (!I915_NEED_GFX_HWS(dev))
2191 i915_free_hws(dev); 2194 i915_free_hws(dev);
2192 } 2195 }
2193 2196
2194 if (dev_priv->regs != NULL) 2197 if (dev_priv->regs != NULL)
2195 pci_iounmap(dev->pdev, dev_priv->regs); 2198 pci_iounmap(dev->pdev, dev_priv->regs);
2196 2199
2197 intel_teardown_gmbus(dev); 2200 intel_teardown_gmbus(dev);
2198 intel_teardown_mchbar(dev); 2201 intel_teardown_mchbar(dev);
2199 2202
2200 destroy_workqueue(dev_priv->wq); 2203 destroy_workqueue(dev_priv->wq);
2201 2204
2202 pci_dev_put(dev_priv->bridge_dev); 2205 pci_dev_put(dev_priv->bridge_dev);
2203 kfree(dev->dev_private); 2206 kfree(dev->dev_private);
2204 2207
2205 return 0; 2208 return 0;
2206 } 2209 }
2207 2210
2208 int i915_driver_open(struct drm_device *dev, struct drm_file *file) 2211 int i915_driver_open(struct drm_device *dev, struct drm_file *file)
2209 { 2212 {
2210 struct drm_i915_file_private *file_priv; 2213 struct drm_i915_file_private *file_priv;
2211 2214
2212 DRM_DEBUG_DRIVER("\n"); 2215 DRM_DEBUG_DRIVER("\n");
2213 file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); 2216 file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
2214 if (!file_priv) 2217 if (!file_priv)
2215 return -ENOMEM; 2218 return -ENOMEM;
2216 2219
2217 file->driver_priv = file_priv; 2220 file->driver_priv = file_priv;
2218 2221
2219 spin_lock_init(&file_priv->mm.lock); 2222 spin_lock_init(&file_priv->mm.lock);
2220 INIT_LIST_HEAD(&file_priv->mm.request_list); 2223 INIT_LIST_HEAD(&file_priv->mm.request_list);
2221 2224
2222 return 0; 2225 return 0;
2223 } 2226 }
2224 2227
2225 /** 2228 /**
2226 * i915_driver_lastclose - clean up after all DRM clients have exited 2229 * i915_driver_lastclose - clean up after all DRM clients have exited
2227 * @dev: DRM device 2230 * @dev: DRM device
2228 * 2231 *
2229 * Take care of cleaning up after all DRM clients have exited. In the 2232 * Take care of cleaning up after all DRM clients have exited. In the
2230 * mode setting case, we want to restore the kernel's initial mode (just 2233 * mode setting case, we want to restore the kernel's initial mode (just
2231 * in case the last client left us in a bad state). 2234 * in case the last client left us in a bad state).
2232 * 2235 *
2233 * Additionally, in the non-mode setting case, we'll tear down the AGP 2236 * Additionally, in the non-mode setting case, we'll tear down the AGP
2234 * and DMA structures, since the kernel won't be using them, and clean 2237 * and DMA structures, since the kernel won't be using them, and clean
2235 * up any GEM state. 2238 * up any GEM state.
2236 */ 2239 */
2237 void i915_driver_lastclose(struct drm_device * dev) 2240 void i915_driver_lastclose(struct drm_device * dev)
2238 { 2241 {
2239 drm_i915_private_t *dev_priv = dev->dev_private; 2242 drm_i915_private_t *dev_priv = dev->dev_private;
2240 2243
2241 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { 2244 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
2242 intel_fb_restore_mode(dev); 2245 intel_fb_restore_mode(dev);
2243 vga_switcheroo_process_delayed_switch(); 2246 vga_switcheroo_process_delayed_switch();
2244 return; 2247 return;
2245 } 2248 }
2246 2249
2247 i915_gem_lastclose(dev); 2250 i915_gem_lastclose(dev);
2248 2251
2249 i915_dma_cleanup(dev); 2252 i915_dma_cleanup(dev);
2250 } 2253 }
2251 2254
2252 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 2255 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
2253 { 2256 {
2254 i915_gem_release(dev, file_priv); 2257 i915_gem_release(dev, file_priv);
2255 } 2258 }
2256 2259
2257 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) 2260 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
2258 { 2261 {
2259 struct drm_i915_file_private *file_priv = file->driver_priv; 2262 struct drm_i915_file_private *file_priv = file->driver_priv;
2260 2263
2261 kfree(file_priv); 2264 kfree(file_priv);
2262 } 2265 }
2263 2266
2264 struct drm_ioctl_desc i915_ioctls[] = { 2267 struct drm_ioctl_desc i915_ioctls[] = {
2265 DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2268 DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2266 DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), 2269 DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
2267 DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), 2270 DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
2268 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), 2271 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
2269 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), 2272 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
2270 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), 2273 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
2271 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), 2274 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
2272 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2275 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2273 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), 2276 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
2274 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), 2277 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
2275 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2278 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2276 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), 2279 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
2277 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2280 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2278 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2281 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2279 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), 2282 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
2280 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 2283 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
2281 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2284 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2282 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 2285 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2283 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), 2286 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
2284 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), 2287 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
2285 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 2288 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
2286 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 2289 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
2287 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), 2290 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
2288 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), 2291 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
2289 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 2292 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2290 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 2293 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2291 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), 2294 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
2292 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), 2295 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
2293 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), 2296 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
2294 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), 2297 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
2295 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), 2298 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
2296 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), 2299 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
2297 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), 2300 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
2298 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), 2301 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
2299 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), 2302 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
2300 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), 2303 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
2301 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), 2304 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
2302 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), 2305 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
2303 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 2306 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
2304 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 2307 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
2305 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 2308 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
2306 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 2309 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
2307 }; 2310 };
2308 2311
2309 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 2312 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
2310 2313
2311 /** 2314 /**
2312 * Determine if the device really is AGP or not. 2315 * Determine if the device really is AGP or not.
2313 * 2316 *
2314 * All Intel graphics chipsets are treated as AGP, even if they are really 2317 * All Intel graphics chipsets are treated as AGP, even if they are really
2315 * PCI-e. 2318 * PCI-e.
2316 * 2319 *
2317 * \param dev The device to be tested. 2320 * \param dev The device to be tested.
2318 * 2321 *
2319 * \returns 2322 * \returns
2320 * A value of 1 is always returned to indicate every i9x5 is AGP. 2323 * A value of 1 is always returned to indicate every i9x5 is AGP.
2321 */ 2324 */
2322 int i915_driver_device_is_agp(struct drm_device * dev) 2325 int i915_driver_device_is_agp(struct drm_device * dev)
2323 { 2326 {
2324 return 1; 2327 return 1;
2325 } 2328 }
2326 2329
drivers/gpu/drm/i915/i915_drv.c
1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- 1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2 */ 2 */
3 /* 3 /*
4 * 4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved. 6 * All Rights Reserved.
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the 9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including 10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish, 11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to 12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to 13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions: 14 * the following conditions:
15 * 15 *
16 * The above copyright notice and this permission notice (including the 16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions 17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software. 18 * of the Software.
19 * 19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 * 27 *
28 */ 28 */
29 29
30 #include <linux/device.h> 30 #include <linux/device.h>
31 #include "drmP.h" 31 #include "drmP.h"
32 #include "drm.h" 32 #include "drm.h"
33 #include "i915_drm.h" 33 #include "i915_drm.h"
34 #include "i915_drv.h" 34 #include "i915_drv.h"
35 #include "intel_drv.h" 35 #include "intel_drv.h"
36 36
37 #include <linux/console.h> 37 #include <linux/console.h>
38 #include <linux/module.h> 38 #include <linux/module.h>
39 #include "drm_crtc_helper.h" 39 #include "drm_crtc_helper.h"
40 40
41 static int i915_modeset __read_mostly = -1; 41 static int i915_modeset __read_mostly = -1;
42 module_param_named(modeset, i915_modeset, int, 0400); 42 module_param_named(modeset, i915_modeset, int, 0400);
43 MODULE_PARM_DESC(modeset, 43 MODULE_PARM_DESC(modeset,
44 "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, " 44 "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
45 "1=on, -1=force vga console preference [default])"); 45 "1=on, -1=force vga console preference [default])");
46 46
47 unsigned int i915_fbpercrtc __always_unused = 0; 47 unsigned int i915_fbpercrtc __always_unused = 0;
48 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); 48 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
49 49
50 int i915_panel_ignore_lid __read_mostly = 0; 50 int i915_panel_ignore_lid __read_mostly = 0;
51 module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); 51 module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
52 MODULE_PARM_DESC(panel_ignore_lid, 52 MODULE_PARM_DESC(panel_ignore_lid,
53 "Override lid status (0=autodetect [default], 1=lid open, " 53 "Override lid status (0=autodetect [default], 1=lid open, "
54 "-1=lid closed)"); 54 "-1=lid closed)");
55 55
56 unsigned int i915_powersave __read_mostly = 1; 56 unsigned int i915_powersave __read_mostly = 1;
57 module_param_named(powersave, i915_powersave, int, 0600); 57 module_param_named(powersave, i915_powersave, int, 0600);
58 MODULE_PARM_DESC(powersave, 58 MODULE_PARM_DESC(powersave,
59 "Enable powersavings, fbc, downclocking, etc. (default: true)"); 59 "Enable powersavings, fbc, downclocking, etc. (default: true)");
60 60
61 int i915_semaphores __read_mostly = -1; 61 int i915_semaphores __read_mostly = -1;
62 module_param_named(semaphores, i915_semaphores, int, 0600); 62 module_param_named(semaphores, i915_semaphores, int, 0600);
63 MODULE_PARM_DESC(semaphores, 63 MODULE_PARM_DESC(semaphores,
64 "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))"); 64 "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
65 65
66 int i915_enable_rc6 __read_mostly = -1; 66 int i915_enable_rc6 __read_mostly = -1;
67 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); 67 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
68 MODULE_PARM_DESC(i915_enable_rc6, 68 MODULE_PARM_DESC(i915_enable_rc6,
69 "Enable power-saving render C-state 6 (default: -1 (use per-chip default))"); 69 "Enable power-saving render C-state 6 (default: -1 (use per-chip default))");
70 70
71 int i915_enable_fbc __read_mostly = -1; 71 int i915_enable_fbc __read_mostly = -1;
72 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); 72 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
73 MODULE_PARM_DESC(i915_enable_fbc, 73 MODULE_PARM_DESC(i915_enable_fbc,
74 "Enable frame buffer compression for power savings " 74 "Enable frame buffer compression for power savings "
75 "(default: -1 (use per-chip default))"); 75 "(default: -1 (use per-chip default))");
76 76
77 unsigned int i915_lvds_downclock __read_mostly = 0; 77 unsigned int i915_lvds_downclock __read_mostly = 0;
78 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 78 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
79 MODULE_PARM_DESC(lvds_downclock, 79 MODULE_PARM_DESC(lvds_downclock,
80 "Use panel (LVDS/eDP) downclocking for power savings " 80 "Use panel (LVDS/eDP) downclocking for power savings "
81 "(default: false)"); 81 "(default: false)");
82 82
83 int i915_panel_use_ssc __read_mostly = -1; 83 int i915_panel_use_ssc __read_mostly = -1;
84 module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); 84 module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
85 MODULE_PARM_DESC(lvds_use_ssc, 85 MODULE_PARM_DESC(lvds_use_ssc,
86 "Use Spread Spectrum Clock with panels [LVDS/eDP] " 86 "Use Spread Spectrum Clock with panels [LVDS/eDP] "
87 "(default: auto from VBT)"); 87 "(default: auto from VBT)");
88 88
89 int i915_vbt_sdvo_panel_type __read_mostly = -1; 89 int i915_vbt_sdvo_panel_type __read_mostly = -1;
90 module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); 90 module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
91 MODULE_PARM_DESC(vbt_sdvo_panel_type, 91 MODULE_PARM_DESC(vbt_sdvo_panel_type,
92 "Override selection of SDVO panel mode in the VBT " 92 "Override selection of SDVO panel mode in the VBT "
93 "(default: auto)"); 93 "(default: auto)");
94 94
95 static bool i915_try_reset __read_mostly = true; 95 static bool i915_try_reset __read_mostly = true;
96 module_param_named(reset, i915_try_reset, bool, 0600); 96 module_param_named(reset, i915_try_reset, bool, 0600);
97 MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); 97 MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
98 98
99 bool i915_enable_hangcheck __read_mostly = true; 99 bool i915_enable_hangcheck __read_mostly = true;
100 module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644); 100 module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
101 MODULE_PARM_DESC(enable_hangcheck, 101 MODULE_PARM_DESC(enable_hangcheck,
102 "Periodically check GPU activity for detecting hangs. " 102 "Periodically check GPU activity for detecting hangs. "
103 "WARNING: Disabling this can cause system wide hangs. " 103 "WARNING: Disabling this can cause system wide hangs. "
104 "(default: true)"); 104 "(default: true)");
105 105
106 static struct drm_driver driver; 106 static struct drm_driver driver;
107 extern int intel_agp_enabled; 107 extern int intel_agp_enabled;
108 108
109 #define INTEL_VGA_DEVICE(id, info) { \ 109 #define INTEL_VGA_DEVICE(id, info) { \
110 .class = PCI_BASE_CLASS_DISPLAY << 16, \ 110 .class = PCI_BASE_CLASS_DISPLAY << 16, \
111 .class_mask = 0xff0000, \ 111 .class_mask = 0xff0000, \
112 .vendor = 0x8086, \ 112 .vendor = 0x8086, \
113 .device = id, \ 113 .device = id, \
114 .subvendor = PCI_ANY_ID, \ 114 .subvendor = PCI_ANY_ID, \
115 .subdevice = PCI_ANY_ID, \ 115 .subdevice = PCI_ANY_ID, \
116 .driver_data = (unsigned long) info } 116 .driver_data = (unsigned long) info }
117 117
118 static const struct intel_device_info intel_i830_info = { 118 static const struct intel_device_info intel_i830_info = {
119 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, 119 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
120 .has_overlay = 1, .overlay_needs_physical = 1, 120 .has_overlay = 1, .overlay_needs_physical = 1,
121 }; 121 };
122 122
123 static const struct intel_device_info intel_845g_info = { 123 static const struct intel_device_info intel_845g_info = {
124 .gen = 2, 124 .gen = 2,
125 .has_overlay = 1, .overlay_needs_physical = 1, 125 .has_overlay = 1, .overlay_needs_physical = 1,
126 }; 126 };
127 127
128 static const struct intel_device_info intel_i85x_info = { 128 static const struct intel_device_info intel_i85x_info = {
129 .gen = 2, .is_i85x = 1, .is_mobile = 1, 129 .gen = 2, .is_i85x = 1, .is_mobile = 1,
130 .cursor_needs_physical = 1, 130 .cursor_needs_physical = 1,
131 .has_overlay = 1, .overlay_needs_physical = 1, 131 .has_overlay = 1, .overlay_needs_physical = 1,
132 }; 132 };
133 133
134 static const struct intel_device_info intel_i865g_info = { 134 static const struct intel_device_info intel_i865g_info = {
135 .gen = 2, 135 .gen = 2,
136 .has_overlay = 1, .overlay_needs_physical = 1, 136 .has_overlay = 1, .overlay_needs_physical = 1,
137 }; 137 };
138 138
139 static const struct intel_device_info intel_i915g_info = { 139 static const struct intel_device_info intel_i915g_info = {
140 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, 140 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
141 .has_overlay = 1, .overlay_needs_physical = 1, 141 .has_overlay = 1, .overlay_needs_physical = 1,
142 }; 142 };
143 static const struct intel_device_info intel_i915gm_info = { 143 static const struct intel_device_info intel_i915gm_info = {
144 .gen = 3, .is_mobile = 1, 144 .gen = 3, .is_mobile = 1,
145 .cursor_needs_physical = 1, 145 .cursor_needs_physical = 1,
146 .has_overlay = 1, .overlay_needs_physical = 1, 146 .has_overlay = 1, .overlay_needs_physical = 1,
147 .supports_tv = 1, 147 .supports_tv = 1,
148 }; 148 };
149 static const struct intel_device_info intel_i945g_info = { 149 static const struct intel_device_info intel_i945g_info = {
150 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, 150 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
151 .has_overlay = 1, .overlay_needs_physical = 1, 151 .has_overlay = 1, .overlay_needs_physical = 1,
152 }; 152 };
153 static const struct intel_device_info intel_i945gm_info = { 153 static const struct intel_device_info intel_i945gm_info = {
154 .gen = 3, .is_i945gm = 1, .is_mobile = 1, 154 .gen = 3, .is_i945gm = 1, .is_mobile = 1,
155 .has_hotplug = 1, .cursor_needs_physical = 1, 155 .has_hotplug = 1, .cursor_needs_physical = 1,
156 .has_overlay = 1, .overlay_needs_physical = 1, 156 .has_overlay = 1, .overlay_needs_physical = 1,
157 .supports_tv = 1, 157 .supports_tv = 1,
158 }; 158 };
159 159
160 static const struct intel_device_info intel_i965g_info = { 160 static const struct intel_device_info intel_i965g_info = {
161 .gen = 4, .is_broadwater = 1, 161 .gen = 4, .is_broadwater = 1,
162 .has_hotplug = 1, 162 .has_hotplug = 1,
163 .has_overlay = 1, 163 .has_overlay = 1,
164 }; 164 };
165 165
166 static const struct intel_device_info intel_i965gm_info = { 166 static const struct intel_device_info intel_i965gm_info = {
167 .gen = 4, .is_crestline = 1, 167 .gen = 4, .is_crestline = 1,
168 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, 168 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
169 .has_overlay = 1, 169 .has_overlay = 1,
170 .supports_tv = 1, 170 .supports_tv = 1,
171 }; 171 };
172 172
173 static const struct intel_device_info intel_g33_info = { 173 static const struct intel_device_info intel_g33_info = {
174 .gen = 3, .is_g33 = 1, 174 .gen = 3, .is_g33 = 1,
175 .need_gfx_hws = 1, .has_hotplug = 1, 175 .need_gfx_hws = 1, .has_hotplug = 1,
176 .has_overlay = 1, 176 .has_overlay = 1,
177 }; 177 };
178 178
179 static const struct intel_device_info intel_g45_info = { 179 static const struct intel_device_info intel_g45_info = {
180 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, 180 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
181 .has_pipe_cxsr = 1, .has_hotplug = 1, 181 .has_pipe_cxsr = 1, .has_hotplug = 1,
182 .has_bsd_ring = 1, 182 .has_bsd_ring = 1,
183 }; 183 };
184 184
185 static const struct intel_device_info intel_gm45_info = { 185 static const struct intel_device_info intel_gm45_info = {
186 .gen = 4, .is_g4x = 1, 186 .gen = 4, .is_g4x = 1,
187 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, 187 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
188 .has_pipe_cxsr = 1, .has_hotplug = 1, 188 .has_pipe_cxsr = 1, .has_hotplug = 1,
189 .supports_tv = 1, 189 .supports_tv = 1,
190 .has_bsd_ring = 1, 190 .has_bsd_ring = 1,
191 }; 191 };
192 192
193 static const struct intel_device_info intel_pineview_info = { 193 static const struct intel_device_info intel_pineview_info = {
194 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, 194 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
195 .need_gfx_hws = 1, .has_hotplug = 1, 195 .need_gfx_hws = 1, .has_hotplug = 1,
196 .has_overlay = 1, 196 .has_overlay = 1,
197 }; 197 };
198 198
199 static const struct intel_device_info intel_ironlake_d_info = { 199 static const struct intel_device_info intel_ironlake_d_info = {
200 .gen = 5, 200 .gen = 5,
201 .need_gfx_hws = 1, .has_hotplug = 1, 201 .need_gfx_hws = 1, .has_hotplug = 1,
202 .has_bsd_ring = 1, 202 .has_bsd_ring = 1,
203 }; 203 };
204 204
205 static const struct intel_device_info intel_ironlake_m_info = { 205 static const struct intel_device_info intel_ironlake_m_info = {
206 .gen = 5, .is_mobile = 1, 206 .gen = 5, .is_mobile = 1,
207 .need_gfx_hws = 1, .has_hotplug = 1, 207 .need_gfx_hws = 1, .has_hotplug = 1,
208 .has_fbc = 1, 208 .has_fbc = 1,
209 .has_bsd_ring = 1, 209 .has_bsd_ring = 1,
210 }; 210 };
211 211
212 static const struct intel_device_info intel_sandybridge_d_info = { 212 static const struct intel_device_info intel_sandybridge_d_info = {
213 .gen = 6, 213 .gen = 6,
214 .need_gfx_hws = 1, .has_hotplug = 1, 214 .need_gfx_hws = 1, .has_hotplug = 1,
215 .has_bsd_ring = 1, 215 .has_bsd_ring = 1,
216 .has_blt_ring = 1, 216 .has_blt_ring = 1,
217 .has_llc = 1,
217 }; 218 };
218 219
219 static const struct intel_device_info intel_sandybridge_m_info = { 220 static const struct intel_device_info intel_sandybridge_m_info = {
220 .gen = 6, .is_mobile = 1, 221 .gen = 6, .is_mobile = 1,
221 .need_gfx_hws = 1, .has_hotplug = 1, 222 .need_gfx_hws = 1, .has_hotplug = 1,
222 .has_fbc = 1, 223 .has_fbc = 1,
223 .has_bsd_ring = 1, 224 .has_bsd_ring = 1,
224 .has_blt_ring = 1, 225 .has_blt_ring = 1,
226 .has_llc = 1,
225 }; 227 };
226 228
227 static const struct intel_device_info intel_ivybridge_d_info = { 229 static const struct intel_device_info intel_ivybridge_d_info = {
228 .is_ivybridge = 1, .gen = 7, 230 .is_ivybridge = 1, .gen = 7,
229 .need_gfx_hws = 1, .has_hotplug = 1, 231 .need_gfx_hws = 1, .has_hotplug = 1,
230 .has_bsd_ring = 1, 232 .has_bsd_ring = 1,
231 .has_blt_ring = 1, 233 .has_blt_ring = 1,
234 .has_llc = 1,
232 }; 235 };
233 236
234 static const struct intel_device_info intel_ivybridge_m_info = { 237 static const struct intel_device_info intel_ivybridge_m_info = {
235 .is_ivybridge = 1, .gen = 7, .is_mobile = 1, 238 .is_ivybridge = 1, .gen = 7, .is_mobile = 1,
236 .need_gfx_hws = 1, .has_hotplug = 1, 239 .need_gfx_hws = 1, .has_hotplug = 1,
237 .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */ 240 .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */
238 .has_bsd_ring = 1, 241 .has_bsd_ring = 1,
239 .has_blt_ring = 1, 242 .has_blt_ring = 1,
243 .has_llc = 1,
240 }; 244 };
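
The .has_llc = 1 initializers added above mark the Sandy Bridge and Ivy Bridge descriptors as having a shared last-level cache, so other parts of the driver can test for the capability through the device info structure. A minimal sketch of one possible consumer follows; the HAS_LLC() helper, the example function, and the cache-level choice are illustrative assumptions and are not part of the hunks shown here.

/* Hypothetical helper; a real definition would sit alongside the other
 * INTEL_INFO()-based feature macros in i915_drv.h. */
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)

/* Illustrative consumer: derive a default GEM cache level from the flag. */
static void example_pick_cache_level(struct drm_device *dev,
				     struct drm_i915_gem_object *obj)
{
	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
}
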
241 245
242 static const struct pci_device_id pciidlist[] = { /* aka */ 246 static const struct pci_device_id pciidlist[] = { /* aka */
243 INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ 247 INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */
244 INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ 248 INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */
245 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */ 249 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */
246 INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), 250 INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
247 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */ 251 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */
248 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */ 252 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */
249 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */ 253 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */
250 INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */ 254 INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */
251 INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */ 255 INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */
252 INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */ 256 INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */
253 INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */ 257 INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */
254 INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */ 258 INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */
255 INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */ 259 INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */
256 INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */ 260 INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */
257 INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */ 261 INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */
258 INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */ 262 INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */
259 INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */ 263 INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */
260 INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */ 264 INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */
261 INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */ 265 INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */
262 INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */ 266 INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */
263 INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */ 267 INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */
264 INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */ 268 INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */
265 INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */ 269 INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */
266 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ 270 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
267 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ 271 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
268 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ 272 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
269 INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ 273 INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
270 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), 274 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
271 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), 275 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
272 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), 276 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
273 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), 277 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
274 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), 278 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
275 INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), 279 INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
276 INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), 280 INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
277 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), 281 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
278 INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), 282 INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
279 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), 283 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
280 INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), 284 INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
281 INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ 285 INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
282 INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ 286 INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
283 INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ 287 INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
284 INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ 288 INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
285 INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ 289 INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
286 {0, 0, 0} 290 {0, 0, 0}
287 }; 291 };
288 292
289 #if defined(CONFIG_DRM_I915_KMS) 293 #if defined(CONFIG_DRM_I915_KMS)
290 MODULE_DEVICE_TABLE(pci, pciidlist); 294 MODULE_DEVICE_TABLE(pci, pciidlist);
291 #endif 295 #endif
292 296
293 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 297 #define INTEL_PCH_DEVICE_ID_MASK 0xff00
294 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 298 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
295 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 299 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
296 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 300 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
297 301
298 void intel_detect_pch(struct drm_device *dev) 302 void intel_detect_pch(struct drm_device *dev)
299 { 303 {
300 struct drm_i915_private *dev_priv = dev->dev_private; 304 struct drm_i915_private *dev_priv = dev->dev_private;
301 struct pci_dev *pch; 305 struct pci_dev *pch;
302 306
303 /* 307 /*
304 * The reason to probe ISA bridge instead of Dev31:Fun0 is to 308 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
305 * make graphics device passthrough work easily for a VMM, which only 309 * make graphics device passthrough work easily for a VMM, which only
306 * needs to expose the ISA bridge to let the driver know the real hardware 310 * needs to expose the ISA bridge to let the driver know the real hardware
307 * underneath. This is a requirement from the virtualization team. 311 * underneath. This is a requirement from the virtualization team.
308 */ 312 */
309 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); 313 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
310 if (pch) { 314 if (pch) {
311 if (pch->vendor == PCI_VENDOR_ID_INTEL) { 315 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
312 int id; 316 int id;
313 id = pch->device & INTEL_PCH_DEVICE_ID_MASK; 317 id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
314 318
315 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 319 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
316 dev_priv->pch_type = PCH_IBX; 320 dev_priv->pch_type = PCH_IBX;
317 DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 321 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
318 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 322 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
319 dev_priv->pch_type = PCH_CPT; 323 dev_priv->pch_type = PCH_CPT;
320 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 324 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
321 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 325 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
322 /* PantherPoint is CPT compatible */ 326 /* PantherPoint is CPT compatible */
323 dev_priv->pch_type = PCH_CPT; 327 dev_priv->pch_type = PCH_CPT;
324 DRM_DEBUG_KMS("Found PantherPoint PCH\n"); 328 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
325 } 329 }
326 } 330 }
327 pci_dev_put(pch); 331 pci_dev_put(pch);
328 } 332 }
329 } 333 }
330 334
331 void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 335 void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
332 { 336 {
333 int count; 337 int count;
334 338
335 count = 0; 339 count = 0;
336 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 340 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
337 udelay(10); 341 udelay(10);
338 342
339 I915_WRITE_NOTRACE(FORCEWAKE, 1); 343 I915_WRITE_NOTRACE(FORCEWAKE, 1);
340 POSTING_READ(FORCEWAKE); 344 POSTING_READ(FORCEWAKE);
341 345
342 count = 0; 346 count = 0;
343 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0) 347 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
344 udelay(10); 348 udelay(10);
345 } 349 }
346 350
347 void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) 351 void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
348 { 352 {
349 int count; 353 int count;
350 354
351 count = 0; 355 count = 0;
352 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1)) 356 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
353 udelay(10); 357 udelay(10);
354 358
355 I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1); 359 I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
356 POSTING_READ(FORCEWAKE_MT); 360 POSTING_READ(FORCEWAKE_MT);
357 361
358 count = 0; 362 count = 0;
359 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0) 363 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
360 udelay(10); 364 udelay(10);
361 } 365 }
362 366
363 /* 367 /*
364 * Generally this is called implicitly by the register read function. However, 368 * Generally this is called implicitly by the register read function. However,
365 * if some sequence requires the GT to not power down then this function should 369 * if some sequence requires the GT to not power down then this function should
366 * be called at the beginning of the sequence followed by a call to 370 * be called at the beginning of the sequence followed by a call to
367 * gen6_gt_force_wake_put() at the end of the sequence. 371 * gen6_gt_force_wake_put() at the end of the sequence.
368 */ 372 */
369 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 373 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
370 { 374 {
371 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 375 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
372 376
373 /* Forcewake is atomic in case we get in here without the lock */ 377 /* Forcewake is atomic in case we get in here without the lock */
374 if (atomic_add_return(1, &dev_priv->forcewake_count) == 1) 378 if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
375 dev_priv->display.force_wake_get(dev_priv); 379 dev_priv->display.force_wake_get(dev_priv);
376 } 380 }
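
As a usage sketch of the pattern the comment above describes — taking forcewake once at the start of a multi-register sequence and dropping it at the end — the snippet below brackets two reads. The function name and register parameters are illustrative assumptions; the caller is expected to hold dev->struct_mutex, as the WARN_ON() in gen6_gt_force_wake_get() implies.

/* Illustrative only: keep the GT awake across the whole sequence instead
 * of relying on the implicit wake taken by each individual register read. */
static u32 example_read_pair(struct drm_i915_private *dev_priv,
			     u32 reg_a, u32 reg_b)
{
	u32 val;

	gen6_gt_force_wake_get(dev_priv);
	val = I915_READ(reg_a) | I915_READ(reg_b);
	gen6_gt_force_wake_put(dev_priv);

	return val;
}
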
377 381
378 void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 382 void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
379 { 383 {
380 I915_WRITE_NOTRACE(FORCEWAKE, 0); 384 I915_WRITE_NOTRACE(FORCEWAKE, 0);
381 POSTING_READ(FORCEWAKE); 385 POSTING_READ(FORCEWAKE);
382 } 386 }
383 387
384 void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) 388 void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
385 { 389 {
386 I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0); 390 I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
387 POSTING_READ(FORCEWAKE_MT); 391 POSTING_READ(FORCEWAKE_MT);
388 } 392 }
389 393
390 /* 394 /*
391 * see gen6_gt_force_wake_get() 395 * see gen6_gt_force_wake_get()
392 */ 396 */
393 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 397 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
394 { 398 {
395 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 399 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
396 400
397 if (atomic_dec_and_test(&dev_priv->forcewake_count)) 401 if (atomic_dec_and_test(&dev_priv->forcewake_count))
398 dev_priv->display.force_wake_put(dev_priv); 402 dev_priv->display.force_wake_put(dev_priv);
399 } 403 }
400 404
401 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) 405 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
402 { 406 {
403 if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { 407 if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
404 int loop = 500; 408 int loop = 500;
405 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); 409 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
406 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { 410 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
407 udelay(10); 411 udelay(10);
408 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); 412 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
409 } 413 }
410 WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES); 414 WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
411 dev_priv->gt_fifo_count = fifo; 415 dev_priv->gt_fifo_count = fifo;
412 } 416 }
413 dev_priv->gt_fifo_count--; 417 dev_priv->gt_fifo_count--;
414 } 418 }
415 419
416 static int i915_drm_freeze(struct drm_device *dev) 420 static int i915_drm_freeze(struct drm_device *dev)
417 { 421 {
418 struct drm_i915_private *dev_priv = dev->dev_private; 422 struct drm_i915_private *dev_priv = dev->dev_private;
419 423
420 drm_kms_helper_poll_disable(dev); 424 drm_kms_helper_poll_disable(dev);
421 425
422 pci_save_state(dev->pdev); 426 pci_save_state(dev->pdev);
423 427
424 /* If KMS is active, we do the leavevt stuff here */ 428 /* If KMS is active, we do the leavevt stuff here */
425 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 429 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
426 int error = i915_gem_idle(dev); 430 int error = i915_gem_idle(dev);
427 if (error) { 431 if (error) {
428 dev_err(&dev->pdev->dev, 432 dev_err(&dev->pdev->dev,
429 "GEM idle failed, resume might fail\n"); 433 "GEM idle failed, resume might fail\n");
430 return error; 434 return error;
431 } 435 }
432 drm_irq_uninstall(dev); 436 drm_irq_uninstall(dev);
433 } 437 }
434 438
435 i915_save_state(dev); 439 i915_save_state(dev);
436 440
437 intel_opregion_fini(dev); 441 intel_opregion_fini(dev);
438 442
439 /* Modeset on resume, not lid events */ 443 /* Modeset on resume, not lid events */
440 dev_priv->modeset_on_lid = 0; 444 dev_priv->modeset_on_lid = 0;
441 445
442 return 0; 446 return 0;
443 } 447 }
444 448
445 int i915_suspend(struct drm_device *dev, pm_message_t state) 449 int i915_suspend(struct drm_device *dev, pm_message_t state)
446 { 450 {
447 int error; 451 int error;
448 452
449 if (!dev || !dev->dev_private) { 453 if (!dev || !dev->dev_private) {
450 DRM_ERROR("dev: %p\n", dev); 454 DRM_ERROR("dev: %p\n", dev);
451 DRM_ERROR("DRM not initialized, aborting suspend.\n"); 455 DRM_ERROR("DRM not initialized, aborting suspend.\n");
452 return -ENODEV; 456 return -ENODEV;
453 } 457 }
454 458
455 if (state.event == PM_EVENT_PRETHAW) 459 if (state.event == PM_EVENT_PRETHAW)
456 return 0; 460 return 0;
457 461
458 462
459 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 463 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
460 return 0; 464 return 0;
461 465
462 error = i915_drm_freeze(dev); 466 error = i915_drm_freeze(dev);
463 if (error) 467 if (error)
464 return error; 468 return error;
465 469
466 if (state.event == PM_EVENT_SUSPEND) { 470 if (state.event == PM_EVENT_SUSPEND) {
467 /* Shut down the device */ 471 /* Shut down the device */
468 pci_disable_device(dev->pdev); 472 pci_disable_device(dev->pdev);
469 pci_set_power_state(dev->pdev, PCI_D3hot); 473 pci_set_power_state(dev->pdev, PCI_D3hot);
470 } 474 }
471 475
472 return 0; 476 return 0;
473 } 477 }
474 478
475 static int i915_drm_thaw(struct drm_device *dev) 479 static int i915_drm_thaw(struct drm_device *dev)
476 { 480 {
477 struct drm_i915_private *dev_priv = dev->dev_private; 481 struct drm_i915_private *dev_priv = dev->dev_private;
478 int error = 0; 482 int error = 0;
479 483
480 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 484 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
481 mutex_lock(&dev->struct_mutex); 485 mutex_lock(&dev->struct_mutex);
482 i915_gem_restore_gtt_mappings(dev); 486 i915_gem_restore_gtt_mappings(dev);
483 mutex_unlock(&dev->struct_mutex); 487 mutex_unlock(&dev->struct_mutex);
484 } 488 }
485 489
486 i915_restore_state(dev); 490 i915_restore_state(dev);
487 intel_opregion_setup(dev); 491 intel_opregion_setup(dev);
488 492
489 /* KMS EnterVT equivalent */ 493 /* KMS EnterVT equivalent */
490 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 494 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
491 mutex_lock(&dev->struct_mutex); 495 mutex_lock(&dev->struct_mutex);
492 dev_priv->mm.suspended = 0; 496 dev_priv->mm.suspended = 0;
493 497
494 error = i915_gem_init_ringbuffer(dev); 498 error = i915_gem_init_ringbuffer(dev);
495 mutex_unlock(&dev->struct_mutex); 499 mutex_unlock(&dev->struct_mutex);
496 500
497 if (HAS_PCH_SPLIT(dev)) 501 if (HAS_PCH_SPLIT(dev))
498 ironlake_init_pch_refclk(dev); 502 ironlake_init_pch_refclk(dev);
499 503
500 drm_mode_config_reset(dev); 504 drm_mode_config_reset(dev);
501 drm_irq_install(dev); 505 drm_irq_install(dev);
502 506
503 /* Resume the modeset for every activated CRTC */ 507 /* Resume the modeset for every activated CRTC */
504 drm_helper_resume_force_mode(dev); 508 drm_helper_resume_force_mode(dev);
505 509
506 if (IS_IRONLAKE_M(dev)) 510 if (IS_IRONLAKE_M(dev))
507 ironlake_enable_rc6(dev); 511 ironlake_enable_rc6(dev);
508 } 512 }
509 513
510 intel_opregion_init(dev); 514 intel_opregion_init(dev);
511 515
512 dev_priv->modeset_on_lid = 0; 516 dev_priv->modeset_on_lid = 0;
513 517
514 return error; 518 return error;
515 } 519 }
516 520
517 int i915_resume(struct drm_device *dev) 521 int i915_resume(struct drm_device *dev)
518 { 522 {
519 int ret; 523 int ret;
520 524
521 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 525 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
522 return 0; 526 return 0;
523 527
524 if (pci_enable_device(dev->pdev)) 528 if (pci_enable_device(dev->pdev))
525 return -EIO; 529 return -EIO;
526 530
527 pci_set_master(dev->pdev); 531 pci_set_master(dev->pdev);
528 532
529 ret = i915_drm_thaw(dev); 533 ret = i915_drm_thaw(dev);
530 if (ret) 534 if (ret)
531 return ret; 535 return ret;
532 536
533 drm_kms_helper_poll_enable(dev); 537 drm_kms_helper_poll_enable(dev);
534 return 0; 538 return 0;
535 } 539 }
536 540
537 static int i8xx_do_reset(struct drm_device *dev, u8 flags) 541 static int i8xx_do_reset(struct drm_device *dev, u8 flags)
538 { 542 {
539 struct drm_i915_private *dev_priv = dev->dev_private; 543 struct drm_i915_private *dev_priv = dev->dev_private;
540 544
541 if (IS_I85X(dev)) 545 if (IS_I85X(dev))
542 return -ENODEV; 546 return -ENODEV;
543 547
544 I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); 548 I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
545 POSTING_READ(D_STATE); 549 POSTING_READ(D_STATE);
546 550
547 if (IS_I830(dev) || IS_845G(dev)) { 551 if (IS_I830(dev) || IS_845G(dev)) {
548 I915_WRITE(DEBUG_RESET_I830, 552 I915_WRITE(DEBUG_RESET_I830,
549 DEBUG_RESET_DISPLAY | 553 DEBUG_RESET_DISPLAY |
550 DEBUG_RESET_RENDER | 554 DEBUG_RESET_RENDER |
551 DEBUG_RESET_FULL); 555 DEBUG_RESET_FULL);
552 POSTING_READ(DEBUG_RESET_I830); 556 POSTING_READ(DEBUG_RESET_I830);
553 msleep(1); 557 msleep(1);
554 558
555 I915_WRITE(DEBUG_RESET_I830, 0); 559 I915_WRITE(DEBUG_RESET_I830, 0);
556 POSTING_READ(DEBUG_RESET_I830); 560 POSTING_READ(DEBUG_RESET_I830);
557 } 561 }
558 562
559 msleep(1); 563 msleep(1);
560 564
561 I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); 565 I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
562 POSTING_READ(D_STATE); 566 POSTING_READ(D_STATE);
563 567
564 return 0; 568 return 0;
565 } 569 }
566 570
567 static int i965_reset_complete(struct drm_device *dev) 571 static int i965_reset_complete(struct drm_device *dev)
568 { 572 {
569 u8 gdrst; 573 u8 gdrst;
570 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); 574 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
571 return gdrst & 0x1; 575 return gdrst & 0x1;
572 } 576 }
573 577
574 static int i965_do_reset(struct drm_device *dev, u8 flags) 578 static int i965_do_reset(struct drm_device *dev, u8 flags)
575 { 579 {
576 u8 gdrst; 580 u8 gdrst;
577 581
578 /* 582 /*
579 * Set the domains we want to reset (GRDOM/bits 2 and 3) as 583 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
580 * well as the reset bit (GR/bit 0). Setting the GR bit 584 * well as the reset bit (GR/bit 0). Setting the GR bit
581 * triggers the reset; when done, the hardware will clear it. 585 * triggers the reset; when done, the hardware will clear it.
582 */ 586 */
583 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); 587 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
584 pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1); 588 pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
585 589
586 return wait_for(i965_reset_complete(dev), 500); 590 return wait_for(i965_reset_complete(dev), 500);
587 } 591 }
588 592
589 static int ironlake_do_reset(struct drm_device *dev, u8 flags) 593 static int ironlake_do_reset(struct drm_device *dev, u8 flags)
590 { 594 {
591 struct drm_i915_private *dev_priv = dev->dev_private; 595 struct drm_i915_private *dev_priv = dev->dev_private;
592 u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); 596 u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
593 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1); 597 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
594 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); 598 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
595 } 599 }
596 600
597 static int gen6_do_reset(struct drm_device *dev, u8 flags) 601 static int gen6_do_reset(struct drm_device *dev, u8 flags)
598 { 602 {
599 struct drm_i915_private *dev_priv = dev->dev_private; 603 struct drm_i915_private *dev_priv = dev->dev_private;
600 604
601 I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL); 605 I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
602 return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); 606 return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
603 } 607 }
604 608
605 /** 609 /**
606 * i915_reset - reset chip after a hang 610 * i915_reset - reset chip after a hang
607 * @dev: drm device to reset 611 * @dev: drm device to reset
608 * @flags: reset domains 612 * @flags: reset domains
609 * 613 *
610 * Reset the chip. Useful if a hang is detected. Returns zero on successful 614 * Reset the chip. Useful if a hang is detected. Returns zero on successful
611 * reset or otherwise an error code. 615 * reset or otherwise an error code.
612 * 616 *
613 * Procedure is fairly simple: 617 * Procedure is fairly simple:
614 * - reset the chip using the reset reg 618 * - reset the chip using the reset reg
615 * - re-init context state 619 * - re-init context state
616 * - re-init hardware status page 620 * - re-init hardware status page
617 * - re-init ring buffer 621 * - re-init ring buffer
618 * - re-init interrupt state 622 * - re-init interrupt state
619 * - re-init display 623 * - re-init display
620 */ 624 */
621 int i915_reset(struct drm_device *dev, u8 flags) 625 int i915_reset(struct drm_device *dev, u8 flags)
622 { 626 {
623 drm_i915_private_t *dev_priv = dev->dev_private; 627 drm_i915_private_t *dev_priv = dev->dev_private;
624 /* 628 /*
625 * We really should only reset the display subsystem if we actually 629 * We really should only reset the display subsystem if we actually
626 * need to 630 * need to
627 */ 631 */
628 bool need_display = true; 632 bool need_display = true;
629 int ret; 633 int ret;
630 634
631 if (!i915_try_reset) 635 if (!i915_try_reset)
632 return 0; 636 return 0;
633 637
634 if (!mutex_trylock(&dev->struct_mutex)) 638 if (!mutex_trylock(&dev->struct_mutex))
635 return -EBUSY; 639 return -EBUSY;
636 640
637 i915_gem_reset(dev); 641 i915_gem_reset(dev);
638 642
639 ret = -ENODEV; 643 ret = -ENODEV;
640 if (get_seconds() - dev_priv->last_gpu_reset < 5) { 644 if (get_seconds() - dev_priv->last_gpu_reset < 5) {
641 DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); 645 DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
642 } else switch (INTEL_INFO(dev)->gen) { 646 } else switch (INTEL_INFO(dev)->gen) {
643 case 7: 647 case 7:
644 case 6: 648 case 6:
645 ret = gen6_do_reset(dev, flags); 649 ret = gen6_do_reset(dev, flags);
646 /* If reset with a user forcewake, try to restore */ 650 /* If reset with a user forcewake, try to restore */
647 if (atomic_read(&dev_priv->forcewake_count)) 651 if (atomic_read(&dev_priv->forcewake_count))
648 __gen6_gt_force_wake_get(dev_priv); 652 __gen6_gt_force_wake_get(dev_priv);
649 break; 653 break;
650 case 5: 654 case 5:
651 ret = ironlake_do_reset(dev, flags); 655 ret = ironlake_do_reset(dev, flags);
652 break; 656 break;
653 case 4: 657 case 4:
654 ret = i965_do_reset(dev, flags); 658 ret = i965_do_reset(dev, flags);
655 break; 659 break;
656 case 2: 660 case 2:
657 ret = i8xx_do_reset(dev, flags); 661 ret = i8xx_do_reset(dev, flags);
658 break; 662 break;
659 } 663 }
660 dev_priv->last_gpu_reset = get_seconds(); 664 dev_priv->last_gpu_reset = get_seconds();
661 if (ret) { 665 if (ret) {
662 DRM_ERROR("Failed to reset chip.\n"); 666 DRM_ERROR("Failed to reset chip.\n");
663 mutex_unlock(&dev->struct_mutex); 667 mutex_unlock(&dev->struct_mutex);
664 return ret; 668 return ret;
665 } 669 }
666 670
667 /* Ok, now get things going again... */ 671 /* Ok, now get things going again... */
668 672
669 /* 673 /*
670 * Everything depends on having the GTT running, so we need to start 674 * Everything depends on having the GTT running, so we need to start
671 * there. Fortunately we don't need to do this unless we reset the 675 * there. Fortunately we don't need to do this unless we reset the
672 * chip at a PCI level. 676 * chip at a PCI level.
673 * 677 *
674 * Next we would need to restore the context, but we don't use 678 * Next we would need to restore the context, but we don't use
675 * hardware contexts yet either... 679 * hardware contexts yet either...
676 * 680 *
677 * Ring buffer needs to be re-initialized in the KMS case, or if X 681 * Ring buffer needs to be re-initialized in the KMS case, or if X
678 * was running at the time of the reset (i.e. we weren't VT 682 * was running at the time of the reset (i.e. we weren't VT
679 * switched away). 683 * switched away).
680 */ 684 */
681 if (drm_core_check_feature(dev, DRIVER_MODESET) || 685 if (drm_core_check_feature(dev, DRIVER_MODESET) ||
682 !dev_priv->mm.suspended) { 686 !dev_priv->mm.suspended) {
683 dev_priv->mm.suspended = 0; 687 dev_priv->mm.suspended = 0;
684 688
685 dev_priv->ring[RCS].init(&dev_priv->ring[RCS]); 689 dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
686 if (HAS_BSD(dev)) 690 if (HAS_BSD(dev))
687 dev_priv->ring[VCS].init(&dev_priv->ring[VCS]); 691 dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
688 if (HAS_BLT(dev)) 692 if (HAS_BLT(dev))
689 dev_priv->ring[BCS].init(&dev_priv->ring[BCS]); 693 dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
690 694
691 mutex_unlock(&dev->struct_mutex); 695 mutex_unlock(&dev->struct_mutex);
692 drm_irq_uninstall(dev); 696 drm_irq_uninstall(dev);
693 drm_mode_config_reset(dev); 697 drm_mode_config_reset(dev);
694 drm_irq_install(dev); 698 drm_irq_install(dev);
695 mutex_lock(&dev->struct_mutex); 699 mutex_lock(&dev->struct_mutex);
696 } 700 }
697 701
698 mutex_unlock(&dev->struct_mutex); 702 mutex_unlock(&dev->struct_mutex);
699 703
700 /* 704 /*
701 * Perform a full modeset, as on later generations (e.g. Ironlake) we may 705 * Perform a full modeset, as on later generations (e.g. Ironlake) we may
702 * need to retrain the display link and cannot simply restore the register 706 * need to retrain the display link and cannot simply restore the register
703 * values. 707 * values.
704 */ 708 */
705 if (need_display) { 709 if (need_display) {
706 mutex_lock(&dev->mode_config.mutex); 710 mutex_lock(&dev->mode_config.mutex);
707 drm_helper_resume_force_mode(dev); 711 drm_helper_resume_force_mode(dev);
708 mutex_unlock(&dev->mode_config.mutex); 712 mutex_unlock(&dev->mode_config.mutex);
709 } 713 }
710 714
711 return 0; 715 return 0;
712 } 716 }
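For context, a minimal sketch of how a hang-recovery path might call this entry point. The caller name below is hypothetical and GRDOM_RENDER merely stands in for a reset-domain value from i915_reg.h; neither is part of this diff:

static void example_handle_gpu_hang(struct drm_device *dev)
{
	/* Illustrative only: run the reset from process context (e.g. an
	 * error-handling work item) and report a wedged GPU on failure. */
	if (i915_reset(dev, GRDOM_RENDER))
		DRM_ERROR("GPU reset failed, declaring GPU wedged\n");
}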
713 717
714 718
715 static int __devinit 719 static int __devinit
716 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 720 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
717 { 721 {
718 /* Only bind to function 0 of the device. Early generations 722 /* Only bind to function 0 of the device. Early generations
719 * used function 1 as a placeholder for multi-head. This only 723 * used function 1 as a placeholder for multi-head. This only
720 * causes confusion, especially on systems where both 724 * causes confusion, especially on systems where both
721 * functions have the same PCI ID! 725 * functions have the same PCI ID!
722 */ 726 */
723 if (PCI_FUNC(pdev->devfn)) 727 if (PCI_FUNC(pdev->devfn))
724 return -ENODEV; 728 return -ENODEV;
725 729
726 return drm_get_pci_dev(pdev, ent, &driver); 730 return drm_get_pci_dev(pdev, ent, &driver);
727 } 731 }
728 732
729 static void 733 static void
730 i915_pci_remove(struct pci_dev *pdev) 734 i915_pci_remove(struct pci_dev *pdev)
731 { 735 {
732 struct drm_device *dev = pci_get_drvdata(pdev); 736 struct drm_device *dev = pci_get_drvdata(pdev);
733 737
734 drm_put_dev(dev); 738 drm_put_dev(dev);
735 } 739 }
736 740
737 static int i915_pm_suspend(struct device *dev) 741 static int i915_pm_suspend(struct device *dev)
738 { 742 {
739 struct pci_dev *pdev = to_pci_dev(dev); 743 struct pci_dev *pdev = to_pci_dev(dev);
740 struct drm_device *drm_dev = pci_get_drvdata(pdev); 744 struct drm_device *drm_dev = pci_get_drvdata(pdev);
741 int error; 745 int error;
742 746
743 if (!drm_dev || !drm_dev->dev_private) { 747 if (!drm_dev || !drm_dev->dev_private) {
744 dev_err(dev, "DRM not initialized, aborting suspend.\n"); 748 dev_err(dev, "DRM not initialized, aborting suspend.\n");
745 return -ENODEV; 749 return -ENODEV;
746 } 750 }
747 751
748 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 752 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
749 return 0; 753 return 0;
750 754
751 error = i915_drm_freeze(drm_dev); 755 error = i915_drm_freeze(drm_dev);
752 if (error) 756 if (error)
753 return error; 757 return error;
754 758
755 pci_disable_device(pdev); 759 pci_disable_device(pdev);
756 pci_set_power_state(pdev, PCI_D3hot); 760 pci_set_power_state(pdev, PCI_D3hot);
757 761
758 return 0; 762 return 0;
759 } 763 }
760 764
761 static int i915_pm_resume(struct device *dev) 765 static int i915_pm_resume(struct device *dev)
762 { 766 {
763 struct pci_dev *pdev = to_pci_dev(dev); 767 struct pci_dev *pdev = to_pci_dev(dev);
764 struct drm_device *drm_dev = pci_get_drvdata(pdev); 768 struct drm_device *drm_dev = pci_get_drvdata(pdev);
765 769
766 return i915_resume(drm_dev); 770 return i915_resume(drm_dev);
767 } 771 }
768 772
769 static int i915_pm_freeze(struct device *dev) 773 static int i915_pm_freeze(struct device *dev)
770 { 774 {
771 struct pci_dev *pdev = to_pci_dev(dev); 775 struct pci_dev *pdev = to_pci_dev(dev);
772 struct drm_device *drm_dev = pci_get_drvdata(pdev); 776 struct drm_device *drm_dev = pci_get_drvdata(pdev);
773 777
774 if (!drm_dev || !drm_dev->dev_private) { 778 if (!drm_dev || !drm_dev->dev_private) {
775 dev_err(dev, "DRM not initialized, aborting suspend.\n"); 779 dev_err(dev, "DRM not initialized, aborting suspend.\n");
776 return -ENODEV; 780 return -ENODEV;
777 } 781 }
778 782
779 return i915_drm_freeze(drm_dev); 783 return i915_drm_freeze(drm_dev);
780 } 784 }
781 785
782 static int i915_pm_thaw(struct device *dev) 786 static int i915_pm_thaw(struct device *dev)
783 { 787 {
784 struct pci_dev *pdev = to_pci_dev(dev); 788 struct pci_dev *pdev = to_pci_dev(dev);
785 struct drm_device *drm_dev = pci_get_drvdata(pdev); 789 struct drm_device *drm_dev = pci_get_drvdata(pdev);
786 790
787 return i915_drm_thaw(drm_dev); 791 return i915_drm_thaw(drm_dev);
788 } 792 }
789 793
790 static int i915_pm_poweroff(struct device *dev) 794 static int i915_pm_poweroff(struct device *dev)
791 { 795 {
792 struct pci_dev *pdev = to_pci_dev(dev); 796 struct pci_dev *pdev = to_pci_dev(dev);
793 struct drm_device *drm_dev = pci_get_drvdata(pdev); 797 struct drm_device *drm_dev = pci_get_drvdata(pdev);
794 798
795 return i915_drm_freeze(drm_dev); 799 return i915_drm_freeze(drm_dev);
796 } 800 }
797 801
798 static const struct dev_pm_ops i915_pm_ops = { 802 static const struct dev_pm_ops i915_pm_ops = {
799 .suspend = i915_pm_suspend, 803 .suspend = i915_pm_suspend,
800 .resume = i915_pm_resume, 804 .resume = i915_pm_resume,
801 .freeze = i915_pm_freeze, 805 .freeze = i915_pm_freeze,
802 .thaw = i915_pm_thaw, 806 .thaw = i915_pm_thaw,
803 .poweroff = i915_pm_poweroff, 807 .poweroff = i915_pm_poweroff,
804 .restore = i915_pm_resume, 808 .restore = i915_pm_resume,
805 }; 809 };
806 810
807 static struct vm_operations_struct i915_gem_vm_ops = { 811 static struct vm_operations_struct i915_gem_vm_ops = {
808 .fault = i915_gem_fault, 812 .fault = i915_gem_fault,
809 .open = drm_gem_vm_open, 813 .open = drm_gem_vm_open,
810 .close = drm_gem_vm_close, 814 .close = drm_gem_vm_close,
811 }; 815 };
812 816
813 static const struct file_operations i915_driver_fops = { 817 static const struct file_operations i915_driver_fops = {
814 .owner = THIS_MODULE, 818 .owner = THIS_MODULE,
815 .open = drm_open, 819 .open = drm_open,
816 .release = drm_release, 820 .release = drm_release,
817 .unlocked_ioctl = drm_ioctl, 821 .unlocked_ioctl = drm_ioctl,
818 .mmap = drm_gem_mmap, 822 .mmap = drm_gem_mmap,
819 .poll = drm_poll, 823 .poll = drm_poll,
820 .fasync = drm_fasync, 824 .fasync = drm_fasync,
821 .read = drm_read, 825 .read = drm_read,
822 #ifdef CONFIG_COMPAT 826 #ifdef CONFIG_COMPAT
823 .compat_ioctl = i915_compat_ioctl, 827 .compat_ioctl = i915_compat_ioctl,
824 #endif 828 #endif
825 .llseek = noop_llseek, 829 .llseek = noop_llseek,
826 }; 830 };
827 831
828 static struct drm_driver driver = { 832 static struct drm_driver driver = {
829 /* Don't use MTRRs here; the Xserver or userspace app should 833 /* Don't use MTRRs here; the Xserver or userspace app should
830 * deal with them for Intel hardware. 834 * deal with them for Intel hardware.
831 */ 835 */
832 .driver_features = 836 .driver_features =
833 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 837 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
834 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, 838 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
835 .load = i915_driver_load, 839 .load = i915_driver_load,
836 .unload = i915_driver_unload, 840 .unload = i915_driver_unload,
837 .open = i915_driver_open, 841 .open = i915_driver_open,
838 .lastclose = i915_driver_lastclose, 842 .lastclose = i915_driver_lastclose,
839 .preclose = i915_driver_preclose, 843 .preclose = i915_driver_preclose,
840 .postclose = i915_driver_postclose, 844 .postclose = i915_driver_postclose,
841 845
842 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ 846 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
843 .suspend = i915_suspend, 847 .suspend = i915_suspend,
844 .resume = i915_resume, 848 .resume = i915_resume,
845 849
846 .device_is_agp = i915_driver_device_is_agp, 850 .device_is_agp = i915_driver_device_is_agp,
847 .reclaim_buffers = drm_core_reclaim_buffers, 851 .reclaim_buffers = drm_core_reclaim_buffers,
848 .master_create = i915_master_create, 852 .master_create = i915_master_create,
849 .master_destroy = i915_master_destroy, 853 .master_destroy = i915_master_destroy,
850 #if defined(CONFIG_DEBUG_FS) 854 #if defined(CONFIG_DEBUG_FS)
851 .debugfs_init = i915_debugfs_init, 855 .debugfs_init = i915_debugfs_init,
852 .debugfs_cleanup = i915_debugfs_cleanup, 856 .debugfs_cleanup = i915_debugfs_cleanup,
853 #endif 857 #endif
854 .gem_init_object = i915_gem_init_object, 858 .gem_init_object = i915_gem_init_object,
855 .gem_free_object = i915_gem_free_object, 859 .gem_free_object = i915_gem_free_object,
856 .gem_vm_ops = &i915_gem_vm_ops, 860 .gem_vm_ops = &i915_gem_vm_ops,
857 .dumb_create = i915_gem_dumb_create, 861 .dumb_create = i915_gem_dumb_create,
858 .dumb_map_offset = i915_gem_mmap_gtt, 862 .dumb_map_offset = i915_gem_mmap_gtt,
859 .dumb_destroy = i915_gem_dumb_destroy, 863 .dumb_destroy = i915_gem_dumb_destroy,
860 .ioctls = i915_ioctls, 864 .ioctls = i915_ioctls,
861 .fops = &i915_driver_fops, 865 .fops = &i915_driver_fops,
862 .name = DRIVER_NAME, 866 .name = DRIVER_NAME,
863 .desc = DRIVER_DESC, 867 .desc = DRIVER_DESC,
864 .date = DRIVER_DATE, 868 .date = DRIVER_DATE,
865 .major = DRIVER_MAJOR, 869 .major = DRIVER_MAJOR,
866 .minor = DRIVER_MINOR, 870 .minor = DRIVER_MINOR,
867 .patchlevel = DRIVER_PATCHLEVEL, 871 .patchlevel = DRIVER_PATCHLEVEL,
868 }; 872 };
869 873
870 static struct pci_driver i915_pci_driver = { 874 static struct pci_driver i915_pci_driver = {
871 .name = DRIVER_NAME, 875 .name = DRIVER_NAME,
872 .id_table = pciidlist, 876 .id_table = pciidlist,
873 .probe = i915_pci_probe, 877 .probe = i915_pci_probe,
874 .remove = i915_pci_remove, 878 .remove = i915_pci_remove,
875 .driver.pm = &i915_pm_ops, 879 .driver.pm = &i915_pm_ops,
876 }; 880 };
877 881
878 static int __init i915_init(void) 882 static int __init i915_init(void)
879 { 883 {
880 if (!intel_agp_enabled) { 884 if (!intel_agp_enabled) {
881 DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); 885 DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
882 return -ENODEV; 886 return -ENODEV;
883 } 887 }
884 888
885 driver.num_ioctls = i915_max_ioctl; 889 driver.num_ioctls = i915_max_ioctl;
886 890
887 /* 891 /*
888 * If CONFIG_DRM_I915_KMS is set, default to KMS unless 892 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
889 * explicitly disabled with the module parameter. 893 * explicitly disabled with the module parameter.
890 * 894 *
891 * Otherwise, just follow the parameter (defaulting to off). 895 * Otherwise, just follow the parameter (defaulting to off).
892 * 896 *
893 * Allow the optional vga_text_mode_force boot option to override 897 * Allow the optional vga_text_mode_force boot option to override
894 * the default behavior. 898 * the default behavior.
895 */ 899 */
896 #if defined(CONFIG_DRM_I915_KMS) 900 #if defined(CONFIG_DRM_I915_KMS)
897 if (i915_modeset != 0) 901 if (i915_modeset != 0)
898 driver.driver_features |= DRIVER_MODESET; 902 driver.driver_features |= DRIVER_MODESET;
899 #endif 903 #endif
900 if (i915_modeset == 1) 904 if (i915_modeset == 1)
901 driver.driver_features |= DRIVER_MODESET; 905 driver.driver_features |= DRIVER_MODESET;
902 906
903 #ifdef CONFIG_VGA_CONSOLE 907 #ifdef CONFIG_VGA_CONSOLE
904 if (vgacon_text_force() && i915_modeset == -1) 908 if (vgacon_text_force() && i915_modeset == -1)
905 driver.driver_features &= ~DRIVER_MODESET; 909 driver.driver_features &= ~DRIVER_MODESET;
906 #endif 910 #endif
907 911
908 if (!(driver.driver_features & DRIVER_MODESET)) 912 if (!(driver.driver_features & DRIVER_MODESET))
909 driver.get_vblank_timestamp = NULL; 913 driver.get_vblank_timestamp = NULL;
910 914
911 return drm_pci_init(&driver, &i915_pci_driver); 915 return drm_pci_init(&driver, &i915_pci_driver);
912 } 916 }
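Usage note (illustrative, not part of this diff): the net effect of the block above is that the i915.modeset parameter picks the final behavior. For example:

	modprobe i915 modeset=1        (force KMS on, even without CONFIG_DRM_I915_KMS)
	modprobe i915 modeset=0        (force KMS off; fall back to UMS)

With the default modeset=-1, KMS is enabled when CONFIG_DRM_I915_KMS is set, unless vgacon_text_force() vetoes it.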
913 917
914 static void __exit i915_exit(void) 918 static void __exit i915_exit(void)
915 { 919 {
916 drm_pci_exit(&driver, &i915_pci_driver); 920 drm_pci_exit(&driver, &i915_pci_driver);
917 } 921 }
918 922
919 module_init(i915_init); 923 module_init(i915_init);
920 module_exit(i915_exit); 924 module_exit(i915_exit);
921 925
922 MODULE_AUTHOR(DRIVER_AUTHOR); 926 MODULE_AUTHOR(DRIVER_AUTHOR);
923 MODULE_DESCRIPTION(DRIVER_DESC); 927 MODULE_DESCRIPTION(DRIVER_DESC);
924 MODULE_LICENSE("GPL and additional rights"); 928 MODULE_LICENSE("GPL and additional rights");
925 929
926 #define __i915_read(x, y) \ 930 #define __i915_read(x, y) \
927 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ 931 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
928 u##x val = 0; \ 932 u##x val = 0; \
929 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 933 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
930 gen6_gt_force_wake_get(dev_priv); \ 934 gen6_gt_force_wake_get(dev_priv); \
931 val = read##y(dev_priv->regs + reg); \ 935 val = read##y(dev_priv->regs + reg); \
932 gen6_gt_force_wake_put(dev_priv); \ 936 gen6_gt_force_wake_put(dev_priv); \
933 } else { \ 937 } else { \
934 val = read##y(dev_priv->regs + reg); \ 938 val = read##y(dev_priv->regs + reg); \
935 } \ 939 } \
936 trace_i915_reg_rw(false, reg, val, sizeof(val)); \ 940 trace_i915_reg_rw(false, reg, val, sizeof(val)); \
937 return val; \ 941 return val; \
938 } 942 }
939 943
940 __i915_read(8, b) 944 __i915_read(8, b)
941 __i915_read(16, w) 945 __i915_read(16, w)
942 __i915_read(32, l) 946 __i915_read(32, l)
943 __i915_read(64, q) 947 __i915_read(64, q)
944 #undef __i915_read 948 #undef __i915_read
945 949
946 #define __i915_write(x, y) \ 950 #define __i915_write(x, y) \
947 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ 951 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
948 trace_i915_reg_rw(true, reg, val, sizeof(val)); \ 952 trace_i915_reg_rw(true, reg, val, sizeof(val)); \
949 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 953 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
950 __gen6_gt_wait_for_fifo(dev_priv); \ 954 __gen6_gt_wait_for_fifo(dev_priv); \
951 } \ 955 } \
952 write##y(val, dev_priv->regs + reg); \ 956 write##y(val, dev_priv->regs + reg); \
953 } 957 }
954 __i915_write(8, b) 958 __i915_write(8, b)
955 __i915_write(16, w) 959 __i915_write(16, w)
956 __i915_write(32, l) 960 __i915_write(32, l)
957 __i915_write(64, q) 961 __i915_write(64, q)
958 #undef __i915_write 962 #undef __i915_write
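To make the macros above concrete, this is roughly what __i915_read(32, l) expands to; the expansion is written out by hand here for illustration and is not part of the diff:

/* Hand expansion of __i915_read(32, l): reads of registers that need it
 * (per NEEDS_FORCE_WAKE) are bracketed by forcewake get/put, and every
 * access is traced. */
u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;

	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) {
		gen6_gt_force_wake_get(dev_priv);
		val = readl(dev_priv->regs + reg);
		gen6_gt_force_wake_put(dev_priv);
	} else {
		val = readl(dev_priv->regs + reg);
	}
	trace_i915_reg_rw(false, reg, val, sizeof(val));
	return val;
}

The write side mirrors this, except that it only waits for free FIFO slots (__gen6_gt_wait_for_fifo) before posting the write instead of taking forcewake.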
959 963
drivers/gpu/drm/i915/i915_drv.h
1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- 1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
2 */ 2 */
3 /* 3 /*
4 * 4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved. 6 * All Rights Reserved.
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the 9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including 10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish, 11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to 12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to 13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions: 14 * the following conditions:
15 * 15 *
16 * The above copyright notice and this permission notice (including the 16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions 17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software. 18 * of the Software.
19 * 19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 * 27 *
28 */ 28 */
29 29
30 #ifndef _I915_DRV_H_ 30 #ifndef _I915_DRV_H_
31 #define _I915_DRV_H_ 31 #define _I915_DRV_H_
32 32
33 #include "i915_reg.h" 33 #include "i915_reg.h"
34 #include "intel_bios.h" 34 #include "intel_bios.h"
35 #include "intel_ringbuffer.h" 35 #include "intel_ringbuffer.h"
36 #include <linux/io-mapping.h> 36 #include <linux/io-mapping.h>
37 #include <linux/i2c.h> 37 #include <linux/i2c.h>
38 #include <drm/intel-gtt.h> 38 #include <drm/intel-gtt.h>
39 #include <linux/backlight.h> 39 #include <linux/backlight.h>
40 40
41 /* General customization: 41 /* General customization:
42 */ 42 */
43 43
44 #define DRIVER_AUTHOR "Tungsten Graphics, Inc." 44 #define DRIVER_AUTHOR "Tungsten Graphics, Inc."
45 45
46 #define DRIVER_NAME "i915" 46 #define DRIVER_NAME "i915"
47 #define DRIVER_DESC "Intel Graphics" 47 #define DRIVER_DESC "Intel Graphics"
48 #define DRIVER_DATE "20080730" 48 #define DRIVER_DATE "20080730"
49 49
50 enum pipe { 50 enum pipe {
51 PIPE_A = 0, 51 PIPE_A = 0,
52 PIPE_B, 52 PIPE_B,
53 PIPE_C, 53 PIPE_C,
54 I915_MAX_PIPES 54 I915_MAX_PIPES
55 }; 55 };
56 #define pipe_name(p) ((p) + 'A') 56 #define pipe_name(p) ((p) + 'A')
57 57
58 enum plane { 58 enum plane {
59 PLANE_A = 0, 59 PLANE_A = 0,
60 PLANE_B, 60 PLANE_B,
61 PLANE_C, 61 PLANE_C,
62 }; 62 };
63 #define plane_name(p) ((p) + 'A') 63 #define plane_name(p) ((p) + 'A')
64 64
65 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 65 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
66 66
67 #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) 67 #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
68 68
69 /* Interface history: 69 /* Interface history:
70 * 70 *
71 * 1.1: Original. 71 * 1.1: Original.
72 * 1.2: Add Power Management 72 * 1.2: Add Power Management
73 * 1.3: Add vblank support 73 * 1.3: Add vblank support
74 * 1.4: Fix cmdbuffer path, add heap destroy 74 * 1.4: Fix cmdbuffer path, add heap destroy
75 * 1.5: Add vblank pipe configuration 75 * 1.5: Add vblank pipe configuration
76 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank 76 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
77 * - Support vertical blank on secondary display pipe 77 * - Support vertical blank on secondary display pipe
78 */ 78 */
79 #define DRIVER_MAJOR 1 79 #define DRIVER_MAJOR 1
80 #define DRIVER_MINOR 6 80 #define DRIVER_MINOR 6
81 #define DRIVER_PATCHLEVEL 0 81 #define DRIVER_PATCHLEVEL 0
82 82
83 #define WATCH_COHERENCY 0 83 #define WATCH_COHERENCY 0
84 #define WATCH_LISTS 0 84 #define WATCH_LISTS 0
85 85
86 #define I915_GEM_PHYS_CURSOR_0 1 86 #define I915_GEM_PHYS_CURSOR_0 1
87 #define I915_GEM_PHYS_CURSOR_1 2 87 #define I915_GEM_PHYS_CURSOR_1 2
88 #define I915_GEM_PHYS_OVERLAY_REGS 3 88 #define I915_GEM_PHYS_OVERLAY_REGS 3
89 #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) 89 #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
90 90
91 struct drm_i915_gem_phys_object { 91 struct drm_i915_gem_phys_object {
92 int id; 92 int id;
93 struct page **page_list; 93 struct page **page_list;
94 drm_dma_handle_t *handle; 94 drm_dma_handle_t *handle;
95 struct drm_i915_gem_object *cur_obj; 95 struct drm_i915_gem_object *cur_obj;
96 }; 96 };
97 97
98 struct mem_block { 98 struct mem_block {
99 struct mem_block *next; 99 struct mem_block *next;
100 struct mem_block *prev; 100 struct mem_block *prev;
101 int start; 101 int start;
102 int size; 102 int size;
103 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ 103 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
104 }; 104 };
105 105
106 struct opregion_header; 106 struct opregion_header;
107 struct opregion_acpi; 107 struct opregion_acpi;
108 struct opregion_swsci; 108 struct opregion_swsci;
109 struct opregion_asle; 109 struct opregion_asle;
110 struct drm_i915_private; 110 struct drm_i915_private;
111 111
112 struct intel_opregion { 112 struct intel_opregion {
113 struct opregion_header *header; 113 struct opregion_header *header;
114 struct opregion_acpi *acpi; 114 struct opregion_acpi *acpi;
115 struct opregion_swsci *swsci; 115 struct opregion_swsci *swsci;
116 struct opregion_asle *asle; 116 struct opregion_asle *asle;
117 void *vbt; 117 void *vbt;
118 u32 __iomem *lid_state; 118 u32 __iomem *lid_state;
119 }; 119 };
120 #define OPREGION_SIZE (8*1024) 120 #define OPREGION_SIZE (8*1024)
121 121
122 struct intel_overlay; 122 struct intel_overlay;
123 struct intel_overlay_error_state; 123 struct intel_overlay_error_state;
124 124
125 struct drm_i915_master_private { 125 struct drm_i915_master_private {
126 drm_local_map_t *sarea; 126 drm_local_map_t *sarea;
127 struct _drm_i915_sarea *sarea_priv; 127 struct _drm_i915_sarea *sarea_priv;
128 }; 128 };
129 #define I915_FENCE_REG_NONE -1 129 #define I915_FENCE_REG_NONE -1
130 #define I915_MAX_NUM_FENCES 16 130 #define I915_MAX_NUM_FENCES 16
131 /* 16 fences + sign bit for FENCE_REG_NONE */ 131 /* 16 fences + sign bit for FENCE_REG_NONE */
132 #define I915_MAX_NUM_FENCE_BITS 5 132 #define I915_MAX_NUM_FENCE_BITS 5
133 133
134 struct drm_i915_fence_reg { 134 struct drm_i915_fence_reg {
135 struct list_head lru_list; 135 struct list_head lru_list;
136 struct drm_i915_gem_object *obj; 136 struct drm_i915_gem_object *obj;
137 uint32_t setup_seqno; 137 uint32_t setup_seqno;
138 }; 138 };
139 139
140 struct sdvo_device_mapping { 140 struct sdvo_device_mapping {
141 u8 initialized; 141 u8 initialized;
142 u8 dvo_port; 142 u8 dvo_port;
143 u8 slave_addr; 143 u8 slave_addr;
144 u8 dvo_wiring; 144 u8 dvo_wiring;
145 u8 i2c_pin; 145 u8 i2c_pin;
146 u8 ddc_pin; 146 u8 ddc_pin;
147 }; 147 };
148 148
149 struct intel_display_error_state; 149 struct intel_display_error_state;
150 150
151 struct drm_i915_error_state { 151 struct drm_i915_error_state {
152 u32 eir; 152 u32 eir;
153 u32 pgtbl_er; 153 u32 pgtbl_er;
154 u32 pipestat[I915_MAX_PIPES]; 154 u32 pipestat[I915_MAX_PIPES];
155 u32 ipeir; 155 u32 ipeir;
156 u32 ipehr; 156 u32 ipehr;
157 u32 instdone; 157 u32 instdone;
158 u32 acthd; 158 u32 acthd;
159 u32 error; /* gen6+ */ 159 u32 error; /* gen6+ */
160 u32 bcs_acthd; /* gen6+ blt engine */ 160 u32 bcs_acthd; /* gen6+ blt engine */
161 u32 bcs_ipehr; 161 u32 bcs_ipehr;
162 u32 bcs_ipeir; 162 u32 bcs_ipeir;
163 u32 bcs_instdone; 163 u32 bcs_instdone;
164 u32 bcs_seqno; 164 u32 bcs_seqno;
165 u32 vcs_acthd; /* gen6+ bsd engine */ 165 u32 vcs_acthd; /* gen6+ bsd engine */
166 u32 vcs_ipehr; 166 u32 vcs_ipehr;
167 u32 vcs_ipeir; 167 u32 vcs_ipeir;
168 u32 vcs_instdone; 168 u32 vcs_instdone;
169 u32 vcs_seqno; 169 u32 vcs_seqno;
170 u32 instpm; 170 u32 instpm;
171 u32 instps; 171 u32 instps;
172 u32 instdone1; 172 u32 instdone1;
173 u32 seqno; 173 u32 seqno;
174 u64 bbaddr; 174 u64 bbaddr;
175 u64 fence[I915_MAX_NUM_FENCES]; 175 u64 fence[I915_MAX_NUM_FENCES];
176 struct timeval time; 176 struct timeval time;
177 struct drm_i915_error_object { 177 struct drm_i915_error_object {
178 int page_count; 178 int page_count;
179 u32 gtt_offset; 179 u32 gtt_offset;
180 u32 *pages[0]; 180 u32 *pages[0];
181 } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS]; 181 } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS];
182 struct drm_i915_error_buffer { 182 struct drm_i915_error_buffer {
183 u32 size; 183 u32 size;
184 u32 name; 184 u32 name;
185 u32 seqno; 185 u32 seqno;
186 u32 gtt_offset; 186 u32 gtt_offset;
187 u32 read_domains; 187 u32 read_domains;
188 u32 write_domain; 188 u32 write_domain;
189 s32 fence_reg:I915_MAX_NUM_FENCE_BITS; 189 s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
190 s32 pinned:2; 190 s32 pinned:2;
191 u32 tiling:2; 191 u32 tiling:2;
192 u32 dirty:1; 192 u32 dirty:1;
193 u32 purgeable:1; 193 u32 purgeable:1;
194 u32 ring:4; 194 u32 ring:4;
195 u32 cache_level:2; 195 u32 cache_level:2;
196 } *active_bo, *pinned_bo; 196 } *active_bo, *pinned_bo;
197 u32 active_bo_count, pinned_bo_count; 197 u32 active_bo_count, pinned_bo_count;
198 struct intel_overlay_error_state *overlay; 198 struct intel_overlay_error_state *overlay;
199 struct intel_display_error_state *display; 199 struct intel_display_error_state *display;
200 }; 200 };
201 201
202 struct drm_i915_display_funcs { 202 struct drm_i915_display_funcs {
203 void (*dpms)(struct drm_crtc *crtc, int mode); 203 void (*dpms)(struct drm_crtc *crtc, int mode);
204 bool (*fbc_enabled)(struct drm_device *dev); 204 bool (*fbc_enabled)(struct drm_device *dev);
205 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); 205 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
206 void (*disable_fbc)(struct drm_device *dev); 206 void (*disable_fbc)(struct drm_device *dev);
207 int (*get_display_clock_speed)(struct drm_device *dev); 207 int (*get_display_clock_speed)(struct drm_device *dev);
208 int (*get_fifo_size)(struct drm_device *dev, int plane); 208 int (*get_fifo_size)(struct drm_device *dev, int plane);
209 void (*update_wm)(struct drm_device *dev); 209 void (*update_wm)(struct drm_device *dev);
210 void (*update_sprite_wm)(struct drm_device *dev, int pipe, 210 void (*update_sprite_wm)(struct drm_device *dev, int pipe,
211 uint32_t sprite_width, int pixel_size); 211 uint32_t sprite_width, int pixel_size);
212 int (*crtc_mode_set)(struct drm_crtc *crtc, 212 int (*crtc_mode_set)(struct drm_crtc *crtc,
213 struct drm_display_mode *mode, 213 struct drm_display_mode *mode,
214 struct drm_display_mode *adjusted_mode, 214 struct drm_display_mode *adjusted_mode,
215 int x, int y, 215 int x, int y,
216 struct drm_framebuffer *old_fb); 216 struct drm_framebuffer *old_fb);
217 void (*write_eld)(struct drm_connector *connector, 217 void (*write_eld)(struct drm_connector *connector,
218 struct drm_crtc *crtc); 218 struct drm_crtc *crtc);
219 void (*fdi_link_train)(struct drm_crtc *crtc); 219 void (*fdi_link_train)(struct drm_crtc *crtc);
220 void (*init_clock_gating)(struct drm_device *dev); 220 void (*init_clock_gating)(struct drm_device *dev);
221 void (*init_pch_clock_gating)(struct drm_device *dev); 221 void (*init_pch_clock_gating)(struct drm_device *dev);
222 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 222 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
223 struct drm_framebuffer *fb, 223 struct drm_framebuffer *fb,
224 struct drm_i915_gem_object *obj); 224 struct drm_i915_gem_object *obj);
225 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, 225 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
226 int x, int y); 226 int x, int y);
227 void (*force_wake_get)(struct drm_i915_private *dev_priv); 227 void (*force_wake_get)(struct drm_i915_private *dev_priv);
228 void (*force_wake_put)(struct drm_i915_private *dev_priv); 228 void (*force_wake_put)(struct drm_i915_private *dev_priv);
229 /* clock updates for mode set */ 229 /* clock updates for mode set */
230 /* cursor updates */ 230 /* cursor updates */
231 /* render clock increase/decrease */ 231 /* render clock increase/decrease */
232 /* display clock increase/decrease */ 232 /* display clock increase/decrease */
233 /* pll clock increase/decrease */ 233 /* pll clock increase/decrease */
234 }; 234 };
235 235
236 struct intel_device_info { 236 struct intel_device_info {
237 u8 gen; 237 u8 gen;
238 u8 is_mobile:1; 238 u8 is_mobile:1;
239 u8 is_i85x:1; 239 u8 is_i85x:1;
240 u8 is_i915g:1; 240 u8 is_i915g:1;
241 u8 is_i945gm:1; 241 u8 is_i945gm:1;
242 u8 is_g33:1; 242 u8 is_g33:1;
243 u8 need_gfx_hws:1; 243 u8 need_gfx_hws:1;
244 u8 is_g4x:1; 244 u8 is_g4x:1;
245 u8 is_pineview:1; 245 u8 is_pineview:1;
246 u8 is_broadwater:1; 246 u8 is_broadwater:1;
247 u8 is_crestline:1; 247 u8 is_crestline:1;
248 u8 is_ivybridge:1; 248 u8 is_ivybridge:1;
249 u8 has_fbc:1; 249 u8 has_fbc:1;
250 u8 has_pipe_cxsr:1; 250 u8 has_pipe_cxsr:1;
251 u8 has_hotplug:1; 251 u8 has_hotplug:1;
252 u8 cursor_needs_physical:1; 252 u8 cursor_needs_physical:1;
253 u8 has_overlay:1; 253 u8 has_overlay:1;
254 u8 overlay_needs_physical:1; 254 u8 overlay_needs_physical:1;
255 u8 supports_tv:1; 255 u8 supports_tv:1;
256 u8 has_bsd_ring:1; 256 u8 has_bsd_ring:1;
257 u8 has_blt_ring:1; 257 u8 has_blt_ring:1;
258 u8 has_llc:1;
258 }; 259 };
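The new has_llc bit is meant to be consumed like the existing capability bits. A minimal sketch of that usage, assuming a HAS_LLC() helper in the style of the HAS_BSD()/HAS_BLT() checks used earlier, plus a Sandy Bridge-style device description (names and descriptor layout are illustrative here):

/* Sketch only: convenience macro plus a device description setting the bit. */
#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
};

With this in place, callers can test HAS_LLC(dev) instead of open-coding generation checks such as IS_GEN6(dev) || IS_GEN7(dev).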
259 260
260 enum no_fbc_reason { 261 enum no_fbc_reason {
261 FBC_NO_OUTPUT, /* no outputs enabled to compress */ 262 FBC_NO_OUTPUT, /* no outputs enabled to compress */
262 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ 263 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
263 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ 264 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
264 FBC_MODE_TOO_LARGE, /* mode too large for compression */ 265 FBC_MODE_TOO_LARGE, /* mode too large for compression */
265 FBC_BAD_PLANE, /* fbc not supported on plane */ 266 FBC_BAD_PLANE, /* fbc not supported on plane */
266 FBC_NOT_TILED, /* buffer not tiled */ 267 FBC_NOT_TILED, /* buffer not tiled */
267 FBC_MULTIPLE_PIPES, /* more than one pipe active */ 268 FBC_MULTIPLE_PIPES, /* more than one pipe active */
268 FBC_MODULE_PARAM, 269 FBC_MODULE_PARAM,
269 }; 270 };
270 271
271 enum intel_pch { 272 enum intel_pch {
272 PCH_IBX, /* Ibexpeak PCH */ 273 PCH_IBX, /* Ibexpeak PCH */
273 PCH_CPT, /* Cougarpoint PCH */ 274 PCH_CPT, /* Cougarpoint PCH */
274 }; 275 };
275 276
276 #define QUIRK_PIPEA_FORCE (1<<0) 277 #define QUIRK_PIPEA_FORCE (1<<0)
277 #define QUIRK_LVDS_SSC_DISABLE (1<<1) 278 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
278 279
279 struct intel_fbdev; 280 struct intel_fbdev;
280 struct intel_fbc_work; 281 struct intel_fbc_work;
281 282
282 typedef struct drm_i915_private { 283 typedef struct drm_i915_private {
283 struct drm_device *dev; 284 struct drm_device *dev;
284 285
285 const struct intel_device_info *info; 286 const struct intel_device_info *info;
286 287
287 int has_gem; 288 int has_gem;
288 int relative_constants_mode; 289 int relative_constants_mode;
289 290
290 void __iomem *regs; 291 void __iomem *regs;
291 u32 gt_fifo_count; 292 u32 gt_fifo_count;
292 293
293 struct intel_gmbus { 294 struct intel_gmbus {
294 struct i2c_adapter adapter; 295 struct i2c_adapter adapter;
295 struct i2c_adapter *force_bit; 296 struct i2c_adapter *force_bit;
296 u32 reg0; 297 u32 reg0;
297 } *gmbus; 298 } *gmbus;
298 299
299 struct pci_dev *bridge_dev; 300 struct pci_dev *bridge_dev;
300 struct intel_ring_buffer ring[I915_NUM_RINGS]; 301 struct intel_ring_buffer ring[I915_NUM_RINGS];
301 uint32_t next_seqno; 302 uint32_t next_seqno;
302 303
303 drm_dma_handle_t *status_page_dmah; 304 drm_dma_handle_t *status_page_dmah;
304 uint32_t counter; 305 uint32_t counter;
305 drm_local_map_t hws_map; 306 drm_local_map_t hws_map;
306 struct drm_i915_gem_object *pwrctx; 307 struct drm_i915_gem_object *pwrctx;
307 struct drm_i915_gem_object *renderctx; 308 struct drm_i915_gem_object *renderctx;
308 309
309 struct resource mch_res; 310 struct resource mch_res;
310 311
311 unsigned int cpp; 312 unsigned int cpp;
312 int back_offset; 313 int back_offset;
313 int front_offset; 314 int front_offset;
314 int current_page; 315 int current_page;
315 int page_flipping; 316 int page_flipping;
316 317
317 atomic_t irq_received; 318 atomic_t irq_received;
318 319
319 /* protects the irq masks */ 320 /* protects the irq masks */
320 spinlock_t irq_lock; 321 spinlock_t irq_lock;
321 /** Cached value of IMR to avoid reads in updating the bitfield */ 322 /** Cached value of IMR to avoid reads in updating the bitfield */
322 u32 pipestat[2]; 323 u32 pipestat[2];
323 u32 irq_mask; 324 u32 irq_mask;
324 u32 gt_irq_mask; 325 u32 gt_irq_mask;
325 u32 pch_irq_mask; 326 u32 pch_irq_mask;
326 327
327 u32 hotplug_supported_mask; 328 u32 hotplug_supported_mask;
328 struct work_struct hotplug_work; 329 struct work_struct hotplug_work;
329 330
330 int tex_lru_log_granularity; 331 int tex_lru_log_granularity;
331 int allow_batchbuffer; 332 int allow_batchbuffer;
332 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; 333 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
333 int vblank_pipe; 334 int vblank_pipe;
334 int num_pipe; 335 int num_pipe;
335 336
336 /* For hangcheck timer */ 337 /* For hangcheck timer */
337 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 338 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
338 struct timer_list hangcheck_timer; 339 struct timer_list hangcheck_timer;
339 int hangcheck_count; 340 int hangcheck_count;
340 uint32_t last_acthd; 341 uint32_t last_acthd;
341 uint32_t last_acthd_bsd; 342 uint32_t last_acthd_bsd;
342 uint32_t last_acthd_blt; 343 uint32_t last_acthd_blt;
343 uint32_t last_instdone; 344 uint32_t last_instdone;
344 uint32_t last_instdone1; 345 uint32_t last_instdone1;
345 346
346 unsigned long cfb_size; 347 unsigned long cfb_size;
347 unsigned int cfb_fb; 348 unsigned int cfb_fb;
348 enum plane cfb_plane; 349 enum plane cfb_plane;
349 int cfb_y; 350 int cfb_y;
350 struct intel_fbc_work *fbc_work; 351 struct intel_fbc_work *fbc_work;
351 352
352 struct intel_opregion opregion; 353 struct intel_opregion opregion;
353 354
354 /* overlay */ 355 /* overlay */
355 struct intel_overlay *overlay; 356 struct intel_overlay *overlay;
356 bool sprite_scaling_enabled; 357 bool sprite_scaling_enabled;
357 358
358 /* LVDS info */ 359 /* LVDS info */
359 int backlight_level; /* restore backlight to this value */ 360 int backlight_level; /* restore backlight to this value */
360 bool backlight_enabled; 361 bool backlight_enabled;
361 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 362 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
362 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 363 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
363 364
364 /* Feature bits from the VBIOS */ 365 /* Feature bits from the VBIOS */
365 unsigned int int_tv_support:1; 366 unsigned int int_tv_support:1;
366 unsigned int lvds_dither:1; 367 unsigned int lvds_dither:1;
367 unsigned int lvds_vbt:1; 368 unsigned int lvds_vbt:1;
368 unsigned int int_crt_support:1; 369 unsigned int int_crt_support:1;
369 unsigned int lvds_use_ssc:1; 370 unsigned int lvds_use_ssc:1;
370 unsigned int display_clock_mode:1; 371 unsigned int display_clock_mode:1;
371 int lvds_ssc_freq; 372 int lvds_ssc_freq;
372 struct { 373 struct {
373 int rate; 374 int rate;
374 int lanes; 375 int lanes;
375 int preemphasis; 376 int preemphasis;
376 int vswing; 377 int vswing;
377 378
378 bool initialized; 379 bool initialized;
379 bool support; 380 bool support;
380 int bpp; 381 int bpp;
381 struct edp_power_seq pps; 382 struct edp_power_seq pps;
382 } edp; 383 } edp;
383 bool no_aux_handshake; 384 bool no_aux_handshake;
384 385
385 struct notifier_block lid_notifier; 386 struct notifier_block lid_notifier;
386 387
387 int crt_ddc_pin; 388 int crt_ddc_pin;
388 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 389 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
389 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 390 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
390 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 391 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
391 392
392 unsigned int fsb_freq, mem_freq, is_ddr3; 393 unsigned int fsb_freq, mem_freq, is_ddr3;
393 394
394 spinlock_t error_lock; 395 spinlock_t error_lock;
395 struct drm_i915_error_state *first_error; 396 struct drm_i915_error_state *first_error;
396 struct work_struct error_work; 397 struct work_struct error_work;
397 struct completion error_completion; 398 struct completion error_completion;
398 struct workqueue_struct *wq; 399 struct workqueue_struct *wq;
399 400
400 /* Display functions */ 401 /* Display functions */
401 struct drm_i915_display_funcs display; 402 struct drm_i915_display_funcs display;
402 403
403 /* PCH chipset type */ 404 /* PCH chipset type */
404 enum intel_pch pch_type; 405 enum intel_pch pch_type;
405 406
406 unsigned long quirks; 407 unsigned long quirks;
407 408
408 /* Register state */ 409 /* Register state */
409 bool modeset_on_lid; 410 bool modeset_on_lid;
410 u8 saveLBB; 411 u8 saveLBB;
411 u32 saveDSPACNTR; 412 u32 saveDSPACNTR;
412 u32 saveDSPBCNTR; 413 u32 saveDSPBCNTR;
413 u32 saveDSPARB; 414 u32 saveDSPARB;
414 u32 saveHWS; 415 u32 saveHWS;
415 u32 savePIPEACONF; 416 u32 savePIPEACONF;
416 u32 savePIPEBCONF; 417 u32 savePIPEBCONF;
417 u32 savePIPEASRC; 418 u32 savePIPEASRC;
418 u32 savePIPEBSRC; 419 u32 savePIPEBSRC;
419 u32 saveFPA0; 420 u32 saveFPA0;
420 u32 saveFPA1; 421 u32 saveFPA1;
421 u32 saveDPLL_A; 422 u32 saveDPLL_A;
422 u32 saveDPLL_A_MD; 423 u32 saveDPLL_A_MD;
423 u32 saveHTOTAL_A; 424 u32 saveHTOTAL_A;
424 u32 saveHBLANK_A; 425 u32 saveHBLANK_A;
425 u32 saveHSYNC_A; 426 u32 saveHSYNC_A;
426 u32 saveVTOTAL_A; 427 u32 saveVTOTAL_A;
427 u32 saveVBLANK_A; 428 u32 saveVBLANK_A;
428 u32 saveVSYNC_A; 429 u32 saveVSYNC_A;
429 u32 saveBCLRPAT_A; 430 u32 saveBCLRPAT_A;
430 u32 saveTRANSACONF; 431 u32 saveTRANSACONF;
431 u32 saveTRANS_HTOTAL_A; 432 u32 saveTRANS_HTOTAL_A;
432 u32 saveTRANS_HBLANK_A; 433 u32 saveTRANS_HBLANK_A;
433 u32 saveTRANS_HSYNC_A; 434 u32 saveTRANS_HSYNC_A;
434 u32 saveTRANS_VTOTAL_A; 435 u32 saveTRANS_VTOTAL_A;
435 u32 saveTRANS_VBLANK_A; 436 u32 saveTRANS_VBLANK_A;
436 u32 saveTRANS_VSYNC_A; 437 u32 saveTRANS_VSYNC_A;
437 u32 savePIPEASTAT; 438 u32 savePIPEASTAT;
438 u32 saveDSPASTRIDE; 439 u32 saveDSPASTRIDE;
439 u32 saveDSPASIZE; 440 u32 saveDSPASIZE;
440 u32 saveDSPAPOS; 441 u32 saveDSPAPOS;
441 u32 saveDSPAADDR; 442 u32 saveDSPAADDR;
442 u32 saveDSPASURF; 443 u32 saveDSPASURF;
443 u32 saveDSPATILEOFF; 444 u32 saveDSPATILEOFF;
444 u32 savePFIT_PGM_RATIOS; 445 u32 savePFIT_PGM_RATIOS;
445 u32 saveBLC_HIST_CTL; 446 u32 saveBLC_HIST_CTL;
446 u32 saveBLC_PWM_CTL; 447 u32 saveBLC_PWM_CTL;
447 u32 saveBLC_PWM_CTL2; 448 u32 saveBLC_PWM_CTL2;
448 u32 saveBLC_CPU_PWM_CTL; 449 u32 saveBLC_CPU_PWM_CTL;
449 u32 saveBLC_CPU_PWM_CTL2; 450 u32 saveBLC_CPU_PWM_CTL2;
450 u32 saveFPB0; 451 u32 saveFPB0;
451 u32 saveFPB1; 452 u32 saveFPB1;
452 u32 saveDPLL_B; 453 u32 saveDPLL_B;
453 u32 saveDPLL_B_MD; 454 u32 saveDPLL_B_MD;
454 u32 saveHTOTAL_B; 455 u32 saveHTOTAL_B;
455 u32 saveHBLANK_B; 456 u32 saveHBLANK_B;
456 u32 saveHSYNC_B; 457 u32 saveHSYNC_B;
457 u32 saveVTOTAL_B; 458 u32 saveVTOTAL_B;
458 u32 saveVBLANK_B; 459 u32 saveVBLANK_B;
459 u32 saveVSYNC_B; 460 u32 saveVSYNC_B;
460 u32 saveBCLRPAT_B; 461 u32 saveBCLRPAT_B;
461 u32 saveTRANSBCONF; 462 u32 saveTRANSBCONF;
462 u32 saveTRANS_HTOTAL_B; 463 u32 saveTRANS_HTOTAL_B;
463 u32 saveTRANS_HBLANK_B; 464 u32 saveTRANS_HBLANK_B;
464 u32 saveTRANS_HSYNC_B; 465 u32 saveTRANS_HSYNC_B;
465 u32 saveTRANS_VTOTAL_B; 466 u32 saveTRANS_VTOTAL_B;
466 u32 saveTRANS_VBLANK_B; 467 u32 saveTRANS_VBLANK_B;
467 u32 saveTRANS_VSYNC_B; 468 u32 saveTRANS_VSYNC_B;
468 u32 savePIPEBSTAT; 469 u32 savePIPEBSTAT;
469 u32 saveDSPBSTRIDE; 470 u32 saveDSPBSTRIDE;
470 u32 saveDSPBSIZE; 471 u32 saveDSPBSIZE;
471 u32 saveDSPBPOS; 472 u32 saveDSPBPOS;
472 u32 saveDSPBADDR; 473 u32 saveDSPBADDR;
473 u32 saveDSPBSURF; 474 u32 saveDSPBSURF;
474 u32 saveDSPBTILEOFF; 475 u32 saveDSPBTILEOFF;
475 u32 saveVGA0; 476 u32 saveVGA0;
476 u32 saveVGA1; 477 u32 saveVGA1;
477 u32 saveVGA_PD; 478 u32 saveVGA_PD;
478 u32 saveVGACNTRL; 479 u32 saveVGACNTRL;
479 u32 saveADPA; 480 u32 saveADPA;
480 u32 saveLVDS; 481 u32 saveLVDS;
481 u32 savePP_ON_DELAYS; 482 u32 savePP_ON_DELAYS;
482 u32 savePP_OFF_DELAYS; 483 u32 savePP_OFF_DELAYS;
483 u32 saveDVOA; 484 u32 saveDVOA;
484 u32 saveDVOB; 485 u32 saveDVOB;
485 u32 saveDVOC; 486 u32 saveDVOC;
486 u32 savePP_ON; 487 u32 savePP_ON;
487 u32 savePP_OFF; 488 u32 savePP_OFF;
488 u32 savePP_CONTROL; 489 u32 savePP_CONTROL;
489 u32 savePP_DIVISOR; 490 u32 savePP_DIVISOR;
490 u32 savePFIT_CONTROL; 491 u32 savePFIT_CONTROL;
491 u32 save_palette_a[256]; 492 u32 save_palette_a[256];
492 u32 save_palette_b[256]; 493 u32 save_palette_b[256];
493 u32 saveDPFC_CB_BASE; 494 u32 saveDPFC_CB_BASE;
494 u32 saveFBC_CFB_BASE; 495 u32 saveFBC_CFB_BASE;
495 u32 saveFBC_LL_BASE; 496 u32 saveFBC_LL_BASE;
496 u32 saveFBC_CONTROL; 497 u32 saveFBC_CONTROL;
497 u32 saveFBC_CONTROL2; 498 u32 saveFBC_CONTROL2;
498 u32 saveIER; 499 u32 saveIER;
499 u32 saveIIR; 500 u32 saveIIR;
500 u32 saveIMR; 501 u32 saveIMR;
501 u32 saveDEIER; 502 u32 saveDEIER;
502 u32 saveDEIMR; 503 u32 saveDEIMR;
503 u32 saveGTIER; 504 u32 saveGTIER;
504 u32 saveGTIMR; 505 u32 saveGTIMR;
505 u32 saveFDI_RXA_IMR; 506 u32 saveFDI_RXA_IMR;
506 u32 saveFDI_RXB_IMR; 507 u32 saveFDI_RXB_IMR;
507 u32 saveCACHE_MODE_0; 508 u32 saveCACHE_MODE_0;
508 u32 saveMI_ARB_STATE; 509 u32 saveMI_ARB_STATE;
509 u32 saveSWF0[16]; 510 u32 saveSWF0[16];
510 u32 saveSWF1[16]; 511 u32 saveSWF1[16];
511 u32 saveSWF2[3]; 512 u32 saveSWF2[3];
512 u8 saveMSR; 513 u8 saveMSR;
513 u8 saveSR[8]; 514 u8 saveSR[8];
514 u8 saveGR[25]; 515 u8 saveGR[25];
515 u8 saveAR_INDEX; 516 u8 saveAR_INDEX;
516 u8 saveAR[21]; 517 u8 saveAR[21];
517 u8 saveDACMASK; 518 u8 saveDACMASK;
518 u8 saveCR[37]; 519 u8 saveCR[37];
519 uint64_t saveFENCE[I915_MAX_NUM_FENCES]; 520 uint64_t saveFENCE[I915_MAX_NUM_FENCES];
520 u32 saveCURACNTR; 521 u32 saveCURACNTR;
521 u32 saveCURAPOS; 522 u32 saveCURAPOS;
522 u32 saveCURABASE; 523 u32 saveCURABASE;
523 u32 saveCURBCNTR; 524 u32 saveCURBCNTR;
524 u32 saveCURBPOS; 525 u32 saveCURBPOS;
525 u32 saveCURBBASE; 526 u32 saveCURBBASE;
526 u32 saveCURSIZE; 527 u32 saveCURSIZE;
527 u32 saveDP_B; 528 u32 saveDP_B;
528 u32 saveDP_C; 529 u32 saveDP_C;
529 u32 saveDP_D; 530 u32 saveDP_D;
530 u32 savePIPEA_GMCH_DATA_M; 531 u32 savePIPEA_GMCH_DATA_M;
531 u32 savePIPEB_GMCH_DATA_M; 532 u32 savePIPEB_GMCH_DATA_M;
532 u32 savePIPEA_GMCH_DATA_N; 533 u32 savePIPEA_GMCH_DATA_N;
533 u32 savePIPEB_GMCH_DATA_N; 534 u32 savePIPEB_GMCH_DATA_N;
534 u32 savePIPEA_DP_LINK_M; 535 u32 savePIPEA_DP_LINK_M;
535 u32 savePIPEB_DP_LINK_M; 536 u32 savePIPEB_DP_LINK_M;
536 u32 savePIPEA_DP_LINK_N; 537 u32 savePIPEA_DP_LINK_N;
537 u32 savePIPEB_DP_LINK_N; 538 u32 savePIPEB_DP_LINK_N;
538 u32 saveFDI_RXA_CTL; 539 u32 saveFDI_RXA_CTL;
539 u32 saveFDI_TXA_CTL; 540 u32 saveFDI_TXA_CTL;
540 u32 saveFDI_RXB_CTL; 541 u32 saveFDI_RXB_CTL;
541 u32 saveFDI_TXB_CTL; 542 u32 saveFDI_TXB_CTL;
542 u32 savePFA_CTL_1; 543 u32 savePFA_CTL_1;
543 u32 savePFB_CTL_1; 544 u32 savePFB_CTL_1;
544 u32 savePFA_WIN_SZ; 545 u32 savePFA_WIN_SZ;
545 u32 savePFB_WIN_SZ; 546 u32 savePFB_WIN_SZ;
546 u32 savePFA_WIN_POS; 547 u32 savePFA_WIN_POS;
547 u32 savePFB_WIN_POS; 548 u32 savePFB_WIN_POS;
548 u32 savePCH_DREF_CONTROL; 549 u32 savePCH_DREF_CONTROL;
549 u32 saveDISP_ARB_CTL; 550 u32 saveDISP_ARB_CTL;
550 u32 savePIPEA_DATA_M1; 551 u32 savePIPEA_DATA_M1;
551 u32 savePIPEA_DATA_N1; 552 u32 savePIPEA_DATA_N1;
552 u32 savePIPEA_LINK_M1; 553 u32 savePIPEA_LINK_M1;
553 u32 savePIPEA_LINK_N1; 554 u32 savePIPEA_LINK_N1;
554 u32 savePIPEB_DATA_M1; 555 u32 savePIPEB_DATA_M1;
555 u32 savePIPEB_DATA_N1; 556 u32 savePIPEB_DATA_N1;
556 u32 savePIPEB_LINK_M1; 557 u32 savePIPEB_LINK_M1;
557 u32 savePIPEB_LINK_N1; 558 u32 savePIPEB_LINK_N1;
558 u32 saveMCHBAR_RENDER_STANDBY; 559 u32 saveMCHBAR_RENDER_STANDBY;
559 u32 savePCH_PORT_HOTPLUG; 560 u32 savePCH_PORT_HOTPLUG;
560 561
561 struct { 562 struct {
562 /** Bridge to intel-gtt-ko */ 563 /** Bridge to intel-gtt-ko */
563 const struct intel_gtt *gtt; 564 const struct intel_gtt *gtt;
564 /** Memory allocator for GTT stolen memory */ 565 /** Memory allocator for GTT stolen memory */
565 struct drm_mm stolen; 566 struct drm_mm stolen;
566 /** Memory allocator for GTT */ 567 /** Memory allocator for GTT */
567 struct drm_mm gtt_space; 568 struct drm_mm gtt_space;
568 /** List of all objects in gtt_space. Used to restore gtt 569 /** List of all objects in gtt_space. Used to restore gtt
569 * mappings on resume */ 570 * mappings on resume */
570 struct list_head gtt_list; 571 struct list_head gtt_list;
571 572
572 /** Usable portion of the GTT for GEM */ 573 /** Usable portion of the GTT for GEM */
573 unsigned long gtt_start; 574 unsigned long gtt_start;
574 unsigned long gtt_mappable_end; 575 unsigned long gtt_mappable_end;
575 unsigned long gtt_end; 576 unsigned long gtt_end;
576 577
577 struct io_mapping *gtt_mapping; 578 struct io_mapping *gtt_mapping;
578 int gtt_mtrr; 579 int gtt_mtrr;
579 580
580 struct shrinker inactive_shrinker; 581 struct shrinker inactive_shrinker;
581 582
582 /** 583 /**
583 * List of objects currently involved in rendering. 584 * List of objects currently involved in rendering.
584 * 585 *
585 * Includes buffers having the contents of their GPU caches 586 * Includes buffers having the contents of their GPU caches
586 * flushed, not necessarily primitives. last_rendering_seqno 587 * flushed, not necessarily primitives. last_rendering_seqno
587 * represents when the rendering involved will be completed. 588 * represents when the rendering involved will be completed.
588 * 589 *
589 * A reference is held on the buffer while on this list. 590 * A reference is held on the buffer while on this list.
590 */ 591 */
591 struct list_head active_list; 592 struct list_head active_list;
592 593
593 /** 594 /**
594 * List of objects which are not in the ringbuffer but which 595 * List of objects which are not in the ringbuffer but which
595 * still have a write_domain which needs to be flushed before 596 * still have a write_domain which needs to be flushed before
596 * unbinding. 597 * unbinding.
597 * 598 *
598 * last_rendering_seqno is 0 while an object is in this list. 599 * last_rendering_seqno is 0 while an object is in this list.
599 * 600 *
600 * A reference is held on the buffer while on this list. 601 * A reference is held on the buffer while on this list.
601 */ 602 */
602 struct list_head flushing_list; 603 struct list_head flushing_list;
603 604
604 /** 605 /**
605 * LRU list of objects which are not in the ringbuffer and 606 * LRU list of objects which are not in the ringbuffer and
606 * are ready to unbind, but are still in the GTT. 607 * are ready to unbind, but are still in the GTT.
607 * 608 *
608 * last_rendering_seqno is 0 while an object is in this list. 609 * last_rendering_seqno is 0 while an object is in this list.
609 * 610 *
610 * A reference is not held on the buffer while on this list, 611 * A reference is not held on the buffer while on this list,
611 * as merely being GTT-bound shouldn't prevent its being 612 * as merely being GTT-bound shouldn't prevent its being
612 * freed, and we'll pull it off the list in the free path. 613 * freed, and we'll pull it off the list in the free path.
613 */ 614 */
614 struct list_head inactive_list; 615 struct list_head inactive_list;
615 616
616 /** 617 /**
617 * LRU list of objects which are not in the ringbuffer but 618 * LRU list of objects which are not in the ringbuffer but
618 * are still pinned in the GTT. 619 * are still pinned in the GTT.
619 */ 620 */
620 struct list_head pinned_list; 621 struct list_head pinned_list;
621 622
622 /** LRU list of objects with fence regs on them. */ 623 /** LRU list of objects with fence regs on them. */
623 struct list_head fence_list; 624 struct list_head fence_list;
624 625
625 /** 626 /**
626 * List of objects currently pending being freed. 627 * List of objects currently pending being freed.
627 * 628 *
628 * These objects are no longer in use, but due to a signal 629 * These objects are no longer in use, but due to a signal
629 * we were prevented from freeing them at the appointed time. 630 * we were prevented from freeing them at the appointed time.
630 */ 631 */
631 struct list_head deferred_free_list; 632 struct list_head deferred_free_list;
632 633
633 /** 634 /**
634 * We leave the user IRQ off as much as possible, 635 * We leave the user IRQ off as much as possible,
635 * but this means that requests will finish and never 636 * but this means that requests will finish and never
636 * be retired once the system goes idle. Set a timer to 637 * be retired once the system goes idle. Set a timer to
637 * fire periodically while the ring is running. When it 638 * fire periodically while the ring is running. When it
638 * fires, go retire requests. 639 * fires, go retire requests.
639 */ 640 */
640 struct delayed_work retire_work; 641 struct delayed_work retire_work;
641 642
642 /** 643 /**
643 * Are we in a non-interruptible section of code like 644 * Are we in a non-interruptible section of code like
644 * modesetting? 645 * modesetting?
645 */ 646 */
646 bool interruptible; 647 bool interruptible;
647 648
648 /** 649 /**
649 * Flag if the X Server, and thus DRM, is not currently in 650 * Flag if the X Server, and thus DRM, is not currently in
650 * control of the device. 651 * control of the device.
651 * 652 *
652 * This is set between LeaveVT and EnterVT. It needs to be 653 * This is set between LeaveVT and EnterVT. It needs to be
653 * replaced with a semaphore. It also needs to be 654 * replaced with a semaphore. It also needs to be
654 * transitioned away from for kernel modesetting. 655 * transitioned away from for kernel modesetting.
655 */ 656 */
656 int suspended; 657 int suspended;
657 658
658 /** 659 /**
659 * Flag if the hardware appears to be wedged. 660 * Flag if the hardware appears to be wedged.
660 * 661 *
661 * This is set when attempts to idle the device timeout. 662 * This is set when attempts to idle the device timeout.
662 * It prevents command submission from occurring and makes 663 * It prevents command submission from occurring and makes
663 * every pending request fail. 664 * every pending request fail.
664 */ 665 */
665 atomic_t wedged; 666 atomic_t wedged;
666 667
667 /** Bit 6 swizzling required for X tiling */ 668 /** Bit 6 swizzling required for X tiling */
668 uint32_t bit_6_swizzle_x; 669 uint32_t bit_6_swizzle_x;
669 /** Bit 6 swizzling required for Y tiling */ 670 /** Bit 6 swizzling required for Y tiling */
670 uint32_t bit_6_swizzle_y; 671 uint32_t bit_6_swizzle_y;
671 672
672 /* storage for physical objects */ 673 /* storage for physical objects */
673 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; 674 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
674 675
675 /* accounting, useful for userland debugging */ 676 /* accounting, useful for userland debugging */
676 size_t gtt_total; 677 size_t gtt_total;
677 size_t mappable_gtt_total; 678 size_t mappable_gtt_total;
678 size_t object_memory; 679 size_t object_memory;
679 u32 object_count; 680 u32 object_count;
680 } mm; 681 } mm;
681 struct sdvo_device_mapping sdvo_mappings[2]; 682 struct sdvo_device_mapping sdvo_mappings[2];
682 /* indicate whether the LVDS_BORDER should be enabled or not */ 683 /* indicate whether the LVDS_BORDER should be enabled or not */
683 unsigned int lvds_border_bits; 684 unsigned int lvds_border_bits;
684 /* Panel fitter placement and size for Ironlake+ */ 685 /* Panel fitter placement and size for Ironlake+ */
685 u32 pch_pf_pos, pch_pf_size; 686 u32 pch_pf_pos, pch_pf_size;
686 687
687 struct drm_crtc *plane_to_crtc_mapping[3]; 688 struct drm_crtc *plane_to_crtc_mapping[3];
688 struct drm_crtc *pipe_to_crtc_mapping[3]; 689 struct drm_crtc *pipe_to_crtc_mapping[3];
689 wait_queue_head_t pending_flip_queue; 690 wait_queue_head_t pending_flip_queue;
690 bool flip_pending_is_done; 691 bool flip_pending_is_done;
691 692
692 /* Reclocking support */ 693 /* Reclocking support */
693 bool render_reclock_avail; 694 bool render_reclock_avail;
694 bool lvds_downclock_avail; 695 bool lvds_downclock_avail;
695 /* indicates the reduced downclock for LVDS */ 696 /* indicates the reduced downclock for LVDS */
696 int lvds_downclock; 697 int lvds_downclock;
697 struct work_struct idle_work; 698 struct work_struct idle_work;
698 struct timer_list idle_timer; 699 struct timer_list idle_timer;
699 bool busy; 700 bool busy;
700 u16 orig_clock; 701 u16 orig_clock;
701 int child_dev_num; 702 int child_dev_num;
702 struct child_device_config *child_dev; 703 struct child_device_config *child_dev;
703 struct drm_connector *int_lvds_connector; 704 struct drm_connector *int_lvds_connector;
704 struct drm_connector *int_edp_connector; 705 struct drm_connector *int_edp_connector;
705 706
706 bool mchbar_need_disable; 707 bool mchbar_need_disable;
707 708
708 struct work_struct rps_work; 709 struct work_struct rps_work;
709 spinlock_t rps_lock; 710 spinlock_t rps_lock;
710 u32 pm_iir; 711 u32 pm_iir;
711 712
712 u8 cur_delay; 713 u8 cur_delay;
713 u8 min_delay; 714 u8 min_delay;
714 u8 max_delay; 715 u8 max_delay;
715 u8 fmax; 716 u8 fmax;
716 u8 fstart; 717 u8 fstart;
717 718
718 u64 last_count1; 719 u64 last_count1;
719 unsigned long last_time1; 720 unsigned long last_time1;
720 unsigned long chipset_power; 721 unsigned long chipset_power;
721 u64 last_count2; 722 u64 last_count2;
722 struct timespec last_time2; 723 struct timespec last_time2;
723 unsigned long gfx_power; 724 unsigned long gfx_power;
724 int c_m; 725 int c_m;
725 int r_t; 726 int r_t;
726 u8 corr; 727 u8 corr;
727 spinlock_t *mchdev_lock; 728 spinlock_t *mchdev_lock;
728 729
729 enum no_fbc_reason no_fbc_reason; 730 enum no_fbc_reason no_fbc_reason;
730 731
731 struct drm_mm_node *compressed_fb; 732 struct drm_mm_node *compressed_fb;
732 struct drm_mm_node *compressed_llb; 733 struct drm_mm_node *compressed_llb;
733 734
734 unsigned long last_gpu_reset; 735 unsigned long last_gpu_reset;
735 736
736 /* list of fbdevs registered on this device */ 737 /* list of fbdevs registered on this device */
737 struct intel_fbdev *fbdev; 738 struct intel_fbdev *fbdev;
738 739
739 struct backlight_device *backlight; 740 struct backlight_device *backlight;
740 741
741 struct drm_property *broadcast_rgb_property; 742 struct drm_property *broadcast_rgb_property;
742 struct drm_property *force_audio_property; 743 struct drm_property *force_audio_property;
743 744
744 atomic_t forcewake_count; 745 atomic_t forcewake_count;
745 } drm_i915_private_t; 746 } drm_i915_private_t;
746 747
747 enum i915_cache_level { 748 enum i915_cache_level {
748 I915_CACHE_NONE, 749 I915_CACHE_NONE,
749 I915_CACHE_LLC, 750 I915_CACHE_LLC,
750 I915_CACHE_LLC_MLC, /* gen6+ */ 751 I915_CACHE_LLC_MLC, /* gen6+ */
751 }; 752 };
752 753
753 struct drm_i915_gem_object { 754 struct drm_i915_gem_object {
754 struct drm_gem_object base; 755 struct drm_gem_object base;
755 756
756 /** Current space allocated to this object in the GTT, if any. */ 757 /** Current space allocated to this object in the GTT, if any. */
757 struct drm_mm_node *gtt_space; 758 struct drm_mm_node *gtt_space;
758 struct list_head gtt_list; 759 struct list_head gtt_list;
759 760
760 /** This object's place on the active/flushing/inactive lists */ 761 /** This object's place on the active/flushing/inactive lists */
761 struct list_head ring_list; 762 struct list_head ring_list;
762 struct list_head mm_list; 763 struct list_head mm_list;
763 /** This object's place on GPU write list */ 764 /** This object's place on GPU write list */
764 struct list_head gpu_write_list; 765 struct list_head gpu_write_list;
765 /** This object's place in the batchbuffer or on the eviction list */ 766 /** This object's place in the batchbuffer or on the eviction list */
766 struct list_head exec_list; 767 struct list_head exec_list;
767 768
768 /** 769 /**
769 * This is set if the object is on the active or flushing lists 770 * This is set if the object is on the active or flushing lists
770 * (has pending rendering), and is not set if it's on inactive (ready 771 * (has pending rendering), and is not set if it's on inactive (ready
771 * to be unbound). 772 * to be unbound).
772 */ 773 */
773 unsigned int active:1; 774 unsigned int active:1;
774 775
775 /** 776 /**
776 * This is set if the object has been written to since last bound 777 * This is set if the object has been written to since last bound
777 * to the GTT 778 * to the GTT
778 */ 779 */
779 unsigned int dirty:1; 780 unsigned int dirty:1;
780 781
781 /** 782 /**
782 * This is set if the object has been written to since the last 783 * This is set if the object has been written to since the last
783 * GPU flush. 784 * GPU flush.
784 */ 785 */
785 unsigned int pending_gpu_write:1; 786 unsigned int pending_gpu_write:1;
786 787
787 /** 788 /**
788 * Fence register bits (if any) for this object. Will be set 789 * Fence register bits (if any) for this object. Will be set
789 * as needed when mapped into the GTT. 790 * as needed when mapped into the GTT.
790 * Protected by dev->struct_mutex. 791 * Protected by dev->struct_mutex.
791 */ 792 */
792 signed int fence_reg:I915_MAX_NUM_FENCE_BITS; 793 signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
793 794
794 /** 795 /**
795 * Advice: are the backing pages purgeable? 796 * Advice: are the backing pages purgeable?
796 */ 797 */
797 unsigned int madv:2; 798 unsigned int madv:2;
798 799
799 /** 800 /**
800 * Current tiling mode for the object. 801 * Current tiling mode for the object.
801 */ 802 */
802 unsigned int tiling_mode:2; 803 unsigned int tiling_mode:2;
803 unsigned int tiling_changed:1; 804 unsigned int tiling_changed:1;
804 805
805 /** How many users have pinned this object in GTT space. The following 806 /** How many users have pinned this object in GTT space. The following
806 * users can each hold at most one reference: pwrite/pread, pin_ioctl 807 * users can each hold at most one reference: pwrite/pread, pin_ioctl
807 * (via user_pin_count), execbuffer (objects are not allowed multiple 808 * (via user_pin_count), execbuffer (objects are not allowed multiple
808 * times for the same batchbuffer), and the framebuffer code. When 809 * times for the same batchbuffer), and the framebuffer code. When
809 * switching/pageflipping, the framebuffer code has at most two buffers 810 * switching/pageflipping, the framebuffer code has at most two buffers
810 * pinned per crtc. 811 * pinned per crtc.
811 * 812 *
812 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 813 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
813 * bits with absolutely no headroom. So use 4 bits. */ 814 * bits with absolutely no headroom. So use 4 bits. */
814 unsigned int pin_count:4; 815 unsigned int pin_count:4;
815 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf 816 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
816 817
817 /** 818 /**
818 * Is the object at the current location in the gtt mappable and 819 * Is the object at the current location in the gtt mappable and
819 * fenceable? Used to avoid costly recalculations. 820 * fenceable? Used to avoid costly recalculations.
820 */ 821 */
821 unsigned int map_and_fenceable:1; 822 unsigned int map_and_fenceable:1;
822 823
823 /** 824 /**
824 * Whether the current gtt mapping needs to be mappable (and isn't just 825 * Whether the current gtt mapping needs to be mappable (and isn't just
825 * mappable by accident). Track pin and fault separately for a more 826 * mappable by accident). Track pin and fault separately for a more
826 * accurate mappable working set. 827 * accurate mappable working set.
827 */ 828 */
828 unsigned int fault_mappable:1; 829 unsigned int fault_mappable:1;
829 unsigned int pin_mappable:1; 830 unsigned int pin_mappable:1;
830 831
831 /* 832 /*
832 * Is the GPU currently using a fence to access this buffer? 833 * Is the GPU currently using a fence to access this buffer?
833 */ 834 */
834 unsigned int pending_fenced_gpu_access:1; 835 unsigned int pending_fenced_gpu_access:1;
835 unsigned int fenced_gpu_access:1; 836 unsigned int fenced_gpu_access:1;
836 837
837 unsigned int cache_level:2; 838 unsigned int cache_level:2;
838 839
839 struct page **pages; 840 struct page **pages;
840 841
841 /** 842 /**
842 * DMAR support 843 * DMAR support
843 */ 844 */
844 struct scatterlist *sg_list; 845 struct scatterlist *sg_list;
845 int num_sg; 846 int num_sg;
846 847
847 /** 848 /**
848 * Used for performing relocations during execbuffer insertion. 849 * Used for performing relocations during execbuffer insertion.
849 */ 850 */
850 struct hlist_node exec_node; 851 struct hlist_node exec_node;
851 unsigned long exec_handle; 852 unsigned long exec_handle;
852 struct drm_i915_gem_exec_object2 *exec_entry; 853 struct drm_i915_gem_exec_object2 *exec_entry;
853 854
854 /** 855 /**
855 * Current offset of the object in GTT space. 856 * Current offset of the object in GTT space.
856 * 857 *
857 * This is the same as gtt_space->start 858 * This is the same as gtt_space->start
858 */ 859 */
859 uint32_t gtt_offset; 860 uint32_t gtt_offset;
860 861
861 /** Breadcrumb of last rendering to the buffer. */ 862 /** Breadcrumb of last rendering to the buffer. */
862 uint32_t last_rendering_seqno; 863 uint32_t last_rendering_seqno;
863 struct intel_ring_buffer *ring; 864 struct intel_ring_buffer *ring;
864 865
865 /** Breadcrumb of last fenced GPU access to the buffer. */ 866 /** Breadcrumb of last fenced GPU access to the buffer. */
866 uint32_t last_fenced_seqno; 867 uint32_t last_fenced_seqno;
867 struct intel_ring_buffer *last_fenced_ring; 868 struct intel_ring_buffer *last_fenced_ring;
868 869
869 /** Current tiling stride for the object, if it's tiled. */ 870 /** Current tiling stride for the object, if it's tiled. */
870 uint32_t stride; 871 uint32_t stride;
871 872
872 /** Record of address bit 17 of each page at last unbind. */ 873 /** Record of address bit 17 of each page at last unbind. */
873 unsigned long *bit_17; 874 unsigned long *bit_17;
874 875
875 876
876 /** 877 /**
877 * If present, while GEM_DOMAIN_CPU is in the read domain this array 878 * If present, while GEM_DOMAIN_CPU is in the read domain this array
878 * flags which individual pages are valid. 879 * flags which individual pages are valid.
879 */ 880 */
880 uint8_t *page_cpu_valid; 881 uint8_t *page_cpu_valid;
881 882
882 /** User space pin count and filp owning the pin */ 883 /** User space pin count and filp owning the pin */
883 uint32_t user_pin_count; 884 uint32_t user_pin_count;
884 struct drm_file *pin_filp; 885 struct drm_file *pin_filp;
885 886
886 /** for phy allocated objects */ 887 /** for phy allocated objects */
887 struct drm_i915_gem_phys_object *phys_obj; 888 struct drm_i915_gem_phys_object *phys_obj;
888 889
889 /** 890 /**
890 * Number of crtcs where this object is currently the fb, but 891 * Number of crtcs where this object is currently the fb, but
891 * will be page flipped away on the next vblank. When it 892 * will be page flipped away on the next vblank. When it
892 * reaches 0, dev_priv->pending_flip_queue will be woken up. 893 * reaches 0, dev_priv->pending_flip_queue will be woken up.
893 */ 894 */
894 atomic_t pending_flip; 895 atomic_t pending_flip;
895 }; 896 };
896 897
897 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 898 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
898 899
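A minimal usage sketch for to_intel_bo(): GEM ioctls resolve a userspace handle to the embedded struct drm_gem_object and then recover the driver-private wrapper via container_of. The fragment below is assumed to sit inside such an ioctl handler (dev, file and args->handle in scope) and assumes the three-argument drm_gem_object_lookup() of this kernel generation; it is illustrative only, not part of this diff.

	/* sketch: fragment of a hypothetical ioctl handler */
	struct drm_gem_object *gem_obj;
	struct drm_i915_gem_object *obj;

	gem_obj = drm_gem_object_lookup(dev, file, args->handle);
	if (gem_obj == NULL)
		return -ENOENT;
	obj = to_intel_bo(gem_obj);	/* container_of back to the i915 object */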
899 /** 900 /**
900 * Request queue structure. 901 * Request queue structure.
901 * 902 *
902 * The request queue allows us to note sequence numbers that have been emitted 903 * The request queue allows us to note sequence numbers that have been emitted
903 * and may be associated with active buffers to be retired. 904 * and may be associated with active buffers to be retired.
904 * 905 *
905 * By keeping this list, we can avoid having to do questionable 906 * By keeping this list, we can avoid having to do questionable
906 * sequence-number comparisons on buffer last_rendering_seqnos, and associate 907 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
907 * an emission time with seqnos for tracking how far ahead of the GPU we are. 908 * an emission time with seqnos for tracking how far ahead of the GPU we are.
908 */ 909 */
909 struct drm_i915_gem_request { 910 struct drm_i915_gem_request {
910 /** On which ring this request was generated */ 911 /** On which ring this request was generated */
911 struct intel_ring_buffer *ring; 912 struct intel_ring_buffer *ring;
912 913
913 /** GEM sequence number associated with this request. */ 914 /** GEM sequence number associated with this request. */
914 uint32_t seqno; 915 uint32_t seqno;
915 916
916 /** Time at which this request was emitted, in jiffies. */ 917 /** Time at which this request was emitted, in jiffies. */
917 unsigned long emitted_jiffies; 918 unsigned long emitted_jiffies;
918 919
919 /** global list entry for this request */ 920 /** global list entry for this request */
920 struct list_head list; 921 struct list_head list;
921 922
922 struct drm_i915_file_private *file_priv; 923 struct drm_i915_file_private *file_priv;
923 /** file_priv list entry for this request */ 924 /** file_priv list entry for this request */
924 struct list_head client_list; 925 struct list_head client_list;
925 }; 926 };
926 927
927 struct drm_i915_file_private { 928 struct drm_i915_file_private {
928 struct { 929 struct {
929 struct spinlock lock; 930 struct spinlock lock;
930 struct list_head request_list; 931 struct list_head request_list;
931 } mm; 932 } mm;
932 }; 933 };
933 934
934 #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) 935 #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
935 936
936 #define IS_I830(dev) ((dev)->pci_device == 0x3577) 937 #define IS_I830(dev) ((dev)->pci_device == 0x3577)
937 #define IS_845G(dev) ((dev)->pci_device == 0x2562) 938 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
938 #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 939 #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
939 #define IS_I865G(dev) ((dev)->pci_device == 0x2572) 940 #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
940 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 941 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
941 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 942 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
942 #define IS_I945G(dev) ((dev)->pci_device == 0x2772) 943 #define IS_I945G(dev) ((dev)->pci_device == 0x2772)
943 #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 944 #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
944 #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 945 #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
945 #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 946 #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
946 #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) 947 #define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
947 #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 948 #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
948 #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) 949 #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
949 #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) 950 #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
950 #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 951 #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
951 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 952 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
952 #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) 953 #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
953 #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 954 #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
954 #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 955 #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
955 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 956 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
956 957
957 /* 958 /*
958 * The genX designation typically refers to the render engine, so render 959 * The genX designation typically refers to the render engine, so render
959 * capability related checks should use IS_GEN, while display and other checks 960 * capability related checks should use IS_GEN, while display and other checks
960 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 961 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
961 * chips, etc.). 962 * chips, etc.).
962 */ 963 */
963 #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 964 #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
964 #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 965 #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
965 #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 966 #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
966 #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 967 #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
967 #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 968 #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
968 #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 969 #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
969 970
970 #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) 971 #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
971 #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) 972 #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
973 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
972 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 974 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
973 975
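HAS_LLC() is the feature test this commit introduces: rather than open-coding SNB/IVB generation checks, callers ask whether the device shares the CPU's last-level cache. A sketch of the kind of caller this enables (the helper name is made up for illustration and is not part of this diff):

	static enum i915_cache_level
	default_cache_level(struct drm_device *dev)	/* hypothetical helper */
	{
		if (HAS_LLC(dev))
			return I915_CACHE_LLC;	/* GPU snoops the CPU LLC, no clflush needed */
		return I915_CACHE_NONE;		/* uncached: CPU writes must be clflushed */
	}

Allocation and ring-setup paths can then branch on the device flag instead of on specific chip generations, which is the genericity the commit message asks for.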
974 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 976 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
975 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 977 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
976 978
977 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 979 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
978 * rows, which changed the alignment requirements and fence programming. 980 * rows, which changed the alignment requirements and fence programming.
979 */ 981 */
980 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ 982 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
981 IS_I915GM(dev))) 983 IS_I915GM(dev)))
982 #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) 984 #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
983 #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) 985 #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
984 #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) 986 #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
985 #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) 987 #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
986 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 988 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
987 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 989 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
988 /* dsparb controlled by hw only */ 990 /* dsparb controlled by hw only */
989 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 991 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
990 992
991 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) 993 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
992 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 994 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
993 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 995 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
994 996
995 #define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) 997 #define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
996 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 998 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
997 999
998 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 1000 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
999 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1001 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1000 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 1002 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1001 1003
1002 #include "i915_trace.h" 1004 #include "i915_trace.h"
1003 1005
1004 extern struct drm_ioctl_desc i915_ioctls[]; 1006 extern struct drm_ioctl_desc i915_ioctls[];
1005 extern int i915_max_ioctl; 1007 extern int i915_max_ioctl;
1006 extern unsigned int i915_fbpercrtc __always_unused; 1008 extern unsigned int i915_fbpercrtc __always_unused;
1007 extern int i915_panel_ignore_lid __read_mostly; 1009 extern int i915_panel_ignore_lid __read_mostly;
1008 extern unsigned int i915_powersave __read_mostly; 1010 extern unsigned int i915_powersave __read_mostly;
1009 extern int i915_semaphores __read_mostly; 1011 extern int i915_semaphores __read_mostly;
1010 extern unsigned int i915_lvds_downclock __read_mostly; 1012 extern unsigned int i915_lvds_downclock __read_mostly;
1011 extern int i915_panel_use_ssc __read_mostly; 1013 extern int i915_panel_use_ssc __read_mostly;
1012 extern int i915_vbt_sdvo_panel_type __read_mostly; 1014 extern int i915_vbt_sdvo_panel_type __read_mostly;
1013 extern int i915_enable_rc6 __read_mostly; 1015 extern int i915_enable_rc6 __read_mostly;
1014 extern int i915_enable_fbc __read_mostly; 1016 extern int i915_enable_fbc __read_mostly;
1015 extern bool i915_enable_hangcheck __read_mostly; 1017 extern bool i915_enable_hangcheck __read_mostly;
1016 1018
1017 extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1019 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1018 extern int i915_resume(struct drm_device *dev); 1020 extern int i915_resume(struct drm_device *dev);
1019 extern int i915_master_create(struct drm_device *dev, struct drm_master *master); 1021 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
1020 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); 1022 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1021 1023
1022 /* i915_dma.c */ 1024 /* i915_dma.c */
1023 extern void i915_kernel_lost_context(struct drm_device * dev); 1025 extern void i915_kernel_lost_context(struct drm_device * dev);
1024 extern int i915_driver_load(struct drm_device *, unsigned long flags); 1026 extern int i915_driver_load(struct drm_device *, unsigned long flags);
1025 extern int i915_driver_unload(struct drm_device *); 1027 extern int i915_driver_unload(struct drm_device *);
1026 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); 1028 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
1027 extern void i915_driver_lastclose(struct drm_device * dev); 1029 extern void i915_driver_lastclose(struct drm_device * dev);
1028 extern void i915_driver_preclose(struct drm_device *dev, 1030 extern void i915_driver_preclose(struct drm_device *dev,
1029 struct drm_file *file_priv); 1031 struct drm_file *file_priv);
1030 extern void i915_driver_postclose(struct drm_device *dev, 1032 extern void i915_driver_postclose(struct drm_device *dev,
1031 struct drm_file *file_priv); 1033 struct drm_file *file_priv);
1032 extern int i915_driver_device_is_agp(struct drm_device * dev); 1034 extern int i915_driver_device_is_agp(struct drm_device * dev);
1033 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 1035 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1034 unsigned long arg); 1036 unsigned long arg);
1035 extern int i915_emit_box(struct drm_device *dev, 1037 extern int i915_emit_box(struct drm_device *dev,
1036 struct drm_clip_rect *box, 1038 struct drm_clip_rect *box,
1037 int DR1, int DR4); 1039 int DR1, int DR4);
1038 extern int i915_reset(struct drm_device *dev, u8 flags); 1040 extern int i915_reset(struct drm_device *dev, u8 flags);
1039 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 1041 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
1040 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 1042 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
1041 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 1043 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
1042 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 1044 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
1043 1045
1044 1046
1045 /* i915_irq.c */ 1047 /* i915_irq.c */
1046 void i915_hangcheck_elapsed(unsigned long data); 1048 void i915_hangcheck_elapsed(unsigned long data);
1047 void i915_handle_error(struct drm_device *dev, bool wedged); 1049 void i915_handle_error(struct drm_device *dev, bool wedged);
1048 extern int i915_irq_emit(struct drm_device *dev, void *data, 1050 extern int i915_irq_emit(struct drm_device *dev, void *data,
1049 struct drm_file *file_priv); 1051 struct drm_file *file_priv);
1050 extern int i915_irq_wait(struct drm_device *dev, void *data, 1052 extern int i915_irq_wait(struct drm_device *dev, void *data,
1051 struct drm_file *file_priv); 1053 struct drm_file *file_priv);
1052 1054
1053 extern void intel_irq_init(struct drm_device *dev); 1055 extern void intel_irq_init(struct drm_device *dev);
1054 1056
1055 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, 1057 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
1056 struct drm_file *file_priv); 1058 struct drm_file *file_priv);
1057 extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, 1059 extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
1058 struct drm_file *file_priv); 1060 struct drm_file *file_priv);
1059 extern int i915_vblank_swap(struct drm_device *dev, void *data, 1061 extern int i915_vblank_swap(struct drm_device *dev, void *data,
1060 struct drm_file *file_priv); 1062 struct drm_file *file_priv);
1061 1063
1062 void 1064 void
1063 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1065 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1064 1066
1065 void 1067 void
1066 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1068 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1067 1069
1068 void intel_enable_asle(struct drm_device *dev); 1070 void intel_enable_asle(struct drm_device *dev);
1069 1071
1070 #ifdef CONFIG_DEBUG_FS 1072 #ifdef CONFIG_DEBUG_FS
1071 extern void i915_destroy_error_state(struct drm_device *dev); 1073 extern void i915_destroy_error_state(struct drm_device *dev);
1072 #else 1074 #else
1073 #define i915_destroy_error_state(x) 1075 #define i915_destroy_error_state(x)
1074 #endif 1076 #endif
1075 1077
1076 1078
1077 /* i915_gem.c */ 1079 /* i915_gem.c */
1078 int i915_gem_init_ioctl(struct drm_device *dev, void *data, 1080 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
1079 struct drm_file *file_priv); 1081 struct drm_file *file_priv);
1080 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 1082 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
1081 struct drm_file *file_priv); 1083 struct drm_file *file_priv);
1082 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 1084 int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1083 struct drm_file *file_priv); 1085 struct drm_file *file_priv);
1084 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1086 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1085 struct drm_file *file_priv); 1087 struct drm_file *file_priv);
1086 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 1088 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1087 struct drm_file *file_priv); 1089 struct drm_file *file_priv);
1088 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 1090 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1089 struct drm_file *file_priv); 1091 struct drm_file *file_priv);
1090 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1092 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1091 struct drm_file *file_priv); 1093 struct drm_file *file_priv);
1092 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 1094 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1093 struct drm_file *file_priv); 1095 struct drm_file *file_priv);
1094 int i915_gem_execbuffer(struct drm_device *dev, void *data, 1096 int i915_gem_execbuffer(struct drm_device *dev, void *data,
1095 struct drm_file *file_priv); 1097 struct drm_file *file_priv);
1096 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 1098 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
1097 struct drm_file *file_priv); 1099 struct drm_file *file_priv);
1098 int i915_gem_pin_ioctl(struct drm_device *dev, void *data, 1100 int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
1099 struct drm_file *file_priv); 1101 struct drm_file *file_priv);
1100 int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, 1102 int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
1101 struct drm_file *file_priv); 1103 struct drm_file *file_priv);
1102 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 1104 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
1103 struct drm_file *file_priv); 1105 struct drm_file *file_priv);
1104 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 1106 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
1105 struct drm_file *file_priv); 1107 struct drm_file *file_priv);
1106 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 1108 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1107 struct drm_file *file_priv); 1109 struct drm_file *file_priv);
1108 int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 1110 int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
1109 struct drm_file *file_priv); 1111 struct drm_file *file_priv);
1110 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 1112 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
1111 struct drm_file *file_priv); 1113 struct drm_file *file_priv);
1112 int i915_gem_set_tiling(struct drm_device *dev, void *data, 1114 int i915_gem_set_tiling(struct drm_device *dev, void *data,
1113 struct drm_file *file_priv); 1115 struct drm_file *file_priv);
1114 int i915_gem_get_tiling(struct drm_device *dev, void *data, 1116 int i915_gem_get_tiling(struct drm_device *dev, void *data,
1115 struct drm_file *file_priv); 1117 struct drm_file *file_priv);
1116 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 1118 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
1117 struct drm_file *file_priv); 1119 struct drm_file *file_priv);
1118 void i915_gem_load(struct drm_device *dev); 1120 void i915_gem_load(struct drm_device *dev);
1119 int i915_gem_init_object(struct drm_gem_object *obj); 1121 int i915_gem_init_object(struct drm_gem_object *obj);
1120 int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, 1122 int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
1121 uint32_t invalidate_domains, 1123 uint32_t invalidate_domains,
1122 uint32_t flush_domains); 1124 uint32_t flush_domains);
1123 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 1125 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1124 size_t size); 1126 size_t size);
1125 void i915_gem_free_object(struct drm_gem_object *obj); 1127 void i915_gem_free_object(struct drm_gem_object *obj);
1126 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 1128 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
1127 uint32_t alignment, 1129 uint32_t alignment,
1128 bool map_and_fenceable); 1130 bool map_and_fenceable);
1129 void i915_gem_object_unpin(struct drm_i915_gem_object *obj); 1131 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
1130 int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); 1132 int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
1131 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 1133 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
1132 void i915_gem_lastclose(struct drm_device *dev); 1134 void i915_gem_lastclose(struct drm_device *dev);
1133 1135
1134 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 1136 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
1135 int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); 1137 int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
1136 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1138 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1137 struct intel_ring_buffer *ring, 1139 struct intel_ring_buffer *ring,
1138 u32 seqno); 1140 u32 seqno);
1139 1141
1140 int i915_gem_dumb_create(struct drm_file *file_priv, 1142 int i915_gem_dumb_create(struct drm_file *file_priv,
1141 struct drm_device *dev, 1143 struct drm_device *dev,
1142 struct drm_mode_create_dumb *args); 1144 struct drm_mode_create_dumb *args);
1143 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 1145 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
1144 uint32_t handle, uint64_t *offset); 1146 uint32_t handle, uint64_t *offset);
1145 int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, 1147 int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
1146 uint32_t handle); 1148 uint32_t handle);
1147 /** 1149 /**
1148 * Returns true if seq1 is later than seq2. 1150 * Returns true if seq1 is later than seq2.
1149 */ 1151 */
1150 static inline bool 1152 static inline bool
1151 i915_seqno_passed(uint32_t seq1, uint32_t seq2) 1153 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1152 { 1154 {
1153 return (int32_t)(seq1 - seq2) >= 0; 1155 return (int32_t)(seq1 - seq2) >= 0;
1154 } 1156 }
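The int32_t cast above is what makes the comparison safe across 32-bit seqno wraparound. As a worked example: with seq1 = 0x00000002 (just after a wrap) and seq2 = 0xfffffffe, the unsigned difference is 0x00000004, which read as a signed value is +4 >= 0, so seq1 is correctly treated as the later sequence number even though it is numerically smaller; swapping the arguments gives -4 < 0. The trick holds as long as the two seqnos are within 2^31 of each other, which outstanding requests are assumed never to exceed.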
1155 1157
1156 static inline u32 1158 static inline u32
1157 i915_gem_next_request_seqno(struct intel_ring_buffer *ring) 1159 i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1158 { 1160 {
1159 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1161 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1160 return ring->outstanding_lazy_request = dev_priv->next_seqno; 1162 return ring->outstanding_lazy_request = dev_priv->next_seqno;
1161 } 1163 }
1162 1164
1163 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj, 1165 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
1164 struct intel_ring_buffer *pipelined); 1166 struct intel_ring_buffer *pipelined);
1165 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); 1167 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
1166 1168
1167 void i915_gem_retire_requests(struct drm_device *dev); 1169 void i915_gem_retire_requests(struct drm_device *dev);
1168 void i915_gem_reset(struct drm_device *dev); 1170 void i915_gem_reset(struct drm_device *dev);
1169 void i915_gem_clflush_object(struct drm_i915_gem_object *obj); 1171 void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
1170 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, 1172 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
1171 uint32_t read_domains, 1173 uint32_t read_domains,
1172 uint32_t write_domain); 1174 uint32_t write_domain);
1173 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); 1175 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
1174 int __must_check i915_gem_init_ringbuffer(struct drm_device *dev); 1176 int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
1175 void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 1177 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1176 void i915_gem_do_init(struct drm_device *dev, 1178 void i915_gem_do_init(struct drm_device *dev,
1177 unsigned long start, 1179 unsigned long start,
1178 unsigned long mappable_end, 1180 unsigned long mappable_end,
1179 unsigned long end); 1181 unsigned long end);
1180 int __must_check i915_gpu_idle(struct drm_device *dev); 1182 int __must_check i915_gpu_idle(struct drm_device *dev);
1181 int __must_check i915_gem_idle(struct drm_device *dev); 1183 int __must_check i915_gem_idle(struct drm_device *dev);
1182 int __must_check i915_add_request(struct intel_ring_buffer *ring, 1184 int __must_check i915_add_request(struct intel_ring_buffer *ring,
1183 struct drm_file *file, 1185 struct drm_file *file,
1184 struct drm_i915_gem_request *request); 1186 struct drm_i915_gem_request *request);
1185 int __must_check i915_wait_request(struct intel_ring_buffer *ring, 1187 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
1186 uint32_t seqno); 1188 uint32_t seqno);
1187 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 1189 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
1188 int __must_check 1190 int __must_check
1189 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 1191 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
1190 bool write); 1192 bool write);
1191 int __must_check 1193 int __must_check
1192 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 1194 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
1193 u32 alignment, 1195 u32 alignment,
1194 struct intel_ring_buffer *pipelined); 1196 struct intel_ring_buffer *pipelined);
1195 int i915_gem_attach_phys_object(struct drm_device *dev, 1197 int i915_gem_attach_phys_object(struct drm_device *dev,
1196 struct drm_i915_gem_object *obj, 1198 struct drm_i915_gem_object *obj,
1197 int id, 1199 int id,
1198 int align); 1200 int align);
1199 void i915_gem_detach_phys_object(struct drm_device *dev, 1201 void i915_gem_detach_phys_object(struct drm_device *dev,
1200 struct drm_i915_gem_object *obj); 1202 struct drm_i915_gem_object *obj);
1201 void i915_gem_free_all_phys_object(struct drm_device *dev); 1203 void i915_gem_free_all_phys_object(struct drm_device *dev);
1202 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 1204 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
1203 1205
1204 uint32_t 1206 uint32_t
1205 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, 1207 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1206 uint32_t size, 1208 uint32_t size,
1207 int tiling_mode); 1209 int tiling_mode);
1208 1210
1209 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 1211 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
1210 enum i915_cache_level cache_level); 1212 enum i915_cache_level cache_level);
1211 1213
1212 /* i915_gem_gtt.c */ 1214 /* i915_gem_gtt.c */
1213 void i915_gem_restore_gtt_mappings(struct drm_device *dev); 1215 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
1214 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); 1216 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
1215 void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, 1217 void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
1216 enum i915_cache_level cache_level); 1218 enum i915_cache_level cache_level);
1217 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); 1219 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
1218 1220
1219 /* i915_gem_evict.c */ 1221 /* i915_gem_evict.c */
1220 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, 1222 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
1221 unsigned alignment, bool mappable); 1223 unsigned alignment, bool mappable);
1222 int __must_check i915_gem_evict_everything(struct drm_device *dev, 1224 int __must_check i915_gem_evict_everything(struct drm_device *dev,
1223 bool purgeable_only); 1225 bool purgeable_only);
1224 int __must_check i915_gem_evict_inactive(struct drm_device *dev, 1226 int __must_check i915_gem_evict_inactive(struct drm_device *dev,
1225 bool purgeable_only); 1227 bool purgeable_only);
1226 1228
1227 /* i915_gem_tiling.c */ 1229 /* i915_gem_tiling.c */
1228 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 1230 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
1229 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); 1231 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
1230 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); 1232 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
1231 1233
1232 /* i915_gem_debug.c */ 1234 /* i915_gem_debug.c */
1233 void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, 1235 void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
1234 const char *where, uint32_t mark); 1236 const char *where, uint32_t mark);
1235 #if WATCH_LISTS 1237 #if WATCH_LISTS
1236 int i915_verify_lists(struct drm_device *dev); 1238 int i915_verify_lists(struct drm_device *dev);
1237 #else 1239 #else
1238 #define i915_verify_lists(dev) 0 1240 #define i915_verify_lists(dev) 0
1239 #endif 1241 #endif
1240 void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, 1242 void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
1241 int handle); 1243 int handle);
1242 void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, 1244 void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
1243 const char *where, uint32_t mark); 1245 const char *where, uint32_t mark);
1244 1246
1245 /* i915_debugfs.c */ 1247 /* i915_debugfs.c */
1246 int i915_debugfs_init(struct drm_minor *minor); 1248 int i915_debugfs_init(struct drm_minor *minor);
1247 void i915_debugfs_cleanup(struct drm_minor *minor); 1249 void i915_debugfs_cleanup(struct drm_minor *minor);
1248 1250
1249 /* i915_suspend.c */ 1251 /* i915_suspend.c */
1250 extern int i915_save_state(struct drm_device *dev); 1252 extern int i915_save_state(struct drm_device *dev);
1251 extern int i915_restore_state(struct drm_device *dev); 1253 extern int i915_restore_state(struct drm_device *dev);
1252 1254
1253 /* i915_suspend.c */ 1255 /* i915_suspend.c */
1254 extern int i915_save_state(struct drm_device *dev); 1256 extern int i915_save_state(struct drm_device *dev);
1255 extern int i915_restore_state(struct drm_device *dev); 1257 extern int i915_restore_state(struct drm_device *dev);
1256 1258
1257 /* intel_i2c.c */ 1259 /* intel_i2c.c */
1258 extern int intel_setup_gmbus(struct drm_device *dev); 1260 extern int intel_setup_gmbus(struct drm_device *dev);
1259 extern void intel_teardown_gmbus(struct drm_device *dev); 1261 extern void intel_teardown_gmbus(struct drm_device *dev);
1260 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 1262 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
1261 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 1263 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
1262 extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 1264 extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
1263 { 1265 {
1264 return container_of(adapter, struct intel_gmbus, adapter)->force_bit; 1266 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
1265 } 1267 }
1266 extern void intel_i2c_reset(struct drm_device *dev); 1268 extern void intel_i2c_reset(struct drm_device *dev);
1267 1269
1268 /* intel_opregion.c */ 1270 /* intel_opregion.c */
1269 extern int intel_opregion_setup(struct drm_device *dev); 1271 extern int intel_opregion_setup(struct drm_device *dev);
1270 #ifdef CONFIG_ACPI 1272 #ifdef CONFIG_ACPI
1271 extern void intel_opregion_init(struct drm_device *dev); 1273 extern void intel_opregion_init(struct drm_device *dev);
1272 extern void intel_opregion_fini(struct drm_device *dev); 1274 extern void intel_opregion_fini(struct drm_device *dev);
1273 extern void intel_opregion_asle_intr(struct drm_device *dev); 1275 extern void intel_opregion_asle_intr(struct drm_device *dev);
1274 extern void intel_opregion_gse_intr(struct drm_device *dev); 1276 extern void intel_opregion_gse_intr(struct drm_device *dev);
1275 extern void intel_opregion_enable_asle(struct drm_device *dev); 1277 extern void intel_opregion_enable_asle(struct drm_device *dev);
1276 #else 1278 #else
1277 static inline void intel_opregion_init(struct drm_device *dev) { return; } 1279 static inline void intel_opregion_init(struct drm_device *dev) { return; }
1278 static inline void intel_opregion_fini(struct drm_device *dev) { return; } 1280 static inline void intel_opregion_fini(struct drm_device *dev) { return; }
1279 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } 1281 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
1280 static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; } 1282 static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
1281 static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } 1283 static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
1282 #endif 1284 #endif
1283 1285
1284 /* intel_acpi.c */ 1286 /* intel_acpi.c */
1285 #ifdef CONFIG_ACPI 1287 #ifdef CONFIG_ACPI
1286 extern void intel_register_dsm_handler(void); 1288 extern void intel_register_dsm_handler(void);
1287 extern void intel_unregister_dsm_handler(void); 1289 extern void intel_unregister_dsm_handler(void);
1288 #else 1290 #else
1289 static inline void intel_register_dsm_handler(void) { return; } 1291 static inline void intel_register_dsm_handler(void) { return; }
1290 static inline void intel_unregister_dsm_handler(void) { return; } 1292 static inline void intel_unregister_dsm_handler(void) { return; }
1291 #endif /* CONFIG_ACPI */ 1293 #endif /* CONFIG_ACPI */
1292 1294
1293 /* modesetting */ 1295 /* modesetting */
1294 extern void intel_modeset_init(struct drm_device *dev); 1296 extern void intel_modeset_init(struct drm_device *dev);
1295 extern void intel_modeset_gem_init(struct drm_device *dev); 1297 extern void intel_modeset_gem_init(struct drm_device *dev);
1296 extern void intel_modeset_cleanup(struct drm_device *dev); 1298 extern void intel_modeset_cleanup(struct drm_device *dev);
1297 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 1299 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
1298 extern bool intel_fbc_enabled(struct drm_device *dev); 1300 extern bool intel_fbc_enabled(struct drm_device *dev);
1299 extern void intel_disable_fbc(struct drm_device *dev); 1301 extern void intel_disable_fbc(struct drm_device *dev);
1300 extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 1302 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
1301 extern void ironlake_init_pch_refclk(struct drm_device *dev); 1303 extern void ironlake_init_pch_refclk(struct drm_device *dev);
1302 extern void ironlake_enable_rc6(struct drm_device *dev); 1304 extern void ironlake_enable_rc6(struct drm_device *dev);
1303 extern void gen6_set_rps(struct drm_device *dev, u8 val); 1305 extern void gen6_set_rps(struct drm_device *dev, u8 val);
1304 extern void intel_detect_pch(struct drm_device *dev); 1306 extern void intel_detect_pch(struct drm_device *dev);
1305 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); 1307 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
1306 1308
1307 extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); 1309 extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1308 extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv); 1310 extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
1309 extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 1311 extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1310 extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv); 1312 extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
1311 1313
1312 /* overlay */ 1314 /* overlay */
1313 #ifdef CONFIG_DEBUG_FS 1315 #ifdef CONFIG_DEBUG_FS
1314 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 1316 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
1315 extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); 1317 extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
1316 1318
1317 extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); 1319 extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
1318 extern void intel_display_print_error_state(struct seq_file *m, 1320 extern void intel_display_print_error_state(struct seq_file *m,
1319 struct drm_device *dev, 1321 struct drm_device *dev,
1320 struct intel_display_error_state *error); 1322 struct intel_display_error_state *error);
1321 #endif 1323 #endif
1322 1324
1323 #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) 1325 #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
1324 1326
1325 #define BEGIN_LP_RING(n) \ 1327 #define BEGIN_LP_RING(n) \
1326 intel_ring_begin(LP_RING(dev_priv), (n)) 1328 intel_ring_begin(LP_RING(dev_priv), (n))
1327 1329
1328 #define OUT_RING(x) \ 1330 #define OUT_RING(x) \
1329 intel_ring_emit(LP_RING(dev_priv), x) 1331 intel_ring_emit(LP_RING(dev_priv), x)
1330 1332
1331 #define ADVANCE_LP_RING() \ 1333 #define ADVANCE_LP_RING() \
1332 intel_ring_advance(LP_RING(dev_priv)) 1334 intel_ring_advance(LP_RING(dev_priv))
1333 1335
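These wrappers drive the legacy render ring (RCS) and expect a dev_priv variable to be in scope. A minimal emission sketch of the intended pattern, assuming an int ret and a caller that can propagate errors (illustrative only):

	ret = BEGIN_LP_RING(2);		/* reserve two dwords on the render ring */
	if (ret)
		return ret;
	OUT_RING(MI_FLUSH);		/* example payload */
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();		/* commit the reserved dwords to the hardware */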
1334 /** 1336 /**
1335 * Lock test for when it's just for synchronization of ring access. 1337 * Lock test for when it's just for synchronization of ring access.
1336 * 1338 *
1337 * In that case, we don't need to do it when GEM is initialized as nobody else 1339 * In that case, we don't need to do it when GEM is initialized as nobody else
1338 * has access to the ring. 1340 * has access to the ring.
1339 */ 1341 */
1340 #define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ 1342 #define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
1341 if (LP_RING(dev->dev_private)->obj == NULL) \ 1343 if (LP_RING(dev->dev_private)->obj == NULL) \
1342 LOCK_TEST_WITH_RETURN(dev, file); \ 1344 LOCK_TEST_WITH_RETURN(dev, file); \
1343 } while (0) 1345 } while (0)
1344 1346
1345 /* On SNB platform, before reading ring registers forcewake bit 1347 /* On SNB platform, before reading ring registers forcewake bit
1346 * must be set to prevent GT core from power down and stale values being 1348 * must be set to prevent GT core from power down and stale values being
1347 * returned. 1349 * returned.
1348 */ 1350 */
1349 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); 1351 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1350 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 1352 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1351 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); 1353 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1352 1354
1353 /* We give fast paths for the really cool registers */ 1355 /* We give fast paths for the really cool registers */
1354 #define NEEDS_FORCE_WAKE(dev_priv, reg) \ 1356 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
1355 (((dev_priv)->info->gen >= 6) && \ 1357 (((dev_priv)->info->gen >= 6) && \
1356 ((reg) < 0x40000) && \ 1358 ((reg) < 0x40000) && \
1357 ((reg) != FORCEWAKE)) 1359 ((reg) != FORCEWAKE))
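NEEDS_FORCE_WAKE() gates the MMIO accessors declared below: on gen6+ the GT may power down between accesses, so registers below 0x40000 must be read with a forcewake reference held. A simplified sketch of how i915_read32() uses it in i915_drv.c (tracing omitted, and the real body is generated by a macro):

	u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
	{
		u32 val;

		if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
			gen6_gt_force_wake_get(dev_priv);	/* keep the GT awake */
			val = readl(dev_priv->regs + reg);
			gen6_gt_force_wake_put(dev_priv);	/* drop the reference */
		} else {
			val = readl(dev_priv->regs + reg);
		}
		return val;
	}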
1358 1360
1359 #define __i915_read(x, y) \ 1361 #define __i915_read(x, y) \
1360 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); 1362 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
1361 1363
1362 __i915_read(8, b) 1364 __i915_read(8, b)
1363 __i915_read(16, w) 1365 __i915_read(16, w)
1364 __i915_read(32, l) 1366 __i915_read(32, l)
1365 __i915_read(64, q) 1367 __i915_read(64, q)
1366 #undef __i915_read 1368 #undef __i915_read
1367 1369
1368 #define __i915_write(x, y) \ 1370 #define __i915_write(x, y) \
1369 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); 1371 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
1370 1372
1371 __i915_write(8, b) 1373 __i915_write(8, b)
1372 __i915_write(16, w) 1374 __i915_write(16, w)
1373 __i915_write(32, l) 1375 __i915_write(32, l)
1374 __i915_write(64, q) 1376 __i915_write(64, q)
1375 #undef __i915_write 1377 #undef __i915_write
1376 1378
1377 #define I915_READ8(reg) i915_read8(dev_priv, (reg)) 1379 #define I915_READ8(reg) i915_read8(dev_priv, (reg))
1378 #define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) 1380 #define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))
1379 1381
1380 #define I915_READ16(reg) i915_read16(dev_priv, (reg)) 1382 #define I915_READ16(reg) i915_read16(dev_priv, (reg))
1381 #define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) 1383 #define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
1382 #define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) 1384 #define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
1383 #define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) 1385 #define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))
1384 1386
1385 #define I915_READ(reg) i915_read32(dev_priv, (reg)) 1387 #define I915_READ(reg) i915_read32(dev_priv, (reg))
1386 #define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) 1388 #define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
1387 #define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) 1389 #define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
1388 #define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) 1390 #define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))
1389 1391
1390 #define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) 1392 #define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
1391 #define I915_READ64(reg) i915_read64(dev_priv, (reg)) 1393 #define I915_READ64(reg) i915_read64(dev_priv, (reg))
1392 1394
1393 #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 1395 #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
1394 #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 1396 #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
1395 1397
1396 1398
1397 #endif 1399 #endif
1398 1400
drivers/gpu/drm/i915/i915_gem.c
1 /* 1 /*
2 * Copyright © 2008 Intel Corporation 2 * Copyright © 2008 Intel Corporation
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation 6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the 8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions: 9 * Software is furnished to do so, subject to the following conditions:
10 * 10 *
11 * The above copyright notice and this permission notice (including the next 11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the 12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software. 13 * Software.
14 * 14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE. 21 * IN THE SOFTWARE.
22 * 22 *
23 * Authors: 23 * Authors:
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 * 25 *
26 */ 26 */
27 27
28 #include "drmP.h" 28 #include "drmP.h"
29 #include "drm.h" 29 #include "drm.h"
30 #include "i915_drm.h" 30 #include "i915_drm.h"
31 #include "i915_drv.h" 31 #include "i915_drv.h"
32 #include "i915_trace.h" 32 #include "i915_trace.h"
33 #include "intel_drv.h" 33 #include "intel_drv.h"
34 #include <linux/shmem_fs.h> 34 #include <linux/shmem_fs.h>
35 #include <linux/slab.h> 35 #include <linux/slab.h>
36 #include <linux/swap.h> 36 #include <linux/swap.h>
37 #include <linux/pci.h> 37 #include <linux/pci.h>
38 38
39 static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); 39 static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); 40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); 41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
42 static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, 42 static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
43 bool write); 43 bool write);
44 static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, 44 static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
45 uint64_t offset, 45 uint64_t offset,
46 uint64_t size); 46 uint64_t size);
47 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj); 47 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
48 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 48 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
49 unsigned alignment, 49 unsigned alignment,
50 bool map_and_fenceable); 50 bool map_and_fenceable);
51 static void i915_gem_clear_fence_reg(struct drm_device *dev, 51 static void i915_gem_clear_fence_reg(struct drm_device *dev,
52 struct drm_i915_fence_reg *reg); 52 struct drm_i915_fence_reg *reg);
53 static int i915_gem_phys_pwrite(struct drm_device *dev, 53 static int i915_gem_phys_pwrite(struct drm_device *dev,
54 struct drm_i915_gem_object *obj, 54 struct drm_i915_gem_object *obj,
55 struct drm_i915_gem_pwrite *args, 55 struct drm_i915_gem_pwrite *args,
56 struct drm_file *file); 56 struct drm_file *file);
57 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); 57 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
58 58
59 static int i915_gem_inactive_shrink(struct shrinker *shrinker, 59 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
60 struct shrink_control *sc); 60 struct shrink_control *sc);
61 61
62 /* some bookkeeping */ 62 /* some bookkeeping */
63 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 63 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
64 size_t size) 64 size_t size)
65 { 65 {
66 dev_priv->mm.object_count++; 66 dev_priv->mm.object_count++;
67 dev_priv->mm.object_memory += size; 67 dev_priv->mm.object_memory += size;
68 } 68 }
69 69
70 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, 70 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
71 size_t size) 71 size_t size)
72 { 72 {
73 dev_priv->mm.object_count--; 73 dev_priv->mm.object_count--;
74 dev_priv->mm.object_memory -= size; 74 dev_priv->mm.object_memory -= size;
75 } 75 }
76 76
77 static int 77 static int
78 i915_gem_wait_for_error(struct drm_device *dev) 78 i915_gem_wait_for_error(struct drm_device *dev)
79 { 79 {
80 struct drm_i915_private *dev_priv = dev->dev_private; 80 struct drm_i915_private *dev_priv = dev->dev_private;
81 struct completion *x = &dev_priv->error_completion; 81 struct completion *x = &dev_priv->error_completion;
82 unsigned long flags; 82 unsigned long flags;
83 int ret; 83 int ret;
84 84
85 if (!atomic_read(&dev_priv->mm.wedged)) 85 if (!atomic_read(&dev_priv->mm.wedged))
86 return 0; 86 return 0;
87 87
88 ret = wait_for_completion_interruptible(x); 88 ret = wait_for_completion_interruptible(x);
89 if (ret) 89 if (ret)
90 return ret; 90 return ret;
91 91
92 if (atomic_read(&dev_priv->mm.wedged)) { 92 if (atomic_read(&dev_priv->mm.wedged)) {
93 /* GPU is hung, bump the completion count to account for 93 /* GPU is hung, bump the completion count to account for
94 * the token we just consumed so that we never hit zero and 94 * the token we just consumed so that we never hit zero and
95 * end up waiting upon a subsequent completion event that 95 * end up waiting upon a subsequent completion event that
96 * will never happen. 96 * will never happen.
97 */ 97 */
98 spin_lock_irqsave(&x->wait.lock, flags); 98 spin_lock_irqsave(&x->wait.lock, flags);
99 x->done++; 99 x->done++;
100 spin_unlock_irqrestore(&x->wait.lock, flags); 100 spin_unlock_irqrestore(&x->wait.lock, flags);
101 } 101 }
102 return 0; 102 return 0;
103 } 103 }
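i915_gem_wait_for_error() above consumes one token from the error completion and, if the GPU is still wedged, bumps x->done back up so the token is not lost and later waiters never block on a completion that will not be signalled again. Not part of the diff; a rough userspace analogy of that put-the-token-back idea using a POSIX semaphore (the names and the wedged flag are invented for illustration):

#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t error_tokens;   /* plays the role of dev_priv->error_completion */
static bool wedged = true;   /* plays the role of dev_priv->mm.wedged */

static int wait_for_error(void)
{
	if (!wedged)
		return 0;

	sem_wait(&error_tokens);         /* consume a token, like wait_for_completion() */

	if (wedged)
		sem_post(&error_tokens); /* still hung: return the token so the
					  * next waiter does not block forever */
	return 0;
}

int main(void)
{
	sem_init(&error_tokens, 0, 1);
	wait_for_error();
	wait_for_error();                /* would deadlock without the sem_post() above */
	printf("both waits returned\n");
	sem_destroy(&error_tokens);
	return 0;
}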
104 104
105 int i915_mutex_lock_interruptible(struct drm_device *dev) 105 int i915_mutex_lock_interruptible(struct drm_device *dev)
106 { 106 {
107 int ret; 107 int ret;
108 108
109 ret = i915_gem_wait_for_error(dev); 109 ret = i915_gem_wait_for_error(dev);
110 if (ret) 110 if (ret)
111 return ret; 111 return ret;
112 112
113 ret = mutex_lock_interruptible(&dev->struct_mutex); 113 ret = mutex_lock_interruptible(&dev->struct_mutex);
114 if (ret) 114 if (ret)
115 return ret; 115 return ret;
116 116
117 WARN_ON(i915_verify_lists(dev)); 117 WARN_ON(i915_verify_lists(dev));
118 return 0; 118 return 0;
119 } 119 }
120 120
121 static inline bool 121 static inline bool
122 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) 122 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
123 { 123 {
124 return obj->gtt_space && !obj->active && obj->pin_count == 0; 124 return obj->gtt_space && !obj->active && obj->pin_count == 0;
125 } 125 }
126 126
127 void i915_gem_do_init(struct drm_device *dev, 127 void i915_gem_do_init(struct drm_device *dev,
128 unsigned long start, 128 unsigned long start,
129 unsigned long mappable_end, 129 unsigned long mappable_end,
130 unsigned long end) 130 unsigned long end)
131 { 131 {
132 drm_i915_private_t *dev_priv = dev->dev_private; 132 drm_i915_private_t *dev_priv = dev->dev_private;
133 133
134 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start); 134 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
135 135
136 dev_priv->mm.gtt_start = start; 136 dev_priv->mm.gtt_start = start;
137 dev_priv->mm.gtt_mappable_end = mappable_end; 137 dev_priv->mm.gtt_mappable_end = mappable_end;
138 dev_priv->mm.gtt_end = end; 138 dev_priv->mm.gtt_end = end;
139 dev_priv->mm.gtt_total = end - start; 139 dev_priv->mm.gtt_total = end - start;
140 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; 140 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
141 141
142 /* Take over this portion of the GTT */ 142 /* Take over this portion of the GTT */
143 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); 143 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
144 } 144 }
145 145
146 int 146 int
147 i915_gem_init_ioctl(struct drm_device *dev, void *data, 147 i915_gem_init_ioctl(struct drm_device *dev, void *data,
148 struct drm_file *file) 148 struct drm_file *file)
149 { 149 {
150 struct drm_i915_gem_init *args = data; 150 struct drm_i915_gem_init *args = data;
151 151
152 if (args->gtt_start >= args->gtt_end || 152 if (args->gtt_start >= args->gtt_end ||
153 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) 153 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
154 return -EINVAL; 154 return -EINVAL;
155 155
156 mutex_lock(&dev->struct_mutex); 156 mutex_lock(&dev->struct_mutex);
157 i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end); 157 i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
158 mutex_unlock(&dev->struct_mutex); 158 mutex_unlock(&dev->struct_mutex);
159 159
160 return 0; 160 return 0;
161 } 161 }
162 162
163 int 163 int
164 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 164 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
165 struct drm_file *file) 165 struct drm_file *file)
166 { 166 {
167 struct drm_i915_private *dev_priv = dev->dev_private; 167 struct drm_i915_private *dev_priv = dev->dev_private;
168 struct drm_i915_gem_get_aperture *args = data; 168 struct drm_i915_gem_get_aperture *args = data;
169 struct drm_i915_gem_object *obj; 169 struct drm_i915_gem_object *obj;
170 size_t pinned; 170 size_t pinned;
171 171
172 if (!(dev->driver->driver_features & DRIVER_GEM)) 172 if (!(dev->driver->driver_features & DRIVER_GEM))
173 return -ENODEV; 173 return -ENODEV;
174 174
175 pinned = 0; 175 pinned = 0;
176 mutex_lock(&dev->struct_mutex); 176 mutex_lock(&dev->struct_mutex);
177 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) 177 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
178 pinned += obj->gtt_space->size; 178 pinned += obj->gtt_space->size;
179 mutex_unlock(&dev->struct_mutex); 179 mutex_unlock(&dev->struct_mutex);
180 180
181 args->aper_size = dev_priv->mm.gtt_total; 181 args->aper_size = dev_priv->mm.gtt_total;
182 args->aper_available_size = args->aper_size - pinned; 182 args->aper_available_size = args->aper_size - pinned;
183 183
184 return 0; 184 return 0;
185 } 185 }
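i915_gem_get_aperture_ioctl() is reached from userspace via DRM_IOCTL_I915_GEM_GET_APERTURE and reports the managed GTT size and what is left after pinned objects. Not part of the diff; a minimal sketch, assuming the kernel uapi/libdrm headers are available and using the hypothetical node /dev/dri/card0:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>   /* kernel uapi / libdrm header */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0)
		return 1;

	struct drm_i915_gem_get_aperture ap = { 0 };
	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &ap) == 0)
		printf("aperture: %llu bytes total, %llu available\n",
		       (unsigned long long)ap.aper_size,
		       (unsigned long long)ap.aper_available_size);
	return 0;
}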
186 186
187 static int 187 static int
188 i915_gem_create(struct drm_file *file, 188 i915_gem_create(struct drm_file *file,
189 struct drm_device *dev, 189 struct drm_device *dev,
190 uint64_t size, 190 uint64_t size,
191 uint32_t *handle_p) 191 uint32_t *handle_p)
192 { 192 {
193 struct drm_i915_gem_object *obj; 193 struct drm_i915_gem_object *obj;
194 int ret; 194 int ret;
195 u32 handle; 195 u32 handle;
196 196
197 size = roundup(size, PAGE_SIZE); 197 size = roundup(size, PAGE_SIZE);
198 if (size == 0) 198 if (size == 0)
199 return -EINVAL; 199 return -EINVAL;
200 200
201 /* Allocate the new object */ 201 /* Allocate the new object */
202 obj = i915_gem_alloc_object(dev, size); 202 obj = i915_gem_alloc_object(dev, size);
203 if (obj == NULL) 203 if (obj == NULL)
204 return -ENOMEM; 204 return -ENOMEM;
205 205
206 ret = drm_gem_handle_create(file, &obj->base, &handle); 206 ret = drm_gem_handle_create(file, &obj->base, &handle);
207 if (ret) { 207 if (ret) {
208 drm_gem_object_release(&obj->base); 208 drm_gem_object_release(&obj->base);
209 i915_gem_info_remove_obj(dev->dev_private, obj->base.size); 209 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
210 kfree(obj); 210 kfree(obj);
211 return ret; 211 return ret;
212 } 212 }
213 213
214 /* drop reference from allocate - handle holds it now */ 214 /* drop reference from allocate - handle holds it now */
215 drm_gem_object_unreference(&obj->base); 215 drm_gem_object_unreference(&obj->base);
216 trace_i915_gem_object_create(obj); 216 trace_i915_gem_object_create(obj);
217 217
218 *handle_p = handle; 218 *handle_p = handle;
219 return 0; 219 return 0;
220 } 220 }
221 221
222 int 222 int
223 i915_gem_dumb_create(struct drm_file *file, 223 i915_gem_dumb_create(struct drm_file *file,
224 struct drm_device *dev, 224 struct drm_device *dev,
225 struct drm_mode_create_dumb *args) 225 struct drm_mode_create_dumb *args)
226 { 226 {
227 /* have to work out size/pitch and return them */ 227 /* have to work out size/pitch and return them */
228 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64); 228 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
229 args->size = args->pitch * args->height; 229 args->size = args->pitch * args->height;
230 return i915_gem_create(file, dev, 230 return i915_gem_create(file, dev,
231 args->size, &args->handle); 231 args->size, &args->handle);
232 } 232 }
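i915_gem_dumb_create() above derives the scanout pitch by rounding width times bytes-per-pixel up to 64 bytes and then sizes the object as pitch times height. Not part of the diff; a quick standalone check of that arithmetic with example values (1024x768 at 32 bpp gives a 4096-byte pitch and a 3145728-byte object):

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

int main(void)
{
	uint32_t width = 1024, height = 768, bpp = 32;      /* example values */
	uint32_t pitch = ALIGN_UP(width * ((bpp + 7) / 8), 64);
	uint64_t size  = (uint64_t)pitch * height;

	printf("pitch = %u bytes, size = %llu bytes\n",
	       pitch, (unsigned long long)size);            /* 4096, 3145728 */
	return 0;
}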
233 233
234 int i915_gem_dumb_destroy(struct drm_file *file, 234 int i915_gem_dumb_destroy(struct drm_file *file,
235 struct drm_device *dev, 235 struct drm_device *dev,
236 uint32_t handle) 236 uint32_t handle)
237 { 237 {
238 return drm_gem_handle_delete(file, handle); 238 return drm_gem_handle_delete(file, handle);
239 } 239 }
240 240
241 /** 241 /**
242 * Creates a new mm object and returns a handle to it. 242 * Creates a new mm object and returns a handle to it.
243 */ 243 */
244 int 244 int
245 i915_gem_create_ioctl(struct drm_device *dev, void *data, 245 i915_gem_create_ioctl(struct drm_device *dev, void *data,
246 struct drm_file *file) 246 struct drm_file *file)
247 { 247 {
248 struct drm_i915_gem_create *args = data; 248 struct drm_i915_gem_create *args = data;
249 return i915_gem_create(file, dev, 249 return i915_gem_create(file, dev,
250 args->size, &args->handle); 250 args->size, &args->handle);
251 } 251 }
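i915_gem_create_ioctl() is the DRM_IOCTL_I915_GEM_CREATE entry point; the requested size is rounded up to a whole page and a GEM handle is returned. Not part of the diff; a minimal userspace sketch (hypothetical device path, error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0)
		return 1;

	struct drm_i915_gem_create create;
	memset(&create, 0, sizeof(create));
	create.size = 4000;                       /* will be rounded up to 4096 */

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
		printf("new GEM handle %u\n", create.handle);
	return 0;
}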
252 252
253 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 253 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
254 { 254 {
255 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 255 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
256 256
257 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 257 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
258 obj->tiling_mode != I915_TILING_NONE; 258 obj->tiling_mode != I915_TILING_NONE;
259 } 259 }
260 260
261 static inline void 261 static inline void
262 slow_shmem_copy(struct page *dst_page, 262 slow_shmem_copy(struct page *dst_page,
263 int dst_offset, 263 int dst_offset,
264 struct page *src_page, 264 struct page *src_page,
265 int src_offset, 265 int src_offset,
266 int length) 266 int length)
267 { 267 {
268 char *dst_vaddr, *src_vaddr; 268 char *dst_vaddr, *src_vaddr;
269 269
270 dst_vaddr = kmap(dst_page); 270 dst_vaddr = kmap(dst_page);
271 src_vaddr = kmap(src_page); 271 src_vaddr = kmap(src_page);
272 272
273 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); 273 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
274 274
275 kunmap(src_page); 275 kunmap(src_page);
276 kunmap(dst_page); 276 kunmap(dst_page);
277 } 277 }
278 278
279 static inline void 279 static inline void
280 slow_shmem_bit17_copy(struct page *gpu_page, 280 slow_shmem_bit17_copy(struct page *gpu_page,
281 int gpu_offset, 281 int gpu_offset,
282 struct page *cpu_page, 282 struct page *cpu_page,
283 int cpu_offset, 283 int cpu_offset,
284 int length, 284 int length,
285 int is_read) 285 int is_read)
286 { 286 {
287 char *gpu_vaddr, *cpu_vaddr; 287 char *gpu_vaddr, *cpu_vaddr;
288 288
289 /* Use the unswizzled path if this page isn't affected. */ 289 /* Use the unswizzled path if this page isn't affected. */
290 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) { 290 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
291 if (is_read) 291 if (is_read)
292 return slow_shmem_copy(cpu_page, cpu_offset, 292 return slow_shmem_copy(cpu_page, cpu_offset,
293 gpu_page, gpu_offset, length); 293 gpu_page, gpu_offset, length);
294 else 294 else
295 return slow_shmem_copy(gpu_page, gpu_offset, 295 return slow_shmem_copy(gpu_page, gpu_offset,
296 cpu_page, cpu_offset, length); 296 cpu_page, cpu_offset, length);
297 } 297 }
298 298
299 gpu_vaddr = kmap(gpu_page); 299 gpu_vaddr = kmap(gpu_page);
300 cpu_vaddr = kmap(cpu_page); 300 cpu_vaddr = kmap(cpu_page);
301 301
302 /* Copy the data, XORing A6 with A17 (1). The user already knows he's 302 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
303 * XORing with the other bits (A9 for Y, A9 and A10 for X) 303 * XORing with the other bits (A9 for Y, A9 and A10 for X)
304 */ 304 */
305 while (length > 0) { 305 while (length > 0) {
306 int cacheline_end = ALIGN(gpu_offset + 1, 64); 306 int cacheline_end = ALIGN(gpu_offset + 1, 64);
307 int this_length = min(cacheline_end - gpu_offset, length); 307 int this_length = min(cacheline_end - gpu_offset, length);
308 int swizzled_gpu_offset = gpu_offset ^ 64; 308 int swizzled_gpu_offset = gpu_offset ^ 64;
309 309
310 if (is_read) { 310 if (is_read) {
311 memcpy(cpu_vaddr + cpu_offset, 311 memcpy(cpu_vaddr + cpu_offset,
312 gpu_vaddr + swizzled_gpu_offset, 312 gpu_vaddr + swizzled_gpu_offset,
313 this_length); 313 this_length);
314 } else { 314 } else {
315 memcpy(gpu_vaddr + swizzled_gpu_offset, 315 memcpy(gpu_vaddr + swizzled_gpu_offset,
316 cpu_vaddr + cpu_offset, 316 cpu_vaddr + cpu_offset,
317 this_length); 317 this_length);
318 } 318 }
319 cpu_offset += this_length; 319 cpu_offset += this_length;
320 gpu_offset += this_length; 320 gpu_offset += this_length;
321 length -= this_length; 321 length -= this_length;
322 } 322 }
323 323
324 kunmap(cpu_page); 324 kunmap(cpu_page);
325 kunmap(gpu_page); 325 kunmap(gpu_page);
326 } 326 }
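slow_shmem_bit17_copy() above only swizzles pages whose physical address has bit 17 set, and for those it XORs the GPU-side offset with 64, i.e. it swaps adjacent 64-byte cachelines while the CPU-side offset advances linearly. Not part of the diff; a tiny standalone demonstration of that address arithmetic (the example physical address is invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t page_phys = 0x20000;              /* example: bit 17 set */
	int swizzle = (page_phys & (1ull << 17)) != 0;

	for (int gpu_offset = 0; gpu_offset < 256; gpu_offset += 64) {
		int src = swizzle ? (gpu_offset ^ 64) : gpu_offset;
		printf("cacheline at %3d read from offset %3d\n", gpu_offset, src);
	}
	return 0;
}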
327 327
328 /** 328 /**
329 * This is the fast shmem pread path, which attempts to copy_from_user directly 329 * This is the fast shmem pread path, which attempts to copy_from_user directly
330 * from the backing pages of the object to the user's address space. On a 330 * from the backing pages of the object to the user's address space. On a
331 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow(). 331 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
332 */ 332 */
333 static int 333 static int
334 i915_gem_shmem_pread_fast(struct drm_device *dev, 334 i915_gem_shmem_pread_fast(struct drm_device *dev,
335 struct drm_i915_gem_object *obj, 335 struct drm_i915_gem_object *obj,
336 struct drm_i915_gem_pread *args, 336 struct drm_i915_gem_pread *args,
337 struct drm_file *file) 337 struct drm_file *file)
338 { 338 {
339 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 339 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
340 ssize_t remain; 340 ssize_t remain;
341 loff_t offset; 341 loff_t offset;
342 char __user *user_data; 342 char __user *user_data;
343 int page_offset, page_length; 343 int page_offset, page_length;
344 344
345 user_data = (char __user *) (uintptr_t) args->data_ptr; 345 user_data = (char __user *) (uintptr_t) args->data_ptr;
346 remain = args->size; 346 remain = args->size;
347 347
348 offset = args->offset; 348 offset = args->offset;
349 349
350 while (remain > 0) { 350 while (remain > 0) {
351 struct page *page; 351 struct page *page;
352 char *vaddr; 352 char *vaddr;
353 int ret; 353 int ret;
354 354
355 /* Operation in this page 355 /* Operation in this page
356 * 356 *
357 * page_offset = offset within page 357 * page_offset = offset within page
358 * page_length = bytes to copy for this page 358 * page_length = bytes to copy for this page
359 */ 359 */
360 page_offset = offset_in_page(offset); 360 page_offset = offset_in_page(offset);
361 page_length = remain; 361 page_length = remain;
362 if ((page_offset + remain) > PAGE_SIZE) 362 if ((page_offset + remain) > PAGE_SIZE)
363 page_length = PAGE_SIZE - page_offset; 363 page_length = PAGE_SIZE - page_offset;
364 364
365 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); 365 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
366 if (IS_ERR(page)) 366 if (IS_ERR(page))
367 return PTR_ERR(page); 367 return PTR_ERR(page);
368 368
369 vaddr = kmap_atomic(page); 369 vaddr = kmap_atomic(page);
370 ret = __copy_to_user_inatomic(user_data, 370 ret = __copy_to_user_inatomic(user_data,
371 vaddr + page_offset, 371 vaddr + page_offset,
372 page_length); 372 page_length);
373 kunmap_atomic(vaddr); 373 kunmap_atomic(vaddr);
374 374
375 mark_page_accessed(page); 375 mark_page_accessed(page);
376 page_cache_release(page); 376 page_cache_release(page);
377 if (ret) 377 if (ret)
378 return -EFAULT; 378 return -EFAULT;
379 379
380 remain -= page_length; 380 remain -= page_length;
381 user_data += page_length; 381 user_data += page_length;
382 offset += page_length; 382 offset += page_length;
383 } 383 }
384 384
385 return 0; 385 return 0;
386 } 386 }
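Both pread paths (and the pwrite paths later in the file) walk the request one page at a time: page_offset is offset_in_page(offset) and page_length is clamped so a single copy never crosses a page boundary. Not part of the diff; the same chunking loop pulled out into a standalone sketch with example values:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

int main(void)
{
	uint64_t offset = 4090;       /* example: starts near the end of a page */
	uint64_t remain = 9000;       /* example request size in bytes */

	while (remain > 0) {
		unsigned page_offset = offset & (PAGE_SIZE - 1);  /* offset_in_page() */
		uint64_t page_length = remain;

		if (page_offset + page_length > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		printf("copy %4llu bytes at page %llu + %u\n",
		       (unsigned long long)page_length,
		       (unsigned long long)(offset / PAGE_SIZE), page_offset);

		remain -= page_length;
		offset += page_length;
	}
	return 0;
}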
387 387
388 /** 388 /**
389 * This is the fallback shmem pread path, which allocates temporary storage 389 * This is the fallback shmem pread path, which allocates temporary storage
390 * in kernel space to copy_to_user into outside of the struct_mutex, so we 390 * in kernel space to copy_to_user into outside of the struct_mutex, so we
391 * can copy out of the object's backing pages while holding the struct mutex 391 * can copy out of the object's backing pages while holding the struct mutex
392 * and not take page faults. 392 * and not take page faults.
393 */ 393 */
394 static int 394 static int
395 i915_gem_shmem_pread_slow(struct drm_device *dev, 395 i915_gem_shmem_pread_slow(struct drm_device *dev,
396 struct drm_i915_gem_object *obj, 396 struct drm_i915_gem_object *obj,
397 struct drm_i915_gem_pread *args, 397 struct drm_i915_gem_pread *args,
398 struct drm_file *file) 398 struct drm_file *file)
399 { 399 {
400 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 400 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
401 struct mm_struct *mm = current->mm; 401 struct mm_struct *mm = current->mm;
402 struct page **user_pages; 402 struct page **user_pages;
403 ssize_t remain; 403 ssize_t remain;
404 loff_t offset, pinned_pages, i; 404 loff_t offset, pinned_pages, i;
405 loff_t first_data_page, last_data_page, num_pages; 405 loff_t first_data_page, last_data_page, num_pages;
406 int shmem_page_offset; 406 int shmem_page_offset;
407 int data_page_index, data_page_offset; 407 int data_page_index, data_page_offset;
408 int page_length; 408 int page_length;
409 int ret; 409 int ret;
410 uint64_t data_ptr = args->data_ptr; 410 uint64_t data_ptr = args->data_ptr;
411 int do_bit17_swizzling; 411 int do_bit17_swizzling;
412 412
413 remain = args->size; 413 remain = args->size;
414 414
415 /* Pin the user pages containing the data. We can't fault while 415 /* Pin the user pages containing the data. We can't fault while
416 * holding the struct mutex, yet we want to hold it while 416 * holding the struct mutex, yet we want to hold it while
417 * dereferencing the user data. 417 * dereferencing the user data.
418 */ 418 */
419 first_data_page = data_ptr / PAGE_SIZE; 419 first_data_page = data_ptr / PAGE_SIZE;
420 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; 420 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
421 num_pages = last_data_page - first_data_page + 1; 421 num_pages = last_data_page - first_data_page + 1;
422 422
423 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); 423 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
424 if (user_pages == NULL) 424 if (user_pages == NULL)
425 return -ENOMEM; 425 return -ENOMEM;
426 426
427 mutex_unlock(&dev->struct_mutex); 427 mutex_unlock(&dev->struct_mutex);
428 down_read(&mm->mmap_sem); 428 down_read(&mm->mmap_sem);
429 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, 429 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
430 num_pages, 1, 0, user_pages, NULL); 430 num_pages, 1, 0, user_pages, NULL);
431 up_read(&mm->mmap_sem); 431 up_read(&mm->mmap_sem);
432 mutex_lock(&dev->struct_mutex); 432 mutex_lock(&dev->struct_mutex);
433 if (pinned_pages < num_pages) { 433 if (pinned_pages < num_pages) {
434 ret = -EFAULT; 434 ret = -EFAULT;
435 goto out; 435 goto out;
436 } 436 }
437 437
438 ret = i915_gem_object_set_cpu_read_domain_range(obj, 438 ret = i915_gem_object_set_cpu_read_domain_range(obj,
439 args->offset, 439 args->offset,
440 args->size); 440 args->size);
441 if (ret) 441 if (ret)
442 goto out; 442 goto out;
443 443
444 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 444 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
445 445
446 offset = args->offset; 446 offset = args->offset;
447 447
448 while (remain > 0) { 448 while (remain > 0) {
449 struct page *page; 449 struct page *page;
450 450
451 /* Operation in this page 451 /* Operation in this page
452 * 452 *
453 * shmem_page_offset = offset within page in shmem file 453 * shmem_page_offset = offset within page in shmem file
454 * data_page_index = page number in get_user_pages return 454 * data_page_index = page number in get_user_pages return
455 * data_page_offset = offset within data_page_index page. 455 * data_page_offset = offset within data_page_index page.
456 * page_length = bytes to copy for this page 456 * page_length = bytes to copy for this page
457 */ 457 */
458 shmem_page_offset = offset_in_page(offset); 458 shmem_page_offset = offset_in_page(offset);
459 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 459 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
460 data_page_offset = offset_in_page(data_ptr); 460 data_page_offset = offset_in_page(data_ptr);
461 461
462 page_length = remain; 462 page_length = remain;
463 if ((shmem_page_offset + page_length) > PAGE_SIZE) 463 if ((shmem_page_offset + page_length) > PAGE_SIZE)
464 page_length = PAGE_SIZE - shmem_page_offset; 464 page_length = PAGE_SIZE - shmem_page_offset;
465 if ((data_page_offset + page_length) > PAGE_SIZE) 465 if ((data_page_offset + page_length) > PAGE_SIZE)
466 page_length = PAGE_SIZE - data_page_offset; 466 page_length = PAGE_SIZE - data_page_offset;
467 467
468 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); 468 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
469 if (IS_ERR(page)) { 469 if (IS_ERR(page)) {
470 ret = PTR_ERR(page); 470 ret = PTR_ERR(page);
471 goto out; 471 goto out;
472 } 472 }
473 473
474 if (do_bit17_swizzling) { 474 if (do_bit17_swizzling) {
475 slow_shmem_bit17_copy(page, 475 slow_shmem_bit17_copy(page,
476 shmem_page_offset, 476 shmem_page_offset,
477 user_pages[data_page_index], 477 user_pages[data_page_index],
478 data_page_offset, 478 data_page_offset,
479 page_length, 479 page_length,
480 1); 480 1);
481 } else { 481 } else {
482 slow_shmem_copy(user_pages[data_page_index], 482 slow_shmem_copy(user_pages[data_page_index],
483 data_page_offset, 483 data_page_offset,
484 page, 484 page,
485 shmem_page_offset, 485 shmem_page_offset,
486 page_length); 486 page_length);
487 } 487 }
488 488
489 mark_page_accessed(page); 489 mark_page_accessed(page);
490 page_cache_release(page); 490 page_cache_release(page);
491 491
492 remain -= page_length; 492 remain -= page_length;
493 data_ptr += page_length; 493 data_ptr += page_length;
494 offset += page_length; 494 offset += page_length;
495 } 495 }
496 496
497 out: 497 out:
498 for (i = 0; i < pinned_pages; i++) { 498 for (i = 0; i < pinned_pages; i++) {
499 SetPageDirty(user_pages[i]); 499 SetPageDirty(user_pages[i]);
500 mark_page_accessed(user_pages[i]); 500 mark_page_accessed(user_pages[i]);
501 page_cache_release(user_pages[i]); 501 page_cache_release(user_pages[i]);
502 } 502 }
503 drm_free_large(user_pages); 503 drm_free_large(user_pages);
504 504
505 return ret; 505 return ret;
506 } 506 }
507 507
508 /** 508 /**
509 * Reads data from the object referenced by handle. 509 * Reads data from the object referenced by handle.
510 * 510 *
511 * On error, the contents of *data are undefined. 511 * On error, the contents of *data are undefined.
512 */ 512 */
513 int 513 int
514 i915_gem_pread_ioctl(struct drm_device *dev, void *data, 514 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
515 struct drm_file *file) 515 struct drm_file *file)
516 { 516 {
517 struct drm_i915_gem_pread *args = data; 517 struct drm_i915_gem_pread *args = data;
518 struct drm_i915_gem_object *obj; 518 struct drm_i915_gem_object *obj;
519 int ret = 0; 519 int ret = 0;
520 520
521 if (args->size == 0) 521 if (args->size == 0)
522 return 0; 522 return 0;
523 523
524 if (!access_ok(VERIFY_WRITE, 524 if (!access_ok(VERIFY_WRITE,
525 (char __user *)(uintptr_t)args->data_ptr, 525 (char __user *)(uintptr_t)args->data_ptr,
526 args->size)) 526 args->size))
527 return -EFAULT; 527 return -EFAULT;
528 528
529 ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr, 529 ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
530 args->size); 530 args->size);
531 if (ret) 531 if (ret)
532 return -EFAULT; 532 return -EFAULT;
533 533
534 ret = i915_mutex_lock_interruptible(dev); 534 ret = i915_mutex_lock_interruptible(dev);
535 if (ret) 535 if (ret)
536 return ret; 536 return ret;
537 537
538 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 538 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
539 if (&obj->base == NULL) { 539 if (&obj->base == NULL) {
540 ret = -ENOENT; 540 ret = -ENOENT;
541 goto unlock; 541 goto unlock;
542 } 542 }
543 543
544 /* Bounds check source. */ 544 /* Bounds check source. */
545 if (args->offset > obj->base.size || 545 if (args->offset > obj->base.size ||
546 args->size > obj->base.size - args->offset) { 546 args->size > obj->base.size - args->offset) {
547 ret = -EINVAL; 547 ret = -EINVAL;
548 goto out; 548 goto out;
549 } 549 }
550 550
551 trace_i915_gem_object_pread(obj, args->offset, args->size); 551 trace_i915_gem_object_pread(obj, args->offset, args->size);
552 552
553 ret = i915_gem_object_set_cpu_read_domain_range(obj, 553 ret = i915_gem_object_set_cpu_read_domain_range(obj,
554 args->offset, 554 args->offset,
555 args->size); 555 args->size);
556 if (ret) 556 if (ret)
557 goto out; 557 goto out;
558 558
559 ret = -EFAULT; 559 ret = -EFAULT;
560 if (!i915_gem_object_needs_bit17_swizzle(obj)) 560 if (!i915_gem_object_needs_bit17_swizzle(obj))
561 ret = i915_gem_shmem_pread_fast(dev, obj, args, file); 561 ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
562 if (ret == -EFAULT) 562 if (ret == -EFAULT)
563 ret = i915_gem_shmem_pread_slow(dev, obj, args, file); 563 ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
564 564
565 out: 565 out:
566 drm_gem_object_unreference(&obj->base); 566 drm_gem_object_unreference(&obj->base);
567 unlock: 567 unlock:
568 mutex_unlock(&dev->struct_mutex); 568 mutex_unlock(&dev->struct_mutex);
569 return ret; 569 return ret;
570 } 570 }
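i915_gem_pread_ioctl() first tries the fast kmap_atomic path and only falls back to the page-pinning slow path on -EFAULT or when bit-17 swizzling is needed. From userspace it is simply DRM_IOCTL_I915_GEM_PREAD; not part of the diff, a helper sketch under the same assumptions as the create example (fd is an open DRM node, handle a GEM handle):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Read 'len' bytes at 'offset' from a GEM object into 'buf'; a sketch,
 * error handling kept minimal. */
static int gem_pread(int fd, uint32_t handle, uint64_t offset,
		     void *buf, uint64_t len)
{
	struct drm_i915_gem_pread pread;

	memset(&pread, 0, sizeof(pread));
	pread.handle   = handle;
	pread.offset   = offset;
	pread.size     = len;
	pread.data_ptr = (uintptr_t)buf;

	return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
}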
571 571
572 /* This is the fast write path which cannot handle 572 /* This is the fast write path which cannot handle
573 * page faults in the source data 573 * page faults in the source data
574 */ 574 */
575 575
576 static inline int 576 static inline int
577 fast_user_write(struct io_mapping *mapping, 577 fast_user_write(struct io_mapping *mapping,
578 loff_t page_base, int page_offset, 578 loff_t page_base, int page_offset,
579 char __user *user_data, 579 char __user *user_data,
580 int length) 580 int length)
581 { 581 {
582 char *vaddr_atomic; 582 char *vaddr_atomic;
583 unsigned long unwritten; 583 unsigned long unwritten;
584 584
585 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); 585 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
586 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, 586 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
587 user_data, length); 587 user_data, length);
588 io_mapping_unmap_atomic(vaddr_atomic); 588 io_mapping_unmap_atomic(vaddr_atomic);
589 return unwritten; 589 return unwritten;
590 } 590 }
591 591
592 /* Here's the write path which can sleep for 592 /* Here's the write path which can sleep for
593 * page faults 593 * page faults
594 */ 594 */
595 595
596 static inline void 596 static inline void
597 slow_kernel_write(struct io_mapping *mapping, 597 slow_kernel_write(struct io_mapping *mapping,
598 loff_t gtt_base, int gtt_offset, 598 loff_t gtt_base, int gtt_offset,
599 struct page *user_page, int user_offset, 599 struct page *user_page, int user_offset,
600 int length) 600 int length)
601 { 601 {
602 char __iomem *dst_vaddr; 602 char __iomem *dst_vaddr;
603 char *src_vaddr; 603 char *src_vaddr;
604 604
605 dst_vaddr = io_mapping_map_wc(mapping, gtt_base); 605 dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
606 src_vaddr = kmap(user_page); 606 src_vaddr = kmap(user_page);
607 607
608 memcpy_toio(dst_vaddr + gtt_offset, 608 memcpy_toio(dst_vaddr + gtt_offset,
609 src_vaddr + user_offset, 609 src_vaddr + user_offset,
610 length); 610 length);
611 611
612 kunmap(user_page); 612 kunmap(user_page);
613 io_mapping_unmap(dst_vaddr); 613 io_mapping_unmap(dst_vaddr);
614 } 614 }
615 615
616 /** 616 /**
617 * This is the fast pwrite path, where we copy the data directly from the 617 * This is the fast pwrite path, where we copy the data directly from the
618 * user into the GTT, uncached. 618 * user into the GTT, uncached.
619 */ 619 */
620 static int 620 static int
621 i915_gem_gtt_pwrite_fast(struct drm_device *dev, 621 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
622 struct drm_i915_gem_object *obj, 622 struct drm_i915_gem_object *obj,
623 struct drm_i915_gem_pwrite *args, 623 struct drm_i915_gem_pwrite *args,
624 struct drm_file *file) 624 struct drm_file *file)
625 { 625 {
626 drm_i915_private_t *dev_priv = dev->dev_private; 626 drm_i915_private_t *dev_priv = dev->dev_private;
627 ssize_t remain; 627 ssize_t remain;
628 loff_t offset, page_base; 628 loff_t offset, page_base;
629 char __user *user_data; 629 char __user *user_data;
630 int page_offset, page_length; 630 int page_offset, page_length;
631 631
632 user_data = (char __user *) (uintptr_t) args->data_ptr; 632 user_data = (char __user *) (uintptr_t) args->data_ptr;
633 remain = args->size; 633 remain = args->size;
634 634
635 offset = obj->gtt_offset + args->offset; 635 offset = obj->gtt_offset + args->offset;
636 636
637 while (remain > 0) { 637 while (remain > 0) {
638 /* Operation in this page 638 /* Operation in this page
639 * 639 *
640 * page_base = page offset within aperture 640 * page_base = page offset within aperture
641 * page_offset = offset within page 641 * page_offset = offset within page
642 * page_length = bytes to copy for this page 642 * page_length = bytes to copy for this page
643 */ 643 */
644 page_base = offset & PAGE_MASK; 644 page_base = offset & PAGE_MASK;
645 page_offset = offset_in_page(offset); 645 page_offset = offset_in_page(offset);
646 page_length = remain; 646 page_length = remain;
647 if ((page_offset + remain) > PAGE_SIZE) 647 if ((page_offset + remain) > PAGE_SIZE)
648 page_length = PAGE_SIZE - page_offset; 648 page_length = PAGE_SIZE - page_offset;
649 649
650 /* If we get a fault while copying data, then (presumably) our 650 /* If we get a fault while copying data, then (presumably) our
651 * source page isn't available. Return the error and we'll 651 * source page isn't available. Return the error and we'll
652 * retry in the slow path. 652 * retry in the slow path.
653 */ 653 */
654 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, 654 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
655 page_offset, user_data, page_length)) 655 page_offset, user_data, page_length))
656 return -EFAULT; 656 return -EFAULT;
657 657
658 remain -= page_length; 658 remain -= page_length;
659 user_data += page_length; 659 user_data += page_length;
660 offset += page_length; 660 offset += page_length;
661 } 661 }
662 662
663 return 0; 663 return 0;
664 } 664 }
665 665
666 /** 666 /**
667 * This is the fallback GTT pwrite path, which uses get_user_pages to pin 667 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
668 * the memory and maps it using kmap_atomic for copying. 668 * the memory and maps it using kmap_atomic for copying.
669 * 669 *
670 * This code resulted in x11perf -rgb10text consuming about 10% more CPU 670 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
671 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). 671 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
672 */ 672 */
673 static int 673 static int
674 i915_gem_gtt_pwrite_slow(struct drm_device *dev, 674 i915_gem_gtt_pwrite_slow(struct drm_device *dev,
675 struct drm_i915_gem_object *obj, 675 struct drm_i915_gem_object *obj,
676 struct drm_i915_gem_pwrite *args, 676 struct drm_i915_gem_pwrite *args,
677 struct drm_file *file) 677 struct drm_file *file)
678 { 678 {
679 drm_i915_private_t *dev_priv = dev->dev_private; 679 drm_i915_private_t *dev_priv = dev->dev_private;
680 ssize_t remain; 680 ssize_t remain;
681 loff_t gtt_page_base, offset; 681 loff_t gtt_page_base, offset;
682 loff_t first_data_page, last_data_page, num_pages; 682 loff_t first_data_page, last_data_page, num_pages;
683 loff_t pinned_pages, i; 683 loff_t pinned_pages, i;
684 struct page **user_pages; 684 struct page **user_pages;
685 struct mm_struct *mm = current->mm; 685 struct mm_struct *mm = current->mm;
686 int gtt_page_offset, data_page_offset, data_page_index, page_length; 686 int gtt_page_offset, data_page_offset, data_page_index, page_length;
687 int ret; 687 int ret;
688 uint64_t data_ptr = args->data_ptr; 688 uint64_t data_ptr = args->data_ptr;
689 689
690 remain = args->size; 690 remain = args->size;
691 691
692 /* Pin the user pages containing the data. We can't fault while 692 /* Pin the user pages containing the data. We can't fault while
693 * holding the struct mutex, and all of the pwrite implementations 693 * holding the struct mutex, and all of the pwrite implementations
694 * want to hold it while dereferencing the user data. 694 * want to hold it while dereferencing the user data.
695 */ 695 */
696 first_data_page = data_ptr / PAGE_SIZE; 696 first_data_page = data_ptr / PAGE_SIZE;
697 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; 697 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
698 num_pages = last_data_page - first_data_page + 1; 698 num_pages = last_data_page - first_data_page + 1;
699 699
700 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); 700 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
701 if (user_pages == NULL) 701 if (user_pages == NULL)
702 return -ENOMEM; 702 return -ENOMEM;
703 703
704 mutex_unlock(&dev->struct_mutex); 704 mutex_unlock(&dev->struct_mutex);
705 down_read(&mm->mmap_sem); 705 down_read(&mm->mmap_sem);
706 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, 706 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
707 num_pages, 0, 0, user_pages, NULL); 707 num_pages, 0, 0, user_pages, NULL);
708 up_read(&mm->mmap_sem); 708 up_read(&mm->mmap_sem);
709 mutex_lock(&dev->struct_mutex); 709 mutex_lock(&dev->struct_mutex);
710 if (pinned_pages < num_pages) { 710 if (pinned_pages < num_pages) {
711 ret = -EFAULT; 711 ret = -EFAULT;
712 goto out_unpin_pages; 712 goto out_unpin_pages;
713 } 713 }
714 714
715 ret = i915_gem_object_set_to_gtt_domain(obj, true); 715 ret = i915_gem_object_set_to_gtt_domain(obj, true);
716 if (ret) 716 if (ret)
717 goto out_unpin_pages; 717 goto out_unpin_pages;
718 718
719 ret = i915_gem_object_put_fence(obj); 719 ret = i915_gem_object_put_fence(obj);
720 if (ret) 720 if (ret)
721 goto out_unpin_pages; 721 goto out_unpin_pages;
722 722
723 offset = obj->gtt_offset + args->offset; 723 offset = obj->gtt_offset + args->offset;
724 724
725 while (remain > 0) { 725 while (remain > 0) {
726 /* Operation in this page 726 /* Operation in this page
727 * 727 *
728 * gtt_page_base = page offset within aperture 728 * gtt_page_base = page offset within aperture
729 * gtt_page_offset = offset within page in aperture 729 * gtt_page_offset = offset within page in aperture
730 * data_page_index = page number in get_user_pages return 730 * data_page_index = page number in get_user_pages return
731 * data_page_offset = offset within data_page_index page. 731 * data_page_offset = offset within data_page_index page.
732 * page_length = bytes to copy for this page 732 * page_length = bytes to copy for this page
733 */ 733 */
734 gtt_page_base = offset & PAGE_MASK; 734 gtt_page_base = offset & PAGE_MASK;
735 gtt_page_offset = offset_in_page(offset); 735 gtt_page_offset = offset_in_page(offset);
736 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 736 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
737 data_page_offset = offset_in_page(data_ptr); 737 data_page_offset = offset_in_page(data_ptr);
738 738
739 page_length = remain; 739 page_length = remain;
740 if ((gtt_page_offset + page_length) > PAGE_SIZE) 740 if ((gtt_page_offset + page_length) > PAGE_SIZE)
741 page_length = PAGE_SIZE - gtt_page_offset; 741 page_length = PAGE_SIZE - gtt_page_offset;
742 if ((data_page_offset + page_length) > PAGE_SIZE) 742 if ((data_page_offset + page_length) > PAGE_SIZE)
743 page_length = PAGE_SIZE - data_page_offset; 743 page_length = PAGE_SIZE - data_page_offset;
744 744
745 slow_kernel_write(dev_priv->mm.gtt_mapping, 745 slow_kernel_write(dev_priv->mm.gtt_mapping,
746 gtt_page_base, gtt_page_offset, 746 gtt_page_base, gtt_page_offset,
747 user_pages[data_page_index], 747 user_pages[data_page_index],
748 data_page_offset, 748 data_page_offset,
749 page_length); 749 page_length);
750 750
751 remain -= page_length; 751 remain -= page_length;
752 offset += page_length; 752 offset += page_length;
753 data_ptr += page_length; 753 data_ptr += page_length;
754 } 754 }
755 755
756 out_unpin_pages: 756 out_unpin_pages:
757 for (i = 0; i < pinned_pages; i++) 757 for (i = 0; i < pinned_pages; i++)
758 page_cache_release(user_pages[i]); 758 page_cache_release(user_pages[i]);
759 drm_free_large(user_pages); 759 drm_free_large(user_pages);
760 760
761 return ret; 761 return ret;
762 } 762 }
763 763
764 /** 764 /**
765 * This is the fast shmem pwrite path, which attempts to directly 765 * This is the fast shmem pwrite path, which attempts to directly
766 * copy_from_user into the kmapped pages backing the object. 766 * copy_from_user into the kmapped pages backing the object.
767 */ 767 */
768 static int 768 static int
769 i915_gem_shmem_pwrite_fast(struct drm_device *dev, 769 i915_gem_shmem_pwrite_fast(struct drm_device *dev,
770 struct drm_i915_gem_object *obj, 770 struct drm_i915_gem_object *obj,
771 struct drm_i915_gem_pwrite *args, 771 struct drm_i915_gem_pwrite *args,
772 struct drm_file *file) 772 struct drm_file *file)
773 { 773 {
774 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 774 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
775 ssize_t remain; 775 ssize_t remain;
776 loff_t offset; 776 loff_t offset;
777 char __user *user_data; 777 char __user *user_data;
778 int page_offset, page_length; 778 int page_offset, page_length;
779 779
780 user_data = (char __user *) (uintptr_t) args->data_ptr; 780 user_data = (char __user *) (uintptr_t) args->data_ptr;
781 remain = args->size; 781 remain = args->size;
782 782
783 offset = args->offset; 783 offset = args->offset;
784 obj->dirty = 1; 784 obj->dirty = 1;
785 785
786 while (remain > 0) { 786 while (remain > 0) {
787 struct page *page; 787 struct page *page;
788 char *vaddr; 788 char *vaddr;
789 int ret; 789 int ret;
790 790
791 /* Operation in this page 791 /* Operation in this page
792 * 792 *
793 * page_offset = offset within page 793 * page_offset = offset within page
794 * page_length = bytes to copy for this page 794 * page_length = bytes to copy for this page
795 */ 795 */
796 page_offset = offset_in_page(offset); 796 page_offset = offset_in_page(offset);
797 page_length = remain; 797 page_length = remain;
798 if ((page_offset + remain) > PAGE_SIZE) 798 if ((page_offset + remain) > PAGE_SIZE)
799 page_length = PAGE_SIZE - page_offset; 799 page_length = PAGE_SIZE - page_offset;
800 800
801 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); 801 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
802 if (IS_ERR(page)) 802 if (IS_ERR(page))
803 return PTR_ERR(page); 803 return PTR_ERR(page);
804 804
805 vaddr = kmap_atomic(page); 805 vaddr = kmap_atomic(page);
806 ret = __copy_from_user_inatomic(vaddr + page_offset, 806 ret = __copy_from_user_inatomic(vaddr + page_offset,
807 user_data, 807 user_data,
808 page_length); 808 page_length);
809 kunmap_atomic(vaddr); 809 kunmap_atomic(vaddr);
810 810
811 set_page_dirty(page); 811 set_page_dirty(page);
812 mark_page_accessed(page); 812 mark_page_accessed(page);
813 page_cache_release(page); 813 page_cache_release(page);
814 814
815 /* If we get a fault while copying data, then (presumably) our 815 /* If we get a fault while copying data, then (presumably) our
816 * source page isn't available. Return the error and we'll 816 * source page isn't available. Return the error and we'll
817 * retry in the slow path. 817 * retry in the slow path.
818 */ 818 */
819 if (ret) 819 if (ret)
820 return -EFAULT; 820 return -EFAULT;
821 821
822 remain -= page_length; 822 remain -= page_length;
823 user_data += page_length; 823 user_data += page_length;
824 offset += page_length; 824 offset += page_length;
825 } 825 }
826 826
827 return 0; 827 return 0;
828 } 828 }
829 829
830 /** 830 /**
831 * This is the fallback shmem pwrite path, which uses get_user_pages to pin 831 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
832 * the memory and maps it using kmap_atomic for copying. 832 * the memory and maps it using kmap_atomic for copying.
833 * 833 *
834 * This avoids taking mmap_sem for faulting on the user's address while the 834 * This avoids taking mmap_sem for faulting on the user's address while the
835 * struct_mutex is held. 835 * struct_mutex is held.
836 */ 836 */
837 static int 837 static int
838 i915_gem_shmem_pwrite_slow(struct drm_device *dev, 838 i915_gem_shmem_pwrite_slow(struct drm_device *dev,
839 struct drm_i915_gem_object *obj, 839 struct drm_i915_gem_object *obj,
840 struct drm_i915_gem_pwrite *args, 840 struct drm_i915_gem_pwrite *args,
841 struct drm_file *file) 841 struct drm_file *file)
842 { 842 {
843 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 843 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
844 struct mm_struct *mm = current->mm; 844 struct mm_struct *mm = current->mm;
845 struct page **user_pages; 845 struct page **user_pages;
846 ssize_t remain; 846 ssize_t remain;
847 loff_t offset, pinned_pages, i; 847 loff_t offset, pinned_pages, i;
848 loff_t first_data_page, last_data_page, num_pages; 848 loff_t first_data_page, last_data_page, num_pages;
849 int shmem_page_offset; 849 int shmem_page_offset;
850 int data_page_index, data_page_offset; 850 int data_page_index, data_page_offset;
851 int page_length; 851 int page_length;
852 int ret; 852 int ret;
853 uint64_t data_ptr = args->data_ptr; 853 uint64_t data_ptr = args->data_ptr;
854 int do_bit17_swizzling; 854 int do_bit17_swizzling;
855 855
856 remain = args->size; 856 remain = args->size;
857 857
858 /* Pin the user pages containing the data. We can't fault while 858 /* Pin the user pages containing the data. We can't fault while
859 * holding the struct mutex, and all of the pwrite implementations 859 * holding the struct mutex, and all of the pwrite implementations
860 * want to hold it while dereferencing the user data. 860 * want to hold it while dereferencing the user data.
861 */ 861 */
862 first_data_page = data_ptr / PAGE_SIZE; 862 first_data_page = data_ptr / PAGE_SIZE;
863 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; 863 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
864 num_pages = last_data_page - first_data_page + 1; 864 num_pages = last_data_page - first_data_page + 1;
865 865
866 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); 866 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
867 if (user_pages == NULL) 867 if (user_pages == NULL)
868 return -ENOMEM; 868 return -ENOMEM;
869 869
870 mutex_unlock(&dev->struct_mutex); 870 mutex_unlock(&dev->struct_mutex);
871 down_read(&mm->mmap_sem); 871 down_read(&mm->mmap_sem);
872 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, 872 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
873 num_pages, 0, 0, user_pages, NULL); 873 num_pages, 0, 0, user_pages, NULL);
874 up_read(&mm->mmap_sem); 874 up_read(&mm->mmap_sem);
875 mutex_lock(&dev->struct_mutex); 875 mutex_lock(&dev->struct_mutex);
876 if (pinned_pages < num_pages) { 876 if (pinned_pages < num_pages) {
877 ret = -EFAULT; 877 ret = -EFAULT;
878 goto out; 878 goto out;
879 } 879 }
880 880
881 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 881 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
882 if (ret) 882 if (ret)
883 goto out; 883 goto out;
884 884
885 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 885 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
886 886
887 offset = args->offset; 887 offset = args->offset;
888 obj->dirty = 1; 888 obj->dirty = 1;
889 889
890 while (remain > 0) { 890 while (remain > 0) {
891 struct page *page; 891 struct page *page;
892 892
893 /* Operation in this page 893 /* Operation in this page
894 * 894 *
895 * shmem_page_offset = offset within page in shmem file 895 * shmem_page_offset = offset within page in shmem file
896 * data_page_index = page number in get_user_pages return 896 * data_page_index = page number in get_user_pages return
897 * data_page_offset = offset within data_page_index page. 897 * data_page_offset = offset within data_page_index page.
898 * page_length = bytes to copy for this page 898 * page_length = bytes to copy for this page
899 */ 899 */
900 shmem_page_offset = offset_in_page(offset); 900 shmem_page_offset = offset_in_page(offset);
901 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 901 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
902 data_page_offset = offset_in_page(data_ptr); 902 data_page_offset = offset_in_page(data_ptr);
903 903
904 page_length = remain; 904 page_length = remain;
905 if ((shmem_page_offset + page_length) > PAGE_SIZE) 905 if ((shmem_page_offset + page_length) > PAGE_SIZE)
906 page_length = PAGE_SIZE - shmem_page_offset; 906 page_length = PAGE_SIZE - shmem_page_offset;
907 if ((data_page_offset + page_length) > PAGE_SIZE) 907 if ((data_page_offset + page_length) > PAGE_SIZE)
908 page_length = PAGE_SIZE - data_page_offset; 908 page_length = PAGE_SIZE - data_page_offset;
909 909
910 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); 910 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
911 if (IS_ERR(page)) { 911 if (IS_ERR(page)) {
912 ret = PTR_ERR(page); 912 ret = PTR_ERR(page);
913 goto out; 913 goto out;
914 } 914 }
915 915
916 if (do_bit17_swizzling) { 916 if (do_bit17_swizzling) {
917 slow_shmem_bit17_copy(page, 917 slow_shmem_bit17_copy(page,
918 shmem_page_offset, 918 shmem_page_offset,
919 user_pages[data_page_index], 919 user_pages[data_page_index],
920 data_page_offset, 920 data_page_offset,
921 page_length, 921 page_length,
922 0); 922 0);
923 } else { 923 } else {
924 slow_shmem_copy(page, 924 slow_shmem_copy(page,
925 shmem_page_offset, 925 shmem_page_offset,
926 user_pages[data_page_index], 926 user_pages[data_page_index],
927 data_page_offset, 927 data_page_offset,
928 page_length); 928 page_length);
929 } 929 }
930 930
931 set_page_dirty(page); 931 set_page_dirty(page);
932 mark_page_accessed(page); 932 mark_page_accessed(page);
933 page_cache_release(page); 933 page_cache_release(page);
934 934
935 remain -= page_length; 935 remain -= page_length;
936 data_ptr += page_length; 936 data_ptr += page_length;
937 offset += page_length; 937 offset += page_length;
938 } 938 }
939 939
940 out: 940 out:
941 for (i = 0; i < pinned_pages; i++) 941 for (i = 0; i < pinned_pages; i++)
942 page_cache_release(user_pages[i]); 942 page_cache_release(user_pages[i]);
943 drm_free_large(user_pages); 943 drm_free_large(user_pages);
944 944
945 return ret; 945 return ret;
946 } 946 }
947 947
948 /** 948 /**
949 * Writes data to the object referenced by handle. 949 * Writes data to the object referenced by handle.
950 * 950 *
951 * On error, the contents of the buffer that were to be modified are undefined. 951 * On error, the contents of the buffer that were to be modified are undefined.
952 */ 952 */
953 int 953 int
954 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 954 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
955 struct drm_file *file) 955 struct drm_file *file)
956 { 956 {
957 struct drm_i915_gem_pwrite *args = data; 957 struct drm_i915_gem_pwrite *args = data;
958 struct drm_i915_gem_object *obj; 958 struct drm_i915_gem_object *obj;
959 int ret; 959 int ret;
960 960
961 if (args->size == 0) 961 if (args->size == 0)
962 return 0; 962 return 0;
963 963
964 if (!access_ok(VERIFY_READ, 964 if (!access_ok(VERIFY_READ,
965 (char __user *)(uintptr_t)args->data_ptr, 965 (char __user *)(uintptr_t)args->data_ptr,
966 args->size)) 966 args->size))
967 return -EFAULT; 967 return -EFAULT;
968 968
969 ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr, 969 ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
970 args->size); 970 args->size);
971 if (ret) 971 if (ret)
972 return -EFAULT; 972 return -EFAULT;
973 973
974 ret = i915_mutex_lock_interruptible(dev); 974 ret = i915_mutex_lock_interruptible(dev);
975 if (ret) 975 if (ret)
976 return ret; 976 return ret;
977 977
978 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 978 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
979 if (&obj->base == NULL) { 979 if (&obj->base == NULL) {
980 ret = -ENOENT; 980 ret = -ENOENT;
981 goto unlock; 981 goto unlock;
982 } 982 }
983 983
984 /* Bounds check destination. */ 984 /* Bounds check destination. */
985 if (args->offset > obj->base.size || 985 if (args->offset > obj->base.size ||
986 args->size > obj->base.size - args->offset) { 986 args->size > obj->base.size - args->offset) {
987 ret = -EINVAL; 987 ret = -EINVAL;
988 goto out; 988 goto out;
989 } 989 }
990 990
991 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 991 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
992 992
993 /* We can only do the GTT pwrite on untiled buffers, as otherwise 993 /* We can only do the GTT pwrite on untiled buffers, as otherwise
994 * it would end up going through the fenced access, and we'll get 994 * it would end up going through the fenced access, and we'll get
995 * different detiling behavior between reading and writing. 995 * different detiling behavior between reading and writing.
996 * pread/pwrite currently are reading and writing from the CPU 996 * pread/pwrite currently are reading and writing from the CPU
997 * perspective, requiring manual detiling by the client. 997 * perspective, requiring manual detiling by the client.
998 */ 998 */
999 if (obj->phys_obj) 999 if (obj->phys_obj)
1000 ret = i915_gem_phys_pwrite(dev, obj, args, file); 1000 ret = i915_gem_phys_pwrite(dev, obj, args, file);
1001 else if (obj->gtt_space && 1001 else if (obj->gtt_space &&
1002 obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 1002 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1003 ret = i915_gem_object_pin(obj, 0, true); 1003 ret = i915_gem_object_pin(obj, 0, true);
1004 if (ret) 1004 if (ret)
1005 goto out; 1005 goto out;
1006 1006
1007 ret = i915_gem_object_set_to_gtt_domain(obj, true); 1007 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1008 if (ret) 1008 if (ret)
1009 goto out_unpin; 1009 goto out_unpin;
1010 1010
1011 ret = i915_gem_object_put_fence(obj); 1011 ret = i915_gem_object_put_fence(obj);
1012 if (ret) 1012 if (ret)
1013 goto out_unpin; 1013 goto out_unpin;
1014 1014
1015 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); 1015 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1016 if (ret == -EFAULT) 1016 if (ret == -EFAULT)
1017 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file); 1017 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
1018 1018
1019 out_unpin: 1019 out_unpin:
1020 i915_gem_object_unpin(obj); 1020 i915_gem_object_unpin(obj);
1021 } else { 1021 } else {
1022 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 1022 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1023 if (ret) 1023 if (ret)
1024 goto out; 1024 goto out;
1025 1025
1026 ret = -EFAULT; 1026 ret = -EFAULT;
1027 if (!i915_gem_object_needs_bit17_swizzle(obj)) 1027 if (!i915_gem_object_needs_bit17_swizzle(obj))
1028 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); 1028 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1029 if (ret == -EFAULT) 1029 if (ret == -EFAULT)
1030 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); 1030 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1031 } 1031 }
1032 1032
1033 out: 1033 out:
1034 drm_gem_object_unreference(&obj->base); 1034 drm_gem_object_unreference(&obj->base);
1035 unlock: 1035 unlock:
1036 mutex_unlock(&dev->struct_mutex); 1036 mutex_unlock(&dev->struct_mutex);
1037 return ret; 1037 return ret;
1038 } 1038 }
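For reference, userspace reaches the function above through DRM_IOCTL_I915_GEM_PWRITE. The following is only an illustrative sketch of such a caller (the helper name bo_pwrite, the assumption that fd is an already-open i915 DRM node and handle a valid GEM handle, and the <drm/i915_drm.h> include path are ours, not part of this file):

/* Hedged sketch: copy len bytes from src into a GEM object at offset 0. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int bo_pwrite(int fd, uint32_t handle, const void *src, uint64_t len)
{
	struct drm_i915_gem_pwrite pwrite;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle   = handle;                   /* GEM handle to write into */
	pwrite.offset   = 0;                        /* byte offset inside the object */
	pwrite.size     = len;                      /* bounds-checked against the object size above */
	pwrite.data_ptr = (uint64_t)(uintptr_t)src; /* user pointer, access_ok()-checked above */

	return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}

Whether the kernel then takes the phys, GTT or shmem path is decided purely by the object state tested in i915_gem_pwrite_ioctl() above.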
1039 1039
1040 /** 1040 /**
1041 * Called when user space prepares to use an object with the CPU, either 1041 * Called when user space prepares to use an object with the CPU, either
1042 * through the mmap ioctl's mapping or a GTT mapping. 1042 * through the mmap ioctl's mapping or a GTT mapping.
1043 */ 1043 */
1044 int 1044 int
1045 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1045 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1046 struct drm_file *file) 1046 struct drm_file *file)
1047 { 1047 {
1048 struct drm_i915_gem_set_domain *args = data; 1048 struct drm_i915_gem_set_domain *args = data;
1049 struct drm_i915_gem_object *obj; 1049 struct drm_i915_gem_object *obj;
1050 uint32_t read_domains = args->read_domains; 1050 uint32_t read_domains = args->read_domains;
1051 uint32_t write_domain = args->write_domain; 1051 uint32_t write_domain = args->write_domain;
1052 int ret; 1052 int ret;
1053 1053
1054 if (!(dev->driver->driver_features & DRIVER_GEM)) 1054 if (!(dev->driver->driver_features & DRIVER_GEM))
1055 return -ENODEV; 1055 return -ENODEV;
1056 1056
1057 /* Only handle setting domains to types used by the CPU. */ 1057 /* Only handle setting domains to types used by the CPU. */
1058 if (write_domain & I915_GEM_GPU_DOMAINS) 1058 if (write_domain & I915_GEM_GPU_DOMAINS)
1059 return -EINVAL; 1059 return -EINVAL;
1060 1060
1061 if (read_domains & I915_GEM_GPU_DOMAINS) 1061 if (read_domains & I915_GEM_GPU_DOMAINS)
1062 return -EINVAL; 1062 return -EINVAL;
1063 1063
1064 /* Having something in the write domain implies it's in the read 1064 /* Having something in the write domain implies it's in the read
1065 * domain, and only that read domain. Enforce that in the request. 1065 * domain, and only that read domain. Enforce that in the request.
1066 */ 1066 */
1067 if (write_domain != 0 && read_domains != write_domain) 1067 if (write_domain != 0 && read_domains != write_domain)
1068 return -EINVAL; 1068 return -EINVAL;
1069 1069
1070 ret = i915_mutex_lock_interruptible(dev); 1070 ret = i915_mutex_lock_interruptible(dev);
1071 if (ret) 1071 if (ret)
1072 return ret; 1072 return ret;
1073 1073
1074 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 1074 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1075 if (&obj->base == NULL) { 1075 if (&obj->base == NULL) {
1076 ret = -ENOENT; 1076 ret = -ENOENT;
1077 goto unlock; 1077 goto unlock;
1078 } 1078 }
1079 1079
1080 if (read_domains & I915_GEM_DOMAIN_GTT) { 1080 if (read_domains & I915_GEM_DOMAIN_GTT) {
1081 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); 1081 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1082 1082
1083 /* Silently promote "you're not bound, there was nothing to do" 1083 /* Silently promote "you're not bound, there was nothing to do"
1084 * to success, since the client was just asking us to 1084 * to success, since the client was just asking us to
1085 * make sure everything was done. 1085 * make sure everything was done.
1086 */ 1086 */
1087 if (ret == -EINVAL) 1087 if (ret == -EINVAL)
1088 ret = 0; 1088 ret = 0;
1089 } else { 1089 } else {
1090 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); 1090 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1091 } 1091 }
1092 1092
1093 drm_gem_object_unreference(&obj->base); 1093 drm_gem_object_unreference(&obj->base);
1094 unlock: 1094 unlock:
1095 mutex_unlock(&dev->struct_mutex); 1095 mutex_unlock(&dev->struct_mutex);
1096 return ret; 1096 return ret;
1097 } 1097 }
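A hedged userspace counterpart to the set-domain ioctl above: before touching a CPU mapping, clients usually move the object into the wanted domain first. The sketch below is illustrative only (helper name and fd/handle setup assumed), but it respects the rule enforced above that a non-zero write domain must equal the read domain:

/* Sketch: move a GEM object to the GTT domain, optionally for writing. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int bo_set_gtt_domain(int fd, uint32_t handle, int writing)
{
	struct drm_i915_gem_set_domain sd;

	memset(&sd, 0, sizeof(sd));
	sd.handle = handle;
	sd.read_domains = I915_GEM_DOMAIN_GTT;
	sd.write_domain = writing ? I915_GEM_DOMAIN_GTT : 0; /* must mirror read_domains when set */

	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}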
1098 1098
1099 /** 1099 /**
1100 * Called when user space has done writes to this buffer 1100 * Called when user space has done writes to this buffer
1101 */ 1101 */
1102 int 1102 int
1103 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 1103 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1104 struct drm_file *file) 1104 struct drm_file *file)
1105 { 1105 {
1106 struct drm_i915_gem_sw_finish *args = data; 1106 struct drm_i915_gem_sw_finish *args = data;
1107 struct drm_i915_gem_object *obj; 1107 struct drm_i915_gem_object *obj;
1108 int ret = 0; 1108 int ret = 0;
1109 1109
1110 if (!(dev->driver->driver_features & DRIVER_GEM)) 1110 if (!(dev->driver->driver_features & DRIVER_GEM))
1111 return -ENODEV; 1111 return -ENODEV;
1112 1112
1113 ret = i915_mutex_lock_interruptible(dev); 1113 ret = i915_mutex_lock_interruptible(dev);
1114 if (ret) 1114 if (ret)
1115 return ret; 1115 return ret;
1116 1116
1117 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 1117 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1118 if (&obj->base == NULL) { 1118 if (&obj->base == NULL) {
1119 ret = -ENOENT; 1119 ret = -ENOENT;
1120 goto unlock; 1120 goto unlock;
1121 } 1121 }
1122 1122
1123 /* Pinned buffers may be scanout, so flush the cache */ 1123 /* Pinned buffers may be scanout, so flush the cache */
1124 if (obj->pin_count) 1124 if (obj->pin_count)
1125 i915_gem_object_flush_cpu_write_domain(obj); 1125 i915_gem_object_flush_cpu_write_domain(obj);
1126 1126
1127 drm_gem_object_unreference(&obj->base); 1127 drm_gem_object_unreference(&obj->base);
1128 unlock: 1128 unlock:
1129 mutex_unlock(&dev->struct_mutex); 1129 mutex_unlock(&dev->struct_mutex);
1130 return ret; 1130 return ret;
1131 } 1131 }
1132 1132
1133 /** 1133 /**
1134 * Maps the contents of an object, returning the address it is mapped 1134 * Maps the contents of an object, returning the address it is mapped
1135 * into. 1135 * into.
1136 * 1136 *
1137 * While the mapping holds a reference on the contents of the object, it doesn't 1137 * While the mapping holds a reference on the contents of the object, it doesn't
1138 * imply a ref on the object itself. 1138 * imply a ref on the object itself.
1139 */ 1139 */
1140 int 1140 int
1141 i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 1141 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1142 struct drm_file *file) 1142 struct drm_file *file)
1143 { 1143 {
1144 struct drm_i915_private *dev_priv = dev->dev_private; 1144 struct drm_i915_private *dev_priv = dev->dev_private;
1145 struct drm_i915_gem_mmap *args = data; 1145 struct drm_i915_gem_mmap *args = data;
1146 struct drm_gem_object *obj; 1146 struct drm_gem_object *obj;
1147 unsigned long addr; 1147 unsigned long addr;
1148 1148
1149 if (!(dev->driver->driver_features & DRIVER_GEM)) 1149 if (!(dev->driver->driver_features & DRIVER_GEM))
1150 return -ENODEV; 1150 return -ENODEV;
1151 1151
1152 obj = drm_gem_object_lookup(dev, file, args->handle); 1152 obj = drm_gem_object_lookup(dev, file, args->handle);
1153 if (obj == NULL) 1153 if (obj == NULL)
1154 return -ENOENT; 1154 return -ENOENT;
1155 1155
1156 if (obj->size > dev_priv->mm.gtt_mappable_end) { 1156 if (obj->size > dev_priv->mm.gtt_mappable_end) {
1157 drm_gem_object_unreference_unlocked(obj); 1157 drm_gem_object_unreference_unlocked(obj);
1158 return -E2BIG; 1158 return -E2BIG;
1159 } 1159 }
1160 1160
1161 down_write(&current->mm->mmap_sem); 1161 down_write(&current->mm->mmap_sem);
1162 addr = do_mmap(obj->filp, 0, args->size, 1162 addr = do_mmap(obj->filp, 0, args->size,
1163 PROT_READ | PROT_WRITE, MAP_SHARED, 1163 PROT_READ | PROT_WRITE, MAP_SHARED,
1164 args->offset); 1164 args->offset);
1165 up_write(&current->mm->mmap_sem); 1165 up_write(&current->mm->mmap_sem);
1166 drm_gem_object_unreference_unlocked(obj); 1166 drm_gem_object_unreference_unlocked(obj);
1167 if (IS_ERR((void *)addr)) 1167 if (IS_ERR((void *)addr))
1168 return addr; 1168 return addr;
1169 1169
1170 args->addr_ptr = (uint64_t) addr; 1170 args->addr_ptr = (uint64_t) addr;
1171 1171
1172 return 0; 1172 return 0;
1173 } 1173 }
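The ioctl above backs DRM_IOCTL_I915_GEM_MMAP and yields a CPU (shmem) mapping, as opposed to the GTT mapping set up further below. A hedged usage sketch (helper name and prior setup are assumptions):

/* Sketch: map the whole object for CPU access via the mmap ioctl. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void *bo_mmap_cpu(int fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.offset = 0;   /* start of the object */
	arg.size = size;  /* length to map */

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
		return NULL;

	return (void *)(uintptr_t)arg.addr_ptr; /* address chosen by do_mmap() above */
}

Pairing this with the set-domain sketch earlier keeps such CPU access coherent with any GPU or GTT writes.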
1174 1174
1175 /** 1175 /**
1176 * i915_gem_fault - fault a page into the GTT 1176 * i915_gem_fault - fault a page into the GTT
1177 * @vma: VMA in question 1177 * @vma: VMA in question
1178 * @vmf: fault info 1178 * @vmf: fault info
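(not emitted)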
1179 * 1179 *
1180 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped 1180 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1181 * from userspace. The fault handler takes care of binding the object to 1181 * from userspace. The fault handler takes care of binding the object to
1182 * the GTT (if needed), allocating and programming a fence register (again, 1182 * the GTT (if needed), allocating and programming a fence register (again,
1183 * only if needed based on whether the old reg is still valid or the object 1183 * only if needed based on whether the old reg is still valid or the object
1184 * is tiled) and inserting a new PTE into the faulting process. 1184 * is tiled) and inserting a new PTE into the faulting process.
1185 * 1185 *
1186 * Note that the faulting process may involve evicting existing objects 1186 * Note that the faulting process may involve evicting existing objects
1187 * from the GTT and/or fence registers to make room. So performance may 1187 * from the GTT and/or fence registers to make room. So performance may
1188 * suffer if the GTT working set is large or there are few fence registers 1188 * suffer if the GTT working set is large or there are few fence registers
1189 * left. 1189 * left.
1190 */ 1190 */
1191 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1191 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1192 { 1192 {
1193 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); 1193 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1194 struct drm_device *dev = obj->base.dev; 1194 struct drm_device *dev = obj->base.dev;
1195 drm_i915_private_t *dev_priv = dev->dev_private; 1195 drm_i915_private_t *dev_priv = dev->dev_private;
1196 pgoff_t page_offset; 1196 pgoff_t page_offset;
1197 unsigned long pfn; 1197 unsigned long pfn;
1198 int ret = 0; 1198 int ret = 0;
1199 bool write = !!(vmf->flags & FAULT_FLAG_WRITE); 1199 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1200 1200
1201 /* We don't use vmf->pgoff since that has the fake offset */ 1201 /* We don't use vmf->pgoff since that has the fake offset */
1202 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> 1202 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1203 PAGE_SHIFT; 1203 PAGE_SHIFT;
1204 1204
1205 ret = i915_mutex_lock_interruptible(dev); 1205 ret = i915_mutex_lock_interruptible(dev);
1206 if (ret) 1206 if (ret)
1207 goto out; 1207 goto out;
1208 1208
1209 trace_i915_gem_object_fault(obj, page_offset, true, write); 1209 trace_i915_gem_object_fault(obj, page_offset, true, write);
1210 1210
1211 /* Now bind it into the GTT if needed */ 1211 /* Now bind it into the GTT if needed */
1212 if (!obj->map_and_fenceable) { 1212 if (!obj->map_and_fenceable) {
1213 ret = i915_gem_object_unbind(obj); 1213 ret = i915_gem_object_unbind(obj);
1214 if (ret) 1214 if (ret)
1215 goto unlock; 1215 goto unlock;
1216 } 1216 }
1217 if (!obj->gtt_space) { 1217 if (!obj->gtt_space) {
1218 ret = i915_gem_object_bind_to_gtt(obj, 0, true); 1218 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1219 if (ret) 1219 if (ret)
1220 goto unlock; 1220 goto unlock;
1221 1221
1222 ret = i915_gem_object_set_to_gtt_domain(obj, write); 1222 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1223 if (ret) 1223 if (ret)
1224 goto unlock; 1224 goto unlock;
1225 } 1225 }
1226 1226
1227 if (obj->tiling_mode == I915_TILING_NONE) 1227 if (obj->tiling_mode == I915_TILING_NONE)
1228 ret = i915_gem_object_put_fence(obj); 1228 ret = i915_gem_object_put_fence(obj);
1229 else 1229 else
1230 ret = i915_gem_object_get_fence(obj, NULL); 1230 ret = i915_gem_object_get_fence(obj, NULL);
1231 if (ret) 1231 if (ret)
1232 goto unlock; 1232 goto unlock;
1233 1233
1234 if (i915_gem_object_is_inactive(obj)) 1234 if (i915_gem_object_is_inactive(obj))
1235 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 1235 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1236 1236
1237 obj->fault_mappable = true; 1237 obj->fault_mappable = true;
1238 1238
1239 pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) + 1239 pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
1240 page_offset; 1240 page_offset;
1241 1241
1242 /* Finally, remap it using the new GTT offset */ 1242 /* Finally, remap it using the new GTT offset */
1243 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); 1243 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1244 unlock: 1244 unlock:
1245 mutex_unlock(&dev->struct_mutex); 1245 mutex_unlock(&dev->struct_mutex);
1246 out: 1246 out:
1247 switch (ret) { 1247 switch (ret) {
1248 case -EIO: 1248 case -EIO:
1249 case -EAGAIN: 1249 case -EAGAIN:
1250 /* Give the error handler a chance to run and move the 1250 /* Give the error handler a chance to run and move the
1251 * objects off the GPU active list. Next time we service the 1251 * objects off the GPU active list. Next time we service the
1252 * fault, we should be able to transition the page into the 1252 * fault, we should be able to transition the page into the
1253 * GTT without touching the GPU (and so avoid further 1253 * GTT without touching the GPU (and so avoid further
1254 * EIO/EAGAIN). If the GPU is wedged, then there is no issue 1254 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1255 * with coherency, just lost writes. 1255 * with coherency, just lost writes.
1256 */ 1256 */
1257 set_need_resched(); 1257 set_need_resched();
1258 case 0: 1258 case 0:
1259 case -ERESTARTSYS: 1259 case -ERESTARTSYS:
1260 case -EINTR: 1260 case -EINTR:
1261 return VM_FAULT_NOPAGE; 1261 return VM_FAULT_NOPAGE;
1262 case -ENOMEM: 1262 case -ENOMEM:
1263 return VM_FAULT_OOM; 1263 return VM_FAULT_OOM;
1264 default: 1264 default:
1265 return VM_FAULT_SIGBUS; 1265 return VM_FAULT_SIGBUS;
1266 } 1266 }
1267 } 1267 }
1268 1268
1269 /** 1269 /**
1270 * i915_gem_release_mmap - remove physical page mappings 1270 * i915_gem_release_mmap - remove physical page mappings
1271 * @obj: obj in question 1271 * @obj: obj in question
1272 * 1272 *
1273 * Preserve the reservation of the mmapping with the DRM core code, but 1273 * Preserve the reservation of the mmapping with the DRM core code, but
1274 * relinquish ownership of the pages back to the system. 1274 * relinquish ownership of the pages back to the system.
1275 * 1275 *
1276 * It is vital that we remove the page mapping if we have mapped a tiled 1276 * It is vital that we remove the page mapping if we have mapped a tiled
1277 * object through the GTT and then lose the fence register due to 1277 * object through the GTT and then lose the fence register due to
1278 * resource pressure. Similarly if the object has been moved out of the 1278 * resource pressure. Similarly if the object has been moved out of the
1279 * aperture, then pages mapped into userspace must be revoked. Removing the 1279 * aperture, then pages mapped into userspace must be revoked. Removing the
1280 * mapping will then trigger a page fault on the next user access, allowing 1280 * mapping will then trigger a page fault on the next user access, allowing
1281 * fixup by i915_gem_fault(). 1281 * fixup by i915_gem_fault().
1282 */ 1282 */
1283 void 1283 void
1284 i915_gem_release_mmap(struct drm_i915_gem_object *obj) 1284 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1285 { 1285 {
1286 if (!obj->fault_mappable) 1286 if (!obj->fault_mappable)
1287 return; 1287 return;
1288 1288
1289 if (obj->base.dev->dev_mapping) 1289 if (obj->base.dev->dev_mapping)
1290 unmap_mapping_range(obj->base.dev->dev_mapping, 1290 unmap_mapping_range(obj->base.dev->dev_mapping,
1291 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, 1291 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1292 obj->base.size, 1); 1292 obj->base.size, 1);
1293 1293
1294 obj->fault_mappable = false; 1294 obj->fault_mappable = false;
1295 } 1295 }
1296 1296
1297 static uint32_t 1297 static uint32_t
1298 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) 1298 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1299 { 1299 {
1300 uint32_t gtt_size; 1300 uint32_t gtt_size;
1301 1301
1302 if (INTEL_INFO(dev)->gen >= 4 || 1302 if (INTEL_INFO(dev)->gen >= 4 ||
1303 tiling_mode == I915_TILING_NONE) 1303 tiling_mode == I915_TILING_NONE)
1304 return size; 1304 return size;
1305 1305
1306 /* Previous chips need a power-of-two fence region when tiling */ 1306 /* Previous chips need a power-of-two fence region when tiling */
1307 if (INTEL_INFO(dev)->gen == 3) 1307 if (INTEL_INFO(dev)->gen == 3)
1308 gtt_size = 1024*1024; 1308 gtt_size = 1024*1024;
1309 else 1309 else
1310 gtt_size = 512*1024; 1310 gtt_size = 512*1024;
1311 1311
1312 while (gtt_size < size) 1312 while (gtt_size < size)
1313 gtt_size <<= 1; 1313 gtt_size <<= 1;
1314 1314
1315 return gtt_size; 1315 return gtt_size;
1316 } 1316 }
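To make the rounding above concrete: a 1.5 MiB tiled object needs a 2 MiB fence region on gen3 (the 1 MiB minimum doubled once) and likewise 2 MiB on gen2 (512 KiB doubled twice), while on gen4+ or for untiled objects the object size is used as-is.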
1317 1317
1318 /** 1318 /**
1319 * i915_gem_get_gtt_alignment - return required GTT alignment for an object 1319 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1320 * @obj: object to check 1320 * @obj: object to check
1321 * 1321 *
1322 * Return the required GTT alignment for an object, taking into account 1322 * Return the required GTT alignment for an object, taking into account
1323 * potential fence register mapping. 1323 * potential fence register mapping.
1324 */ 1324 */
1325 static uint32_t 1325 static uint32_t
1326 i915_gem_get_gtt_alignment(struct drm_device *dev, 1326 i915_gem_get_gtt_alignment(struct drm_device *dev,
1327 uint32_t size, 1327 uint32_t size,
1328 int tiling_mode) 1328 int tiling_mode)
1329 { 1329 {
1330 /* 1330 /*
1331 * Minimum alignment is 4k (GTT page size), but might be greater 1331 * Minimum alignment is 4k (GTT page size), but might be greater
1332 * if a fence register is needed for the object. 1332 * if a fence register is needed for the object.
1333 */ 1333 */
1334 if (INTEL_INFO(dev)->gen >= 4 || 1334 if (INTEL_INFO(dev)->gen >= 4 ||
1335 tiling_mode == I915_TILING_NONE) 1335 tiling_mode == I915_TILING_NONE)
1336 return 4096; 1336 return 4096;
1337 1337
1338 /* 1338 /*
1339 * Previous chips need to be aligned to the size of the smallest 1339 * Previous chips need to be aligned to the size of the smallest
1340 * fence register that can contain the object. 1340 * fence register that can contain the object.
1341 */ 1341 */
1342 return i915_gem_get_gtt_size(dev, size, tiling_mode); 1342 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1343 } 1343 }
1344 1344
1345 /** 1345 /**
1346 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an 1346 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1347 * unfenced object 1347 * unfenced object
1348 * @dev: the device 1348 * @dev: the device
1349 * @size: size of the object 1349 * @size: size of the object
1350 * @tiling_mode: tiling mode of the object 1350 * @tiling_mode: tiling mode of the object
1351 * 1351 *
1352 * Return the required GTT alignment for an object, only taking into account 1352 * Return the required GTT alignment for an object, only taking into account
1353 * unfenced tiled surface requirements. 1353 * unfenced tiled surface requirements.
1354 */ 1354 */
1355 uint32_t 1355 uint32_t
1356 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, 1356 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1357 uint32_t size, 1357 uint32_t size,
1358 int tiling_mode) 1358 int tiling_mode)
1359 { 1359 {
1360 /* 1360 /*
1361 * Minimum alignment is 4k (GTT page size) for sane hw. 1361 * Minimum alignment is 4k (GTT page size) for sane hw.
1362 */ 1362 */
1363 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || 1363 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1364 tiling_mode == I915_TILING_NONE) 1364 tiling_mode == I915_TILING_NONE)
1365 return 4096; 1365 return 4096;
1366 1366
1367 /* Previous hardware however needs to be aligned to a power-of-two 1367 /* Previous hardware however needs to be aligned to a power-of-two
1368 * tile height. The simplest method for determining this is to reuse 1368 * tile height. The simplest method for determining this is to reuse
1369 * the power-of-two tiled object size. 1369 * the power-of-two tiled object size.
1370 */ 1370 */
1371 return i915_gem_get_gtt_size(dev, size, tiling_mode); 1371 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1372 } 1372 }
1373 1373
1374 int 1374 int
1375 i915_gem_mmap_gtt(struct drm_file *file, 1375 i915_gem_mmap_gtt(struct drm_file *file,
1376 struct drm_device *dev, 1376 struct drm_device *dev,
1377 uint32_t handle, 1377 uint32_t handle,
1378 uint64_t *offset) 1378 uint64_t *offset)
1379 { 1379 {
1380 struct drm_i915_private *dev_priv = dev->dev_private; 1380 struct drm_i915_private *dev_priv = dev->dev_private;
1381 struct drm_i915_gem_object *obj; 1381 struct drm_i915_gem_object *obj;
1382 int ret; 1382 int ret;
1383 1383
1384 if (!(dev->driver->driver_features & DRIVER_GEM)) 1384 if (!(dev->driver->driver_features & DRIVER_GEM))
1385 return -ENODEV; 1385 return -ENODEV;
1386 1386
1387 ret = i915_mutex_lock_interruptible(dev); 1387 ret = i915_mutex_lock_interruptible(dev);
1388 if (ret) 1388 if (ret)
1389 return ret; 1389 return ret;
1390 1390
1391 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); 1391 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1392 if (&obj->base == NULL) { 1392 if (&obj->base == NULL) {
1393 ret = -ENOENT; 1393 ret = -ENOENT;
1394 goto unlock; 1394 goto unlock;
1395 } 1395 }
1396 1396
1397 if (obj->base.size > dev_priv->mm.gtt_mappable_end) { 1397 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1398 ret = -E2BIG; 1398 ret = -E2BIG;
1399 goto out; 1399 goto out;
1400 } 1400 }
1401 1401
1402 if (obj->madv != I915_MADV_WILLNEED) { 1402 if (obj->madv != I915_MADV_WILLNEED) {
1403 DRM_ERROR("Attempting to mmap a purgeable buffer\n"); 1403 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1404 ret = -EINVAL; 1404 ret = -EINVAL;
1405 goto out; 1405 goto out;
1406 } 1406 }
1407 1407
1408 if (!obj->base.map_list.map) { 1408 if (!obj->base.map_list.map) {
1409 ret = drm_gem_create_mmap_offset(&obj->base); 1409 ret = drm_gem_create_mmap_offset(&obj->base);
1410 if (ret) 1410 if (ret)
1411 goto out; 1411 goto out;
1412 } 1412 }
1413 1413
1414 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; 1414 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1415 1415
1416 out: 1416 out:
1417 drm_gem_object_unreference(&obj->base); 1417 drm_gem_object_unreference(&obj->base);
1418 unlock: 1418 unlock:
1419 mutex_unlock(&dev->struct_mutex); 1419 mutex_unlock(&dev->struct_mutex);
1420 return ret; 1420 return ret;
1421 } 1421 }
1422 1422
1423 /** 1423 /**
1424 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing 1424 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1425 * @dev: DRM device 1425 * @dev: DRM device
1426 * @data: GTT mapping ioctl data 1426 * @data: GTT mapping ioctl data
1427 * @file: GEM object info 1427 * @file: GEM object info
1428 * 1428 *
1429 * Simply returns the fake offset to userspace so it can mmap it. 1429 * Simply returns the fake offset to userspace so it can mmap it.
1430 * The mmap call will end up in drm_gem_mmap(), which will set things 1430 * The mmap call will end up in drm_gem_mmap(), which will set things
1431 * up so we can get faults in the handler above. 1431 * up so we can get faults in the handler above.
1432 * 1432 *
1433 * The fault handler will take care of binding the object into the GTT 1433 * The fault handler will take care of binding the object into the GTT
1434 * (since it may have been evicted to make room for something), allocating 1434 * (since it may have been evicted to make room for something), allocating
1435 * a fence register, and mapping the appropriate aperture address into 1435 * a fence register, and mapping the appropriate aperture address into
1436 * userspace. 1436 * userspace.
1437 */ 1437 */
1438 int 1438 int
1439 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 1439 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1440 struct drm_file *file) 1440 struct drm_file *file)
1441 { 1441 {
1442 struct drm_i915_gem_mmap_gtt *args = data; 1442 struct drm_i915_gem_mmap_gtt *args = data;
1443 1443
1444 if (!(dev->driver->driver_features & DRIVER_GEM)) 1444 if (!(dev->driver->driver_features & DRIVER_GEM))
1445 return -ENODEV; 1445 return -ENODEV;
1446 1446
1447 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); 1447 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1448 } 1448 }
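Tying the fault handler and the two functions above together, a hedged sketch of the userspace side: fetch the fake offset with DRM_IOCTL_I915_GEM_MMAP_GTT, then mmap() that offset on the DRM fd so the first access faults into i915_gem_fault() (helper name, includes and error handling are illustrative assumptions):

/* Sketch: obtain a GTT mapping of a GEM object through the fake-offset scheme. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *bo_mmap_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg;
	void *ptr;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;

	/* Returns the fake offset derived from obj->base.map_list above. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return NULL;

	/* mmap() the DRM fd at that offset; page faults are serviced by i915_gem_fault(). */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, arg.offset);
	return ptr == MAP_FAILED ? NULL : ptr;
}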
1449 1449
1450 1450
1451 static int 1451 static int
1452 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, 1452 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1453 gfp_t gfpmask) 1453 gfp_t gfpmask)
1454 { 1454 {
1455 int page_count, i; 1455 int page_count, i;
1456 struct address_space *mapping; 1456 struct address_space *mapping;
1457 struct inode *inode; 1457 struct inode *inode;
1458 struct page *page; 1458 struct page *page;
1459 1459
1460 /* Get the list of pages out of our struct file. They'll be pinned 1460 /* Get the list of pages out of our struct file. They'll be pinned
1461 * at this point until we release them. 1461 * at this point until we release them.
1462 */ 1462 */
1463 page_count = obj->base.size / PAGE_SIZE; 1463 page_count = obj->base.size / PAGE_SIZE;
1464 BUG_ON(obj->pages != NULL); 1464 BUG_ON(obj->pages != NULL);
1465 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); 1465 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1466 if (obj->pages == NULL) 1466 if (obj->pages == NULL)
1467 return -ENOMEM; 1467 return -ENOMEM;
1468 1468
1469 inode = obj->base.filp->f_path.dentry->d_inode; 1469 inode = obj->base.filp->f_path.dentry->d_inode;
1470 mapping = inode->i_mapping; 1470 mapping = inode->i_mapping;
1471 gfpmask |= mapping_gfp_mask(mapping); 1471 gfpmask |= mapping_gfp_mask(mapping);
1472 1472
1473 for (i = 0; i < page_count; i++) { 1473 for (i = 0; i < page_count; i++) {
1474 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask); 1474 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
1475 if (IS_ERR(page)) 1475 if (IS_ERR(page))
1476 goto err_pages; 1476 goto err_pages;
1477 1477
1478 obj->pages[i] = page; 1478 obj->pages[i] = page;
1479 } 1479 }
1480 1480
1481 if (i915_gem_object_needs_bit17_swizzle(obj)) 1481 if (i915_gem_object_needs_bit17_swizzle(obj))
1482 i915_gem_object_do_bit_17_swizzle(obj); 1482 i915_gem_object_do_bit_17_swizzle(obj);
1483 1483
1484 return 0; 1484 return 0;
1485 1485
1486 err_pages: 1486 err_pages:
1487 while (i--) 1487 while (i--)
1488 page_cache_release(obj->pages[i]); 1488 page_cache_release(obj->pages[i]);
1489 1489
1490 drm_free_large(obj->pages); 1490 drm_free_large(obj->pages);
1491 obj->pages = NULL; 1491 obj->pages = NULL;
1492 return PTR_ERR(page); 1492 return PTR_ERR(page);
1493 } 1493 }
1494 1494
1495 static void 1495 static void
1496 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) 1496 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1497 { 1497 {
1498 int page_count = obj->base.size / PAGE_SIZE; 1498 int page_count = obj->base.size / PAGE_SIZE;
1499 int i; 1499 int i;
1500 1500
1501 BUG_ON(obj->madv == __I915_MADV_PURGED); 1501 BUG_ON(obj->madv == __I915_MADV_PURGED);
1502 1502
1503 if (i915_gem_object_needs_bit17_swizzle(obj)) 1503 if (i915_gem_object_needs_bit17_swizzle(obj))
1504 i915_gem_object_save_bit_17_swizzle(obj); 1504 i915_gem_object_save_bit_17_swizzle(obj);
1505 1505
1506 if (obj->madv == I915_MADV_DONTNEED) 1506 if (obj->madv == I915_MADV_DONTNEED)
1507 obj->dirty = 0; 1507 obj->dirty = 0;
1508 1508
1509 for (i = 0; i < page_count; i++) { 1509 for (i = 0; i < page_count; i++) {
1510 if (obj->dirty) 1510 if (obj->dirty)
1511 set_page_dirty(obj->pages[i]); 1511 set_page_dirty(obj->pages[i]);
1512 1512
1513 if (obj->madv == I915_MADV_WILLNEED) 1513 if (obj->madv == I915_MADV_WILLNEED)
1514 mark_page_accessed(obj->pages[i]); 1514 mark_page_accessed(obj->pages[i]);
1515 1515
1516 page_cache_release(obj->pages[i]); 1516 page_cache_release(obj->pages[i]);
1517 } 1517 }
1518 obj->dirty = 0; 1518 obj->dirty = 0;
1519 1519
1520 drm_free_large(obj->pages); 1520 drm_free_large(obj->pages);
1521 obj->pages = NULL; 1521 obj->pages = NULL;
1522 } 1522 }
1523 1523
1524 void 1524 void
1525 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1525 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1526 struct intel_ring_buffer *ring, 1526 struct intel_ring_buffer *ring,
1527 u32 seqno) 1527 u32 seqno)
1528 { 1528 {
1529 struct drm_device *dev = obj->base.dev; 1529 struct drm_device *dev = obj->base.dev;
1530 struct drm_i915_private *dev_priv = dev->dev_private; 1530 struct drm_i915_private *dev_priv = dev->dev_private;
1531 1531
1532 BUG_ON(ring == NULL); 1532 BUG_ON(ring == NULL);
1533 obj->ring = ring; 1533 obj->ring = ring;
1534 1534
1535 /* Add a reference if we're newly entering the active list. */ 1535 /* Add a reference if we're newly entering the active list. */
1536 if (!obj->active) { 1536 if (!obj->active) {
1537 drm_gem_object_reference(&obj->base); 1537 drm_gem_object_reference(&obj->base);
1538 obj->active = 1; 1538 obj->active = 1;
1539 } 1539 }
1540 1540
1541 /* Move from whatever list we were on to the tail of execution. */ 1541 /* Move from whatever list we were on to the tail of execution. */
1542 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); 1542 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1543 list_move_tail(&obj->ring_list, &ring->active_list); 1543 list_move_tail(&obj->ring_list, &ring->active_list);
1544 1544
1545 obj->last_rendering_seqno = seqno; 1545 obj->last_rendering_seqno = seqno;
1546 if (obj->fenced_gpu_access) { 1546 if (obj->fenced_gpu_access) {
1547 struct drm_i915_fence_reg *reg; 1547 struct drm_i915_fence_reg *reg;
1548 1548
1549 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE); 1549 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1550 1550
1551 obj->last_fenced_seqno = seqno; 1551 obj->last_fenced_seqno = seqno;
1552 obj->last_fenced_ring = ring; 1552 obj->last_fenced_ring = ring;
1553 1553
1554 reg = &dev_priv->fence_regs[obj->fence_reg]; 1554 reg = &dev_priv->fence_regs[obj->fence_reg];
1555 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); 1555 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
1556 } 1556 }
1557 } 1557 }
1558 1558
1559 static void 1559 static void
1560 i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) 1560 i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1561 { 1561 {
1562 list_del_init(&obj->ring_list); 1562 list_del_init(&obj->ring_list);
1563 obj->last_rendering_seqno = 0; 1563 obj->last_rendering_seqno = 0;
1564 } 1564 }
1565 1565
1566 static void 1566 static void
1567 i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) 1567 i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
1568 { 1568 {
1569 struct drm_device *dev = obj->base.dev; 1569 struct drm_device *dev = obj->base.dev;
1570 drm_i915_private_t *dev_priv = dev->dev_private; 1570 drm_i915_private_t *dev_priv = dev->dev_private;
1571 1571
1572 BUG_ON(!obj->active); 1572 BUG_ON(!obj->active);
1573 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); 1573 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
1574 1574
1575 i915_gem_object_move_off_active(obj); 1575 i915_gem_object_move_off_active(obj);
1576 } 1576 }
1577 1577
1578 static void 1578 static void
1579 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) 1579 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1580 { 1580 {
1581 struct drm_device *dev = obj->base.dev; 1581 struct drm_device *dev = obj->base.dev;
1582 struct drm_i915_private *dev_priv = dev->dev_private; 1582 struct drm_i915_private *dev_priv = dev->dev_private;
1583 1583
1584 if (obj->pin_count != 0) 1584 if (obj->pin_count != 0)
1585 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); 1585 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1586 else 1586 else
1587 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 1587 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1588 1588
1589 BUG_ON(!list_empty(&obj->gpu_write_list)); 1589 BUG_ON(!list_empty(&obj->gpu_write_list));
1590 BUG_ON(!obj->active); 1590 BUG_ON(!obj->active);
1591 obj->ring = NULL; 1591 obj->ring = NULL;
1592 1592
1593 i915_gem_object_move_off_active(obj); 1593 i915_gem_object_move_off_active(obj);
1594 obj->fenced_gpu_access = false; 1594 obj->fenced_gpu_access = false;
1595 1595
1596 obj->active = 0; 1596 obj->active = 0;
1597 obj->pending_gpu_write = false; 1597 obj->pending_gpu_write = false;
1598 drm_gem_object_unreference(&obj->base); 1598 drm_gem_object_unreference(&obj->base);
1599 1599
1600 WARN_ON(i915_verify_lists(dev)); 1600 WARN_ON(i915_verify_lists(dev));
1601 } 1601 }
1602 1602
1603 /* Immediately discard the backing storage */ 1603 /* Immediately discard the backing storage */
1604 static void 1604 static void
1605 i915_gem_object_truncate(struct drm_i915_gem_object *obj) 1605 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1606 { 1606 {
1607 struct inode *inode; 1607 struct inode *inode;
1608 1608
1609 /* Our goal here is to return as much of the memory as 1609 /* Our goal here is to return as much of the memory as
1610 * is possible back to the system as we are called from OOM. 1610 * is possible back to the system as we are called from OOM.
1611 * To do this we must instruct the shmfs to drop all of its 1611 * To do this we must instruct the shmfs to drop all of its
1612 * backing pages, *now*. 1612 * backing pages, *now*.
1613 */ 1613 */
1614 inode = obj->base.filp->f_path.dentry->d_inode; 1614 inode = obj->base.filp->f_path.dentry->d_inode;
1615 shmem_truncate_range(inode, 0, (loff_t)-1); 1615 shmem_truncate_range(inode, 0, (loff_t)-1);
1616 1616
1617 obj->madv = __I915_MADV_PURGED; 1617 obj->madv = __I915_MADV_PURGED;
1618 } 1618 }
1619 1619
1620 static inline int 1620 static inline int
1621 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) 1621 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1622 { 1622 {
1623 return obj->madv == I915_MADV_DONTNEED; 1623 return obj->madv == I915_MADV_DONTNEED;
1624 } 1624 }
1625 1625
1626 static void 1626 static void
1627 i915_gem_process_flushing_list(struct intel_ring_buffer *ring, 1627 i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1628 uint32_t flush_domains) 1628 uint32_t flush_domains)
1629 { 1629 {
1630 struct drm_i915_gem_object *obj, *next; 1630 struct drm_i915_gem_object *obj, *next;
1631 1631
1632 list_for_each_entry_safe(obj, next, 1632 list_for_each_entry_safe(obj, next,
1633 &ring->gpu_write_list, 1633 &ring->gpu_write_list,
1634 gpu_write_list) { 1634 gpu_write_list) {
1635 if (obj->base.write_domain & flush_domains) { 1635 if (obj->base.write_domain & flush_domains) {
1636 uint32_t old_write_domain = obj->base.write_domain; 1636 uint32_t old_write_domain = obj->base.write_domain;
1637 1637
1638 obj->base.write_domain = 0; 1638 obj->base.write_domain = 0;
1639 list_del_init(&obj->gpu_write_list); 1639 list_del_init(&obj->gpu_write_list);
1640 i915_gem_object_move_to_active(obj, ring, 1640 i915_gem_object_move_to_active(obj, ring,
1641 i915_gem_next_request_seqno(ring)); 1641 i915_gem_next_request_seqno(ring));
1642 1642
1643 trace_i915_gem_object_change_domain(obj, 1643 trace_i915_gem_object_change_domain(obj,
1644 obj->base.read_domains, 1644 obj->base.read_domains,
1645 old_write_domain); 1645 old_write_domain);
1646 } 1646 }
1647 } 1647 }
1648 } 1648 }
1649 1649
1650 int 1650 int
1651 i915_add_request(struct intel_ring_buffer *ring, 1651 i915_add_request(struct intel_ring_buffer *ring,
1652 struct drm_file *file, 1652 struct drm_file *file,
1653 struct drm_i915_gem_request *request) 1653 struct drm_i915_gem_request *request)
1654 { 1654 {
1655 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1655 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1656 uint32_t seqno; 1656 uint32_t seqno;
1657 int was_empty; 1657 int was_empty;
1658 int ret; 1658 int ret;
1659 1659
1660 BUG_ON(request == NULL); 1660 BUG_ON(request == NULL);
1661 1661
1662 ret = ring->add_request(ring, &seqno); 1662 ret = ring->add_request(ring, &seqno);
1663 if (ret) 1663 if (ret)
1664 return ret; 1664 return ret;
1665 1665
1666 trace_i915_gem_request_add(ring, seqno); 1666 trace_i915_gem_request_add(ring, seqno);
1667 1667
1668 request->seqno = seqno; 1668 request->seqno = seqno;
1669 request->ring = ring; 1669 request->ring = ring;
1670 request->emitted_jiffies = jiffies; 1670 request->emitted_jiffies = jiffies;
1671 was_empty = list_empty(&ring->request_list); 1671 was_empty = list_empty(&ring->request_list);
1672 list_add_tail(&request->list, &ring->request_list); 1672 list_add_tail(&request->list, &ring->request_list);
1673 1673
1674 if (file) { 1674 if (file) {
1675 struct drm_i915_file_private *file_priv = file->driver_priv; 1675 struct drm_i915_file_private *file_priv = file->driver_priv;
1676 1676
1677 spin_lock(&file_priv->mm.lock); 1677 spin_lock(&file_priv->mm.lock);
1678 request->file_priv = file_priv; 1678 request->file_priv = file_priv;
1679 list_add_tail(&request->client_list, 1679 list_add_tail(&request->client_list,
1680 &file_priv->mm.request_list); 1680 &file_priv->mm.request_list);
1681 spin_unlock(&file_priv->mm.lock); 1681 spin_unlock(&file_priv->mm.lock);
1682 } 1682 }
1683 1683
1684 ring->outstanding_lazy_request = false; 1684 ring->outstanding_lazy_request = false;
1685 1685
1686 if (!dev_priv->mm.suspended) { 1686 if (!dev_priv->mm.suspended) {
1687 if (i915_enable_hangcheck) { 1687 if (i915_enable_hangcheck) {
1688 mod_timer(&dev_priv->hangcheck_timer, 1688 mod_timer(&dev_priv->hangcheck_timer,
1689 jiffies + 1689 jiffies +
1690 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 1690 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1691 } 1691 }
1692 if (was_empty) 1692 if (was_empty)
1693 queue_delayed_work(dev_priv->wq, 1693 queue_delayed_work(dev_priv->wq,
1694 &dev_priv->mm.retire_work, HZ); 1694 &dev_priv->mm.retire_work, HZ);
1695 } 1695 }
1696 return 0; 1696 return 0;
1697 } 1697 }
1698 1698
1699 static inline void 1699 static inline void
1700 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) 1700 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1701 { 1701 {
1702 struct drm_i915_file_private *file_priv = request->file_priv; 1702 struct drm_i915_file_private *file_priv = request->file_priv;
1703 1703
1704 if (!file_priv) 1704 if (!file_priv)
1705 return; 1705 return;
1706 1706
1707 spin_lock(&file_priv->mm.lock); 1707 spin_lock(&file_priv->mm.lock);
1708 if (request->file_priv) { 1708 if (request->file_priv) {
1709 list_del(&request->client_list); 1709 list_del(&request->client_list);
1710 request->file_priv = NULL; 1710 request->file_priv = NULL;
1711 } 1711 }
1712 spin_unlock(&file_priv->mm.lock); 1712 spin_unlock(&file_priv->mm.lock);
1713 } 1713 }
1714 1714
1715 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, 1715 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1716 struct intel_ring_buffer *ring) 1716 struct intel_ring_buffer *ring)
1717 { 1717 {
1718 while (!list_empty(&ring->request_list)) { 1718 while (!list_empty(&ring->request_list)) {
1719 struct drm_i915_gem_request *request; 1719 struct drm_i915_gem_request *request;
1720 1720
1721 request = list_first_entry(&ring->request_list, 1721 request = list_first_entry(&ring->request_list,
1722 struct drm_i915_gem_request, 1722 struct drm_i915_gem_request,
1723 list); 1723 list);
1724 1724
1725 list_del(&request->list); 1725 list_del(&request->list);
1726 i915_gem_request_remove_from_client(request); 1726 i915_gem_request_remove_from_client(request);
1727 kfree(request); 1727 kfree(request);
1728 } 1728 }
1729 1729
1730 while (!list_empty(&ring->active_list)) { 1730 while (!list_empty(&ring->active_list)) {
1731 struct drm_i915_gem_object *obj; 1731 struct drm_i915_gem_object *obj;
1732 1732
1733 obj = list_first_entry(&ring->active_list, 1733 obj = list_first_entry(&ring->active_list,
1734 struct drm_i915_gem_object, 1734 struct drm_i915_gem_object,
1735 ring_list); 1735 ring_list);
1736 1736
1737 obj->base.write_domain = 0; 1737 obj->base.write_domain = 0;
1738 list_del_init(&obj->gpu_write_list); 1738 list_del_init(&obj->gpu_write_list);
1739 i915_gem_object_move_to_inactive(obj); 1739 i915_gem_object_move_to_inactive(obj);
1740 } 1740 }
1741 } 1741 }
1742 1742
1743 static void i915_gem_reset_fences(struct drm_device *dev) 1743 static void i915_gem_reset_fences(struct drm_device *dev)
1744 { 1744 {
1745 struct drm_i915_private *dev_priv = dev->dev_private; 1745 struct drm_i915_private *dev_priv = dev->dev_private;
1746 int i; 1746 int i;
1747 1747
1748 for (i = 0; i < dev_priv->num_fence_regs; i++) { 1748 for (i = 0; i < dev_priv->num_fence_regs; i++) {
1749 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 1749 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1750 struct drm_i915_gem_object *obj = reg->obj; 1750 struct drm_i915_gem_object *obj = reg->obj;
1751 1751
1752 if (!obj) 1752 if (!obj)
1753 continue; 1753 continue;
1754 1754
1755 if (obj->tiling_mode) 1755 if (obj->tiling_mode)
1756 i915_gem_release_mmap(obj); 1756 i915_gem_release_mmap(obj);
1757 1757
1758 reg->obj->fence_reg = I915_FENCE_REG_NONE; 1758 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1759 reg->obj->fenced_gpu_access = false; 1759 reg->obj->fenced_gpu_access = false;
1760 reg->obj->last_fenced_seqno = 0; 1760 reg->obj->last_fenced_seqno = 0;
1761 reg->obj->last_fenced_ring = NULL; 1761 reg->obj->last_fenced_ring = NULL;
1762 i915_gem_clear_fence_reg(dev, reg); 1762 i915_gem_clear_fence_reg(dev, reg);
1763 } 1763 }
1764 } 1764 }
1765 1765
1766 void i915_gem_reset(struct drm_device *dev) 1766 void i915_gem_reset(struct drm_device *dev)
1767 { 1767 {
1768 struct drm_i915_private *dev_priv = dev->dev_private; 1768 struct drm_i915_private *dev_priv = dev->dev_private;
1769 struct drm_i915_gem_object *obj; 1769 struct drm_i915_gem_object *obj;
1770 int i; 1770 int i;
1771 1771
1772 for (i = 0; i < I915_NUM_RINGS; i++) 1772 for (i = 0; i < I915_NUM_RINGS; i++)
1773 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]); 1773 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
1774 1774
1775 /* Remove anything from the flushing lists. The GPU cache is likely 1775 /* Remove anything from the flushing lists. The GPU cache is likely
1776 * to be lost on reset along with the data, so simply move the 1776 * to be lost on reset along with the data, so simply move the
1777 * lost bo to the inactive list. 1777 * lost bo to the inactive list.
1778 */ 1778 */
1779 while (!list_empty(&dev_priv->mm.flushing_list)) { 1779 while (!list_empty(&dev_priv->mm.flushing_list)) {
1780 obj = list_first_entry(&dev_priv->mm.flushing_list, 1780 obj = list_first_entry(&dev_priv->mm.flushing_list,
1781 struct drm_i915_gem_object, 1781 struct drm_i915_gem_object,
1782 mm_list); 1782 mm_list);
1783 1783
1784 obj->base.write_domain = 0; 1784 obj->base.write_domain = 0;
1785 list_del_init(&obj->gpu_write_list); 1785 list_del_init(&obj->gpu_write_list);
1786 i915_gem_object_move_to_inactive(obj); 1786 i915_gem_object_move_to_inactive(obj);
1787 } 1787 }
1788 1788
1789 /* Move everything out of the GPU domains to ensure we do any 1789 /* Move everything out of the GPU domains to ensure we do any
1790 * necessary invalidation upon reuse. 1790 * necessary invalidation upon reuse.
1791 */ 1791 */
1792 list_for_each_entry(obj, 1792 list_for_each_entry(obj,
1793 &dev_priv->mm.inactive_list, 1793 &dev_priv->mm.inactive_list,
1794 mm_list) 1794 mm_list)
1795 { 1795 {
1796 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; 1796 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1797 } 1797 }
1798 1798
1799 /* The fence registers are invalidated so clear them out */ 1799 /* The fence registers are invalidated so clear them out */
1800 i915_gem_reset_fences(dev); 1800 i915_gem_reset_fences(dev);
1801 } 1801 }
1802 1802
1803 /** 1803 /**
1804 * This function clears the request list as sequence numbers are passed. 1804 * This function clears the request list as sequence numbers are passed.
1805 */ 1805 */
1806 static void 1806 static void
1807 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) 1807 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1808 { 1808 {
1809 uint32_t seqno; 1809 uint32_t seqno;
1810 int i; 1810 int i;
1811 1811
1812 if (list_empty(&ring->request_list)) 1812 if (list_empty(&ring->request_list))
1813 return; 1813 return;
1814 1814
1815 WARN_ON(i915_verify_lists(ring->dev)); 1815 WARN_ON(i915_verify_lists(ring->dev));
1816 1816
1817 seqno = ring->get_seqno(ring); 1817 seqno = ring->get_seqno(ring);
1818 1818
1819 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) 1819 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1820 if (seqno >= ring->sync_seqno[i]) 1820 if (seqno >= ring->sync_seqno[i])
1821 ring->sync_seqno[i] = 0; 1821 ring->sync_seqno[i] = 0;
1822 1822
1823 while (!list_empty(&ring->request_list)) { 1823 while (!list_empty(&ring->request_list)) {
1824 struct drm_i915_gem_request *request; 1824 struct drm_i915_gem_request *request;
1825 1825
1826 request = list_first_entry(&ring->request_list, 1826 request = list_first_entry(&ring->request_list,
1827 struct drm_i915_gem_request, 1827 struct drm_i915_gem_request,
1828 list); 1828 list);
1829 1829
1830 if (!i915_seqno_passed(seqno, request->seqno)) 1830 if (!i915_seqno_passed(seqno, request->seqno))
1831 break; 1831 break;
1832 1832
1833 trace_i915_gem_request_retire(ring, request->seqno); 1833 trace_i915_gem_request_retire(ring, request->seqno);
1834 1834
1835 list_del(&request->list); 1835 list_del(&request->list);
1836 i915_gem_request_remove_from_client(request); 1836 i915_gem_request_remove_from_client(request);
1837 kfree(request); 1837 kfree(request);
1838 } 1838 }
1839 1839
1840 /* Move any buffers on the active list that are no longer referenced 1840 /* Move any buffers on the active list that are no longer referenced
1841 * by the ringbuffer to the flushing/inactive lists as appropriate. 1841 * by the ringbuffer to the flushing/inactive lists as appropriate.
1842 */ 1842 */
1843 while (!list_empty(&ring->active_list)) { 1843 while (!list_empty(&ring->active_list)) {
1844 struct drm_i915_gem_object *obj; 1844 struct drm_i915_gem_object *obj;
1845 1845
1846 obj = list_first_entry(&ring->active_list, 1846 obj = list_first_entry(&ring->active_list,
1847 struct drm_i915_gem_object, 1847 struct drm_i915_gem_object,
1848 ring_list); 1848 ring_list);
1849 1849
1850 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno)) 1850 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
1851 break; 1851 break;
1852 1852
1853 if (obj->base.write_domain != 0) 1853 if (obj->base.write_domain != 0)
1854 i915_gem_object_move_to_flushing(obj); 1854 i915_gem_object_move_to_flushing(obj);
1855 else 1855 else
1856 i915_gem_object_move_to_inactive(obj); 1856 i915_gem_object_move_to_inactive(obj);
1857 } 1857 }
1858 1858
1859 if (unlikely(ring->trace_irq_seqno && 1859 if (unlikely(ring->trace_irq_seqno &&
1860 i915_seqno_passed(seqno, ring->trace_irq_seqno))) { 1860 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1861 ring->irq_put(ring); 1861 ring->irq_put(ring);
1862 ring->trace_irq_seqno = 0; 1862 ring->trace_irq_seqno = 0;
1863 } 1863 }
1864 1864
1865 WARN_ON(i915_verify_lists(ring->dev)); 1865 WARN_ON(i915_verify_lists(ring->dev));
1866 } 1866 }
1867 1867
1868 void 1868 void
1869 i915_gem_retire_requests(struct drm_device *dev) 1869 i915_gem_retire_requests(struct drm_device *dev)
1870 { 1870 {
1871 drm_i915_private_t *dev_priv = dev->dev_private; 1871 drm_i915_private_t *dev_priv = dev->dev_private;
1872 int i; 1872 int i;
1873 1873
1874 if (!list_empty(&dev_priv->mm.deferred_free_list)) { 1874 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1875 struct drm_i915_gem_object *obj, *next; 1875 struct drm_i915_gem_object *obj, *next;
1876 1876
1877 /* We must be careful that during unbind() we do not 1877 /* We must be careful that during unbind() we do not
1878 * accidentally infinitely recurse into retire requests. 1878 * accidentally infinitely recurse into retire requests.
1879 * Currently: 1879 * Currently:
1880 * retire -> free -> unbind -> wait -> retire_ring 1880 * retire -> free -> unbind -> wait -> retire_ring
1881 */ 1881 */
1882 list_for_each_entry_safe(obj, next, 1882 list_for_each_entry_safe(obj, next,
1883 &dev_priv->mm.deferred_free_list, 1883 &dev_priv->mm.deferred_free_list,
1884 mm_list) 1884 mm_list)
1885 i915_gem_free_object_tail(obj); 1885 i915_gem_free_object_tail(obj);
1886 } 1886 }
1887 1887
1888 for (i = 0; i < I915_NUM_RINGS; i++) 1888 for (i = 0; i < I915_NUM_RINGS; i++)
1889 i915_gem_retire_requests_ring(&dev_priv->ring[i]); 1889 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
1890 } 1890 }
1891 1891
1892 static void 1892 static void
1893 i915_gem_retire_work_handler(struct work_struct *work) 1893 i915_gem_retire_work_handler(struct work_struct *work)
1894 { 1894 {
1895 drm_i915_private_t *dev_priv; 1895 drm_i915_private_t *dev_priv;
1896 struct drm_device *dev; 1896 struct drm_device *dev;
1897 bool idle; 1897 bool idle;
1898 int i; 1898 int i;
1899 1899
1900 dev_priv = container_of(work, drm_i915_private_t, 1900 dev_priv = container_of(work, drm_i915_private_t,
1901 mm.retire_work.work); 1901 mm.retire_work.work);
1902 dev = dev_priv->dev; 1902 dev = dev_priv->dev;
1903 1903
1904 /* Come back later if the device is busy... */ 1904 /* Come back later if the device is busy... */
1905 if (!mutex_trylock(&dev->struct_mutex)) { 1905 if (!mutex_trylock(&dev->struct_mutex)) {
1906 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 1906 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1907 return; 1907 return;
1908 } 1908 }
1909 1909
1910 i915_gem_retire_requests(dev); 1910 i915_gem_retire_requests(dev);
1911 1911
1912 /* Send a periodic flush down the ring so we don't hold onto GEM 1912 /* Send a periodic flush down the ring so we don't hold onto GEM
1913 * objects indefinitely. 1913 * objects indefinitely.
1914 */ 1914 */
1915 idle = true; 1915 idle = true;
1916 for (i = 0; i < I915_NUM_RINGS; i++) { 1916 for (i = 0; i < I915_NUM_RINGS; i++) {
1917 struct intel_ring_buffer *ring = &dev_priv->ring[i]; 1917 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1918 1918
1919 if (!list_empty(&ring->gpu_write_list)) { 1919 if (!list_empty(&ring->gpu_write_list)) {
1920 struct drm_i915_gem_request *request; 1920 struct drm_i915_gem_request *request;
1921 int ret; 1921 int ret;
1922 1922
1923 ret = i915_gem_flush_ring(ring, 1923 ret = i915_gem_flush_ring(ring,
1924 0, I915_GEM_GPU_DOMAINS); 1924 0, I915_GEM_GPU_DOMAINS);
1925 request = kzalloc(sizeof(*request), GFP_KERNEL); 1925 request = kzalloc(sizeof(*request), GFP_KERNEL);
1926 if (ret || request == NULL || 1926 if (ret || request == NULL ||
1927 i915_add_request(ring, NULL, request)) 1927 i915_add_request(ring, NULL, request))
1928 kfree(request); 1928 kfree(request);
1929 } 1929 }
1930 1930
1931 idle &= list_empty(&ring->request_list); 1931 idle &= list_empty(&ring->request_list);
1932 } 1932 }
1933 1933
1934 if (!dev_priv->mm.suspended && !idle) 1934 if (!dev_priv->mm.suspended && !idle)
1935 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 1935 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1936 1936
1937 mutex_unlock(&dev->struct_mutex); 1937 mutex_unlock(&dev->struct_mutex);
1938 } 1938 }
1939 1939
1940 /** 1940 /**
1941 * Waits for a sequence number to be signaled, and cleans up the 1941 * Waits for a sequence number to be signaled, and cleans up the
1942 * request and object lists appropriately for that event. 1942 * request and object lists appropriately for that event.
1943 */ 1943 */
1944 int 1944 int
1945 i915_wait_request(struct intel_ring_buffer *ring, 1945 i915_wait_request(struct intel_ring_buffer *ring,
1946 uint32_t seqno) 1946 uint32_t seqno)
1947 { 1947 {
1948 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1948 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1949 u32 ier; 1949 u32 ier;
1950 int ret = 0; 1950 int ret = 0;
1951 1951
1952 BUG_ON(seqno == 0); 1952 BUG_ON(seqno == 0);
1953 1953
1954 if (atomic_read(&dev_priv->mm.wedged)) { 1954 if (atomic_read(&dev_priv->mm.wedged)) {
1955 struct completion *x = &dev_priv->error_completion; 1955 struct completion *x = &dev_priv->error_completion;
1956 bool recovery_complete; 1956 bool recovery_complete;
1957 unsigned long flags; 1957 unsigned long flags;
1958 1958
1959 /* Give the error handler a chance to run. */ 1959 /* Give the error handler a chance to run. */
1960 spin_lock_irqsave(&x->wait.lock, flags); 1960 spin_lock_irqsave(&x->wait.lock, flags);
1961 recovery_complete = x->done > 0; 1961 recovery_complete = x->done > 0;
1962 spin_unlock_irqrestore(&x->wait.lock, flags); 1962 spin_unlock_irqrestore(&x->wait.lock, flags);
1963 1963
1964 return recovery_complete ? -EIO : -EAGAIN; 1964 return recovery_complete ? -EIO : -EAGAIN;
1965 } 1965 }
1966 1966
1967 if (seqno == ring->outstanding_lazy_request) { 1967 if (seqno == ring->outstanding_lazy_request) {
1968 struct drm_i915_gem_request *request; 1968 struct drm_i915_gem_request *request;
1969 1969
1970 request = kzalloc(sizeof(*request), GFP_KERNEL); 1970 request = kzalloc(sizeof(*request), GFP_KERNEL);
1971 if (request == NULL) 1971 if (request == NULL)
1972 return -ENOMEM; 1972 return -ENOMEM;
1973 1973
1974 ret = i915_add_request(ring, NULL, request); 1974 ret = i915_add_request(ring, NULL, request);
1975 if (ret) { 1975 if (ret) {
1976 kfree(request); 1976 kfree(request);
1977 return ret; 1977 return ret;
1978 } 1978 }
1979 1979
1980 seqno = request->seqno; 1980 seqno = request->seqno;
1981 } 1981 }
1982 1982
1983 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { 1983 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
1984 if (HAS_PCH_SPLIT(ring->dev)) 1984 if (HAS_PCH_SPLIT(ring->dev))
1985 ier = I915_READ(DEIER) | I915_READ(GTIER); 1985 ier = I915_READ(DEIER) | I915_READ(GTIER);
1986 else 1986 else
1987 ier = I915_READ(IER); 1987 ier = I915_READ(IER);
1988 if (!ier) { 1988 if (!ier) {
1989 DRM_ERROR("something (likely vbetool) disabled " 1989 DRM_ERROR("something (likely vbetool) disabled "
1990 "interrupts, re-enabling\n"); 1990 "interrupts, re-enabling\n");
1991 ring->dev->driver->irq_preinstall(ring->dev); 1991 ring->dev->driver->irq_preinstall(ring->dev);
1992 ring->dev->driver->irq_postinstall(ring->dev); 1992 ring->dev->driver->irq_postinstall(ring->dev);
1993 } 1993 }
1994 1994
1995 trace_i915_gem_request_wait_begin(ring, seqno); 1995 trace_i915_gem_request_wait_begin(ring, seqno);
1996 1996
1997 ring->waiting_seqno = seqno; 1997 ring->waiting_seqno = seqno;
1998 if (ring->irq_get(ring)) { 1998 if (ring->irq_get(ring)) {
1999 if (dev_priv->mm.interruptible) 1999 if (dev_priv->mm.interruptible)
2000 ret = wait_event_interruptible(ring->irq_queue, 2000 ret = wait_event_interruptible(ring->irq_queue,
2001 i915_seqno_passed(ring->get_seqno(ring), seqno) 2001 i915_seqno_passed(ring->get_seqno(ring), seqno)
2002 || atomic_read(&dev_priv->mm.wedged)); 2002 || atomic_read(&dev_priv->mm.wedged));
2003 else 2003 else
2004 wait_event(ring->irq_queue, 2004 wait_event(ring->irq_queue,
2005 i915_seqno_passed(ring->get_seqno(ring), seqno) 2005 i915_seqno_passed(ring->get_seqno(ring), seqno)
2006 || atomic_read(&dev_priv->mm.wedged)); 2006 || atomic_read(&dev_priv->mm.wedged));
2007 2007
2008 ring->irq_put(ring); 2008 ring->irq_put(ring);
2009 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring), 2009 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
2010 seqno) || 2010 seqno) ||
2011 atomic_read(&dev_priv->mm.wedged), 3000)) 2011 atomic_read(&dev_priv->mm.wedged), 3000))
2012 ret = -EBUSY; 2012 ret = -EBUSY;
2013 ring->waiting_seqno = 0; 2013 ring->waiting_seqno = 0;
2014 2014
2015 trace_i915_gem_request_wait_end(ring, seqno); 2015 trace_i915_gem_request_wait_end(ring, seqno);
2016 } 2016 }
2017 if (atomic_read(&dev_priv->mm.wedged)) 2017 if (atomic_read(&dev_priv->mm.wedged))
2018 ret = -EAGAIN; 2018 ret = -EAGAIN;
2019 2019
2020 if (ret && ret != -ERESTARTSYS) 2020 if (ret && ret != -ERESTARTSYS)
2021 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n", 2021 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
2022 __func__, ret, seqno, ring->get_seqno(ring), 2022 __func__, ret, seqno, ring->get_seqno(ring),
2023 dev_priv->next_seqno); 2023 dev_priv->next_seqno);
2024 2024
2025 /* Directly dispatch request retiring. While we have the work queue 2025 /* Directly dispatch request retiring. While we have the work queue
2026 * to handle this, the waiter on a request often wants an associated 2026 * to handle this, the waiter on a request often wants an associated
2027 * buffer to have made it to the inactive list, and we would need 2027 * buffer to have made it to the inactive list, and we would need
2028 * a separate wait queue to handle that. 2028 * a separate wait queue to handle that.
2029 */ 2029 */
2030 if (ret == 0) 2030 if (ret == 0)
2031 i915_gem_retire_requests_ring(ring); 2031 i915_gem_retire_requests_ring(ring);
2032 2032
2033 return ret; 2033 return ret;
2034 } 2034 }
2035 2035
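The wait loop above keys off i915_seqno_passed(), which is defined elsewhere in the driver and not shown in this hunk. As a rough standalone sketch (an assumption about how such a check is typically written, not the driver's actual definition), a wraparound-safe "has seq1 reached seq2?" comparison of 32-bit sequence numbers looks like this:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Sketch of a wraparound-safe "has seq1 reached seq2?" check for 32-bit
 * sequence numbers: the signed difference stays non-negative as long as
 * the two values are less than 2^31 apart, even across a counter wrap. */
static bool seqno_passed_sketch(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed_sketch(10, 10));          /* equal counts as passed */
	assert(seqno_passed_sketch(11, 10));          /* normal case */
	assert(!seqno_passed_sketch(9, 10));          /* not yet signaled */
	assert(seqno_passed_sketch(5, 0xfffffff0u));  /* passed across the wrap */
	return 0;
}

The signed subtraction is what keeps the comparison correct when the sequence counter wraps, which is why both wait_event() conditions above can test it directly without special-casing overflow.
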
2036 /** 2036 /**
2037 * Ensures that all rendering to the object has completed and the object is 2037 * Ensures that all rendering to the object has completed and the object is
2038 * safe to unbind from the GTT or access from the CPU. 2038 * safe to unbind from the GTT or access from the CPU.
2039 */ 2039 */
2040 int 2040 int
2041 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) 2041 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2042 { 2042 {
2043 int ret; 2043 int ret;
2044 2044
2045 /* This function only exists to support waiting for existing rendering, 2045 /* This function only exists to support waiting for existing rendering,
2046 * not for emitting required flushes. 2046 * not for emitting required flushes.
2047 */ 2047 */
2048 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0); 2048 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
2049 2049
2050 /* If there is rendering queued on the buffer being evicted, wait for 2050 /* If there is rendering queued on the buffer being evicted, wait for
2051 * it. 2051 * it.
2052 */ 2052 */
2053 if (obj->active) { 2053 if (obj->active) {
2054 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno); 2054 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
2055 if (ret) 2055 if (ret)
2056 return ret; 2056 return ret;
2057 } 2057 }
2058 2058
2059 return 0; 2059 return 0;
2060 } 2060 }
2061 2061
2062 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) 2062 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2063 { 2063 {
2064 u32 old_write_domain, old_read_domains; 2064 u32 old_write_domain, old_read_domains;
2065 2065
2066 /* Act as a barrier for all accesses through the GTT */ 2066 /* Act as a barrier for all accesses through the GTT */
2067 mb(); 2067 mb();
2068 2068
2069 /* Force a pagefault for domain tracking on next user access */ 2069 /* Force a pagefault for domain tracking on next user access */
2070 i915_gem_release_mmap(obj); 2070 i915_gem_release_mmap(obj);
2071 2071
2072 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 2072 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2073 return; 2073 return;
2074 2074
2075 old_read_domains = obj->base.read_domains; 2075 old_read_domains = obj->base.read_domains;
2076 old_write_domain = obj->base.write_domain; 2076 old_write_domain = obj->base.write_domain;
2077 2077
2078 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT; 2078 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2079 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT; 2079 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2080 2080
2081 trace_i915_gem_object_change_domain(obj, 2081 trace_i915_gem_object_change_domain(obj,
2082 old_read_domains, 2082 old_read_domains,
2083 old_write_domain); 2083 old_write_domain);
2084 } 2084 }
2085 2085
2086 /** 2086 /**
2087 * Unbinds an object from the GTT aperture. 2087 * Unbinds an object from the GTT aperture.
2088 */ 2088 */
2089 int 2089 int
2090 i915_gem_object_unbind(struct drm_i915_gem_object *obj) 2090 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2091 { 2091 {
2092 int ret = 0; 2092 int ret = 0;
2093 2093
2094 if (obj->gtt_space == NULL) 2094 if (obj->gtt_space == NULL)
2095 return 0; 2095 return 0;
2096 2096
2097 if (obj->pin_count != 0) { 2097 if (obj->pin_count != 0) {
2098 DRM_ERROR("Attempting to unbind pinned buffer\n"); 2098 DRM_ERROR("Attempting to unbind pinned buffer\n");
2099 return -EINVAL; 2099 return -EINVAL;
2100 } 2100 }
2101 2101
2102 ret = i915_gem_object_finish_gpu(obj); 2102 ret = i915_gem_object_finish_gpu(obj);
2103 if (ret == -ERESTARTSYS) 2103 if (ret == -ERESTARTSYS)
2104 return ret; 2104 return ret;
2105 /* Continue on if we fail due to EIO, the GPU is hung so we 2105 /* Continue on if we fail due to EIO, the GPU is hung so we
2106 * should be safe and we need to cleanup or else we might 2106 * should be safe and we need to cleanup or else we might
2107 * cause memory corruption through use-after-free. 2107 * cause memory corruption through use-after-free.
2108 */ 2108 */
2109 2109
2110 i915_gem_object_finish_gtt(obj); 2110 i915_gem_object_finish_gtt(obj);
2111 2111
2112 /* Move the object to the CPU domain to ensure that 2112 /* Move the object to the CPU domain to ensure that
2113 * any possible CPU writes while it's not in the GTT 2113 * any possible CPU writes while it's not in the GTT
2114 * are flushed when we go to remap it. 2114 * are flushed when we go to remap it.
2115 */ 2115 */
2116 if (ret == 0) 2116 if (ret == 0)
2117 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 2117 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2118 if (ret == -ERESTARTSYS) 2118 if (ret == -ERESTARTSYS)
2119 return ret; 2119 return ret;
2120 if (ret) { 2120 if (ret) {
2121 /* In the event of a disaster, abandon all caches and 2121 /* In the event of a disaster, abandon all caches and
2122 * hope for the best. 2122 * hope for the best.
2123 */ 2123 */
2124 i915_gem_clflush_object(obj); 2124 i915_gem_clflush_object(obj);
2125 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; 2125 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2126 } 2126 }
2127 2127
2128 /* release the fence reg _after_ flushing */ 2128 /* release the fence reg _after_ flushing */
2129 ret = i915_gem_object_put_fence(obj); 2129 ret = i915_gem_object_put_fence(obj);
2130 if (ret == -ERESTARTSYS) 2130 if (ret == -ERESTARTSYS)
2131 return ret; 2131 return ret;
2132 2132
2133 trace_i915_gem_object_unbind(obj); 2133 trace_i915_gem_object_unbind(obj);
2134 2134
2135 i915_gem_gtt_unbind_object(obj); 2135 i915_gem_gtt_unbind_object(obj);
2136 i915_gem_object_put_pages_gtt(obj); 2136 i915_gem_object_put_pages_gtt(obj);
2137 2137
2138 list_del_init(&obj->gtt_list); 2138 list_del_init(&obj->gtt_list);
2139 list_del_init(&obj->mm_list); 2139 list_del_init(&obj->mm_list);
2140 /* Avoid an unnecessary call to unbind on rebind. */ 2140 /* Avoid an unnecessary call to unbind on rebind. */
2141 obj->map_and_fenceable = true; 2141 obj->map_and_fenceable = true;
2142 2142
2143 drm_mm_put_block(obj->gtt_space); 2143 drm_mm_put_block(obj->gtt_space);
2144 obj->gtt_space = NULL; 2144 obj->gtt_space = NULL;
2145 obj->gtt_offset = 0; 2145 obj->gtt_offset = 0;
2146 2146
2147 if (i915_gem_object_is_purgeable(obj)) 2147 if (i915_gem_object_is_purgeable(obj))
2148 i915_gem_object_truncate(obj); 2148 i915_gem_object_truncate(obj);
2149 2149
2150 return ret; 2150 return ret;
2151 } 2151 }
2152 2152
2153 int 2153 int
2154 i915_gem_flush_ring(struct intel_ring_buffer *ring, 2154 i915_gem_flush_ring(struct intel_ring_buffer *ring,
2155 uint32_t invalidate_domains, 2155 uint32_t invalidate_domains,
2156 uint32_t flush_domains) 2156 uint32_t flush_domains)
2157 { 2157 {
2158 int ret; 2158 int ret;
2159 2159
2160 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0) 2160 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2161 return 0; 2161 return 0;
2162 2162
2163 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains); 2163 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2164 2164
2165 ret = ring->flush(ring, invalidate_domains, flush_domains); 2165 ret = ring->flush(ring, invalidate_domains, flush_domains);
2166 if (ret) 2166 if (ret)
2167 return ret; 2167 return ret;
2168 2168
2169 if (flush_domains & I915_GEM_GPU_DOMAINS) 2169 if (flush_domains & I915_GEM_GPU_DOMAINS)
2170 i915_gem_process_flushing_list(ring, flush_domains); 2170 i915_gem_process_flushing_list(ring, flush_domains);
2171 2171
2172 return 0; 2172 return 0;
2173 } 2173 }
2174 2174
2175 static int i915_ring_idle(struct intel_ring_buffer *ring) 2175 static int i915_ring_idle(struct intel_ring_buffer *ring)
2176 { 2176 {
2177 int ret; 2177 int ret;
2178 2178
2179 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) 2179 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2180 return 0; 2180 return 0;
2181 2181
2182 if (!list_empty(&ring->gpu_write_list)) { 2182 if (!list_empty(&ring->gpu_write_list)) {
2183 ret = i915_gem_flush_ring(ring, 2183 ret = i915_gem_flush_ring(ring,
2184 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 2184 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2185 if (ret) 2185 if (ret)
2186 return ret; 2186 return ret;
2187 } 2187 }
2188 2188
2189 return i915_wait_request(ring, i915_gem_next_request_seqno(ring)); 2189 return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
2190 } 2190 }
2191 2191
2192 int 2192 int
2193 i915_gpu_idle(struct drm_device *dev) 2193 i915_gpu_idle(struct drm_device *dev)
2194 { 2194 {
2195 drm_i915_private_t *dev_priv = dev->dev_private; 2195 drm_i915_private_t *dev_priv = dev->dev_private;
2196 int ret, i; 2196 int ret, i;
2197 2197
2198 /* Flush everything onto the inactive list. */ 2198 /* Flush everything onto the inactive list. */
2199 for (i = 0; i < I915_NUM_RINGS; i++) { 2199 for (i = 0; i < I915_NUM_RINGS; i++) {
2200 ret = i915_ring_idle(&dev_priv->ring[i]); 2200 ret = i915_ring_idle(&dev_priv->ring[i]);
2201 if (ret) 2201 if (ret)
2202 return ret; 2202 return ret;
2203 } 2203 }
2204 2204
2205 return 0; 2205 return 0;
2206 } 2206 }
2207 2207
2208 static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj, 2208 static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2209 struct intel_ring_buffer *pipelined) 2209 struct intel_ring_buffer *pipelined)
2210 { 2210 {
2211 struct drm_device *dev = obj->base.dev; 2211 struct drm_device *dev = obj->base.dev;
2212 drm_i915_private_t *dev_priv = dev->dev_private; 2212 drm_i915_private_t *dev_priv = dev->dev_private;
2213 u32 size = obj->gtt_space->size; 2213 u32 size = obj->gtt_space->size;
2214 int regnum = obj->fence_reg; 2214 int regnum = obj->fence_reg;
2215 uint64_t val; 2215 uint64_t val;
2216 2216
2217 val = (uint64_t)((obj->gtt_offset + size - 4096) & 2217 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2218 0xfffff000) << 32; 2218 0xfffff000) << 32;
2219 val |= obj->gtt_offset & 0xfffff000; 2219 val |= obj->gtt_offset & 0xfffff000;
2220 val |= (uint64_t)((obj->stride / 128) - 1) << 2220 val |= (uint64_t)((obj->stride / 128) - 1) <<
2221 SANDYBRIDGE_FENCE_PITCH_SHIFT; 2221 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2222 2222
2223 if (obj->tiling_mode == I915_TILING_Y) 2223 if (obj->tiling_mode == I915_TILING_Y)
2224 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2224 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2225 val |= I965_FENCE_REG_VALID; 2225 val |= I965_FENCE_REG_VALID;
2226 2226
2227 if (pipelined) { 2227 if (pipelined) {
2228 int ret = intel_ring_begin(pipelined, 6); 2228 int ret = intel_ring_begin(pipelined, 6);
2229 if (ret) 2229 if (ret)
2230 return ret; 2230 return ret;
2231 2231
2232 intel_ring_emit(pipelined, MI_NOOP); 2232 intel_ring_emit(pipelined, MI_NOOP);
2233 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); 2233 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2234 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8); 2234 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2235 intel_ring_emit(pipelined, (u32)val); 2235 intel_ring_emit(pipelined, (u32)val);
2236 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4); 2236 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2237 intel_ring_emit(pipelined, (u32)(val >> 32)); 2237 intel_ring_emit(pipelined, (u32)(val >> 32));
2238 intel_ring_advance(pipelined); 2238 intel_ring_advance(pipelined);
2239 } else 2239 } else
2240 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val); 2240 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2241 2241
2242 return 0; 2242 return 0;
2243 } 2243 }
2244 2244
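The 64-bit value written to the Sandy Bridge fence register above packs the address of the object's last 4KiB page in the upper dword, its first page in the lower dword, its stride in 128-byte units (minus one), a tiling-Y flag and a valid bit. The following standalone sketch reproduces only that packing arithmetic; the SKETCH_* constants stand in for the driver's SANDYBRIDGE_FENCE_PITCH_SHIFT, I965_FENCE_TILING_Y_SHIFT and I965_FENCE_REG_VALID macros, and their values here are assumptions rather than something taken from this diff:

#include <stdint.h>
#include <stdio.h>

/* Assumed register layout: pitch field at bit 32, tiling-Y at bit 1,
 * valid at bit 0. Only the packing logic mirrors the function above. */
#define SKETCH_FENCE_PITCH_SHIFT   32
#define SKETCH_FENCE_TILING_Y_BIT  (1ull << 1)
#define SKETCH_FENCE_REG_VALID     (1ull << 0)

static uint64_t snb_fence_value_sketch(uint32_t gtt_offset, uint32_t size,
				       uint32_t stride, int tiling_y)
{
	uint64_t val;

	/* upper dword: start of the object's last 4K page; lower dword: first page */
	val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
	val |= gtt_offset & 0xfffff000;
	val |= (uint64_t)((stride / 128) - 1) << SKETCH_FENCE_PITCH_SHIFT;
	if (tiling_y)
		val |= SKETCH_FENCE_TILING_Y_BIT;
	val |= SKETCH_FENCE_REG_VALID;
	return val;
}

int main(void)
{
	/* hypothetical 1MiB X-tiled object at GTT offset 0x100000, 512-byte stride */
	printf("fence value: 0x%016llx\n",
	       (unsigned long long)snb_fence_value_sketch(0x100000, 0x100000, 512, 0));
	return 0;
}
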
2245 static int i965_write_fence_reg(struct drm_i915_gem_object *obj, 2245 static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2246 struct intel_ring_buffer *pipelined) 2246 struct intel_ring_buffer *pipelined)
2247 { 2247 {
2248 struct drm_device *dev = obj->base.dev; 2248 struct drm_device *dev = obj->base.dev;
2249 drm_i915_private_t *dev_priv = dev->dev_private; 2249 drm_i915_private_t *dev_priv = dev->dev_private;
2250 u32 size = obj->gtt_space->size; 2250 u32 size = obj->gtt_space->size;
2251 int regnum = obj->fence_reg; 2251 int regnum = obj->fence_reg;
2252 uint64_t val; 2252 uint64_t val;
2253 2253
2254 val = (uint64_t)((obj->gtt_offset + size - 4096) & 2254 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2255 0xfffff000) << 32; 2255 0xfffff000) << 32;
2256 val |= obj->gtt_offset & 0xfffff000; 2256 val |= obj->gtt_offset & 0xfffff000;
2257 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; 2257 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2258 if (obj->tiling_mode == I915_TILING_Y) 2258 if (obj->tiling_mode == I915_TILING_Y)
2259 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2259 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2260 val |= I965_FENCE_REG_VALID; 2260 val |= I965_FENCE_REG_VALID;
2261 2261
2262 if (pipelined) { 2262 if (pipelined) {
2263 int ret = intel_ring_begin(pipelined, 6); 2263 int ret = intel_ring_begin(pipelined, 6);
2264 if (ret) 2264 if (ret)
2265 return ret; 2265 return ret;
2266 2266
2267 intel_ring_emit(pipelined, MI_NOOP); 2267 intel_ring_emit(pipelined, MI_NOOP);
2268 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); 2268 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2269 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8); 2269 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2270 intel_ring_emit(pipelined, (u32)val); 2270 intel_ring_emit(pipelined, (u32)val);
2271 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4); 2271 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2272 intel_ring_emit(pipelined, (u32)(val >> 32)); 2272 intel_ring_emit(pipelined, (u32)(val >> 32));
2273 intel_ring_advance(pipelined); 2273 intel_ring_advance(pipelined);
2274 } else 2274 } else
2275 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val); 2275 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2276 2276
2277 return 0; 2277 return 0;
2278 } 2278 }
2279 2279
2280 static int i915_write_fence_reg(struct drm_i915_gem_object *obj, 2280 static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2281 struct intel_ring_buffer *pipelined) 2281 struct intel_ring_buffer *pipelined)
2282 { 2282 {
2283 struct drm_device *dev = obj->base.dev; 2283 struct drm_device *dev = obj->base.dev;
2284 drm_i915_private_t *dev_priv = dev->dev_private; 2284 drm_i915_private_t *dev_priv = dev->dev_private;
2285 u32 size = obj->gtt_space->size; 2285 u32 size = obj->gtt_space->size;
2286 u32 fence_reg, val, pitch_val; 2286 u32 fence_reg, val, pitch_val;
2287 int tile_width; 2287 int tile_width;
2288 2288
2289 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || 2289 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2290 (size & -size) != size || 2290 (size & -size) != size ||
2291 (obj->gtt_offset & (size - 1)), 2291 (obj->gtt_offset & (size - 1)),
2292 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", 2292 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2293 obj->gtt_offset, obj->map_and_fenceable, size)) 2293 obj->gtt_offset, obj->map_and_fenceable, size))
2294 return -EINVAL; 2294 return -EINVAL;
2295 2295
2296 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) 2296 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2297 tile_width = 128; 2297 tile_width = 128;
2298 else 2298 else
2299 tile_width = 512; 2299 tile_width = 512;
2300 2300
2301 /* Note: pitch better be a power of two tile widths */ 2301 /* Note: pitch better be a power of two tile widths */
2302 pitch_val = obj->stride / tile_width; 2302 pitch_val = obj->stride / tile_width;
2303 pitch_val = ffs(pitch_val) - 1; 2303 pitch_val = ffs(pitch_val) - 1;
2304 2304
2305 val = obj->gtt_offset; 2305 val = obj->gtt_offset;
2306 if (obj->tiling_mode == I915_TILING_Y) 2306 if (obj->tiling_mode == I915_TILING_Y)
2307 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2307 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2308 val |= I915_FENCE_SIZE_BITS(size); 2308 val |= I915_FENCE_SIZE_BITS(size);
2309 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 2309 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2310 val |= I830_FENCE_REG_VALID; 2310 val |= I830_FENCE_REG_VALID;
2311 2311
2312 fence_reg = obj->fence_reg; 2312 fence_reg = obj->fence_reg;
2313 if (fence_reg < 8) 2313 if (fence_reg < 8)
2314 fence_reg = FENCE_REG_830_0 + fence_reg * 4; 2314 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2315 else 2315 else
2316 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; 2316 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2317 2317
2318 if (pipelined) { 2318 if (pipelined) {
2319 int ret = intel_ring_begin(pipelined, 4); 2319 int ret = intel_ring_begin(pipelined, 4);
2320 if (ret) 2320 if (ret)
2321 return ret; 2321 return ret;
2322 2322
2323 intel_ring_emit(pipelined, MI_NOOP); 2323 intel_ring_emit(pipelined, MI_NOOP);
2324 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); 2324 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2325 intel_ring_emit(pipelined, fence_reg); 2325 intel_ring_emit(pipelined, fence_reg);
2326 intel_ring_emit(pipelined, val); 2326 intel_ring_emit(pipelined, val);
2327 intel_ring_advance(pipelined); 2327 intel_ring_advance(pipelined);
2328 } else 2328 } else
2329 I915_WRITE(fence_reg, val); 2329 I915_WRITE(fence_reg, val);
2330 2330
2331 return 0; 2331 return 0;
2332 } 2332 }
2333 2333
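Two idioms in the pre-gen4 path above are easy to misread: (size & -size) != size rejects any size that is not a power of two, and ffs(pitch_val) - 1 converts a power-of-two pitch, measured in tile widths, into the log2 encoding the fence register expects. A small standalone illustration of both, using hypothetical sizes:

#include <assert.h>
#include <stdbool.h>
#include <strings.h>	/* ffs() */

/* A value is a power of two exactly when it equals its lowest set bit. */
static bool is_pot(unsigned int size)
{
	return size != 0 && (size & -size) == size;
}

int main(void)
{
	assert(is_pot(1u << 20));	/* 1MiB object: accepted */
	assert(!is_pot(3u << 19));	/* 1.5MiB object: rejected by the WARN above */

	/* pitch encoding from the function above: a 4096-byte stride with
	 * 512-byte tiles is 8 tile widths, encoded as log2(8) = 3 */
	unsigned int pitch_val = 4096 / 512;
	assert(ffs(pitch_val) - 1 == 3);
	return 0;
}
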
2334 static int i830_write_fence_reg(struct drm_i915_gem_object *obj, 2334 static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2335 struct intel_ring_buffer *pipelined) 2335 struct intel_ring_buffer *pipelined)
2336 { 2336 {
2337 struct drm_device *dev = obj->base.dev; 2337 struct drm_device *dev = obj->base.dev;
2338 drm_i915_private_t *dev_priv = dev->dev_private; 2338 drm_i915_private_t *dev_priv = dev->dev_private;
2339 u32 size = obj->gtt_space->size; 2339 u32 size = obj->gtt_space->size;
2340 int regnum = obj->fence_reg; 2340 int regnum = obj->fence_reg;
2341 uint32_t val; 2341 uint32_t val;
2342 uint32_t pitch_val; 2342 uint32_t pitch_val;
2343 2343
2344 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || 2344 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2345 (size & -size) != size || 2345 (size & -size) != size ||
2346 (obj->gtt_offset & (size - 1)), 2346 (obj->gtt_offset & (size - 1)),
2347 "object 0x%08x not 512K or pot-size 0x%08x aligned\n", 2347 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2348 obj->gtt_offset, size)) 2348 obj->gtt_offset, size))
2349 return -EINVAL; 2349 return -EINVAL;
2350 2350
2351 pitch_val = obj->stride / 128; 2351 pitch_val = obj->stride / 128;
2352 pitch_val = ffs(pitch_val) - 1; 2352 pitch_val = ffs(pitch_val) - 1;
2353 2353
2354 val = obj->gtt_offset; 2354 val = obj->gtt_offset;
2355 if (obj->tiling_mode == I915_TILING_Y) 2355 if (obj->tiling_mode == I915_TILING_Y)
2356 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2356 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2357 val |= I830_FENCE_SIZE_BITS(size); 2357 val |= I830_FENCE_SIZE_BITS(size);
2358 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 2358 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2359 val |= I830_FENCE_REG_VALID; 2359 val |= I830_FENCE_REG_VALID;
2360 2360
2361 if (pipelined) { 2361 if (pipelined) {
2362 int ret = intel_ring_begin(pipelined, 4); 2362 int ret = intel_ring_begin(pipelined, 4);
2363 if (ret) 2363 if (ret)
2364 return ret; 2364 return ret;
2365 2365
2366 intel_ring_emit(pipelined, MI_NOOP); 2366 intel_ring_emit(pipelined, MI_NOOP);
2367 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); 2367 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2368 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4); 2368 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2369 intel_ring_emit(pipelined, val); 2369 intel_ring_emit(pipelined, val);
2370 intel_ring_advance(pipelined); 2370 intel_ring_advance(pipelined);
2371 } else 2371 } else
2372 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val); 2372 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2373 2373
2374 return 0; 2374 return 0;
2375 } 2375 }
2376 2376
2377 static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno) 2377 static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2378 { 2378 {
2379 return i915_seqno_passed(ring->get_seqno(ring), seqno); 2379 return i915_seqno_passed(ring->get_seqno(ring), seqno);
2380 } 2380 }
2381 2381
2382 static int 2382 static int
2383 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, 2383 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2384 struct intel_ring_buffer *pipelined) 2384 struct intel_ring_buffer *pipelined)
2385 { 2385 {
2386 int ret; 2386 int ret;
2387 2387
2388 if (obj->fenced_gpu_access) { 2388 if (obj->fenced_gpu_access) {
2389 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 2389 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2390 ret = i915_gem_flush_ring(obj->last_fenced_ring, 2390 ret = i915_gem_flush_ring(obj->last_fenced_ring,
2391 0, obj->base.write_domain); 2391 0, obj->base.write_domain);
2392 if (ret) 2392 if (ret)
2393 return ret; 2393 return ret;
2394 } 2394 }
2395 2395
2396 obj->fenced_gpu_access = false; 2396 obj->fenced_gpu_access = false;
2397 } 2397 }
2398 2398
2399 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) { 2399 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2400 if (!ring_passed_seqno(obj->last_fenced_ring, 2400 if (!ring_passed_seqno(obj->last_fenced_ring,
2401 obj->last_fenced_seqno)) { 2401 obj->last_fenced_seqno)) {
2402 ret = i915_wait_request(obj->last_fenced_ring, 2402 ret = i915_wait_request(obj->last_fenced_ring,
2403 obj->last_fenced_seqno); 2403 obj->last_fenced_seqno);
2404 if (ret) 2404 if (ret)
2405 return ret; 2405 return ret;
2406 } 2406 }
2407 2407
2408 obj->last_fenced_seqno = 0; 2408 obj->last_fenced_seqno = 0;
2409 obj->last_fenced_ring = NULL; 2409 obj->last_fenced_ring = NULL;
2410 } 2410 }
2411 2411
2412 /* Ensure that all CPU reads are completed before installing a fence 2412 /* Ensure that all CPU reads are completed before installing a fence
2413 * and all writes before removing the fence. 2413 * and all writes before removing the fence.
2414 */ 2414 */
2415 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) 2415 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2416 mb(); 2416 mb();
2417 2417
2418 return 0; 2418 return 0;
2419 } 2419 }
2420 2420
2421 int 2421 int
2422 i915_gem_object_put_fence(struct drm_i915_gem_object *obj) 2422 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2423 { 2423 {
2424 int ret; 2424 int ret;
2425 2425
2426 if (obj->tiling_mode) 2426 if (obj->tiling_mode)
2427 i915_gem_release_mmap(obj); 2427 i915_gem_release_mmap(obj);
2428 2428
2429 ret = i915_gem_object_flush_fence(obj, NULL); 2429 ret = i915_gem_object_flush_fence(obj, NULL);
2430 if (ret) 2430 if (ret)
2431 return ret; 2431 return ret;
2432 2432
2433 if (obj->fence_reg != I915_FENCE_REG_NONE) { 2433 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2434 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2434 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2435 i915_gem_clear_fence_reg(obj->base.dev, 2435 i915_gem_clear_fence_reg(obj->base.dev,
2436 &dev_priv->fence_regs[obj->fence_reg]); 2436 &dev_priv->fence_regs[obj->fence_reg]);
2437 2437
2438 obj->fence_reg = I915_FENCE_REG_NONE; 2438 obj->fence_reg = I915_FENCE_REG_NONE;
2439 } 2439 }
2440 2440
2441 return 0; 2441 return 0;
2442 } 2442 }
2443 2443
2444 static struct drm_i915_fence_reg * 2444 static struct drm_i915_fence_reg *
2445 i915_find_fence_reg(struct drm_device *dev, 2445 i915_find_fence_reg(struct drm_device *dev,
2446 struct intel_ring_buffer *pipelined) 2446 struct intel_ring_buffer *pipelined)
2447 { 2447 {
2448 struct drm_i915_private *dev_priv = dev->dev_private; 2448 struct drm_i915_private *dev_priv = dev->dev_private;
2449 struct drm_i915_fence_reg *reg, *first, *avail; 2449 struct drm_i915_fence_reg *reg, *first, *avail;
2450 int i; 2450 int i;
2451 2451
2452 /* First try to find a free reg */ 2452 /* First try to find a free reg */
2453 avail = NULL; 2453 avail = NULL;
2454 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { 2454 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2455 reg = &dev_priv->fence_regs[i]; 2455 reg = &dev_priv->fence_regs[i];
2456 if (!reg->obj) 2456 if (!reg->obj)
2457 return reg; 2457 return reg;
2458 2458
2459 if (!reg->obj->pin_count) 2459 if (!reg->obj->pin_count)
2460 avail = reg; 2460 avail = reg;
2461 } 2461 }
2462 2462
2463 if (avail == NULL) 2463 if (avail == NULL)
2464 return NULL; 2464 return NULL;
2465 2465
2466 /* None available, try to steal one or wait for a user to finish */ 2466 /* None available, try to steal one or wait for a user to finish */
2467 avail = first = NULL; 2467 avail = first = NULL;
2468 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { 2468 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2469 if (reg->obj->pin_count) 2469 if (reg->obj->pin_count)
2470 continue; 2470 continue;
2471 2471
2472 if (first == NULL) 2472 if (first == NULL)
2473 first = reg; 2473 first = reg;
2474 2474
2475 if (!pipelined || 2475 if (!pipelined ||
2476 !reg->obj->last_fenced_ring || 2476 !reg->obj->last_fenced_ring ||
2477 reg->obj->last_fenced_ring == pipelined) { 2477 reg->obj->last_fenced_ring == pipelined) {
2478 avail = reg; 2478 avail = reg;
2479 break; 2479 break;
2480 } 2480 }
2481 } 2481 }
2482 2482
2483 if (avail == NULL) 2483 if (avail == NULL)
2484 avail = first; 2484 avail = first;
2485 2485
2486 return avail; 2486 return avail;
2487 } 2487 }
2488 2488
2489 /** 2489 /**
2490 * i915_gem_object_get_fence - set up a fence reg for an object 2490 * i915_gem_object_get_fence - set up a fence reg for an object
2491 * @obj: object to map through a fence reg 2491 * @obj: object to map through a fence reg
2492 * @pipelined: ring on which to queue the change, or NULL for CPU access 2492 * @pipelined: ring on which to queue the change, or NULL for CPU access
2493 * @interruptible: must we wait uninterruptibly for the register to retire? 2493 * @interruptible: must we wait uninterruptibly for the register to retire?
2494 * 2494 *
2495 * When mapping objects through the GTT, userspace wants to be able to write 2495 * When mapping objects through the GTT, userspace wants to be able to write
2496 * to them without having to worry about swizzling if the object is tiled. 2496 * to them without having to worry about swizzling if the object is tiled.
2497 * 2497 *
2498 * This function walks the fence regs looking for a free one for @obj, 2498 * This function walks the fence regs looking for a free one for @obj,
2499 * stealing one if it can't find any. 2499 * stealing one if it can't find any.
2500 * 2500 *
2501 * It then sets up the reg based on the object's properties: address, pitch 2501 * It then sets up the reg based on the object's properties: address, pitch
2502 * and tiling format. 2502 * and tiling format.
2503 */ 2503 */
2504 int 2504 int
2505 i915_gem_object_get_fence(struct drm_i915_gem_object *obj, 2505 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2506 struct intel_ring_buffer *pipelined) 2506 struct intel_ring_buffer *pipelined)
2507 { 2507 {
2508 struct drm_device *dev = obj->base.dev; 2508 struct drm_device *dev = obj->base.dev;
2509 struct drm_i915_private *dev_priv = dev->dev_private; 2509 struct drm_i915_private *dev_priv = dev->dev_private;
2510 struct drm_i915_fence_reg *reg; 2510 struct drm_i915_fence_reg *reg;
2511 int ret; 2511 int ret;
2512 2512
2513 /* XXX disable pipelining. There are bugs. Shocking. */ 2513 /* XXX disable pipelining. There are bugs. Shocking. */
2514 pipelined = NULL; 2514 pipelined = NULL;
2515 2515
2516 /* Just update our place in the LRU if our fence is getting reused. */ 2516 /* Just update our place in the LRU if our fence is getting reused. */
2517 if (obj->fence_reg != I915_FENCE_REG_NONE) { 2517 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2518 reg = &dev_priv->fence_regs[obj->fence_reg]; 2518 reg = &dev_priv->fence_regs[obj->fence_reg];
2519 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); 2519 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2520 2520
2521 if (obj->tiling_changed) { 2521 if (obj->tiling_changed) {
2522 ret = i915_gem_object_flush_fence(obj, pipelined); 2522 ret = i915_gem_object_flush_fence(obj, pipelined);
2523 if (ret) 2523 if (ret)
2524 return ret; 2524 return ret;
2525 2525
2526 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) 2526 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2527 pipelined = NULL; 2527 pipelined = NULL;
2528 2528
2529 if (pipelined) { 2529 if (pipelined) {
2530 reg->setup_seqno = 2530 reg->setup_seqno =
2531 i915_gem_next_request_seqno(pipelined); 2531 i915_gem_next_request_seqno(pipelined);
2532 obj->last_fenced_seqno = reg->setup_seqno; 2532 obj->last_fenced_seqno = reg->setup_seqno;
2533 obj->last_fenced_ring = pipelined; 2533 obj->last_fenced_ring = pipelined;
2534 } 2534 }
2535 2535
2536 goto update; 2536 goto update;
2537 } 2537 }
2538 2538
2539 if (!pipelined) { 2539 if (!pipelined) {
2540 if (reg->setup_seqno) { 2540 if (reg->setup_seqno) {
2541 if (!ring_passed_seqno(obj->last_fenced_ring, 2541 if (!ring_passed_seqno(obj->last_fenced_ring,
2542 reg->setup_seqno)) { 2542 reg->setup_seqno)) {
2543 ret = i915_wait_request(obj->last_fenced_ring, 2543 ret = i915_wait_request(obj->last_fenced_ring,
2544 reg->setup_seqno); 2544 reg->setup_seqno);
2545 if (ret) 2545 if (ret)
2546 return ret; 2546 return ret;
2547 } 2547 }
2548 2548
2549 reg->setup_seqno = 0; 2549 reg->setup_seqno = 0;
2550 } 2550 }
2551 } else if (obj->last_fenced_ring && 2551 } else if (obj->last_fenced_ring &&
2552 obj->last_fenced_ring != pipelined) { 2552 obj->last_fenced_ring != pipelined) {
2553 ret = i915_gem_object_flush_fence(obj, pipelined); 2553 ret = i915_gem_object_flush_fence(obj, pipelined);
2554 if (ret) 2554 if (ret)
2555 return ret; 2555 return ret;
2556 } 2556 }
2557 2557
2558 return 0; 2558 return 0;
2559 } 2559 }
2560 2560
2561 reg = i915_find_fence_reg(dev, pipelined); 2561 reg = i915_find_fence_reg(dev, pipelined);
2562 if (reg == NULL) 2562 if (reg == NULL)
2563 return -ENOSPC; 2563 return -ENOSPC;
2564 2564
2565 ret = i915_gem_object_flush_fence(obj, pipelined); 2565 ret = i915_gem_object_flush_fence(obj, pipelined);
2566 if (ret) 2566 if (ret)
2567 return ret; 2567 return ret;
2568 2568
2569 if (reg->obj) { 2569 if (reg->obj) {
2570 struct drm_i915_gem_object *old = reg->obj; 2570 struct drm_i915_gem_object *old = reg->obj;
2571 2571
2572 drm_gem_object_reference(&old->base); 2572 drm_gem_object_reference(&old->base);
2573 2573
2574 if (old->tiling_mode) 2574 if (old->tiling_mode)
2575 i915_gem_release_mmap(old); 2575 i915_gem_release_mmap(old);
2576 2576
2577 ret = i915_gem_object_flush_fence(old, pipelined); 2577 ret = i915_gem_object_flush_fence(old, pipelined);
2578 if (ret) { 2578 if (ret) {
2579 drm_gem_object_unreference(&old->base); 2579 drm_gem_object_unreference(&old->base);
2580 return ret; 2580 return ret;
2581 } 2581 }
2582 2582
2583 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0) 2583 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2584 pipelined = NULL; 2584 pipelined = NULL;
2585 2585
2586 old->fence_reg = I915_FENCE_REG_NONE; 2586 old->fence_reg = I915_FENCE_REG_NONE;
2587 old->last_fenced_ring = pipelined; 2587 old->last_fenced_ring = pipelined;
2588 old->last_fenced_seqno = 2588 old->last_fenced_seqno =
2589 pipelined ? i915_gem_next_request_seqno(pipelined) : 0; 2589 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2590 2590
2591 drm_gem_object_unreference(&old->base); 2591 drm_gem_object_unreference(&old->base);
2592 } else if (obj->last_fenced_seqno == 0) 2592 } else if (obj->last_fenced_seqno == 0)
2593 pipelined = NULL; 2593 pipelined = NULL;
2594 2594
2595 reg->obj = obj; 2595 reg->obj = obj;
2596 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); 2596 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2597 obj->fence_reg = reg - dev_priv->fence_regs; 2597 obj->fence_reg = reg - dev_priv->fence_regs;
2598 obj->last_fenced_ring = pipelined; 2598 obj->last_fenced_ring = pipelined;
2599 2599
2600 reg->setup_seqno = 2600 reg->setup_seqno =
2601 pipelined ? i915_gem_next_request_seqno(pipelined) : 0; 2601 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2602 obj->last_fenced_seqno = reg->setup_seqno; 2602 obj->last_fenced_seqno = reg->setup_seqno;
2603 2603
2604 update: 2604 update:
2605 obj->tiling_changed = false; 2605 obj->tiling_changed = false;
2606 switch (INTEL_INFO(dev)->gen) { 2606 switch (INTEL_INFO(dev)->gen) {
2607 case 7: 2607 case 7:
2608 case 6: 2608 case 6:
2609 ret = sandybridge_write_fence_reg(obj, pipelined); 2609 ret = sandybridge_write_fence_reg(obj, pipelined);
2610 break; 2610 break;
2611 case 5: 2611 case 5:
2612 case 4: 2612 case 4:
2613 ret = i965_write_fence_reg(obj, pipelined); 2613 ret = i965_write_fence_reg(obj, pipelined);
2614 break; 2614 break;
2615 case 3: 2615 case 3:
2616 ret = i915_write_fence_reg(obj, pipelined); 2616 ret = i915_write_fence_reg(obj, pipelined);
2617 break; 2617 break;
2618 case 2: 2618 case 2:
2619 ret = i830_write_fence_reg(obj, pipelined); 2619 ret = i830_write_fence_reg(obj, pipelined);
2620 break; 2620 break;
2621 } 2621 }
2622 2622
2623 return ret; 2623 return ret;
2624 } 2624 }
2625 2625
2626 /** 2626 /**
2627 * i915_gem_clear_fence_reg - clear out fence register info 2627 * i915_gem_clear_fence_reg - clear out fence register info
2628 * @obj: object to clear 2628 * @obj: object to clear
2629 * 2629 *
2630 * Zeroes out the fence register itself and clears out the associated 2630 * Zeroes out the fence register itself and clears out the associated
2631 * data structures in dev_priv and obj. 2631 * data structures in dev_priv and obj.
2632 */ 2632 */
2633 static void 2633 static void
2634 i915_gem_clear_fence_reg(struct drm_device *dev, 2634 i915_gem_clear_fence_reg(struct drm_device *dev,
2635 struct drm_i915_fence_reg *reg) 2635 struct drm_i915_fence_reg *reg)
2636 { 2636 {
2637 drm_i915_private_t *dev_priv = dev->dev_private; 2637 drm_i915_private_t *dev_priv = dev->dev_private;
2638 uint32_t fence_reg = reg - dev_priv->fence_regs; 2638 uint32_t fence_reg = reg - dev_priv->fence_regs;
2639 2639
2640 switch (INTEL_INFO(dev)->gen) { 2640 switch (INTEL_INFO(dev)->gen) {
2641 case 7: 2641 case 7:
2642 case 6: 2642 case 6:
2643 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0); 2643 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2644 break; 2644 break;
2645 case 5: 2645 case 5:
2646 case 4: 2646 case 4:
2647 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0); 2647 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
2648 break; 2648 break;
2649 case 3: 2649 case 3:
2650 if (fence_reg >= 8) 2650 if (fence_reg >= 8)
2651 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; 2651 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2652 else 2652 else
2653 case 2: 2653 case 2:
2654 fence_reg = FENCE_REG_830_0 + fence_reg * 4; 2654 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2655 2655
2656 I915_WRITE(fence_reg, 0); 2656 I915_WRITE(fence_reg, 0);
2657 break; 2657 break;
2658 } 2658 }
2659 2659
2660 list_del_init(&reg->lru_list); 2660 list_del_init(&reg->lru_list);
2661 reg->obj = NULL; 2661 reg->obj = NULL;
2662 reg->setup_seqno = 0; 2662 reg->setup_seqno = 0;
2663 } 2663 }
2664 2664
2665 /** 2665 /**
2666 * Finds free space in the GTT aperture and binds the object there. 2666 * Finds free space in the GTT aperture and binds the object there.
2667 */ 2667 */
2668 static int 2668 static int
2669 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 2669 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2670 unsigned alignment, 2670 unsigned alignment,
2671 bool map_and_fenceable) 2671 bool map_and_fenceable)
2672 { 2672 {
2673 struct drm_device *dev = obj->base.dev; 2673 struct drm_device *dev = obj->base.dev;
2674 drm_i915_private_t *dev_priv = dev->dev_private; 2674 drm_i915_private_t *dev_priv = dev->dev_private;
2675 struct drm_mm_node *free_space; 2675 struct drm_mm_node *free_space;
2676 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; 2676 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2677 u32 size, fence_size, fence_alignment, unfenced_alignment; 2677 u32 size, fence_size, fence_alignment, unfenced_alignment;
2678 bool mappable, fenceable; 2678 bool mappable, fenceable;
2679 int ret; 2679 int ret;
2680 2680
2681 if (obj->madv != I915_MADV_WILLNEED) { 2681 if (obj->madv != I915_MADV_WILLNEED) {
2682 DRM_ERROR("Attempting to bind a purgeable object\n"); 2682 DRM_ERROR("Attempting to bind a purgeable object\n");
2683 return -EINVAL; 2683 return -EINVAL;
2684 } 2684 }
2685 2685
2686 fence_size = i915_gem_get_gtt_size(dev, 2686 fence_size = i915_gem_get_gtt_size(dev,
2687 obj->base.size, 2687 obj->base.size,
2688 obj->tiling_mode); 2688 obj->tiling_mode);
2689 fence_alignment = i915_gem_get_gtt_alignment(dev, 2689 fence_alignment = i915_gem_get_gtt_alignment(dev,
2690 obj->base.size, 2690 obj->base.size,
2691 obj->tiling_mode); 2691 obj->tiling_mode);
2692 unfenced_alignment = 2692 unfenced_alignment =
2693 i915_gem_get_unfenced_gtt_alignment(dev, 2693 i915_gem_get_unfenced_gtt_alignment(dev,
2694 obj->base.size, 2694 obj->base.size,
2695 obj->tiling_mode); 2695 obj->tiling_mode);
2696 2696
2697 if (alignment == 0) 2697 if (alignment == 0)
2698 alignment = map_and_fenceable ? fence_alignment : 2698 alignment = map_and_fenceable ? fence_alignment :
2699 unfenced_alignment; 2699 unfenced_alignment;
2700 if (map_and_fenceable && alignment & (fence_alignment - 1)) { 2700 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2701 DRM_ERROR("Invalid object alignment requested %u\n", alignment); 2701 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2702 return -EINVAL; 2702 return -EINVAL;
2703 } 2703 }
2704 2704
2705 size = map_and_fenceable ? fence_size : obj->base.size; 2705 size = map_and_fenceable ? fence_size : obj->base.size;
2706 2706
2707 /* If the object is bigger than the entire aperture, reject it early 2707 /* If the object is bigger than the entire aperture, reject it early
2708 * before evicting everything in a vain attempt to find space. 2708 * before evicting everything in a vain attempt to find space.
2709 */ 2709 */
2710 if (obj->base.size > 2710 if (obj->base.size >
2711 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { 2711 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2712 DRM_ERROR("Attempting to bind an object larger than the aperture\n"); 2712 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2713 return -E2BIG; 2713 return -E2BIG;
2714 } 2714 }
2715 2715
2716 search_free: 2716 search_free:
2717 if (map_and_fenceable) 2717 if (map_and_fenceable)
2718 free_space = 2718 free_space =
2719 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, 2719 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2720 size, alignment, 0, 2720 size, alignment, 0,
2721 dev_priv->mm.gtt_mappable_end, 2721 dev_priv->mm.gtt_mappable_end,
2722 0); 2722 0);
2723 else 2723 else
2724 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, 2724 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2725 size, alignment, 0); 2725 size, alignment, 0);
2726 2726
2727 if (free_space != NULL) { 2727 if (free_space != NULL) {
2728 if (map_and_fenceable) 2728 if (map_and_fenceable)
2729 obj->gtt_space = 2729 obj->gtt_space =
2730 drm_mm_get_block_range_generic(free_space, 2730 drm_mm_get_block_range_generic(free_space,
2731 size, alignment, 0, 2731 size, alignment, 0,
2732 dev_priv->mm.gtt_mappable_end, 2732 dev_priv->mm.gtt_mappable_end,
2733 0); 2733 0);
2734 else 2734 else
2735 obj->gtt_space = 2735 obj->gtt_space =
2736 drm_mm_get_block(free_space, size, alignment); 2736 drm_mm_get_block(free_space, size, alignment);
2737 } 2737 }
2738 if (obj->gtt_space == NULL) { 2738 if (obj->gtt_space == NULL) {
2739 /* If the gtt is empty and we're still having trouble 2739 /* If the gtt is empty and we're still having trouble
2740 * fitting our object in, we're out of memory. 2740 * fitting our object in, we're out of memory.
2741 */ 2741 */
2742 ret = i915_gem_evict_something(dev, size, alignment, 2742 ret = i915_gem_evict_something(dev, size, alignment,
2743 map_and_fenceable); 2743 map_and_fenceable);
2744 if (ret) 2744 if (ret)
2745 return ret; 2745 return ret;
2746 2746
2747 goto search_free; 2747 goto search_free;
2748 } 2748 }
2749 2749
2750 ret = i915_gem_object_get_pages_gtt(obj, gfpmask); 2750 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
2751 if (ret) { 2751 if (ret) {
2752 drm_mm_put_block(obj->gtt_space); 2752 drm_mm_put_block(obj->gtt_space);
2753 obj->gtt_space = NULL; 2753 obj->gtt_space = NULL;
2754 2754
2755 if (ret == -ENOMEM) { 2755 if (ret == -ENOMEM) {
2756 /* first try to reclaim some memory by clearing the GTT */ 2756 /* first try to reclaim some memory by clearing the GTT */
2757 ret = i915_gem_evict_everything(dev, false); 2757 ret = i915_gem_evict_everything(dev, false);
2758 if (ret) { 2758 if (ret) {
2759 /* now try to shrink everyone else */ 2759 /* now try to shrink everyone else */
2760 if (gfpmask) { 2760 if (gfpmask) {
2761 gfpmask = 0; 2761 gfpmask = 0;
2762 goto search_free; 2762 goto search_free;
2763 } 2763 }
2764 2764
2765 return -ENOMEM; 2765 return -ENOMEM;
2766 } 2766 }
2767 2767
2768 goto search_free; 2768 goto search_free;
2769 } 2769 }
2770 2770
2771 return ret; 2771 return ret;
2772 } 2772 }
2773 2773
2774 ret = i915_gem_gtt_bind_object(obj); 2774 ret = i915_gem_gtt_bind_object(obj);
2775 if (ret) { 2775 if (ret) {
2776 i915_gem_object_put_pages_gtt(obj); 2776 i915_gem_object_put_pages_gtt(obj);
2777 drm_mm_put_block(obj->gtt_space); 2777 drm_mm_put_block(obj->gtt_space);
2778 obj->gtt_space = NULL; 2778 obj->gtt_space = NULL;
2779 2779
2780 if (i915_gem_evict_everything(dev, false)) 2780 if (i915_gem_evict_everything(dev, false))
2781 return ret; 2781 return ret;
2782 2782
2783 goto search_free; 2783 goto search_free;
2784 } 2784 }
2785 2785
2786 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); 2786 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2787 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 2787 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2788 2788
2789 /* Assert that the object is not currently in any GPU domain. As it 2789 /* Assert that the object is not currently in any GPU domain. As it
2790 * wasn't in the GTT, there shouldn't be any way it could have been in 2790 * wasn't in the GTT, there shouldn't be any way it could have been in
2791 * a GPU cache 2791 * a GPU cache
2792 */ 2792 */
2793 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); 2793 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2794 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); 2794 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2795 2795
2796 obj->gtt_offset = obj->gtt_space->start; 2796 obj->gtt_offset = obj->gtt_space->start;
2797 2797
2798 fenceable = 2798 fenceable =
2799 obj->gtt_space->size == fence_size && 2799 obj->gtt_space->size == fence_size &&
2800 (obj->gtt_space->start & (fence_alignment - 1)) == 0; 2800 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2801 2801
2802 mappable = 2802 mappable =
2803 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; 2803 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2804 2804
2805 obj->map_and_fenceable = mappable && fenceable; 2805 obj->map_and_fenceable = mappable && fenceable;
2806 2806
2807 trace_i915_gem_object_bind(obj, map_and_fenceable); 2807 trace_i915_gem_object_bind(obj, map_and_fenceable);
2808 return 0; 2808 return 0;
2809 } 2809 }
2810 2810
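At the end of the bind path above, map_and_fenceable becomes true only when the allocated GTT node has exactly the fence size and fence alignment and the object ends below the mappable part of the aperture. A standalone sketch of that check, where every number is a hypothetical input rather than a value the driver would compute:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the drm_mm node fields used above; values are hypothetical. */
struct node_sketch {
	uint32_t start;
	uint32_t size;
};

static bool map_and_fenceable_sketch(struct node_sketch node,
				     uint32_t obj_size,
				     uint32_t fence_size,
				     uint32_t fence_alignment,
				     uint32_t gtt_mappable_end)
{
	/* fenceable: node is exactly fence-sized and fence-aligned */
	bool fenceable = node.size == fence_size &&
			 (node.start & (fence_alignment - 1)) == 0;
	/* mappable: object fits entirely below the mappable aperture end */
	bool mappable = node.start + obj_size <= gtt_mappable_end;

	return fenceable && mappable;
}

int main(void)
{
	struct node_sketch node = { .start = 1u << 20, .size = 1u << 20 };

	printf("map_and_fenceable: %d\n",
	       map_and_fenceable_sketch(node, 1u << 20, 1u << 20,
					1u << 20, 256u << 20));
	return 0;
}
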
2811 void 2811 void
2812 i915_gem_clflush_object(struct drm_i915_gem_object *obj) 2812 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2813 { 2813 {
2814 /* If we don't have a page list set up, then we're not pinned 2814 /* If we don't have a page list set up, then we're not pinned
2815 * to GPU, and we can ignore the cache flush because it'll happen 2815 * to GPU, and we can ignore the cache flush because it'll happen
2816 * again at bind time. 2816 * again at bind time.
2817 */ 2817 */
2818 if (obj->pages == NULL) 2818 if (obj->pages == NULL)
2819 return; 2819 return;
2820 2820
2821 /* If the GPU is snooping the contents of the CPU cache, 2821 /* If the GPU is snooping the contents of the CPU cache,
2822 * we do not need to manually clear the CPU cache lines. However, 2822 * we do not need to manually clear the CPU cache lines. However,
2823 * the caches are only snooped when the render cache is 2823 * the caches are only snooped when the render cache is
2824 * flushed/invalidated. As we always have to emit invalidations 2824 * flushed/invalidated. As we always have to emit invalidations
2825 * and flushes when moving into and out of the RENDER domain, correct 2825 * and flushes when moving into and out of the RENDER domain, correct
2826 * snooping behaviour occurs naturally as the result of our domain 2826 * snooping behaviour occurs naturally as the result of our domain
2827 * tracking. 2827 * tracking.
2828 */ 2828 */
2829 if (obj->cache_level != I915_CACHE_NONE) 2829 if (obj->cache_level != I915_CACHE_NONE)
2830 return; 2830 return;
2831 2831
2832 trace_i915_gem_object_clflush(obj); 2832 trace_i915_gem_object_clflush(obj);
2833 2833
2834 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); 2834 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2835 } 2835 }
2836 2836
2837 /** Flushes any GPU write domain for the object if it's dirty. */ 2837 /** Flushes any GPU write domain for the object if it's dirty. */
2838 static int 2838 static int
2839 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) 2839 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2840 { 2840 {
2841 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) 2841 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2842 return 0; 2842 return 0;
2843 2843
2844 /* Queue the GPU write cache flushing we need. */ 2844 /* Queue the GPU write cache flushing we need. */
2845 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); 2845 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
2846 } 2846 }
2847 2847
2848 /** Flushes the GTT write domain for the object if it's dirty. */ 2848 /** Flushes the GTT write domain for the object if it's dirty. */
2849 static void 2849 static void
2850 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) 2850 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2851 { 2851 {
2852 uint32_t old_write_domain; 2852 uint32_t old_write_domain;
2853 2853
2854 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) 2854 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2855 return; 2855 return;
2856 2856
2857 /* No actual flushing is required for the GTT write domain. Writes 2857 /* No actual flushing is required for the GTT write domain. Writes
2858 * to it immediately go to main memory as far as we know, so there's 2858 * to it immediately go to main memory as far as we know, so there's
2859 * no chipset flush. It also doesn't land in render cache. 2859 * no chipset flush. It also doesn't land in render cache.
2860 * 2860 *
2861 * However, we do have to enforce the order so that all writes through 2861 * However, we do have to enforce the order so that all writes through
2862 * the GTT land before any writes to the device, such as updates to 2862 * the GTT land before any writes to the device, such as updates to
2863 * the GATT itself. 2863 * the GATT itself.
2864 */ 2864 */
2865 wmb(); 2865 wmb();
2866 2866
2867 old_write_domain = obj->base.write_domain; 2867 old_write_domain = obj->base.write_domain;
2868 obj->base.write_domain = 0; 2868 obj->base.write_domain = 0;
2869 2869
2870 trace_i915_gem_object_change_domain(obj, 2870 trace_i915_gem_object_change_domain(obj,
2871 obj->base.read_domains, 2871 obj->base.read_domains,
2872 old_write_domain); 2872 old_write_domain);
2873 } 2873 }
2874 2874
2875 /** Flushes the CPU write domain for the object if it's dirty. */ 2875 /** Flushes the CPU write domain for the object if it's dirty. */
2876 static void 2876 static void
2877 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) 2877 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2878 { 2878 {
2879 uint32_t old_write_domain; 2879 uint32_t old_write_domain;
2880 2880
2881 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) 2881 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2882 return; 2882 return;
2883 2883
2884 i915_gem_clflush_object(obj); 2884 i915_gem_clflush_object(obj);
2885 intel_gtt_chipset_flush(); 2885 intel_gtt_chipset_flush();
2886 old_write_domain = obj->base.write_domain; 2886 old_write_domain = obj->base.write_domain;
2887 obj->base.write_domain = 0; 2887 obj->base.write_domain = 0;
2888 2888
2889 trace_i915_gem_object_change_domain(obj, 2889 trace_i915_gem_object_change_domain(obj,
2890 obj->base.read_domains, 2890 obj->base.read_domains,
2891 old_write_domain); 2891 old_write_domain);
2892 } 2892 }
2893 2893
2894 /** 2894 /**
2895 * Moves a single object to the GTT read, and possibly write domain. 2895 * Moves a single object to the GTT read, and possibly write domain.
2896 * 2896 *
2897 * This function returns when the move is complete, including waiting on 2897 * This function returns when the move is complete, including waiting on
2898 * flushes to occur. 2898 * flushes to occur.
2899 */ 2899 */
2900 int 2900 int
2901 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 2901 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2902 { 2902 {
2903 uint32_t old_write_domain, old_read_domains; 2903 uint32_t old_write_domain, old_read_domains;
2904 int ret; 2904 int ret;
2905 2905
2906 /* Not valid to be called on unbound objects. */ 2906 /* Not valid to be called on unbound objects. */
2907 if (obj->gtt_space == NULL) 2907 if (obj->gtt_space == NULL)
2908 return -EINVAL; 2908 return -EINVAL;
2909 2909
2910 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 2910 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2911 return 0; 2911 return 0;
2912 2912
2913 ret = i915_gem_object_flush_gpu_write_domain(obj); 2913 ret = i915_gem_object_flush_gpu_write_domain(obj);
2914 if (ret) 2914 if (ret)
2915 return ret; 2915 return ret;
2916 2916
2917 if (obj->pending_gpu_write || write) { 2917 if (obj->pending_gpu_write || write) {
2918 ret = i915_gem_object_wait_rendering(obj); 2918 ret = i915_gem_object_wait_rendering(obj);
2919 if (ret) 2919 if (ret)
2920 return ret; 2920 return ret;
2921 } 2921 }
2922 2922
2923 i915_gem_object_flush_cpu_write_domain(obj); 2923 i915_gem_object_flush_cpu_write_domain(obj);
2924 2924
2925 old_write_domain = obj->base.write_domain; 2925 old_write_domain = obj->base.write_domain;
2926 old_read_domains = obj->base.read_domains; 2926 old_read_domains = obj->base.read_domains;
2927 2927
2928 /* It should now be out of any other write domains, and we can update 2928 /* It should now be out of any other write domains, and we can update
2929 * the domain values for our changes. 2929 * the domain values for our changes.
2930 */ 2930 */
2931 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 2931 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2932 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 2932 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2933 if (write) { 2933 if (write) {
2934 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 2934 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2935 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 2935 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2936 obj->dirty = 1; 2936 obj->dirty = 1;
2937 } 2937 }
2938 2938
2939 trace_i915_gem_object_change_domain(obj, 2939 trace_i915_gem_object_change_domain(obj,
2940 old_read_domains, 2940 old_read_domains,
2941 old_write_domain); 2941 old_write_domain);
2942 2942
2943 return 0; 2943 return 0;
2944 } 2944 }
2945 2945
2946 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 2946 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2947 enum i915_cache_level cache_level) 2947 enum i915_cache_level cache_level)
2948 { 2948 {
2949 int ret; 2949 int ret;
2950 2950
2951 if (obj->cache_level == cache_level) 2951 if (obj->cache_level == cache_level)
2952 return 0; 2952 return 0;
2953 2953
2954 if (obj->pin_count) { 2954 if (obj->pin_count) {
2955 DRM_DEBUG("can not change the cache level of pinned objects\n"); 2955 DRM_DEBUG("can not change the cache level of pinned objects\n");
2956 return -EBUSY; 2956 return -EBUSY;
2957 } 2957 }
2958 2958
2959 if (obj->gtt_space) { 2959 if (obj->gtt_space) {
2960 ret = i915_gem_object_finish_gpu(obj); 2960 ret = i915_gem_object_finish_gpu(obj);
2961 if (ret) 2961 if (ret)
2962 return ret; 2962 return ret;
2963 2963
2964 i915_gem_object_finish_gtt(obj); 2964 i915_gem_object_finish_gtt(obj);
2965 2965
2966 /* Before SandyBridge, you could not use tiling or fence 2966 /* Before SandyBridge, you could not use tiling or fence
2967 * registers with snooped memory, so relinquish any fences 2967 * registers with snooped memory, so relinquish any fences
2968 * currently pointing to our region in the aperture. 2968 * currently pointing to our region in the aperture.
2969 */ 2969 */
2970 if (INTEL_INFO(obj->base.dev)->gen < 6) { 2970 if (INTEL_INFO(obj->base.dev)->gen < 6) {
2971 ret = i915_gem_object_put_fence(obj); 2971 ret = i915_gem_object_put_fence(obj);
2972 if (ret) 2972 if (ret)
2973 return ret; 2973 return ret;
2974 } 2974 }
2975 2975
2976 i915_gem_gtt_rebind_object(obj, cache_level); 2976 i915_gem_gtt_rebind_object(obj, cache_level);
2977 } 2977 }
2978 2978
2979 if (cache_level == I915_CACHE_NONE) { 2979 if (cache_level == I915_CACHE_NONE) {
2980 u32 old_read_domains, old_write_domain; 2980 u32 old_read_domains, old_write_domain;
2981 2981
2982 /* If we're coming from LLC cached, then we haven't 2982 /* If we're coming from LLC cached, then we haven't
2983 * actually been tracking whether the data is in the 2983 * actually been tracking whether the data is in the
2984 * CPU cache or not, since we only allow one bit set 2984 * CPU cache or not, since we only allow one bit set
2985 * in obj->write_domain and have been skipping the clflushes. 2985 * in obj->write_domain and have been skipping the clflushes.
2986 * Just set it to the CPU cache for now. 2986 * Just set it to the CPU cache for now.
2987 */ 2987 */
2988 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); 2988 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
2989 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU); 2989 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
2990 2990
2991 old_read_domains = obj->base.read_domains; 2991 old_read_domains = obj->base.read_domains;
2992 old_write_domain = obj->base.write_domain; 2992 old_write_domain = obj->base.write_domain;
2993 2993
2994 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 2994 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2995 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 2995 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2996 2996
2997 trace_i915_gem_object_change_domain(obj, 2997 trace_i915_gem_object_change_domain(obj,
2998 old_read_domains, 2998 old_read_domains,
2999 old_write_domain); 2999 old_write_domain);
3000 } 3000 }
3001 3001
3002 obj->cache_level = cache_level; 3002 obj->cache_level = cache_level;
3003 return 0; 3003 return 0;
3004 } 3004 }
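For reference, the cache_level argument handled above comes from enum i915_cache_level in i915_drv.h. A minimal sketch of that enum as assumed around this commit (only the first two values are referenced in this file; the exact list is inferred from context, not from this hunk):

	enum i915_cache_level {
		I915_CACHE_NONE = 0,	/* uncached PTEs; required for scanout */
		I915_CACHE_LLC,		/* snooped into the CPU's last-level cache */
		I915_CACHE_LLC_MLC,	/* assumed additional level, unused in this file */
	};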
3005 3005
3006 /* 3006 /*
3007 * Prepare buffer for display plane (scanout, cursors, etc). 3007 * Prepare buffer for display plane (scanout, cursors, etc).
3008 * Can be called from an uninterruptible phase (modesetting) and allows 3008 * Can be called from an uninterruptible phase (modesetting) and allows
3009 * any flushes to be pipelined (for pageflips). 3009 * any flushes to be pipelined (for pageflips).
3010 * 3010 *
3011 * For the display plane, we want to be in the GTT but out of any write 3011 * For the display plane, we want to be in the GTT but out of any write
3012 * domains. So in many ways this looks like set_to_gtt_domain() apart from the 3012 * domains. So in many ways this looks like set_to_gtt_domain() apart from the
3013 * ability to pipeline the waits, pinning and any additional subtleties 3013 * ability to pipeline the waits, pinning and any additional subtleties
3014 * that may differentiate the display plane from ordinary buffers. 3014 * that may differentiate the display plane from ordinary buffers.
3015 */ 3015 */
3016 int 3016 int
3017 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3017 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3018 u32 alignment, 3018 u32 alignment,
3019 struct intel_ring_buffer *pipelined) 3019 struct intel_ring_buffer *pipelined)
3020 { 3020 {
3021 u32 old_read_domains, old_write_domain; 3021 u32 old_read_domains, old_write_domain;
3022 int ret; 3022 int ret;
3023 3023
3024 ret = i915_gem_object_flush_gpu_write_domain(obj); 3024 ret = i915_gem_object_flush_gpu_write_domain(obj);
3025 if (ret) 3025 if (ret)
3026 return ret; 3026 return ret;
3027 3027
3028 if (pipelined != obj->ring) { 3028 if (pipelined != obj->ring) {
3029 ret = i915_gem_object_wait_rendering(obj); 3029 ret = i915_gem_object_wait_rendering(obj);
3030 if (ret == -ERESTARTSYS) 3030 if (ret == -ERESTARTSYS)
3031 return ret; 3031 return ret;
3032 } 3032 }
3033 3033
3034 /* The display engine is not coherent with the LLC cache on gen6. As 3034 /* The display engine is not coherent with the LLC cache on gen6. As
3035 * a result, we make sure that the pinning that is about to occur is 3035 * a result, we make sure that the pinning that is about to occur is
3036 * done with uncached PTEs. This is lowest common denominator for all 3036 * done with uncached PTEs. This is lowest common denominator for all
3037 * chipsets. 3037 * chipsets.
3038 * 3038 *
3039 * However for gen6+, we could do better by using the GFDT bit instead 3039 * However for gen6+, we could do better by using the GFDT bit instead
3040 * of uncaching, which would allow us to flush all the LLC-cached data 3040 * of uncaching, which would allow us to flush all the LLC-cached data
3041 * with that bit in the PTE to main memory with just one PIPE_CONTROL. 3041 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3042 */ 3042 */
3043 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE); 3043 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3044 if (ret) 3044 if (ret)
3045 return ret; 3045 return ret;
3046 3046
3047 /* As the user may map the buffer once pinned in the display plane 3047 /* As the user may map the buffer once pinned in the display plane
3048 * (e.g. libkms for the bootup splash), we have to ensure that we 3048 * (e.g. libkms for the bootup splash), we have to ensure that we
3049 * always use map_and_fenceable for all scanout buffers. 3049 * always use map_and_fenceable for all scanout buffers.
3050 */ 3050 */
3051 ret = i915_gem_object_pin(obj, alignment, true); 3051 ret = i915_gem_object_pin(obj, alignment, true);
3052 if (ret) 3052 if (ret)
3053 return ret; 3053 return ret;
3054 3054
3055 i915_gem_object_flush_cpu_write_domain(obj); 3055 i915_gem_object_flush_cpu_write_domain(obj);
3056 3056
3057 old_write_domain = obj->base.write_domain; 3057 old_write_domain = obj->base.write_domain;
3058 old_read_domains = obj->base.read_domains; 3058 old_read_domains = obj->base.read_domains;
3059 3059
3060 /* It should now be out of any other write domains, and we can update 3060 /* It should now be out of any other write domains, and we can update
3061 * the domain values for our changes. 3061 * the domain values for our changes.
3062 */ 3062 */
3063 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 3063 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3064 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3064 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3065 3065
3066 trace_i915_gem_object_change_domain(obj, 3066 trace_i915_gem_object_change_domain(obj,
3067 old_read_domains, 3067 old_read_domains,
3068 old_write_domain); 3068 old_write_domain);
3069 3069
3070 return 0; 3070 return 0;
3071 } 3071 }
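A hypothetical caller from the modesetting side would use the helper above roughly as follows (the function name and the alignment value are illustrative only; real callers derive the alignment from the tiling mode and hardware generation):

	static int example_pin_scanout(struct drm_i915_gem_object *obj,
				       struct intel_ring_buffer *pipelined)
	{
		int ret;

		/* 256 KiB is a placeholder alignment for this sketch. */
		ret = i915_gem_object_pin_to_display_plane(obj, 256 * 1024, pipelined);
		if (ret)
			return ret;

		/* obj is now uncached (I915_CACHE_NONE), pinned mappable and
		 * in the GTT read domain, ready to be pointed at by the
		 * display plane registers. */
		return 0;
	}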
3072 3072
3073 int 3073 int
3074 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) 3074 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3075 { 3075 {
3076 int ret; 3076 int ret;
3077 3077
3078 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) 3078 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3079 return 0; 3079 return 0;
3080 3080
3081 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 3081 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3082 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); 3082 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
3083 if (ret) 3083 if (ret)
3084 return ret; 3084 return ret;
3085 } 3085 }
3086 3086
3087 /* Ensure that we invalidate the GPU's caches and TLBs. */ 3087 /* Ensure that we invalidate the GPU's caches and TLBs. */
3088 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; 3088 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3089 3089
3090 return i915_gem_object_wait_rendering(obj); 3090 return i915_gem_object_wait_rendering(obj);
3091 } 3091 }
3092 3092
3093 /** 3093 /**
3094 * Moves a single object to the CPU read, and possibly write domain. 3094 * Moves a single object to the CPU read, and possibly write domain.
3095 * 3095 *
3096 * This function returns when the move is complete, including waiting on 3096 * This function returns when the move is complete, including waiting on
3097 * flushes to occur. 3097 * flushes to occur.
3098 */ 3098 */
3099 static int 3099 static int
3100 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) 3100 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3101 { 3101 {
3102 uint32_t old_write_domain, old_read_domains; 3102 uint32_t old_write_domain, old_read_domains;
3103 int ret; 3103 int ret;
3104 3104
3105 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) 3105 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3106 return 0; 3106 return 0;
3107 3107
3108 ret = i915_gem_object_flush_gpu_write_domain(obj); 3108 ret = i915_gem_object_flush_gpu_write_domain(obj);
3109 if (ret) 3109 if (ret)
3110 return ret; 3110 return ret;
3111 3111
3112 ret = i915_gem_object_wait_rendering(obj); 3112 ret = i915_gem_object_wait_rendering(obj);
3113 if (ret) 3113 if (ret)
3114 return ret; 3114 return ret;
3115 3115
3116 i915_gem_object_flush_gtt_write_domain(obj); 3116 i915_gem_object_flush_gtt_write_domain(obj);
3117 3117
3118 /* If we have a partially-valid cache of the object in the CPU, 3118 /* If we have a partially-valid cache of the object in the CPU,
3119 * finish invalidating it and free the per-page flags. 3119 * finish invalidating it and free the per-page flags.
3120 */ 3120 */
3121 i915_gem_object_set_to_full_cpu_read_domain(obj); 3121 i915_gem_object_set_to_full_cpu_read_domain(obj);
3122 3122
3123 old_write_domain = obj->base.write_domain; 3123 old_write_domain = obj->base.write_domain;
3124 old_read_domains = obj->base.read_domains; 3124 old_read_domains = obj->base.read_domains;
3125 3125
3126 /* Flush the CPU cache if it's still invalid. */ 3126 /* Flush the CPU cache if it's still invalid. */
3127 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { 3127 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3128 i915_gem_clflush_object(obj); 3128 i915_gem_clflush_object(obj);
3129 3129
3130 obj->base.read_domains |= I915_GEM_DOMAIN_CPU; 3130 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3131 } 3131 }
3132 3132
3133 /* It should now be out of any other write domains, and we can update 3133 /* It should now be out of any other write domains, and we can update
3134 * the domain values for our changes. 3134 * the domain values for our changes.
3135 */ 3135 */
3136 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 3136 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3137 3137
3138 /* If we're writing through the CPU, then the GPU read domains will 3138 /* If we're writing through the CPU, then the GPU read domains will
3139 * need to be invalidated at next use. 3139 * need to be invalidated at next use.
3140 */ 3140 */
3141 if (write) { 3141 if (write) {
3142 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3142 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3143 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3143 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3144 } 3144 }
3145 3145
3146 trace_i915_gem_object_change_domain(obj, 3146 trace_i915_gem_object_change_domain(obj,
3147 old_read_domains, 3147 old_read_domains,
3148 old_write_domain); 3148 old_write_domain);
3149 3149
3150 return 0; 3150 return 0;
3151 } 3151 }
3152 3152
3153 /** 3153 /**
3154 * Moves the object from a partially CPU read to a full one. 3154 * Moves the object from a partially CPU read to a full one.
3155 * 3155 *
3156 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(), 3156 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3157 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). 3157 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3158 */ 3158 */
3159 static void 3159 static void
3160 i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj) 3160 i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
3161 { 3161 {
3162 if (!obj->page_cpu_valid) 3162 if (!obj->page_cpu_valid)
3163 return; 3163 return;
3164 3164
3165 /* If we're partially in the CPU read domain, finish moving it in. 3165 /* If we're partially in the CPU read domain, finish moving it in.
3166 */ 3166 */
3167 if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) { 3167 if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
3168 int i; 3168 int i;
3169 3169
3170 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) { 3170 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3171 if (obj->page_cpu_valid[i]) 3171 if (obj->page_cpu_valid[i])
3172 continue; 3172 continue;
3173 drm_clflush_pages(obj->pages + i, 1); 3173 drm_clflush_pages(obj->pages + i, 1);
3174 } 3174 }
3175 } 3175 }
3176 3176
3177 /* Free the page_cpu_valid mappings which are now stale, whether 3177 /* Free the page_cpu_valid mappings which are now stale, whether
3178 * or not we've got I915_GEM_DOMAIN_CPU. 3178 * or not we've got I915_GEM_DOMAIN_CPU.
3179 */ 3179 */
3180 kfree(obj->page_cpu_valid); 3180 kfree(obj->page_cpu_valid);
3181 obj->page_cpu_valid = NULL; 3181 obj->page_cpu_valid = NULL;
3182 } 3182 }
3183 3183
3184 /** 3184 /**
3185 * Set the CPU read domain on a range of the object. 3185 * Set the CPU read domain on a range of the object.
3186 * 3186 *
3187 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's 3187 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3188 * not entirely valid. The page_cpu_valid member of the object flags which 3188 * not entirely valid. The page_cpu_valid member of the object flags which
3189 * pages have been flushed, and will be respected by 3189 * pages have been flushed, and will be respected by
3190 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping 3190 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3191 * of the whole object. 3191 * of the whole object.
3192 * 3192 *
3193 * This function returns when the move is complete, including waiting on 3193 * This function returns when the move is complete, including waiting on
3194 * flushes to occur. 3194 * flushes to occur.
3195 */ 3195 */
3196 static int 3196 static int
3197 i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, 3197 i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
3198 uint64_t offset, uint64_t size) 3198 uint64_t offset, uint64_t size)
3199 { 3199 {
3200 uint32_t old_read_domains; 3200 uint32_t old_read_domains;
3201 int i, ret; 3201 int i, ret;
3202 3202
3203 if (offset == 0 && size == obj->base.size) 3203 if (offset == 0 && size == obj->base.size)
3204 return i915_gem_object_set_to_cpu_domain(obj, 0); 3204 return i915_gem_object_set_to_cpu_domain(obj, 0);
3205 3205
3206 ret = i915_gem_object_flush_gpu_write_domain(obj); 3206 ret = i915_gem_object_flush_gpu_write_domain(obj);
3207 if (ret) 3207 if (ret)
3208 return ret; 3208 return ret;
3209 3209
3210 ret = i915_gem_object_wait_rendering(obj); 3210 ret = i915_gem_object_wait_rendering(obj);
3211 if (ret) 3211 if (ret)
3212 return ret; 3212 return ret;
3213 3213
3214 i915_gem_object_flush_gtt_write_domain(obj); 3214 i915_gem_object_flush_gtt_write_domain(obj);
3215 3215
3216 /* If we're already fully in the CPU read domain, we're done. */ 3216 /* If we're already fully in the CPU read domain, we're done. */
3217 if (obj->page_cpu_valid == NULL && 3217 if (obj->page_cpu_valid == NULL &&
3218 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) 3218 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
3219 return 0; 3219 return 0;
3220 3220
3221 /* Otherwise, create/clear the per-page CPU read domain flag if we're 3221 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3222 * newly adding I915_GEM_DOMAIN_CPU 3222 * newly adding I915_GEM_DOMAIN_CPU
3223 */ 3223 */
3224 if (obj->page_cpu_valid == NULL) { 3224 if (obj->page_cpu_valid == NULL) {
3225 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE, 3225 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3226 GFP_KERNEL); 3226 GFP_KERNEL);
3227 if (obj->page_cpu_valid == NULL) 3227 if (obj->page_cpu_valid == NULL)
3228 return -ENOMEM; 3228 return -ENOMEM;
3229 } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) 3229 } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3230 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE); 3230 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
3231 3231
3232 /* Flush the cache on any pages that are still invalid from the CPU's 3232 /* Flush the cache on any pages that are still invalid from the CPU's
3233 * perspective. 3233 * perspective.
3234 */ 3234 */
3235 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; 3235 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3236 i++) { 3236 i++) {
3237 if (obj->page_cpu_valid[i]) 3237 if (obj->page_cpu_valid[i])
3238 continue; 3238 continue;
3239 3239
3240 drm_clflush_pages(obj->pages + i, 1); 3240 drm_clflush_pages(obj->pages + i, 1);
3241 3241
3242 obj->page_cpu_valid[i] = 1; 3242 obj->page_cpu_valid[i] = 1;
3243 } 3243 }
3244 3244
3245 /* It should now be out of any other write domains, and we can update 3245 /* It should now be out of any other write domains, and we can update
3246 * the domain values for our changes. 3246 * the domain values for our changes.
3247 */ 3247 */
3248 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 3248 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3249 3249
3250 old_read_domains = obj->base.read_domains; 3250 old_read_domains = obj->base.read_domains;
3251 obj->base.read_domains |= I915_GEM_DOMAIN_CPU; 3251 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3252 3252
3253 trace_i915_gem_object_change_domain(obj, 3253 trace_i915_gem_object_change_domain(obj,
3254 old_read_domains, 3254 old_read_domains,
3255 obj->base.write_domain); 3255 obj->base.write_domain);
3256 3256
3257 return 0; 3257 return 0;
3258 } 3258 }
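A worked example of the range arithmetic above: with 4 KiB pages, a call with offset = 5000 and size = 10000 starts at page 5000 / 4096 = 1 and ends at page (5000 + 10000 - 1) / 4096 = 3, so pages 1 through 3 are clflushed and marked in page_cpu_valid, while page 0 and any pages past 3 are left untouched.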
3259 3259
3260 /* Throttle our rendering by waiting until the ring has completed our requests 3260 /* Throttle our rendering by waiting until the ring has completed our requests
3261 * emitted over 20 msec ago. 3261 * emitted over 20 msec ago.
3262 * 3262 *
3263 * Note that if we were to use the current jiffies each time around the loop, 3263 * Note that if we were to use the current jiffies each time around the loop,
3264 * we wouldn't escape the function with any frames outstanding if the time to 3264 * we wouldn't escape the function with any frames outstanding if the time to
3265 * render a frame was over 20ms. 3265 * render a frame was over 20ms.
3266 * 3266 *
3267 * This should get us reasonable parallelism between CPU and GPU but also 3267 * This should get us reasonable parallelism between CPU and GPU but also
3268 * relatively low latency when blocking on a particular request to finish. 3268 * relatively low latency when blocking on a particular request to finish.
3269 */ 3269 */
3270 static int 3270 static int
3271 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) 3271 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3272 { 3272 {
3273 struct drm_i915_private *dev_priv = dev->dev_private; 3273 struct drm_i915_private *dev_priv = dev->dev_private;
3274 struct drm_i915_file_private *file_priv = file->driver_priv; 3274 struct drm_i915_file_private *file_priv = file->driver_priv;
3275 unsigned long recent_enough = jiffies - msecs_to_jiffies(20); 3275 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3276 struct drm_i915_gem_request *request; 3276 struct drm_i915_gem_request *request;
3277 struct intel_ring_buffer *ring = NULL; 3277 struct intel_ring_buffer *ring = NULL;
3278 u32 seqno = 0; 3278 u32 seqno = 0;
3279 int ret; 3279 int ret;
3280 3280
3281 if (atomic_read(&dev_priv->mm.wedged)) 3281 if (atomic_read(&dev_priv->mm.wedged))
3282 return -EIO; 3282 return -EIO;
3283 3283
3284 spin_lock(&file_priv->mm.lock); 3284 spin_lock(&file_priv->mm.lock);
3285 list_for_each_entry(request, &file_priv->mm.request_list, client_list) { 3285 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3286 if (time_after_eq(request->emitted_jiffies, recent_enough)) 3286 if (time_after_eq(request->emitted_jiffies, recent_enough))
3287 break; 3287 break;
3288 3288
3289 ring = request->ring; 3289 ring = request->ring;
3290 seqno = request->seqno; 3290 seqno = request->seqno;
3291 } 3291 }
3292 spin_unlock(&file_priv->mm.lock); 3292 spin_unlock(&file_priv->mm.lock);
3293 3293
3294 if (seqno == 0) 3294 if (seqno == 0)
3295 return 0; 3295 return 0;
3296 3296
3297 ret = 0; 3297 ret = 0;
3298 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { 3298 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3299 /* And wait for the seqno passing without holding any locks and 3299 /* And wait for the seqno passing without holding any locks and
3300 * causing extra latency for others. This is safe as the irq 3300 * causing extra latency for others. This is safe as the irq
3301 * generation is designed to be run atomically and so is 3301 * generation is designed to be run atomically and so is
3302 * lockless. 3302 * lockless.
3303 */ 3303 */
3304 if (ring->irq_get(ring)) { 3304 if (ring->irq_get(ring)) {
3305 ret = wait_event_interruptible(ring->irq_queue, 3305 ret = wait_event_interruptible(ring->irq_queue,
3306 i915_seqno_passed(ring->get_seqno(ring), seqno) 3306 i915_seqno_passed(ring->get_seqno(ring), seqno)
3307 || atomic_read(&dev_priv->mm.wedged)); 3307 || atomic_read(&dev_priv->mm.wedged));
3308 ring->irq_put(ring); 3308 ring->irq_put(ring);
3309 3309
3310 if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) 3310 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3311 ret = -EIO; 3311 ret = -EIO;
3312 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring), 3312 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
3313 seqno) || 3313 seqno) ||
3314 atomic_read(&dev_priv->mm.wedged), 3000)) { 3314 atomic_read(&dev_priv->mm.wedged), 3000)) {
3315 ret = -EBUSY; 3315 ret = -EBUSY;
3316 } 3316 }
3317 } 3317 }
3318 3318
3319 if (ret == 0) 3319 if (ret == 0)
3320 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); 3320 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3321 3321
3322 return ret; 3322 return ret;
3323 } 3323 }
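Concretely, recent_enough is sampled once on entry, so only requests emitted more than 20 ms before the ioctl are candidates to wait on; a request emitted 15 ms ago is skipped even if the wait itself then takes longer than 20 ms. Re-reading jiffies around the wait would slide that cutoff forward and, as the comment notes, a client whose frames take over 20 ms to render would never escape with outstanding work.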
3324 3324
3325 int 3325 int
3326 i915_gem_object_pin(struct drm_i915_gem_object *obj, 3326 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3327 uint32_t alignment, 3327 uint32_t alignment,
3328 bool map_and_fenceable) 3328 bool map_and_fenceable)
3329 { 3329 {
3330 struct drm_device *dev = obj->base.dev; 3330 struct drm_device *dev = obj->base.dev;
3331 struct drm_i915_private *dev_priv = dev->dev_private; 3331 struct drm_i915_private *dev_priv = dev->dev_private;
3332 int ret; 3332 int ret;
3333 3333
3334 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); 3334 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
3335 WARN_ON(i915_verify_lists(dev)); 3335 WARN_ON(i915_verify_lists(dev));
3336 3336
3337 if (obj->gtt_space != NULL) { 3337 if (obj->gtt_space != NULL) {
3338 if ((alignment && obj->gtt_offset & (alignment - 1)) || 3338 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3339 (map_and_fenceable && !obj->map_and_fenceable)) { 3339 (map_and_fenceable && !obj->map_and_fenceable)) {
3340 WARN(obj->pin_count, 3340 WARN(obj->pin_count,
3341 "bo is already pinned with incorrect alignment:" 3341 "bo is already pinned with incorrect alignment:"
3342 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," 3342 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3343 " obj->map_and_fenceable=%d\n", 3343 " obj->map_and_fenceable=%d\n",
3344 obj->gtt_offset, alignment, 3344 obj->gtt_offset, alignment,
3345 map_and_fenceable, 3345 map_and_fenceable,
3346 obj->map_and_fenceable); 3346 obj->map_and_fenceable);
3347 ret = i915_gem_object_unbind(obj); 3347 ret = i915_gem_object_unbind(obj);
3348 if (ret) 3348 if (ret)
3349 return ret; 3349 return ret;
3350 } 3350 }
3351 } 3351 }
3352 3352
3353 if (obj->gtt_space == NULL) { 3353 if (obj->gtt_space == NULL) {
3354 ret = i915_gem_object_bind_to_gtt(obj, alignment, 3354 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3355 map_and_fenceable); 3355 map_and_fenceable);
3356 if (ret) 3356 if (ret)
3357 return ret; 3357 return ret;
3358 } 3358 }
3359 3359
3360 if (obj->pin_count++ == 0) { 3360 if (obj->pin_count++ == 0) {
3361 if (!obj->active) 3361 if (!obj->active)
3362 list_move_tail(&obj->mm_list, 3362 list_move_tail(&obj->mm_list,
3363 &dev_priv->mm.pinned_list); 3363 &dev_priv->mm.pinned_list);
3364 } 3364 }
3365 obj->pin_mappable |= map_and_fenceable; 3365 obj->pin_mappable |= map_and_fenceable;
3366 3366
3367 WARN_ON(i915_verify_lists(dev)); 3367 WARN_ON(i915_verify_lists(dev));
3368 return 0; 3368 return 0;
3369 } 3369 }
3370 3370
3371 void 3371 void
3372 i915_gem_object_unpin(struct drm_i915_gem_object *obj) 3372 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3373 { 3373 {
3374 struct drm_device *dev = obj->base.dev; 3374 struct drm_device *dev = obj->base.dev;
3375 drm_i915_private_t *dev_priv = dev->dev_private; 3375 drm_i915_private_t *dev_priv = dev->dev_private;
3376 3376
3377 WARN_ON(i915_verify_lists(dev)); 3377 WARN_ON(i915_verify_lists(dev));
3378 BUG_ON(obj->pin_count == 0); 3378 BUG_ON(obj->pin_count == 0);
3379 BUG_ON(obj->gtt_space == NULL); 3379 BUG_ON(obj->gtt_space == NULL);
3380 3380
3381 if (--obj->pin_count == 0) { 3381 if (--obj->pin_count == 0) {
3382 if (!obj->active) 3382 if (!obj->active)
3383 list_move_tail(&obj->mm_list, 3383 list_move_tail(&obj->mm_list,
3384 &dev_priv->mm.inactive_list); 3384 &dev_priv->mm.inactive_list);
3385 obj->pin_mappable = false; 3385 obj->pin_mappable = false;
3386 } 3386 }
3387 WARN_ON(i915_verify_lists(dev)); 3387 WARN_ON(i915_verify_lists(dev));
3388 } 3388 }
3389 3389
3390 int 3390 int
3391 i915_gem_pin_ioctl(struct drm_device *dev, void *data, 3391 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3392 struct drm_file *file) 3392 struct drm_file *file)
3393 { 3393 {
3394 struct drm_i915_gem_pin *args = data; 3394 struct drm_i915_gem_pin *args = data;
3395 struct drm_i915_gem_object *obj; 3395 struct drm_i915_gem_object *obj;
3396 int ret; 3396 int ret;
3397 3397
3398 ret = i915_mutex_lock_interruptible(dev); 3398 ret = i915_mutex_lock_interruptible(dev);
3399 if (ret) 3399 if (ret)
3400 return ret; 3400 return ret;
3401 3401
3402 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3402 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3403 if (&obj->base == NULL) { 3403 if (&obj->base == NULL) {
3404 ret = -ENOENT; 3404 ret = -ENOENT;
3405 goto unlock; 3405 goto unlock;
3406 } 3406 }
3407 3407
3408 if (obj->madv != I915_MADV_WILLNEED) { 3408 if (obj->madv != I915_MADV_WILLNEED) {
3409 DRM_ERROR("Attempting to pin a purgeable buffer\n"); 3409 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3410 ret = -EINVAL; 3410 ret = -EINVAL;
3411 goto out; 3411 goto out;
3412 } 3412 }
3413 3413
3414 if (obj->pin_filp != NULL && obj->pin_filp != file) { 3414 if (obj->pin_filp != NULL && obj->pin_filp != file) {
3415 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", 3415 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3416 args->handle); 3416 args->handle);
3417 ret = -EINVAL; 3417 ret = -EINVAL;
3418 goto out; 3418 goto out;
3419 } 3419 }
3420 3420
3421 obj->user_pin_count++; 3421 obj->user_pin_count++;
3422 obj->pin_filp = file; 3422 obj->pin_filp = file;
3423 if (obj->user_pin_count == 1) { 3423 if (obj->user_pin_count == 1) {
3424 ret = i915_gem_object_pin(obj, args->alignment, true); 3424 ret = i915_gem_object_pin(obj, args->alignment, true);
3425 if (ret) 3425 if (ret)
3426 goto out; 3426 goto out;
3427 } 3427 }
3428 3428
3429 /* XXX - flush the CPU caches for pinned objects 3429 /* XXX - flush the CPU caches for pinned objects
3430 * as the X server doesn't manage domains yet 3430 * as the X server doesn't manage domains yet
3431 */ 3431 */
3432 i915_gem_object_flush_cpu_write_domain(obj); 3432 i915_gem_object_flush_cpu_write_domain(obj);
3433 args->offset = obj->gtt_offset; 3433 args->offset = obj->gtt_offset;
3434 out: 3434 out:
3435 drm_gem_object_unreference(&obj->base); 3435 drm_gem_object_unreference(&obj->base);
3436 unlock: 3436 unlock:
3437 mutex_unlock(&dev->struct_mutex); 3437 mutex_unlock(&dev->struct_mutex);
3438 return ret; 3438 return ret;
3439 } 3439 }
3440 3440
3441 int 3441 int
3442 i915_gem_unpin_ioctl(struct drm_device *dev, void *data, 3442 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3443 struct drm_file *file) 3443 struct drm_file *file)
3444 { 3444 {
3445 struct drm_i915_gem_pin *args = data; 3445 struct drm_i915_gem_pin *args = data;
3446 struct drm_i915_gem_object *obj; 3446 struct drm_i915_gem_object *obj;
3447 int ret; 3447 int ret;
3448 3448
3449 ret = i915_mutex_lock_interruptible(dev); 3449 ret = i915_mutex_lock_interruptible(dev);
3450 if (ret) 3450 if (ret)
3451 return ret; 3451 return ret;
3452 3452
3453 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3453 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3454 if (&obj->base == NULL) { 3454 if (&obj->base == NULL) {
3455 ret = -ENOENT; 3455 ret = -ENOENT;
3456 goto unlock; 3456 goto unlock;
3457 } 3457 }
3458 3458
3459 if (obj->pin_filp != file) { 3459 if (obj->pin_filp != file) {
3460 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", 3460 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3461 args->handle); 3461 args->handle);
3462 ret = -EINVAL; 3462 ret = -EINVAL;
3463 goto out; 3463 goto out;
3464 } 3464 }
3465 obj->user_pin_count--; 3465 obj->user_pin_count--;
3466 if (obj->user_pin_count == 0) { 3466 if (obj->user_pin_count == 0) {
3467 obj->pin_filp = NULL; 3467 obj->pin_filp = NULL;
3468 i915_gem_object_unpin(obj); 3468 i915_gem_object_unpin(obj);
3469 } 3469 }
3470 3470
3471 out: 3471 out:
3472 drm_gem_object_unreference(&obj->base); 3472 drm_gem_object_unreference(&obj->base);
3473 unlock: 3473 unlock:
3474 mutex_unlock(&dev->struct_mutex); 3474 mutex_unlock(&dev->struct_mutex);
3475 return ret; 3475 return ret;
3476 } 3476 }
3477 3477
3478 int 3478 int
3479 i915_gem_busy_ioctl(struct drm_device *dev, void *data, 3479 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3480 struct drm_file *file) 3480 struct drm_file *file)
3481 { 3481 {
3482 struct drm_i915_gem_busy *args = data; 3482 struct drm_i915_gem_busy *args = data;
3483 struct drm_i915_gem_object *obj; 3483 struct drm_i915_gem_object *obj;
3484 int ret; 3484 int ret;
3485 3485
3486 ret = i915_mutex_lock_interruptible(dev); 3486 ret = i915_mutex_lock_interruptible(dev);
3487 if (ret) 3487 if (ret)
3488 return ret; 3488 return ret;
3489 3489
3490 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3490 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3491 if (&obj->base == NULL) { 3491 if (&obj->base == NULL) {
3492 ret = -ENOENT; 3492 ret = -ENOENT;
3493 goto unlock; 3493 goto unlock;
3494 } 3494 }
3495 3495
3496 /* Count all active objects as busy, even if they are currently not used 3496 /* Count all active objects as busy, even if they are currently not used
3497 * by the gpu. Users of this interface expect objects to eventually 3497 * by the gpu. Users of this interface expect objects to eventually
3498 * become non-busy without any further actions, therefore emit any 3498 * become non-busy without any further actions, therefore emit any
3499 * necessary flushes here. 3499 * necessary flushes here.
3500 */ 3500 */
3501 args->busy = obj->active; 3501 args->busy = obj->active;
3502 if (args->busy) { 3502 if (args->busy) {
3503 /* Unconditionally flush objects, even when the gpu still uses this 3503 /* Unconditionally flush objects, even when the gpu still uses this
3504 * object. Userspace calling this function indicates that it wants to 3504 * object. Userspace calling this function indicates that it wants to
3505 * use this buffer rather sooner than later, so issuing the required 3505 * use this buffer rather sooner than later, so issuing the required
3506 * flush earlier is beneficial. 3506 * flush earlier is beneficial.
3507 */ 3507 */
3508 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 3508 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3509 ret = i915_gem_flush_ring(obj->ring, 3509 ret = i915_gem_flush_ring(obj->ring,
3510 0, obj->base.write_domain); 3510 0, obj->base.write_domain);
3511 } else if (obj->ring->outstanding_lazy_request == 3511 } else if (obj->ring->outstanding_lazy_request ==
3512 obj->last_rendering_seqno) { 3512 obj->last_rendering_seqno) {
3513 struct drm_i915_gem_request *request; 3513 struct drm_i915_gem_request *request;
3514 3514
3515 /* This ring is not being cleared by active usage, 3515 /* This ring is not being cleared by active usage,
3516 * so emit a request to do so. 3516 * so emit a request to do so.
3517 */ 3517 */
3518 request = kzalloc(sizeof(*request), GFP_KERNEL); 3518 request = kzalloc(sizeof(*request), GFP_KERNEL);
3519 if (request) { 3519 if (request) {
3520 ret = i915_add_request(obj->ring, NULL, request); 3520 ret = i915_add_request(obj->ring, NULL, request);
3521 if (ret) 3521 if (ret)
3522 kfree(request); 3522 kfree(request);
3523 } else 3523 } else
3524 ret = -ENOMEM; 3524 ret = -ENOMEM;
3525 } 3525 }
3526 3526
3527 /* Update the active list for the hardware's current position. 3527 /* Update the active list for the hardware's current position.
3528 * Otherwise this only updates on a delayed timer or when irqs 3528 * Otherwise this only updates on a delayed timer or when irqs
3529 * are actually unmasked, and our working set ends up being 3529 * are actually unmasked, and our working set ends up being
3530 * larger than required. 3530 * larger than required.
3531 */ 3531 */
3532 i915_gem_retire_requests_ring(obj->ring); 3532 i915_gem_retire_requests_ring(obj->ring);
3533 3533
3534 args->busy = obj->active; 3534 args->busy = obj->active;
3535 } 3535 }
3536 3536
3537 drm_gem_object_unreference(&obj->base); 3537 drm_gem_object_unreference(&obj->base);
3538 unlock: 3538 unlock:
3539 mutex_unlock(&dev->struct_mutex); 3539 mutex_unlock(&dev->struct_mutex);
3540 return ret; 3540 return ret;
3541 } 3541 }
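To make the "users of this interface" comment above concrete, a minimal userspace sketch of polling a buffer through this ioctl could look like the following (libdrm's drmIoctl() and the definitions from i915_drm.h are assumed; error handling is reduced to the bare minimum):

	#include <errno.h>
	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	/* Returns 1 while the GPU still uses the buffer, 0 when idle,
	 * negative errno on failure. */
	static int bo_busy(int fd, uint32_t handle)
	{
		struct drm_i915_gem_busy busy;

		memset(&busy, 0, sizeof(busy));
		busy.handle = handle;

		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
			return -errno;

		return busy.busy != 0;
	}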
3542 3542
3543 int 3543 int
3544 i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 3544 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3545 struct drm_file *file_priv) 3545 struct drm_file *file_priv)
3546 { 3546 {
3547 return i915_gem_ring_throttle(dev, file_priv); 3547 return i915_gem_ring_throttle(dev, file_priv);
3548 } 3548 }
3549 3549
3550 int 3550 int
3551 i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 3551 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3552 struct drm_file *file_priv) 3552 struct drm_file *file_priv)
3553 { 3553 {
3554 struct drm_i915_gem_madvise *args = data; 3554 struct drm_i915_gem_madvise *args = data;
3555 struct drm_i915_gem_object *obj; 3555 struct drm_i915_gem_object *obj;
3556 int ret; 3556 int ret;
3557 3557
3558 switch (args->madv) { 3558 switch (args->madv) {
3559 case I915_MADV_DONTNEED: 3559 case I915_MADV_DONTNEED:
3560 case I915_MADV_WILLNEED: 3560 case I915_MADV_WILLNEED:
3561 break; 3561 break;
3562 default: 3562 default:
3563 return -EINVAL; 3563 return -EINVAL;
3564 } 3564 }
3565 3565
3566 ret = i915_mutex_lock_interruptible(dev); 3566 ret = i915_mutex_lock_interruptible(dev);
3567 if (ret) 3567 if (ret)
3568 return ret; 3568 return ret;
3569 3569
3570 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); 3570 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3571 if (&obj->base == NULL) { 3571 if (&obj->base == NULL) {
3572 ret = -ENOENT; 3572 ret = -ENOENT;
3573 goto unlock; 3573 goto unlock;
3574 } 3574 }
3575 3575
3576 if (obj->pin_count) { 3576 if (obj->pin_count) {
3577 ret = -EINVAL; 3577 ret = -EINVAL;
3578 goto out; 3578 goto out;
3579 } 3579 }
3580 3580
3581 if (obj->madv != __I915_MADV_PURGED) 3581 if (obj->madv != __I915_MADV_PURGED)
3582 obj->madv = args->madv; 3582 obj->madv = args->madv;
3583 3583
3584 /* if the object is no longer bound, discard its backing storage */ 3584 /* if the object is no longer bound, discard its backing storage */
3585 if (i915_gem_object_is_purgeable(obj) && 3585 if (i915_gem_object_is_purgeable(obj) &&
3586 obj->gtt_space == NULL) 3586 obj->gtt_space == NULL)
3587 i915_gem_object_truncate(obj); 3587 i915_gem_object_truncate(obj);
3588 3588
3589 args->retained = obj->madv != __I915_MADV_PURGED; 3589 args->retained = obj->madv != __I915_MADV_PURGED;
3590 3590
3591 out: 3591 out:
3592 drm_gem_object_unreference(&obj->base); 3592 drm_gem_object_unreference(&obj->base);
3593 unlock: 3593 unlock:
3594 mutex_unlock(&dev->struct_mutex); 3594 mutex_unlock(&dev->struct_mutex);
3595 return ret; 3595 return ret;
3596 } 3596 }
3597 3597
3598 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 3598 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3599 size_t size) 3599 size_t size)
3600 { 3600 {
3601 struct drm_i915_private *dev_priv = dev->dev_private; 3601 struct drm_i915_private *dev_priv = dev->dev_private;
3602 struct drm_i915_gem_object *obj; 3602 struct drm_i915_gem_object *obj;
3603 struct address_space *mapping; 3603 struct address_space *mapping;
3604 3604
3605 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 3605 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3606 if (obj == NULL) 3606 if (obj == NULL)
3607 return NULL; 3607 return NULL;
3608 3608
3609 if (drm_gem_object_init(dev, &obj->base, size) != 0) { 3609 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3610 kfree(obj); 3610 kfree(obj);
3611 return NULL; 3611 return NULL;
3612 } 3612 }
3613 3613
3614 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 3614 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3615 mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE); 3615 mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
3616 3616
3617 i915_gem_info_add_obj(dev_priv, size); 3617 i915_gem_info_add_obj(dev_priv, size);
3618 3618
3619 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3619 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3620 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3620 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3621 3621
3622 if (IS_GEN6(dev) || IS_GEN7(dev)) { 3622 if (HAS_LLC(dev)) {
3623 /* On Gen6, we can have the GPU use the LLC (the CPU 3623 /* On some devices, we can have the GPU use the LLC (the CPU
3624 * cache) for about a 10% performance improvement 3624 * cache) for about a 10% performance improvement
3625 * compared to uncached. Graphics requests other than 3625 * compared to uncached. Graphics requests other than
3626 * display scanout are coherent with the CPU in 3626 * display scanout are coherent with the CPU in
3627 * accessing this cache. This means in this mode we 3627 * accessing this cache. This means in this mode we
3628 * don't need to clflush on the CPU side, and on the 3628 * don't need to clflush on the CPU side, and on the
3629 * GPU side we only need to flush internal caches to 3629 * GPU side we only need to flush internal caches to
3630 * get data visible to the CPU. 3630 * get data visible to the CPU.
3631 * 3631 *
3632 * However, we maintain the display planes as UC, and so 3632 * However, we maintain the display planes as UC, and so
3633 * need to rebind when first used as such. 3633 * need to rebind when first used as such.
3634 */ 3634 */
3635 obj->cache_level = I915_CACHE_LLC; 3635 obj->cache_level = I915_CACHE_LLC;
3636 } else 3636 } else
3637 obj->cache_level = I915_CACHE_NONE; 3637 obj->cache_level = I915_CACHE_NONE;
3638 3638
3639 obj->base.driver_private = NULL; 3639 obj->base.driver_private = NULL;
3640 obj->fence_reg = I915_FENCE_REG_NONE; 3640 obj->fence_reg = I915_FENCE_REG_NONE;
3641 INIT_LIST_HEAD(&obj->mm_list); 3641 INIT_LIST_HEAD(&obj->mm_list);
3642 INIT_LIST_HEAD(&obj->gtt_list); 3642 INIT_LIST_HEAD(&obj->gtt_list);
3643 INIT_LIST_HEAD(&obj->ring_list); 3643 INIT_LIST_HEAD(&obj->ring_list);
3644 INIT_LIST_HEAD(&obj->exec_list); 3644 INIT_LIST_HEAD(&obj->exec_list);
3645 INIT_LIST_HEAD(&obj->gpu_write_list); 3645 INIT_LIST_HEAD(&obj->gpu_write_list);
3646 obj->madv = I915_MADV_WILLNEED; 3646 obj->madv = I915_MADV_WILLNEED;
3647 /* Avoid an unnecessary call to unbind on the first bind. */ 3647 /* Avoid an unnecessary call to unbind on the first bind. */
3648 obj->map_and_fenceable = true; 3648 obj->map_and_fenceable = true;
3649 3649
3650 return obj; 3650 return obj;
3651 } 3651 }
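The hunk above is the core of this patch: the hard-coded IS_GEN6/IS_GEN7 check becomes a test of the new LLC capability bit. The remaining files in this commit (not shown in this hunk) presumably wire the flag up along these lines: struct intel_device_info in i915_drv.h grows

	u8 has_llc:1;

i915_drv.h adds the accessor

	#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)

and the Sandybridge/Ivybridge entries in i915_drv.c set .has_llc = 1, so later platforms only need to flip one bit in their device description instead of growing the generation check.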
3652 3652
3653 int i915_gem_init_object(struct drm_gem_object *obj) 3653 int i915_gem_init_object(struct drm_gem_object *obj)
3654 { 3654 {
3655 BUG(); 3655 BUG();
3656 3656
3657 return 0; 3657 return 0;
3658 } 3658 }
3659 3659
3660 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) 3660 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
3661 { 3661 {
3662 struct drm_device *dev = obj->base.dev; 3662 struct drm_device *dev = obj->base.dev;
3663 drm_i915_private_t *dev_priv = dev->dev_private; 3663 drm_i915_private_t *dev_priv = dev->dev_private;
3664 int ret; 3664 int ret;
3665 3665
3666 ret = i915_gem_object_unbind(obj); 3666 ret = i915_gem_object_unbind(obj);
3667 if (ret == -ERESTARTSYS) { 3667 if (ret == -ERESTARTSYS) {
3668 list_move(&obj->mm_list, 3668 list_move(&obj->mm_list,
3669 &dev_priv->mm.deferred_free_list); 3669 &dev_priv->mm.deferred_free_list);
3670 return; 3670 return;
3671 } 3671 }
3672 3672
3673 trace_i915_gem_object_destroy(obj); 3673 trace_i915_gem_object_destroy(obj);
3674 3674
3675 if (obj->base.map_list.map) 3675 if (obj->base.map_list.map)
3676 drm_gem_free_mmap_offset(&obj->base); 3676 drm_gem_free_mmap_offset(&obj->base);
3677 3677
3678 drm_gem_object_release(&obj->base); 3678 drm_gem_object_release(&obj->base);
3679 i915_gem_info_remove_obj(dev_priv, obj->base.size); 3679 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3680 3680
3681 kfree(obj->page_cpu_valid); 3681 kfree(obj->page_cpu_valid);
3682 kfree(obj->bit_17); 3682 kfree(obj->bit_17);
3683 kfree(obj); 3683 kfree(obj);
3684 } 3684 }
3685 3685
3686 void i915_gem_free_object(struct drm_gem_object *gem_obj) 3686 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3687 { 3687 {
3688 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 3688 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3689 struct drm_device *dev = obj->base.dev; 3689 struct drm_device *dev = obj->base.dev;
3690 3690
3691 while (obj->pin_count > 0) 3691 while (obj->pin_count > 0)
3692 i915_gem_object_unpin(obj); 3692 i915_gem_object_unpin(obj);
3693 3693
3694 if (obj->phys_obj) 3694 if (obj->phys_obj)
3695 i915_gem_detach_phys_object(dev, obj); 3695 i915_gem_detach_phys_object(dev, obj);
3696 3696
3697 i915_gem_free_object_tail(obj); 3697 i915_gem_free_object_tail(obj);
3698 } 3698 }
3699 3699
3700 int 3700 int
3701 i915_gem_idle(struct drm_device *dev) 3701 i915_gem_idle(struct drm_device *dev)
3702 { 3702 {
3703 drm_i915_private_t *dev_priv = dev->dev_private; 3703 drm_i915_private_t *dev_priv = dev->dev_private;
3704 int ret; 3704 int ret;
3705 3705
3706 mutex_lock(&dev->struct_mutex); 3706 mutex_lock(&dev->struct_mutex);
3707 3707
3708 if (dev_priv->mm.suspended) { 3708 if (dev_priv->mm.suspended) {
3709 mutex_unlock(&dev->struct_mutex); 3709 mutex_unlock(&dev->struct_mutex);
3710 return 0; 3710 return 0;
3711 } 3711 }
3712 3712
3713 ret = i915_gpu_idle(dev); 3713 ret = i915_gpu_idle(dev);
3714 if (ret) { 3714 if (ret) {
3715 mutex_unlock(&dev->struct_mutex); 3715 mutex_unlock(&dev->struct_mutex);
3716 return ret; 3716 return ret;
3717 } 3717 }
3718 3718
3719 /* Under UMS, be paranoid and evict. */ 3719 /* Under UMS, be paranoid and evict. */
3720 if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 3720 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
3721 ret = i915_gem_evict_inactive(dev, false); 3721 ret = i915_gem_evict_inactive(dev, false);
3722 if (ret) { 3722 if (ret) {
3723 mutex_unlock(&dev->struct_mutex); 3723 mutex_unlock(&dev->struct_mutex);
3724 return ret; 3724 return ret;
3725 } 3725 }
3726 } 3726 }
3727 3727
3728 i915_gem_reset_fences(dev); 3728 i915_gem_reset_fences(dev);
3729 3729
3730 /* Hack! Don't let anybody do execbuf while we don't control the chip. 3730 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3731 * We need to replace this with a semaphore, or something. 3731 * We need to replace this with a semaphore, or something.
3732 * And not confound mm.suspended! 3732 * And not confound mm.suspended!
3733 */ 3733 */
3734 dev_priv->mm.suspended = 1; 3734 dev_priv->mm.suspended = 1;
3735 del_timer_sync(&dev_priv->hangcheck_timer); 3735 del_timer_sync(&dev_priv->hangcheck_timer);
3736 3736
3737 i915_kernel_lost_context(dev); 3737 i915_kernel_lost_context(dev);
3738 i915_gem_cleanup_ringbuffer(dev); 3738 i915_gem_cleanup_ringbuffer(dev);
3739 3739
3740 mutex_unlock(&dev->struct_mutex); 3740 mutex_unlock(&dev->struct_mutex);
3741 3741
3742 /* Cancel the retire work handler, which should be idle now. */ 3742 /* Cancel the retire work handler, which should be idle now. */
3743 cancel_delayed_work_sync(&dev_priv->mm.retire_work); 3743 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3744 3744
3745 return 0; 3745 return 0;
3746 } 3746 }
3747 3747
3748 int 3748 int
3749 i915_gem_init_ringbuffer(struct drm_device *dev) 3749 i915_gem_init_ringbuffer(struct drm_device *dev)
3750 { 3750 {
3751 drm_i915_private_t *dev_priv = dev->dev_private; 3751 drm_i915_private_t *dev_priv = dev->dev_private;
3752 int ret; 3752 int ret;
3753 3753
3754 ret = intel_init_render_ring_buffer(dev); 3754 ret = intel_init_render_ring_buffer(dev);
3755 if (ret) 3755 if (ret)
3756 return ret; 3756 return ret;
3757 3757
3758 if (HAS_BSD(dev)) { 3758 if (HAS_BSD(dev)) {
3759 ret = intel_init_bsd_ring_buffer(dev); 3759 ret = intel_init_bsd_ring_buffer(dev);
3760 if (ret) 3760 if (ret)
3761 goto cleanup_render_ring; 3761 goto cleanup_render_ring;
3762 } 3762 }
3763 3763
3764 if (HAS_BLT(dev)) { 3764 if (HAS_BLT(dev)) {
3765 ret = intel_init_blt_ring_buffer(dev); 3765 ret = intel_init_blt_ring_buffer(dev);
3766 if (ret) 3766 if (ret)
3767 goto cleanup_bsd_ring; 3767 goto cleanup_bsd_ring;
3768 } 3768 }
3769 3769
3770 dev_priv->next_seqno = 1; 3770 dev_priv->next_seqno = 1;
3771 3771
3772 return 0; 3772 return 0;
3773 3773
3774 cleanup_bsd_ring: 3774 cleanup_bsd_ring:
3775 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]); 3775 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3776 cleanup_render_ring: 3776 cleanup_render_ring:
3777 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]); 3777 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3778 return ret; 3778 return ret;
3779 } 3779 }
3780 3780
3781 void 3781 void
3782 i915_gem_cleanup_ringbuffer(struct drm_device *dev) 3782 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3783 { 3783 {
3784 drm_i915_private_t *dev_priv = dev->dev_private; 3784 drm_i915_private_t *dev_priv = dev->dev_private;
3785 int i; 3785 int i;
3786 3786
3787 for (i = 0; i < I915_NUM_RINGS; i++) 3787 for (i = 0; i < I915_NUM_RINGS; i++)
3788 intel_cleanup_ring_buffer(&dev_priv->ring[i]); 3788 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
3789 } 3789 }
3790 3790
3791 int 3791 int
3792 i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 3792 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3793 struct drm_file *file_priv) 3793 struct drm_file *file_priv)
3794 { 3794 {
3795 drm_i915_private_t *dev_priv = dev->dev_private; 3795 drm_i915_private_t *dev_priv = dev->dev_private;
3796 int ret, i; 3796 int ret, i;
3797 3797
3798 if (drm_core_check_feature(dev, DRIVER_MODESET)) 3798 if (drm_core_check_feature(dev, DRIVER_MODESET))
3799 return 0; 3799 return 0;
3800 3800
3801 if (atomic_read(&dev_priv->mm.wedged)) { 3801 if (atomic_read(&dev_priv->mm.wedged)) {
3802 DRM_ERROR("Reenabling wedged hardware, good luck\n"); 3802 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3803 atomic_set(&dev_priv->mm.wedged, 0); 3803 atomic_set(&dev_priv->mm.wedged, 0);
3804 } 3804 }
3805 3805
3806 mutex_lock(&dev->struct_mutex); 3806 mutex_lock(&dev->struct_mutex);
3807 dev_priv->mm.suspended = 0; 3807 dev_priv->mm.suspended = 0;
3808 3808
3809 ret = i915_gem_init_ringbuffer(dev); 3809 ret = i915_gem_init_ringbuffer(dev);
3810 if (ret != 0) { 3810 if (ret != 0) {
3811 mutex_unlock(&dev->struct_mutex); 3811 mutex_unlock(&dev->struct_mutex);
3812 return ret; 3812 return ret;
3813 } 3813 }
3814 3814
3815 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 3815 BUG_ON(!list_empty(&dev_priv->mm.active_list));
3816 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 3816 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3817 BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); 3817 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3818 for (i = 0; i < I915_NUM_RINGS; i++) { 3818 for (i = 0; i < I915_NUM_RINGS; i++) {
3819 BUG_ON(!list_empty(&dev_priv->ring[i].active_list)); 3819 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3820 BUG_ON(!list_empty(&dev_priv->ring[i].request_list)); 3820 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3821 } 3821 }
3822 mutex_unlock(&dev->struct_mutex); 3822 mutex_unlock(&dev->struct_mutex);
3823 3823
3824 ret = drm_irq_install(dev); 3824 ret = drm_irq_install(dev);
3825 if (ret) 3825 if (ret)
3826 goto cleanup_ringbuffer; 3826 goto cleanup_ringbuffer;
3827 3827
3828 return 0; 3828 return 0;
3829 3829
3830 cleanup_ringbuffer: 3830 cleanup_ringbuffer:
3831 mutex_lock(&dev->struct_mutex); 3831 mutex_lock(&dev->struct_mutex);
3832 i915_gem_cleanup_ringbuffer(dev); 3832 i915_gem_cleanup_ringbuffer(dev);
3833 dev_priv->mm.suspended = 1; 3833 dev_priv->mm.suspended = 1;
3834 mutex_unlock(&dev->struct_mutex); 3834 mutex_unlock(&dev->struct_mutex);
3835 3835
3836 return ret; 3836 return ret;
3837 } 3837 }
3838 3838
3839 int 3839 int
3840 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 3840 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3841 struct drm_file *file_priv) 3841 struct drm_file *file_priv)
3842 { 3842 {
3843 if (drm_core_check_feature(dev, DRIVER_MODESET)) 3843 if (drm_core_check_feature(dev, DRIVER_MODESET))
3844 return 0; 3844 return 0;
3845 3845
3846 drm_irq_uninstall(dev); 3846 drm_irq_uninstall(dev);
3847 return i915_gem_idle(dev); 3847 return i915_gem_idle(dev);
3848 } 3848 }
3849 3849
3850 void 3850 void
3851 i915_gem_lastclose(struct drm_device *dev) 3851 i915_gem_lastclose(struct drm_device *dev)
3852 { 3852 {
3853 int ret; 3853 int ret;
3854 3854
3855 if (drm_core_check_feature(dev, DRIVER_MODESET)) 3855 if (drm_core_check_feature(dev, DRIVER_MODESET))
3856 return; 3856 return;
3857 3857
3858 ret = i915_gem_idle(dev); 3858 ret = i915_gem_idle(dev);
3859 if (ret) 3859 if (ret)
3860 DRM_ERROR("failed to idle hardware: %d\n", ret); 3860 DRM_ERROR("failed to idle hardware: %d\n", ret);
3861 } 3861 }
3862 3862
3863 static void 3863 static void
3864 init_ring_lists(struct intel_ring_buffer *ring) 3864 init_ring_lists(struct intel_ring_buffer *ring)
3865 { 3865 {
3866 INIT_LIST_HEAD(&ring->active_list); 3866 INIT_LIST_HEAD(&ring->active_list);
3867 INIT_LIST_HEAD(&ring->request_list); 3867 INIT_LIST_HEAD(&ring->request_list);
3868 INIT_LIST_HEAD(&ring->gpu_write_list); 3868 INIT_LIST_HEAD(&ring->gpu_write_list);
3869 } 3869 }
3870 3870
3871 void 3871 void
3872 i915_gem_load(struct drm_device *dev) 3872 i915_gem_load(struct drm_device *dev)
3873 { 3873 {
3874 int i; 3874 int i;
3875 drm_i915_private_t *dev_priv = dev->dev_private; 3875 drm_i915_private_t *dev_priv = dev->dev_private;
3876 3876
3877 INIT_LIST_HEAD(&dev_priv->mm.active_list); 3877 INIT_LIST_HEAD(&dev_priv->mm.active_list);
3878 INIT_LIST_HEAD(&dev_priv->mm.flushing_list); 3878 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3879 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 3879 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3880 INIT_LIST_HEAD(&dev_priv->mm.pinned_list); 3880 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
3881 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 3881 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3882 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); 3882 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
3883 INIT_LIST_HEAD(&dev_priv->mm.gtt_list); 3883 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3884 for (i = 0; i < I915_NUM_RINGS; i++) 3884 for (i = 0; i < I915_NUM_RINGS; i++)
3885 init_ring_lists(&dev_priv->ring[i]); 3885 init_ring_lists(&dev_priv->ring[i]);
3886 for (i = 0; i < I915_MAX_NUM_FENCES; i++) 3886 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3887 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 3887 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3888 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 3888 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3889 i915_gem_retire_work_handler); 3889 i915_gem_retire_work_handler);
3890 init_completion(&dev_priv->error_completion); 3890 init_completion(&dev_priv->error_completion);
3891 3891
3892 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 3892 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3893 if (IS_GEN3(dev)) { 3893 if (IS_GEN3(dev)) {
3894 u32 tmp = I915_READ(MI_ARB_STATE); 3894 u32 tmp = I915_READ(MI_ARB_STATE);
3895 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) { 3895 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3896 /* arb state is a masked write, so set bit + bit in mask */ 3896 /* arb state is a masked write, so set bit + bit in mask */
3897 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT); 3897 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3898 I915_WRITE(MI_ARB_STATE, tmp); 3898 I915_WRITE(MI_ARB_STATE, tmp);
3899 } 3899 }
3900 } 3900 }
3901 3901
3902 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; 3902 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3903 3903
3904 /* Old X drivers will take 0-2 for front, back, depth buffers */ 3904 /* Old X drivers will take 0-2 for front, back, depth buffers */
3905 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3905 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3906 dev_priv->fence_reg_start = 3; 3906 dev_priv->fence_reg_start = 3;
3907 3907
3908 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 3908 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3909 dev_priv->num_fence_regs = 16; 3909 dev_priv->num_fence_regs = 16;
3910 else 3910 else
3911 dev_priv->num_fence_regs = 8; 3911 dev_priv->num_fence_regs = 8;
3912 3912
3913 /* Initialize fence registers to zero */ 3913 /* Initialize fence registers to zero */
3914 for (i = 0; i < dev_priv->num_fence_regs; i++) { 3914 for (i = 0; i < dev_priv->num_fence_regs; i++) {
3915 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]); 3915 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
3916 } 3916 }
3917 3917
3918 i915_gem_detect_bit_6_swizzle(dev); 3918 i915_gem_detect_bit_6_swizzle(dev);
3919 init_waitqueue_head(&dev_priv->pending_flip_queue); 3919 init_waitqueue_head(&dev_priv->pending_flip_queue);
3920 3920
3921 dev_priv->mm.interruptible = true; 3921 dev_priv->mm.interruptible = true;
3922 3922
3923 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink; 3923 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3924 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS; 3924 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3925 register_shrinker(&dev_priv->mm.inactive_shrinker); 3925 register_shrinker(&dev_priv->mm.inactive_shrinker);
3926 } 3926 }
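The MI_ARB_STATE write in i915_gem_load() above relies on the register being "masked": the upper 16 bits of the written value select which of the lower 16 bits actually change, so unrelated arbiter bits are preserved without a read-modify-write of the data half. A sketch of that convention (the helper macros are invented for illustration; the driver open-codes the same pattern above):

	/* Only bits whose mask bit (bit << 16) is set take effect. */
	#define EXAMPLE_MASKED_ENABLE(bit)	(((bit) << 16) | (bit))
	#define EXAMPLE_MASKED_DISABLE(bit)	((bit) << 16)

	/* Equivalent to the open-coded tmp above:
	 * I915_WRITE(MI_ARB_STATE,
	 *            EXAMPLE_MASKED_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
	 */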
3927 3927
3928 /* 3928 /*
3929 * Create a physically contiguous memory object for this object 3929 * Create a physically contiguous memory object for this object
3930 * e.g. for cursor + overlay regs 3930 * e.g. for cursor + overlay regs
3931 */ 3931 */
3932 static int i915_gem_init_phys_object(struct drm_device *dev, 3932 static int i915_gem_init_phys_object(struct drm_device *dev,
3933 int id, int size, int align) 3933 int id, int size, int align)
3934 { 3934 {
3935 drm_i915_private_t *dev_priv = dev->dev_private; 3935 drm_i915_private_t *dev_priv = dev->dev_private;
3936 struct drm_i915_gem_phys_object *phys_obj; 3936 struct drm_i915_gem_phys_object *phys_obj;
3937 int ret; 3937 int ret;
3938 3938
3939 if (dev_priv->mm.phys_objs[id - 1] || !size) 3939 if (dev_priv->mm.phys_objs[id - 1] || !size)
3940 return 0; 3940 return 0;
3941 3941
3942 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL); 3942 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
3943 if (!phys_obj) 3943 if (!phys_obj)
3944 return -ENOMEM; 3944 return -ENOMEM;
3945 3945
3946 phys_obj->id = id; 3946 phys_obj->id = id;
3947 3947
3948 phys_obj->handle = drm_pci_alloc(dev, size, align); 3948 phys_obj->handle = drm_pci_alloc(dev, size, align);
3949 if (!phys_obj->handle) { 3949 if (!phys_obj->handle) {
3950 ret = -ENOMEM; 3950 ret = -ENOMEM;
3951 goto kfree_obj; 3951 goto kfree_obj;
3952 } 3952 }
3953 #ifdef CONFIG_X86 3953 #ifdef CONFIG_X86
3954 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); 3954 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3955 #endif 3955 #endif
3956 3956
3957 dev_priv->mm.phys_objs[id - 1] = phys_obj; 3957 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3958 3958
3959 return 0; 3959 return 0;
3960 kfree_obj: 3960 kfree_obj:
3961 kfree(phys_obj); 3961 kfree(phys_obj);
3962 return ret; 3962 return ret;
3963 } 3963 }
3964 3964
3965 static void i915_gem_free_phys_object(struct drm_device *dev, int id) 3965 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
3966 { 3966 {
3967 drm_i915_private_t *dev_priv = dev->dev_private; 3967 drm_i915_private_t *dev_priv = dev->dev_private;
3968 struct drm_i915_gem_phys_object *phys_obj; 3968 struct drm_i915_gem_phys_object *phys_obj;
3969 3969
3970 if (!dev_priv->mm.phys_objs[id - 1]) 3970 if (!dev_priv->mm.phys_objs[id - 1])
3971 return; 3971 return;
3972 3972
3973 phys_obj = dev_priv->mm.phys_objs[id - 1]; 3973 phys_obj = dev_priv->mm.phys_objs[id - 1];
3974 if (phys_obj->cur_obj) { 3974 if (phys_obj->cur_obj) {
3975 i915_gem_detach_phys_object(dev, phys_obj->cur_obj); 3975 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3976 } 3976 }
3977 3977
3978 #ifdef CONFIG_X86 3978 #ifdef CONFIG_X86
3979 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); 3979 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3980 #endif 3980 #endif
3981 drm_pci_free(dev, phys_obj->handle); 3981 drm_pci_free(dev, phys_obj->handle);
3982 kfree(phys_obj); 3982 kfree(phys_obj);
3983 dev_priv->mm.phys_objs[id - 1] = NULL; 3983 dev_priv->mm.phys_objs[id - 1] = NULL;
3984 } 3984 }
3985 3985
3986 void i915_gem_free_all_phys_object(struct drm_device *dev) 3986 void i915_gem_free_all_phys_object(struct drm_device *dev)
3987 { 3987 {
3988 int i; 3988 int i;
3989 3989
3990 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++) 3990 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3991 i915_gem_free_phys_object(dev, i); 3991 i915_gem_free_phys_object(dev, i);
3992 } 3992 }
3993 3993
3994 void i915_gem_detach_phys_object(struct drm_device *dev, 3994 void i915_gem_detach_phys_object(struct drm_device *dev,
3995 struct drm_i915_gem_object *obj) 3995 struct drm_i915_gem_object *obj)
3996 { 3996 {
3997 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 3997 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3998 char *vaddr; 3998 char *vaddr;
3999 int i; 3999 int i;
4000 int page_count; 4000 int page_count;
4001 4001
4002 if (!obj->phys_obj) 4002 if (!obj->phys_obj)
4003 return; 4003 return;
4004 vaddr = obj->phys_obj->handle->vaddr; 4004 vaddr = obj->phys_obj->handle->vaddr;
4005 4005
4006 page_count = obj->base.size / PAGE_SIZE; 4006 page_count = obj->base.size / PAGE_SIZE;
4007 for (i = 0; i < page_count; i++) { 4007 for (i = 0; i < page_count; i++) {
4008 struct page *page = shmem_read_mapping_page(mapping, i); 4008 struct page *page = shmem_read_mapping_page(mapping, i);
4009 if (!IS_ERR(page)) { 4009 if (!IS_ERR(page)) {
4010 char *dst = kmap_atomic(page); 4010 char *dst = kmap_atomic(page);
4011 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); 4011 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4012 kunmap_atomic(dst); 4012 kunmap_atomic(dst);
4013 4013
4014 drm_clflush_pages(&page, 1); 4014 drm_clflush_pages(&page, 1);
4015 4015
4016 set_page_dirty(page); 4016 set_page_dirty(page);
4017 mark_page_accessed(page); 4017 mark_page_accessed(page);
4018 page_cache_release(page); 4018 page_cache_release(page);
4019 } 4019 }
4020 } 4020 }
4021 intel_gtt_chipset_flush(); 4021 intel_gtt_chipset_flush();
4022 4022
4023 obj->phys_obj->cur_obj = NULL; 4023 obj->phys_obj->cur_obj = NULL;
4024 obj->phys_obj = NULL; 4024 obj->phys_obj = NULL;
4025 } 4025 }
4026 4026
4027 int 4027 int
4028 i915_gem_attach_phys_object(struct drm_device *dev, 4028 i915_gem_attach_phys_object(struct drm_device *dev,
4029 struct drm_i915_gem_object *obj, 4029 struct drm_i915_gem_object *obj,
4030 int id, 4030 int id,
4031 int align) 4031 int align)
4032 { 4032 {
4033 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 4033 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4034 drm_i915_private_t *dev_priv = dev->dev_private; 4034 drm_i915_private_t *dev_priv = dev->dev_private;
4035 int ret = 0; 4035 int ret = 0;
4036 int page_count; 4036 int page_count;
4037 int i; 4037 int i;
4038 4038
4039 if (id > I915_MAX_PHYS_OBJECT) 4039 if (id > I915_MAX_PHYS_OBJECT)
4040 return -EINVAL; 4040 return -EINVAL;
4041 4041
4042 if (obj->phys_obj) { 4042 if (obj->phys_obj) {
4043 if (obj->phys_obj->id == id) 4043 if (obj->phys_obj->id == id)
4044 return 0; 4044 return 0;
4045 i915_gem_detach_phys_object(dev, obj); 4045 i915_gem_detach_phys_object(dev, obj);
4046 } 4046 }
4047 4047
4048 /* create a new object */ 4048 /* create a new object */
4049 if (!dev_priv->mm.phys_objs[id - 1]) { 4049 if (!dev_priv->mm.phys_objs[id - 1]) {
4050 ret = i915_gem_init_phys_object(dev, id, 4050 ret = i915_gem_init_phys_object(dev, id,
4051 obj->base.size, align); 4051 obj->base.size, align);
4052 if (ret) { 4052 if (ret) {
4053 DRM_ERROR("failed to init phys object %d size: %zu\n", 4053 DRM_ERROR("failed to init phys object %d size: %zu\n",
4054 id, obj->base.size); 4054 id, obj->base.size);
4055 return ret; 4055 return ret;
4056 } 4056 }
4057 } 4057 }
4058 4058
4059 /* bind to the object */ 4059 /* bind to the object */
4060 obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; 4060 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4061 obj->phys_obj->cur_obj = obj; 4061 obj->phys_obj->cur_obj = obj;
4062 4062
4063 page_count = obj->base.size / PAGE_SIZE; 4063 page_count = obj->base.size / PAGE_SIZE;
4064 4064
4065 for (i = 0; i < page_count; i++) { 4065 for (i = 0; i < page_count; i++) {
4066 struct page *page; 4066 struct page *page;
4067 char *dst, *src; 4067 char *dst, *src;
4068 4068
4069 page = shmem_read_mapping_page(mapping, i); 4069 page = shmem_read_mapping_page(mapping, i);
4070 if (IS_ERR(page)) 4070 if (IS_ERR(page))
4071 return PTR_ERR(page); 4071 return PTR_ERR(page);
4072 4072
4073 src = kmap_atomic(page); 4073 src = kmap_atomic(page);
4074 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE); 4074 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4075 memcpy(dst, src, PAGE_SIZE); 4075 memcpy(dst, src, PAGE_SIZE);
4076 kunmap_atomic(src); 4076 kunmap_atomic(src);
4077 4077
4078 mark_page_accessed(page); 4078 mark_page_accessed(page);
4079 page_cache_release(page); 4079 page_cache_release(page);
4080 } 4080 }
4081 4081
4082 return 0; 4082 return 0;
4083 } 4083 }
4084 4084
4085 static int 4085 static int
4086 i915_gem_phys_pwrite(struct drm_device *dev, 4086 i915_gem_phys_pwrite(struct drm_device *dev,
4087 struct drm_i915_gem_object *obj, 4087 struct drm_i915_gem_object *obj,
4088 struct drm_i915_gem_pwrite *args, 4088 struct drm_i915_gem_pwrite *args,
4089 struct drm_file *file_priv) 4089 struct drm_file *file_priv)
4090 { 4090 {
4091 void *vaddr = obj->phys_obj->handle->vaddr + args->offset; 4091 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4092 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; 4092 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4093 4093
4094 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { 4094 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4095 unsigned long unwritten; 4095 unsigned long unwritten;
4096 4096
4097 /* The physical object once assigned is fixed for the lifetime 4097 /* The physical object once assigned is fixed for the lifetime
4098 * of the obj, so we can safely drop the lock and continue 4098 * of the obj, so we can safely drop the lock and continue
4099 * to access vaddr. 4099 * to access vaddr.
4100 */ 4100 */
4101 mutex_unlock(&dev->struct_mutex); 4101 mutex_unlock(&dev->struct_mutex);
4102 unwritten = copy_from_user(vaddr, user_data, args->size); 4102 unwritten = copy_from_user(vaddr, user_data, args->size);
4103 mutex_lock(&dev->struct_mutex); 4103 mutex_lock(&dev->struct_mutex);
4104 if (unwritten) 4104 if (unwritten)
4105 return -EFAULT; 4105 return -EFAULT;
4106 } 4106 }
4107 4107
4108 intel_gtt_chipset_flush(); 4108 intel_gtt_chipset_flush();
4109 return 0; 4109 return 0;
4110 } 4110 }
4111 4111
4112 void i915_gem_release(struct drm_device *dev, struct drm_file *file) 4112 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4113 { 4113 {
4114 struct drm_i915_file_private *file_priv = file->driver_priv; 4114 struct drm_i915_file_private *file_priv = file->driver_priv;
4115 4115
4116 /* Clean up our request list when the client is going away, so that 4116 /* Clean up our request list when the client is going away, so that
4117 * later retire_requests won't dereference our soon-to-be-gone 4117 * later retire_requests won't dereference our soon-to-be-gone
4118 * file_priv. 4118 * file_priv.
4119 */ 4119 */
4120 spin_lock(&file_priv->mm.lock); 4120 spin_lock(&file_priv->mm.lock);
4121 while (!list_empty(&file_priv->mm.request_list)) { 4121 while (!list_empty(&file_priv->mm.request_list)) {
4122 struct drm_i915_gem_request *request; 4122 struct drm_i915_gem_request *request;
4123 4123
4124 request = list_first_entry(&file_priv->mm.request_list, 4124 request = list_first_entry(&file_priv->mm.request_list,
4125 struct drm_i915_gem_request, 4125 struct drm_i915_gem_request,
4126 client_list); 4126 client_list);
4127 list_del(&request->client_list); 4127 list_del(&request->client_list);
4128 request->file_priv = NULL; 4128 request->file_priv = NULL;
4129 } 4129 }
4130 spin_unlock(&file_priv->mm.lock); 4130 spin_unlock(&file_priv->mm.lock);
4131 } 4131 }
4132 4132
4133 static int 4133 static int
4134 i915_gpu_is_active(struct drm_device *dev) 4134 i915_gpu_is_active(struct drm_device *dev)
4135 { 4135 {
4136 drm_i915_private_t *dev_priv = dev->dev_private; 4136 drm_i915_private_t *dev_priv = dev->dev_private;
4137 int lists_empty; 4137 int lists_empty;
4138 4138
4139 lists_empty = list_empty(&dev_priv->mm.flushing_list) && 4139 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4140 list_empty(&dev_priv->mm.active_list); 4140 list_empty(&dev_priv->mm.active_list);
4141 4141
4142 return !lists_empty; 4142 return !lists_empty;
4143 } 4143 }
4144 4144
4145 static int 4145 static int
4146 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) 4146 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4147 { 4147 {
4148 struct drm_i915_private *dev_priv = 4148 struct drm_i915_private *dev_priv =
4149 container_of(shrinker, 4149 container_of(shrinker,
4150 struct drm_i915_private, 4150 struct drm_i915_private,
4151 mm.inactive_shrinker); 4151 mm.inactive_shrinker);
4152 struct drm_device *dev = dev_priv->dev; 4152 struct drm_device *dev = dev_priv->dev;
4153 struct drm_i915_gem_object *obj, *next; 4153 struct drm_i915_gem_object *obj, *next;
4154 int nr_to_scan = sc->nr_to_scan; 4154 int nr_to_scan = sc->nr_to_scan;
4155 int cnt; 4155 int cnt;
4156 4156
4157 if (!mutex_trylock(&dev->struct_mutex)) 4157 if (!mutex_trylock(&dev->struct_mutex))
4158 return 0; 4158 return 0;
4159 4159
4160 /* "fast-path" to count number of available objects */ 4160 /* "fast-path" to count number of available objects */
4161 if (nr_to_scan == 0) { 4161 if (nr_to_scan == 0) {
4162 cnt = 0; 4162 cnt = 0;
4163 list_for_each_entry(obj, 4163 list_for_each_entry(obj,
4164 &dev_priv->mm.inactive_list, 4164 &dev_priv->mm.inactive_list,
4165 mm_list) 4165 mm_list)
4166 cnt++; 4166 cnt++;
4167 mutex_unlock(&dev->struct_mutex); 4167 mutex_unlock(&dev->struct_mutex);
4168 return cnt / 100 * sysctl_vfs_cache_pressure; 4168 return cnt / 100 * sysctl_vfs_cache_pressure;
4169 } 4169 }
4170 4170
4171 rescan: 4171 rescan:
4172 /* first scan for clean buffers */ 4172 /* first scan for clean buffers */
4173 i915_gem_retire_requests(dev); 4173 i915_gem_retire_requests(dev);
4174 4174
4175 list_for_each_entry_safe(obj, next, 4175 list_for_each_entry_safe(obj, next,
4176 &dev_priv->mm.inactive_list, 4176 &dev_priv->mm.inactive_list,
4177 mm_list) { 4177 mm_list) {
4178 if (i915_gem_object_is_purgeable(obj)) { 4178 if (i915_gem_object_is_purgeable(obj)) {
4179 if (i915_gem_object_unbind(obj) == 0 && 4179 if (i915_gem_object_unbind(obj) == 0 &&
4180 --nr_to_scan == 0) 4180 --nr_to_scan == 0)
4181 break; 4181 break;
4182 } 4182 }
4183 } 4183 }
4184 4184
4185 /* second pass, evict/count anything still on the inactive list */ 4185 /* second pass, evict/count anything still on the inactive list */
4186 cnt = 0; 4186 cnt = 0;
4187 list_for_each_entry_safe(obj, next, 4187 list_for_each_entry_safe(obj, next,
4188 &dev_priv->mm.inactive_list, 4188 &dev_priv->mm.inactive_list,
4189 mm_list) { 4189 mm_list) {
4190 if (nr_to_scan && 4190 if (nr_to_scan &&
4191 i915_gem_object_unbind(obj) == 0) 4191 i915_gem_object_unbind(obj) == 0)
4192 nr_to_scan--; 4192 nr_to_scan--;
4193 else 4193 else
4194 cnt++; 4194 cnt++;
4195 } 4195 }
4196 4196
4197 if (nr_to_scan && i915_gpu_is_active(dev)) { 4197 if (nr_to_scan && i915_gpu_is_active(dev)) {
4198 /* 4198 /*
4199 * We are desperate for pages, so as a last resort, wait 4199 * We are desperate for pages, so as a last resort, wait
4200 * for the GPU to finish and discard whatever we can. 4200 * for the GPU to finish and discard whatever we can.
4201 * This has a dramatic impact to reduce the number of 4201 * This has a dramatic impact to reduce the number of
4202 * OOM-killer events whilst running the GPU aggressively. 4202 * OOM-killer events whilst running the GPU aggressively.
4203 */ 4203 */
4204 if (i915_gpu_idle(dev) == 0) 4204 if (i915_gpu_idle(dev) == 0)
4205 goto rescan; 4205 goto rescan;
4206 } 4206 }
4207 mutex_unlock(&dev->struct_mutex); 4207 mutex_unlock(&dev->struct_mutex);
4208 return cnt / 100 * sysctl_vfs_cache_pressure; 4208 return cnt / 100 * sysctl_vfs_cache_pressure;
4209 } 4209 }
4210 4210
include/drm/i915_drm.h
1 /* 1 /*
2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a 5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the 6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including 7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish, 8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to 9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to 10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions: 11 * the following conditions:
12 * 12 *
13 * The above copyright notice and this permission notice (including the 13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions 14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software. 15 * of the Software.
16 * 16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26 26
27 #ifndef _I915_DRM_H_ 27 #ifndef _I915_DRM_H_
28 #define _I915_DRM_H_ 28 #define _I915_DRM_H_
29 29
30 #include "drm.h" 30 #include "drm.h"
31 31
32 /* Please note that modifications to all structs defined here are 32 /* Please note that modifications to all structs defined here are
33 * subject to backwards-compatibility constraints. 33 * subject to backwards-compatibility constraints.
34 */ 34 */
35 35
36 #ifdef __KERNEL__ 36 #ifdef __KERNEL__
37 /* For use by IPS driver */ 37 /* For use by IPS driver */
38 extern unsigned long i915_read_mch_val(void); 38 extern unsigned long i915_read_mch_val(void);
39 extern bool i915_gpu_raise(void); 39 extern bool i915_gpu_raise(void);
40 extern bool i915_gpu_lower(void); 40 extern bool i915_gpu_lower(void);
41 extern bool i915_gpu_busy(void); 41 extern bool i915_gpu_busy(void);
42 extern bool i915_gpu_turbo_disable(void); 42 extern bool i915_gpu_turbo_disable(void);
43 #endif 43 #endif
44 44
45 /* Each region is a minimum of 16k, and there are at most 255 of them. 45 /* Each region is a minimum of 16k, and there are at most 255 of them.
46 */ 46 */
47 #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use 47 #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
48 * of chars for next/prev indices */ 48 * of chars for next/prev indices */
49 #define I915_LOG_MIN_TEX_REGION_SIZE 14 49 #define I915_LOG_MIN_TEX_REGION_SIZE 14
50 50
51 typedef struct _drm_i915_init { 51 typedef struct _drm_i915_init {
52 enum { 52 enum {
53 I915_INIT_DMA = 0x01, 53 I915_INIT_DMA = 0x01,
54 I915_CLEANUP_DMA = 0x02, 54 I915_CLEANUP_DMA = 0x02,
55 I915_RESUME_DMA = 0x03 55 I915_RESUME_DMA = 0x03
56 } func; 56 } func;
57 unsigned int mmio_offset; 57 unsigned int mmio_offset;
58 int sarea_priv_offset; 58 int sarea_priv_offset;
59 unsigned int ring_start; 59 unsigned int ring_start;
60 unsigned int ring_end; 60 unsigned int ring_end;
61 unsigned int ring_size; 61 unsigned int ring_size;
62 unsigned int front_offset; 62 unsigned int front_offset;
63 unsigned int back_offset; 63 unsigned int back_offset;
64 unsigned int depth_offset; 64 unsigned int depth_offset;
65 unsigned int w; 65 unsigned int w;
66 unsigned int h; 66 unsigned int h;
67 unsigned int pitch; 67 unsigned int pitch;
68 unsigned int pitch_bits; 68 unsigned int pitch_bits;
69 unsigned int back_pitch; 69 unsigned int back_pitch;
70 unsigned int depth_pitch; 70 unsigned int depth_pitch;
71 unsigned int cpp; 71 unsigned int cpp;
72 unsigned int chipset; 72 unsigned int chipset;
73 } drm_i915_init_t; 73 } drm_i915_init_t;
74 74
75 typedef struct _drm_i915_sarea { 75 typedef struct _drm_i915_sarea {
76 struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; 76 struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
77 int last_upload; /* last time texture was uploaded */ 77 int last_upload; /* last time texture was uploaded */
78 int last_enqueue; /* last time a buffer was enqueued */ 78 int last_enqueue; /* last time a buffer was enqueued */
79 int last_dispatch; /* age of the most recently dispatched buffer */ 79 int last_dispatch; /* age of the most recently dispatched buffer */
80 int ctxOwner; /* last context to upload state */ 80 int ctxOwner; /* last context to upload state */
81 int texAge; 81 int texAge;
82 int pf_enabled; /* is pageflipping allowed? */ 82 int pf_enabled; /* is pageflipping allowed? */
83 int pf_active; 83 int pf_active;
84 int pf_current_page; /* which buffer is being displayed? */ 84 int pf_current_page; /* which buffer is being displayed? */
85 int perf_boxes; /* performance boxes to be displayed */ 85 int perf_boxes; /* performance boxes to be displayed */
86 int width, height; /* screen size in pixels */ 86 int width, height; /* screen size in pixels */
87 87
88 drm_handle_t front_handle; 88 drm_handle_t front_handle;
89 int front_offset; 89 int front_offset;
90 int front_size; 90 int front_size;
91 91
92 drm_handle_t back_handle; 92 drm_handle_t back_handle;
93 int back_offset; 93 int back_offset;
94 int back_size; 94 int back_size;
95 95
96 drm_handle_t depth_handle; 96 drm_handle_t depth_handle;
97 int depth_offset; 97 int depth_offset;
98 int depth_size; 98 int depth_size;
99 99
100 drm_handle_t tex_handle; 100 drm_handle_t tex_handle;
101 int tex_offset; 101 int tex_offset;
102 int tex_size; 102 int tex_size;
103 int log_tex_granularity; 103 int log_tex_granularity;
104 int pitch; 104 int pitch;
105 int rotation; /* 0, 90, 180 or 270 */ 105 int rotation; /* 0, 90, 180 or 270 */
106 int rotated_offset; 106 int rotated_offset;
107 int rotated_size; 107 int rotated_size;
108 int rotated_pitch; 108 int rotated_pitch;
109 int virtualX, virtualY; 109 int virtualX, virtualY;
110 110
111 unsigned int front_tiled; 111 unsigned int front_tiled;
112 unsigned int back_tiled; 112 unsigned int back_tiled;
113 unsigned int depth_tiled; 113 unsigned int depth_tiled;
114 unsigned int rotated_tiled; 114 unsigned int rotated_tiled;
115 unsigned int rotated2_tiled; 115 unsigned int rotated2_tiled;
116 116
117 int pipeA_x; 117 int pipeA_x;
118 int pipeA_y; 118 int pipeA_y;
119 int pipeA_w; 119 int pipeA_w;
120 int pipeA_h; 120 int pipeA_h;
121 int pipeB_x; 121 int pipeB_x;
122 int pipeB_y; 122 int pipeB_y;
123 int pipeB_w; 123 int pipeB_w;
124 int pipeB_h; 124 int pipeB_h;
125 125
126 /* fill out some space for old userspace triple buffer */ 126 /* fill out some space for old userspace triple buffer */
127 drm_handle_t unused_handle; 127 drm_handle_t unused_handle;
128 __u32 unused1, unused2, unused3; 128 __u32 unused1, unused2, unused3;
129 129
130 /* buffer object handles for static buffers. May change 130 /* buffer object handles for static buffers. May change
131 * over the lifetime of the client. 131 * over the lifetime of the client.
132 */ 132 */
133 __u32 front_bo_handle; 133 __u32 front_bo_handle;
134 __u32 back_bo_handle; 134 __u32 back_bo_handle;
135 __u32 unused_bo_handle; 135 __u32 unused_bo_handle;
136 __u32 depth_bo_handle; 136 __u32 depth_bo_handle;
137 137
138 } drm_i915_sarea_t; 138 } drm_i915_sarea_t;
139 139
140 /* due to userspace building against these headers we need some compat here */ 140 /* due to userspace building against these headers we need some compat here */
141 #define planeA_x pipeA_x 141 #define planeA_x pipeA_x
142 #define planeA_y pipeA_y 142 #define planeA_y pipeA_y
143 #define planeA_w pipeA_w 143 #define planeA_w pipeA_w
144 #define planeA_h pipeA_h 144 #define planeA_h pipeA_h
145 #define planeB_x pipeB_x 145 #define planeB_x pipeB_x
146 #define planeB_y pipeB_y 146 #define planeB_y pipeB_y
147 #define planeB_w pipeB_w 147 #define planeB_w pipeB_w
148 #define planeB_h pipeB_h 148 #define planeB_h pipeB_h
149 149
150 /* Flags for perf_boxes 150 /* Flags for perf_boxes
151 */ 151 */
152 #define I915_BOX_RING_EMPTY 0x1 152 #define I915_BOX_RING_EMPTY 0x1
153 #define I915_BOX_FLIP 0x2 153 #define I915_BOX_FLIP 0x2
154 #define I915_BOX_WAIT 0x4 154 #define I915_BOX_WAIT 0x4
155 #define I915_BOX_TEXTURE_LOAD 0x8 155 #define I915_BOX_TEXTURE_LOAD 0x8
156 #define I915_BOX_LOST_CONTEXT 0x10 156 #define I915_BOX_LOST_CONTEXT 0x10
157 157
158 /* I915 specific ioctls 158 /* I915 specific ioctls
159 * The device specific ioctl range is 0x40 to 0x79. 159 * The device specific ioctl range is 0x40 to 0x79.
160 */ 160 */
161 #define DRM_I915_INIT 0x00 161 #define DRM_I915_INIT 0x00
162 #define DRM_I915_FLUSH 0x01 162 #define DRM_I915_FLUSH 0x01
163 #define DRM_I915_FLIP 0x02 163 #define DRM_I915_FLIP 0x02
164 #define DRM_I915_BATCHBUFFER 0x03 164 #define DRM_I915_BATCHBUFFER 0x03
165 #define DRM_I915_IRQ_EMIT 0x04 165 #define DRM_I915_IRQ_EMIT 0x04
166 #define DRM_I915_IRQ_WAIT 0x05 166 #define DRM_I915_IRQ_WAIT 0x05
167 #define DRM_I915_GETPARAM 0x06 167 #define DRM_I915_GETPARAM 0x06
168 #define DRM_I915_SETPARAM 0x07 168 #define DRM_I915_SETPARAM 0x07
169 #define DRM_I915_ALLOC 0x08 169 #define DRM_I915_ALLOC 0x08
170 #define DRM_I915_FREE 0x09 170 #define DRM_I915_FREE 0x09
171 #define DRM_I915_INIT_HEAP 0x0a 171 #define DRM_I915_INIT_HEAP 0x0a
172 #define DRM_I915_CMDBUFFER 0x0b 172 #define DRM_I915_CMDBUFFER 0x0b
173 #define DRM_I915_DESTROY_HEAP 0x0c 173 #define DRM_I915_DESTROY_HEAP 0x0c
174 #define DRM_I915_SET_VBLANK_PIPE 0x0d 174 #define DRM_I915_SET_VBLANK_PIPE 0x0d
175 #define DRM_I915_GET_VBLANK_PIPE 0x0e 175 #define DRM_I915_GET_VBLANK_PIPE 0x0e
176 #define DRM_I915_VBLANK_SWAP 0x0f 176 #define DRM_I915_VBLANK_SWAP 0x0f
177 #define DRM_I915_HWS_ADDR 0x11 177 #define DRM_I915_HWS_ADDR 0x11
178 #define DRM_I915_GEM_INIT 0x13 178 #define DRM_I915_GEM_INIT 0x13
179 #define DRM_I915_GEM_EXECBUFFER 0x14 179 #define DRM_I915_GEM_EXECBUFFER 0x14
180 #define DRM_I915_GEM_PIN 0x15 180 #define DRM_I915_GEM_PIN 0x15
181 #define DRM_I915_GEM_UNPIN 0x16 181 #define DRM_I915_GEM_UNPIN 0x16
182 #define DRM_I915_GEM_BUSY 0x17 182 #define DRM_I915_GEM_BUSY 0x17
183 #define DRM_I915_GEM_THROTTLE 0x18 183 #define DRM_I915_GEM_THROTTLE 0x18
184 #define DRM_I915_GEM_ENTERVT 0x19 184 #define DRM_I915_GEM_ENTERVT 0x19
185 #define DRM_I915_GEM_LEAVEVT 0x1a 185 #define DRM_I915_GEM_LEAVEVT 0x1a
186 #define DRM_I915_GEM_CREATE 0x1b 186 #define DRM_I915_GEM_CREATE 0x1b
187 #define DRM_I915_GEM_PREAD 0x1c 187 #define DRM_I915_GEM_PREAD 0x1c
188 #define DRM_I915_GEM_PWRITE 0x1d 188 #define DRM_I915_GEM_PWRITE 0x1d
189 #define DRM_I915_GEM_MMAP 0x1e 189 #define DRM_I915_GEM_MMAP 0x1e
190 #define DRM_I915_GEM_SET_DOMAIN 0x1f 190 #define DRM_I915_GEM_SET_DOMAIN 0x1f
191 #define DRM_I915_GEM_SW_FINISH 0x20 191 #define DRM_I915_GEM_SW_FINISH 0x20
192 #define DRM_I915_GEM_SET_TILING 0x21 192 #define DRM_I915_GEM_SET_TILING 0x21
193 #define DRM_I915_GEM_GET_TILING 0x22 193 #define DRM_I915_GEM_GET_TILING 0x22
194 #define DRM_I915_GEM_GET_APERTURE 0x23 194 #define DRM_I915_GEM_GET_APERTURE 0x23
195 #define DRM_I915_GEM_MMAP_GTT 0x24 195 #define DRM_I915_GEM_MMAP_GTT 0x24
196 #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 196 #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
197 #define DRM_I915_GEM_MADVISE 0x26 197 #define DRM_I915_GEM_MADVISE 0x26
198 #define DRM_I915_OVERLAY_PUT_IMAGE 0x27 198 #define DRM_I915_OVERLAY_PUT_IMAGE 0x27
199 #define DRM_I915_OVERLAY_ATTRS 0x28 199 #define DRM_I915_OVERLAY_ATTRS 0x28
200 #define DRM_I915_GEM_EXECBUFFER2 0x29 200 #define DRM_I915_GEM_EXECBUFFER2 0x29
201 #define DRM_I915_GET_SPRITE_COLORKEY 0x2a 201 #define DRM_I915_GET_SPRITE_COLORKEY 0x2a
202 #define DRM_I915_SET_SPRITE_COLORKEY 0x2b 202 #define DRM_I915_SET_SPRITE_COLORKEY 0x2b
203 203
204 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 204 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
205 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 205 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
206 #define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) 206 #define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
207 #define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) 207 #define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
208 #define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) 208 #define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
209 #define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) 209 #define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
210 #define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) 210 #define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
211 #define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) 211 #define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
212 #define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) 212 #define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
213 #define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) 213 #define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
214 #define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) 214 #define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
215 #define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) 215 #define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
216 #define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) 216 #define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
217 #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 217 #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
218 #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 218 #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
219 #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 219 #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
220 #define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init) 220 #define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
221 #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) 221 #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
222 #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) 222 #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
223 #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) 223 #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
224 #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 224 #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
225 #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 225 #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
226 #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 226 #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
227 #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) 227 #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
228 #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) 228 #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
229 #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) 229 #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
230 #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) 230 #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
231 #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) 231 #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
232 #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) 232 #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
233 #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) 233 #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
234 #define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) 234 #define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
235 #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) 235 #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
236 #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) 236 #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
237 #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) 237 #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
238 #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) 238 #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
239 #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) 239 #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
240 #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) 240 #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
241 #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) 241 #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
242 #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) 242 #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
243 #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) 243 #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
244 #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 244 #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
245 #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 245 #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
246 246
247 /* Allow drivers to submit batchbuffers directly to hardware, relying 247 /* Allow drivers to submit batchbuffers directly to hardware, relying
248 * on the security mechanisms provided by hardware. 248 * on the security mechanisms provided by hardware.
249 */ 249 */
250 typedef struct drm_i915_batchbuffer { 250 typedef struct drm_i915_batchbuffer {
251 int start; /* agp offset */ 251 int start; /* agp offset */
252 int used; /* nr bytes in use */ 252 int used; /* nr bytes in use */
253 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 253 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
254 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 254 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
255 int num_cliprects; /* multipass with multiple cliprects? */ 255 int num_cliprects; /* multipass with multiple cliprects? */
256 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ 256 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
257 } drm_i915_batchbuffer_t; 257 } drm_i915_batchbuffer_t;
258 258
259 /* As above, but pass a pointer to userspace buffer which can be 259 /* As above, but pass a pointer to userspace buffer which can be
260 * validated by the kernel prior to sending to hardware. 260 * validated by the kernel prior to sending to hardware.
261 */ 261 */
262 typedef struct _drm_i915_cmdbuffer { 262 typedef struct _drm_i915_cmdbuffer {
263 char __user *buf; /* pointer to userspace command buffer */ 263 char __user *buf; /* pointer to userspace command buffer */
264 int sz; /* nr bytes in buf */ 264 int sz; /* nr bytes in buf */
265 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 265 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
266 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 266 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
267 int num_cliprects; /* multipass with multiple cliprects? */ 267 int num_cliprects; /* multipass with multiple cliprects? */
268 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ 268 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
269 } drm_i915_cmdbuffer_t; 269 } drm_i915_cmdbuffer_t;
270 270
271 /* Userspace can request & wait on irq's: 271 /* Userspace can request & wait on irq's:
272 */ 272 */
273 typedef struct drm_i915_irq_emit { 273 typedef struct drm_i915_irq_emit {
274 int __user *irq_seq; 274 int __user *irq_seq;
275 } drm_i915_irq_emit_t; 275 } drm_i915_irq_emit_t;
276 276
277 typedef struct drm_i915_irq_wait { 277 typedef struct drm_i915_irq_wait {
278 int irq_seq; 278 int irq_seq;
279 } drm_i915_irq_wait_t; 279 } drm_i915_irq_wait_t;
280 280
281 /* Ioctl to query kernel params: 281 /* Ioctl to query kernel params:
282 */ 282 */
283 #define I915_PARAM_IRQ_ACTIVE 1 283 #define I915_PARAM_IRQ_ACTIVE 1
284 #define I915_PARAM_ALLOW_BATCHBUFFER 2 284 #define I915_PARAM_ALLOW_BATCHBUFFER 2
285 #define I915_PARAM_LAST_DISPATCH 3 285 #define I915_PARAM_LAST_DISPATCH 3
286 #define I915_PARAM_CHIPSET_ID 4 286 #define I915_PARAM_CHIPSET_ID 4
287 #define I915_PARAM_HAS_GEM 5 287 #define I915_PARAM_HAS_GEM 5
288 #define I915_PARAM_NUM_FENCES_AVAIL 6 288 #define I915_PARAM_NUM_FENCES_AVAIL 6
289 #define I915_PARAM_HAS_OVERLAY 7 289 #define I915_PARAM_HAS_OVERLAY 7
290 #define I915_PARAM_HAS_PAGEFLIPPING 8 290 #define I915_PARAM_HAS_PAGEFLIPPING 8
291 #define I915_PARAM_HAS_EXECBUF2 9 291 #define I915_PARAM_HAS_EXECBUF2 9
292 #define I915_PARAM_HAS_BSD 10 292 #define I915_PARAM_HAS_BSD 10
293 #define I915_PARAM_HAS_BLT 11 293 #define I915_PARAM_HAS_BLT 11
294 #define I915_PARAM_HAS_RELAXED_FENCING 12 294 #define I915_PARAM_HAS_RELAXED_FENCING 12
295 #define I915_PARAM_HAS_COHERENT_RINGS 13 295 #define I915_PARAM_HAS_COHERENT_RINGS 13
296 #define I915_PARAM_HAS_EXEC_CONSTANTS 14 296 #define I915_PARAM_HAS_EXEC_CONSTANTS 14
297 #define I915_PARAM_HAS_RELAXED_DELTA 15 297 #define I915_PARAM_HAS_RELAXED_DELTA 15
298 #define I915_PARAM_HAS_GEN7_SOL_RESET 16 298 #define I915_PARAM_HAS_GEN7_SOL_RESET 16
299 #define I915_PARAM_HAS_LLC 17
299 300
300 typedef struct drm_i915_getparam { 301 typedef struct drm_i915_getparam {
301 int param; 302 int param;
302 int __user *value; 303 int __user *value;
303 } drm_i915_getparam_t; 304 } drm_i915_getparam_t;
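
The newly added I915_PARAM_HAS_LLC parameter is read through the same DRM_IOCTL_I915_GETPARAM path as the other flags listed above. A minimal userspace sketch follows; the helper name and the already-opened DRM file descriptor fd are illustrative assumptions, and kernels that predate the flag reject unknown parameters, which callers can simply treat as "no LLC".

#include <sys/ioctl.h>
#include "i915_drm.h"

/* Query whether the GPU shares the CPU's last-level cache. */
static int i915_has_llc(int fd)
{
	int value = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_HAS_LLC,
		.value = &value,
	};

	/* Older kernels return -EINVAL for parameters they do not know;
	 * treat that the same as "no LLC". */
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return 0;

	return value;
}

In practice libdrm's drmIoctl() wrapper is preferable to a raw ioctl() here, since it transparently restarts the call when interrupted by a signal.
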
304 305
305 /* Ioctl to set kernel params: 306 /* Ioctl to set kernel params:
306 */ 307 */
307 #define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 308 #define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
308 #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 309 #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
309 #define I915_SETPARAM_ALLOW_BATCHBUFFER 3 310 #define I915_SETPARAM_ALLOW_BATCHBUFFER 3
310 #define I915_SETPARAM_NUM_USED_FENCES 4 311 #define I915_SETPARAM_NUM_USED_FENCES 4
311 312
312 typedef struct drm_i915_setparam { 313 typedef struct drm_i915_setparam {
313 int param; 314 int param;
314 int value; 315 int value;
315 } drm_i915_setparam_t; 316 } drm_i915_setparam_t;
316 317
317 /* A memory manager for regions of shared memory: 318 /* A memory manager for regions of shared memory:
318 */ 319 */
319 #define I915_MEM_REGION_AGP 1 320 #define I915_MEM_REGION_AGP 1
320 321
321 typedef struct drm_i915_mem_alloc { 322 typedef struct drm_i915_mem_alloc {
322 int region; 323 int region;
323 int alignment; 324 int alignment;
324 int size; 325 int size;
325 int __user *region_offset; /* offset from start of fb or agp */ 326 int __user *region_offset; /* offset from start of fb or agp */
326 } drm_i915_mem_alloc_t; 327 } drm_i915_mem_alloc_t;
327 328
328 typedef struct drm_i915_mem_free { 329 typedef struct drm_i915_mem_free {
329 int region; 330 int region;
330 int region_offset; 331 int region_offset;
331 } drm_i915_mem_free_t; 332 } drm_i915_mem_free_t;
332 333
333 typedef struct drm_i915_mem_init_heap { 334 typedef struct drm_i915_mem_init_heap {
334 int region; 335 int region;
335 int size; 336 int size;
336 int start; 337 int start;
337 } drm_i915_mem_init_heap_t; 338 } drm_i915_mem_init_heap_t;
338 339
339 /* Allow memory manager to be torn down and re-initialized (eg on 340 /* Allow memory manager to be torn down and re-initialized (eg on
340 * rotate): 341 * rotate):
341 */ 342 */
342 typedef struct drm_i915_mem_destroy_heap { 343 typedef struct drm_i915_mem_destroy_heap {
343 int region; 344 int region;
344 } drm_i915_mem_destroy_heap_t; 345 } drm_i915_mem_destroy_heap_t;
345 346
346 /* Allow X server to configure which pipes to monitor for vblank signals 347 /* Allow X server to configure which pipes to monitor for vblank signals
347 */ 348 */
348 #define DRM_I915_VBLANK_PIPE_A 1 349 #define DRM_I915_VBLANK_PIPE_A 1
349 #define DRM_I915_VBLANK_PIPE_B 2 350 #define DRM_I915_VBLANK_PIPE_B 2
350 351
351 typedef struct drm_i915_vblank_pipe { 352 typedef struct drm_i915_vblank_pipe {
352 int pipe; 353 int pipe;
353 } drm_i915_vblank_pipe_t; 354 } drm_i915_vblank_pipe_t;
354 355
355 /* Schedule buffer swap at given vertical blank: 356 /* Schedule buffer swap at given vertical blank:
356 */ 357 */
357 typedef struct drm_i915_vblank_swap { 358 typedef struct drm_i915_vblank_swap {
358 drm_drawable_t drawable; 359 drm_drawable_t drawable;
359 enum drm_vblank_seq_type seqtype; 360 enum drm_vblank_seq_type seqtype;
360 unsigned int sequence; 361 unsigned int sequence;
361 } drm_i915_vblank_swap_t; 362 } drm_i915_vblank_swap_t;
362 363
363 typedef struct drm_i915_hws_addr { 364 typedef struct drm_i915_hws_addr {
364 __u64 addr; 365 __u64 addr;
365 } drm_i915_hws_addr_t; 366 } drm_i915_hws_addr_t;
366 367
367 struct drm_i915_gem_init { 368 struct drm_i915_gem_init {
368 /** 369 /**
369 * Beginning offset in the GTT to be managed by the DRM memory 370 * Beginning offset in the GTT to be managed by the DRM memory
370 * manager. 371 * manager.
371 */ 372 */
372 __u64 gtt_start; 373 __u64 gtt_start;
373 /** 374 /**
374 * Ending offset in the GTT to be managed by the DRM memory 375 * Ending offset in the GTT to be managed by the DRM memory
375 * manager. 376 * manager.
376 */ 377 */
377 __u64 gtt_end; 378 __u64 gtt_end;
378 }; 379 };
379 380
380 struct drm_i915_gem_create { 381 struct drm_i915_gem_create {
381 /** 382 /**
382 * Requested size for the object. 383 * Requested size for the object.
383 * 384 *
384 * The (page-aligned) allocated size for the object will be returned. 385 * The (page-aligned) allocated size for the object will be returned.
385 */ 386 */
386 __u64 size; 387 __u64 size;
387 /** 388 /**
388 * Returned handle for the object. 389 * Returned handle for the object.
389 * 390 *
390 * Object handles are nonzero. 391 * Object handles are nonzero.
391 */ 392 */
392 __u32 handle; 393 __u32 handle;
393 __u32 pad; 394 __u32 pad;
394 }; 395 };
395 396
396 struct drm_i915_gem_pread { 397 struct drm_i915_gem_pread {
397 /** Handle for the object being read. */ 398 /** Handle for the object being read. */
398 __u32 handle; 399 __u32 handle;
399 __u32 pad; 400 __u32 pad;
400 /** Offset into the object to read from */ 401 /** Offset into the object to read from */
401 __u64 offset; 402 __u64 offset;
402 /** Length of data to read */ 403 /** Length of data to read */
403 __u64 size; 404 __u64 size;
404 /** 405 /**
405 * Pointer to write the data into. 406 * Pointer to write the data into.
406 * 407 *
407 * This is a fixed-size type for 32/64 compatibility. 408 * This is a fixed-size type for 32/64 compatibility.
408 */ 409 */
409 __u64 data_ptr; 410 __u64 data_ptr;
410 }; 411 };
411 412
412 struct drm_i915_gem_pwrite { 413 struct drm_i915_gem_pwrite {
413 /** Handle for the object being written to. */ 414 /** Handle for the object being written to. */
414 __u32 handle; 415 __u32 handle;
415 __u32 pad; 416 __u32 pad;
416 /** Offset into the object to write to */ 417 /** Offset into the object to write to */
417 __u64 offset; 418 __u64 offset;
418 /** Length of data to write */ 419 /** Length of data to write */
419 __u64 size; 420 __u64 size;
420 /** 421 /**
421 * Pointer to read the data from. 422 * Pointer to read the data from.
422 * 423 *
423 * This is a fixed-size type for 32/64 compatibility. 424 * This is a fixed-size type for 32/64 compatibility.
424 */ 425 */
425 __u64 data_ptr; 426 __u64 data_ptr;
426 }; 427 };
427 428
428 struct drm_i915_gem_mmap { 429 struct drm_i915_gem_mmap {
429 /** Handle for the object being mapped. */ 430 /** Handle for the object being mapped. */
430 __u32 handle; 431 __u32 handle;
431 __u32 pad; 432 __u32 pad;
432 /** Offset in the object to map. */ 433 /** Offset in the object to map. */
433 __u64 offset; 434 __u64 offset;
434 /** 435 /**
435 * Length of data to map. 436 * Length of data to map.
436 * 437 *
437 * The value will be page-aligned. 438 * The value will be page-aligned.
438 */ 439 */
439 __u64 size; 440 __u64 size;
440 /** 441 /**
441 * Returned pointer the data was mapped at. 442 * Returned pointer the data was mapped at.
442 * 443 *
443 * This is a fixed-size type for 32/64 compatibility. 444 * This is a fixed-size type for 32/64 compatibility.
444 */ 445 */
445 __u64 addr_ptr; 446 __u64 addr_ptr;
446 }; 447 };
447 448
448 struct drm_i915_gem_mmap_gtt { 449 struct drm_i915_gem_mmap_gtt {
449 /** Handle for the object being mapped. */ 450 /** Handle for the object being mapped. */
450 __u32 handle; 451 __u32 handle;
451 __u32 pad; 452 __u32 pad;
452 /** 453 /**
453 * Fake offset to use for subsequent mmap call 454 * Fake offset to use for subsequent mmap call
454 * 455 *
455 * This is a fixed-size type for 32/64 compatibility. 456 * This is a fixed-size type for 32/64 compatibility.
456 */ 457 */
457 __u64 offset; 458 __u64 offset;
458 }; 459 };
459 460
460 struct drm_i915_gem_set_domain { 461 struct drm_i915_gem_set_domain {
461 /** Handle for the object */ 462 /** Handle for the object */
462 __u32 handle; 463 __u32 handle;
463 464
464 /** New read domains */ 465 /** New read domains */
465 __u32 read_domains; 466 __u32 read_domains;
466 467
467 /** New write domain */ 468 /** New write domain */
468 __u32 write_domain; 469 __u32 write_domain;
469 }; 470 };
470 471
471 struct drm_i915_gem_sw_finish { 472 struct drm_i915_gem_sw_finish {
472 /** Handle for the object */ 473 /** Handle for the object */
473 __u32 handle; 474 __u32 handle;
474 }; 475 };
475 476
476 struct drm_i915_gem_relocation_entry { 477 struct drm_i915_gem_relocation_entry {
477 /** 478 /**
478 * Handle of the buffer being pointed to by this relocation entry. 479 * Handle of the buffer being pointed to by this relocation entry.
479 * 480 *
480 * It's appealing to make this be an index into the mm_validate_entry 481 * It's appealing to make this be an index into the mm_validate_entry
481 * list to refer to the buffer, but this allows the driver to create 482 * list to refer to the buffer, but this allows the driver to create
482 * a relocation list for state buffers and not re-write it per 483 * a relocation list for state buffers and not re-write it per
483 * exec using the buffer. 484 * exec using the buffer.
484 */ 485 */
485 __u32 target_handle; 486 __u32 target_handle;
486 487
487 /** 488 /**
488 * Value to be added to the offset of the target buffer to make up 489 * Value to be added to the offset of the target buffer to make up
489 * the relocation entry. 490 * the relocation entry.
490 */ 491 */
491 __u32 delta; 492 __u32 delta;
492 493
493 /** Offset in the buffer the relocation entry will be written into */ 494 /** Offset in the buffer the relocation entry will be written into */
494 __u64 offset; 495 __u64 offset;
495 496
496 /** 497 /**
497 * Offset value of the target buffer that the relocation entry was last 498 * Offset value of the target buffer that the relocation entry was last
498 * written as. 499 * written as.
499 * 500 *
500 * If the buffer has the same offset as last time, we can skip syncing 501 * If the buffer has the same offset as last time, we can skip syncing
501 * and writing the relocation. This value is written back out by 502 * and writing the relocation. This value is written back out by
502 * the execbuffer ioctl when the relocation is written. 503 * the execbuffer ioctl when the relocation is written.
503 */ 504 */
504 __u64 presumed_offset; 505 __u64 presumed_offset;
505 506
506 /** 507 /**
507 * Target memory domains read by this operation. 508 * Target memory domains read by this operation.
508 */ 509 */
509 __u32 read_domains; 510 __u32 read_domains;
510 511
511 /** 512 /**
512 * Target memory domains written by this operation. 513 * Target memory domains written by this operation.
513 * 514 *
514 * Note that only one domain may be written by the whole 515 * Note that only one domain may be written by the whole
515 * execbuffer operation, so that where there are conflicts, 516 * execbuffer operation, so that where there are conflicts,
516 * the application will get -EINVAL back. 517 * the application will get -EINVAL back.
517 */ 518 */
518 __u32 write_domain; 519 __u32 write_domain;
519 }; 520 };
520 521
521 /** @{ 522 /** @{
522 * Intel memory domains 523 * Intel memory domains
523 * 524 *
524 * Most of these just align with the various caches in 525 * Most of these just align with the various caches in
525 * the system and are used to flush and invalidate as 526 * the system and are used to flush and invalidate as
526 * objects end up cached in different domains. 527 * objects end up cached in different domains.
527 */ 528 */
528 /** CPU cache */ 529 /** CPU cache */
529 #define I915_GEM_DOMAIN_CPU 0x00000001 530 #define I915_GEM_DOMAIN_CPU 0x00000001
530 /** Render cache, used by 2D and 3D drawing */ 531 /** Render cache, used by 2D and 3D drawing */
531 #define I915_GEM_DOMAIN_RENDER 0x00000002 532 #define I915_GEM_DOMAIN_RENDER 0x00000002
532 /** Sampler cache, used by texture engine */ 533 /** Sampler cache, used by texture engine */
533 #define I915_GEM_DOMAIN_SAMPLER 0x00000004 534 #define I915_GEM_DOMAIN_SAMPLER 0x00000004
534 /** Command queue, used to load batch buffers */ 535 /** Command queue, used to load batch buffers */
535 #define I915_GEM_DOMAIN_COMMAND 0x00000008 536 #define I915_GEM_DOMAIN_COMMAND 0x00000008
536 /** Instruction cache, used by shader programs */ 537 /** Instruction cache, used by shader programs */
537 #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 538 #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
538 /** Vertex address cache */ 539 /** Vertex address cache */
539 #define I915_GEM_DOMAIN_VERTEX 0x00000020 540 #define I915_GEM_DOMAIN_VERTEX 0x00000020
540 /** GTT domain - aperture and scanout */ 541 /** GTT domain - aperture and scanout */
541 #define I915_GEM_DOMAIN_GTT 0x00000040 542 #define I915_GEM_DOMAIN_GTT 0x00000040
542 /** @} */ 543 /** @} */
543 544
544 struct drm_i915_gem_exec_object { 545 struct drm_i915_gem_exec_object {
545 /** 546 /**
546 * User's handle for a buffer to be bound into the GTT for this 547 * User's handle for a buffer to be bound into the GTT for this
547 * operation. 548 * operation.
548 */ 549 */
549 __u32 handle; 550 __u32 handle;
550 551
551 /** Number of relocations to be performed on this buffer */ 552 /** Number of relocations to be performed on this buffer */
552 __u32 relocation_count; 553 __u32 relocation_count;
553 /** 554 /**
554 * Pointer to array of struct drm_i915_gem_relocation_entry containing 555 * Pointer to array of struct drm_i915_gem_relocation_entry containing
555 * the relocations to be performed in this buffer. 556 * the relocations to be performed in this buffer.
556 */ 557 */
557 __u64 relocs_ptr; 558 __u64 relocs_ptr;
558 559
559 /** Required alignment in graphics aperture */ 560 /** Required alignment in graphics aperture */
560 __u64 alignment; 561 __u64 alignment;
561 562
562 /** 563 /**
563 * Returned value of the updated offset of the object, for future 564 * Returned value of the updated offset of the object, for future
564 * presumed_offset writes. 565 * presumed_offset writes.
565 */ 566 */
566 __u64 offset; 567 __u64 offset;
567 }; 568 };
568 569
569 struct drm_i915_gem_execbuffer { 570 struct drm_i915_gem_execbuffer {
570 /** 571 /**
571 * List of buffers to be validated with their relocations to be 572 * List of buffers to be validated with their relocations to be
572 * performed on them. 573 * performed on them.
573 * 574 *
574 * This is a pointer to an array of struct drm_i915_gem_validate_entry. 575 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
575 * 576 *
576 * These buffers must be listed in an order such that all relocations 577 * These buffers must be listed in an order such that all relocations
577 * a buffer is performing refer to buffers that have already appeared 578 * a buffer is performing refer to buffers that have already appeared
578 * in the validate list. 579 * in the validate list.
579 */ 580 */
580 __u64 buffers_ptr; 581 __u64 buffers_ptr;
581 __u32 buffer_count; 582 __u32 buffer_count;
582 583
583 /** Offset in the batchbuffer to start execution from. */ 584 /** Offset in the batchbuffer to start execution from. */
584 __u32 batch_start_offset; 585 __u32 batch_start_offset;
585 /** Bytes used in batchbuffer from batch_start_offset */ 586 /** Bytes used in batchbuffer from batch_start_offset */
586 __u32 batch_len; 587 __u32 batch_len;
587 __u32 DR1; 588 __u32 DR1;
588 __u32 DR4; 589 __u32 DR4;
589 __u32 num_cliprects; 590 __u32 num_cliprects;
590 /** This is a struct drm_clip_rect *cliprects */ 591 /** This is a struct drm_clip_rect *cliprects */
591 __u64 cliprects_ptr; 592 __u64 cliprects_ptr;
592 }; 593 };
593 594
594 struct drm_i915_gem_exec_object2 { 595 struct drm_i915_gem_exec_object2 {
595 /** 596 /**
596 * User's handle for a buffer to be bound into the GTT for this 597 * User's handle for a buffer to be bound into the GTT for this
597 * operation. 598 * operation.
598 */ 599 */
599 __u32 handle; 600 __u32 handle;
600 601
601 /** Number of relocations to be performed on this buffer */ 602 /** Number of relocations to be performed on this buffer */
602 __u32 relocation_count; 603 __u32 relocation_count;
603 /** 604 /**
604 * Pointer to array of struct drm_i915_gem_relocation_entry containing 605 * Pointer to array of struct drm_i915_gem_relocation_entry containing
605 * the relocations to be performed in this buffer. 606 * the relocations to be performed in this buffer.
606 */ 607 */
607 __u64 relocs_ptr; 608 __u64 relocs_ptr;
608 609
609 /** Required alignment in graphics aperture */ 610 /** Required alignment in graphics aperture */
610 __u64 alignment; 611 __u64 alignment;
611 612
612 /** 613 /**
613 * Returned value of the updated offset of the object, for future 614 * Returned value of the updated offset of the object, for future
614 * presumed_offset writes. 615 * presumed_offset writes.
615 */ 616 */
616 __u64 offset; 617 __u64 offset;
617 618
618 #define EXEC_OBJECT_NEEDS_FENCE (1<<0) 619 #define EXEC_OBJECT_NEEDS_FENCE (1<<0)
619 __u64 flags; 620 __u64 flags;
620 __u64 rsvd1; 621 __u64 rsvd1;
621 __u64 rsvd2; 622 __u64 rsvd2;
622 }; 623 };
623 624
624 struct drm_i915_gem_execbuffer2 { 625 struct drm_i915_gem_execbuffer2 {
625 /** 626 /**
626 * List of gem_exec_object2 structs 627 * List of gem_exec_object2 structs
627 */ 628 */
628 __u64 buffers_ptr; 629 __u64 buffers_ptr;
629 __u32 buffer_count; 630 __u32 buffer_count;
630 631
631 /** Offset in the batchbuffer to start execution from. */ 632 /** Offset in the batchbuffer to start execution from. */
632 __u32 batch_start_offset; 633 __u32 batch_start_offset;
633 /** Bytes used in batchbuffer from batch_start_offset */ 634 /** Bytes used in batchbuffer from batch_start_offset */
634 __u32 batch_len; 635 __u32 batch_len;
635 __u32 DR1; 636 __u32 DR1;
636 __u32 DR4; 637 __u32 DR4;
637 __u32 num_cliprects; 638 __u32 num_cliprects;
638 /** This is a struct drm_clip_rect *cliprects */ 639 /** This is a struct drm_clip_rect *cliprects */
639 __u64 cliprects_ptr; 640 __u64 cliprects_ptr;
640 #define I915_EXEC_RING_MASK (7<<0) 641 #define I915_EXEC_RING_MASK (7<<0)
641 #define I915_EXEC_DEFAULT (0<<0) 642 #define I915_EXEC_DEFAULT (0<<0)
642 #define I915_EXEC_RENDER (1<<0) 643 #define I915_EXEC_RENDER (1<<0)
643 #define I915_EXEC_BSD (2<<0) 644 #define I915_EXEC_BSD (2<<0)
644 #define I915_EXEC_BLT (3<<0) 645 #define I915_EXEC_BLT (3<<0)
645 646
646 /* Used for switching the constants addressing mode on gen4+ RENDER ring. 647 /* Used for switching the constants addressing mode on gen4+ RENDER ring.
647 * Gen6+ only supports relative addressing to dynamic state (default) and 648 * Gen6+ only supports relative addressing to dynamic state (default) and
648 * absolute addressing. 649 * absolute addressing.
649 * 650 *
650 * These flags are ignored for the BSD and BLT rings. 651 * These flags are ignored for the BSD and BLT rings.
651 */ 652 */
652 #define I915_EXEC_CONSTANTS_MASK (3<<6) 653 #define I915_EXEC_CONSTANTS_MASK (3<<6)
653 #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ 654 #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
654 #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) 655 #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
655 #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ 656 #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
656 __u64 flags; 657 __u64 flags;
657 __u64 rsvd1; 658 __u64 rsvd1;
658 __u64 rsvd2; 659 __u64 rsvd2;
659 }; 660 };
660 661
661 /** Resets the SO write offset registers for transform feedback on gen7. */ 662 /** Resets the SO write offset registers for transform feedback on gen7. */
662 #define I915_EXEC_GEN7_SOL_RESET (1<<8) 663 #define I915_EXEC_GEN7_SOL_RESET (1<<8)
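A minimal userspace sketch of submitting work through this interface, assuming fd is an open i915 DRM file descriptor, the last entry of objects[] is the batch buffer, and error handling is simply passed back to the caller.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int exec_batch(int fd, struct drm_i915_gem_exec_object2 *objects,
		      uint32_t count, uint32_t batch_len)
{
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objects;  /* batch is the last entry */
	execbuf.buffer_count = count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_len;             /* bytes used in the batch */
	execbuf.flags = I915_EXEC_RENDER;          /* run on the render ring */

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}

In practice (e.g. in libdrm) the call is retried on EINTR/EAGAIN and the updated offset field of each exec object is read back to seed future presumed_offset values.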
663 664
664 struct drm_i915_gem_pin { 665 struct drm_i915_gem_pin {
665 /** Handle of the buffer to be pinned. */ 666 /** Handle of the buffer to be pinned. */
666 __u32 handle; 667 __u32 handle;
667 __u32 pad; 668 __u32 pad;
668 669
669 /** alignment required within the aperture */ 670 /** alignment required within the aperture */
670 __u64 alignment; 671 __u64 alignment;
671 672
672 /** Returned GTT offset of the buffer. */ 673 /** Returned GTT offset of the buffer. */
673 __u64 offset; 674 __u64 offset;
674 }; 675 };
675 676
676 struct drm_i915_gem_unpin { 677 struct drm_i915_gem_unpin {
677 /** Handle of the buffer to be unpinned. */ 678 /** Handle of the buffer to be unpinned. */
678 __u32 handle; 679 __u32 handle;
679 __u32 pad; 680 __u32 pad;
680 }; 681 };
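A sketch of pinning a buffer at a stable GTT address and releasing it again; these ioctls are privileged (historically used by the X driver for scanout buffers), and fd, handle and the helper name are assumptions for illustration.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int pin_then_unpin(int fd, uint32_t handle, uint64_t alignment,
			  uint64_t *gtt_offset)
{
	struct drm_i915_gem_pin pin;
	struct drm_i915_gem_unpin unpin;
	int ret;

	memset(&pin, 0, sizeof(pin));
	pin.handle = handle;
	pin.alignment = alignment;
	ret = ioctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin);
	if (ret)
		return ret;
	*gtt_offset = pin.offset;	/* stays valid until the buffer is unpinned */

	/* ... use the fixed GTT offset ... */

	memset(&unpin, 0, sizeof(unpin));
	unpin.handle = handle;
	return ioctl(fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
}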
681 682
682 struct drm_i915_gem_busy { 683 struct drm_i915_gem_busy {
683 /** Handle of the buffer to check for busy */ 684 /** Handle of the buffer to check for busy */
684 __u32 handle; 685 __u32 handle;
685 686
686 /** Return busy status (1 if busy, 0 if idle) */ 687 /** Return busy status (1 if busy, 0 if idle) */
687 __u32 busy; 688 __u32 busy;
688 }; 689 };
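A small sketch of the busy query, assuming an open i915 DRM fd and a valid GEM handle.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Returns 1 if the GPU is still using the buffer, 0 if it is idle,
 * or -1 if the ioctl itself failed. */
static int buffer_is_busy(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy;

	memset(&busy, 0, sizeof(busy));
	busy.handle = handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
		return -1;
	return busy.busy != 0;
}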
689 690
690 #define I915_TILING_NONE 0 691 #define I915_TILING_NONE 0
691 #define I915_TILING_X 1 692 #define I915_TILING_X 1
692 #define I915_TILING_Y 2 693 #define I915_TILING_Y 2
693 694
694 #define I915_BIT_6_SWIZZLE_NONE 0 695 #define I915_BIT_6_SWIZZLE_NONE 0
695 #define I915_BIT_6_SWIZZLE_9 1 696 #define I915_BIT_6_SWIZZLE_9 1
696 #define I915_BIT_6_SWIZZLE_9_10 2 697 #define I915_BIT_6_SWIZZLE_9_10 2
697 #define I915_BIT_6_SWIZZLE_9_11 3 698 #define I915_BIT_6_SWIZZLE_9_11 3
698 #define I915_BIT_6_SWIZZLE_9_10_11 4 699 #define I915_BIT_6_SWIZZLE_9_10_11 4
699 /* Not seen by userland */ 700 /* Not seen by userland */
700 #define I915_BIT_6_SWIZZLE_UNKNOWN 5 701 #define I915_BIT_6_SWIZZLE_UNKNOWN 5
701 /* Seen by userland. */ 702 /* Seen by userland. */
702 #define I915_BIT_6_SWIZZLE_9_17 6 703 #define I915_BIT_6_SWIZZLE_9_17 6
703 #define I915_BIT_6_SWIZZLE_9_10_17 7 704 #define I915_BIT_6_SWIZZLE_9_10_17 7
704 705
705 struct drm_i915_gem_set_tiling { 706 struct drm_i915_gem_set_tiling {
706 /** Handle of the buffer to have its tiling state updated */ 707 /** Handle of the buffer to have its tiling state updated */
707 __u32 handle; 708 __u32 handle;
708 709
709 /** 710 /**
710 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 711 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
711 * I915_TILING_Y). 712 * I915_TILING_Y).
712 * 713 *
713 * This value is to be set on request, and will be updated by the 714 * This value is to be set on request, and will be updated by the
714 * kernel on successful return with the actual chosen tiling layout. 715 * kernel on successful return with the actual chosen tiling layout.
715 * 716 *
716 * The tiling mode may be demoted to I915_TILING_NONE when the system 717 * The tiling mode may be demoted to I915_TILING_NONE when the system
717 * has bit 6 swizzling that can't be managed correctly by GEM. 718 * has bit 6 swizzling that can't be managed correctly by GEM.
718 * 719 *
719 * Buffer contents become undefined when changing tiling_mode. 720 * Buffer contents become undefined when changing tiling_mode.
720 */ 721 */
721 __u32 tiling_mode; 722 __u32 tiling_mode;
722 723
723 /** 724 /**
724 * Stride in bytes for the object when in I915_TILING_X or 725 * Stride in bytes for the object when in I915_TILING_X or
725 * I915_TILING_Y. 726 * I915_TILING_Y.
726 */ 727 */
727 __u32 stride; 728 __u32 stride;
728 729
729 /** 730 /**
730 * Returned address bit 6 swizzling required for CPU access through 731 * Returned address bit 6 swizzling required for CPU access through
731 * mmap mapping. 732 * mmap mapping.
732 */ 733 */
733 __u32 swizzle_mode; 734 __u32 swizzle_mode;
734 }; 735 };
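A sketch of requesting X tiling for a buffer; the stride value is an assumption and must match what the tiling mode allows. Note that the kernel writes the tiling mode it actually chose (possibly I915_TILING_NONE) and the required swizzle back into the same struct.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_x_tiling(int fd, uint32_t handle, uint32_t stride,
			uint32_t *swizzle_out)
{
	struct drm_i915_gem_set_tiling st;

	memset(&st, 0, sizeof(st));
	st.handle = handle;
	st.tiling_mode = I915_TILING_X;
	st.stride = stride;		/* bytes per row */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st))
		return -1;

	*swizzle_out = st.swizzle_mode;	/* needed for CPU access via mmap */
	return st.tiling_mode;		/* may have been demoted to NONE */
}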
735 736
736 struct drm_i915_gem_get_tiling { 737 struct drm_i915_gem_get_tiling {
737 /** Handle of the buffer to get tiling state for. */ 738 /** Handle of the buffer to get tiling state for. */
738 __u32 handle; 739 __u32 handle;
739 740
740 /** 741 /**
741 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 742 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
742 * I915_TILING_Y). 743 * I915_TILING_Y).
743 */ 744 */
744 __u32 tiling_mode; 745 __u32 tiling_mode;
745 746
746 /** 747 /**
747 * Returned address bit 6 swizzling required for CPU access through 748 * Returned address bit 6 swizzling required for CPU access through
748 * mmap mapping. 749 * mmap mapping.
749 */ 750 */
750 __u32 swizzle_mode; 751 __u32 swizzle_mode;
751 }; 752 };
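The read-only counterpart, sketched the same way: query the current tiling and swizzle state of a handle.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int query_tiling(int fd, uint32_t handle,
			uint32_t *tiling, uint32_t *swizzle)
{
	struct drm_i915_gem_get_tiling gt;

	memset(&gt, 0, sizeof(gt));
	gt.handle = handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &gt))
		return -1;
	*tiling = gt.tiling_mode;	/* I915_TILING_NONE/X/Y */
	*swizzle = gt.swizzle_mode;	/* bit-6 swizzle for CPU mmap access */
	return 0;
}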
752 753
753 struct drm_i915_gem_get_aperture { 754 struct drm_i915_gem_get_aperture {
754 /** Total size of the aperture used by i915_gem_execbuffer, in bytes */ 755 /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
755 __u64 aper_size; 756 __u64 aper_size;
756 757
757 /** 758 /**
758 * Available space in the aperture used by i915_gem_execbuffer, in 759 * Available space in the aperture used by i915_gem_execbuffer, in
759 * bytes 760 * bytes
760 */ 761 */
761 __u64 aper_available_size; 762 __u64 aper_available_size;
762 }; 763 };
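A sketch of the aperture query; userspace typically uses the available size to decide how much it can keep resident for execbuffer.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int query_aperture(int fd, uint64_t *total, uint64_t *avail)
{
	struct drm_i915_gem_get_aperture ap;

	memset(&ap, 0, sizeof(ap));
	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &ap))
		return -1;
	*total = ap.aper_size;			/* aperture usable by execbuffer */
	*avail = ap.aper_available_size;	/* total minus pinned buffers */
	return 0;
}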
763 764
764 struct drm_i915_get_pipe_from_crtc_id { 765 struct drm_i915_get_pipe_from_crtc_id {
765 /** ID of CRTC being requested **/ 766 /** ID of CRTC being requested **/
766 __u32 crtc_id; 767 __u32 crtc_id;
767 768
768 /** pipe of requested CRTC **/ 769 /** pipe of requested CRTC **/
769 __u32 pipe; 770 __u32 pipe;
770 }; 771 };
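A sketch of mapping a KMS CRTC object id to the hardware pipe index; crtc_id would come from a prior KMS resources query, which is not shown here.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int crtc_to_pipe(int fd, uint32_t crtc_id, uint32_t *pipe)
{
	struct drm_i915_get_pipe_from_crtc_id get_pipe;

	memset(&get_pipe, 0, sizeof(get_pipe));
	get_pipe.crtc_id = crtc_id;	/* KMS CRTC object id */
	if (ioctl(fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID, &get_pipe))
		return -1;
	*pipe = get_pipe.pipe;		/* hardware pipe: 0, 1, ... */
	return 0;
}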
771 772
772 #define I915_MADV_WILLNEED 0 773 #define I915_MADV_WILLNEED 0
773 #define I915_MADV_DONTNEED 1 774 #define I915_MADV_DONTNEED 1
774 #define __I915_MADV_PURGED 2 /* internal state */ 775 #define __I915_MADV_PURGED 2 /* internal state */
775 776
776 struct drm_i915_gem_madvise { 777 struct drm_i915_gem_madvise {
777 /** Handle of the buffer to change the backing store advice */ 778 /** Handle of the buffer to change the backing store advice */
778 __u32 handle; 779 __u32 handle;
779 780
780 /* Advice: either the buffer will be needed again in the near future, 781 /* Advice: either the buffer will be needed again in the near future,
781 * or won't be and could be discarded under memory pressure. 782 * or won't be and could be discarded under memory pressure.
782 */ 783 */
783 __u32 madv; 784 __u32 madv;
784 785
785 /** Whether the backing store still exists. */ 786 /** Whether the backing store still exists. */
786 __u32 retained; 787 __u32 retained;
787 }; 788 };
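A sketch of marking a cached buffer purgeable. Before reusing such a buffer, userspace sets I915_MADV_WILLNEED again and checks retained; if it is 0, the old contents are gone and must be regenerated.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Returns 1 if the backing store still exists, 0 if it was already
 * purged, -1 if the ioctl failed. */
static int mark_purgeable(int fd, uint32_t handle)
{
	struct drm_i915_gem_madvise madv;

	memset(&madv, 0, sizeof(madv));
	madv.handle = handle;
	madv.madv = I915_MADV_DONTNEED;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -1;
	return madv.retained;
}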
788 789
789 /* flags */ 790 /* flags */
790 #define I915_OVERLAY_TYPE_MASK 0xff 791 #define I915_OVERLAY_TYPE_MASK 0xff
791 #define I915_OVERLAY_YUV_PLANAR 0x01 792 #define I915_OVERLAY_YUV_PLANAR 0x01
792 #define I915_OVERLAY_YUV_PACKED 0x02 793 #define I915_OVERLAY_YUV_PACKED 0x02
793 #define I915_OVERLAY_RGB 0x03 794 #define I915_OVERLAY_RGB 0x03
794 795
795 #define I915_OVERLAY_DEPTH_MASK 0xff00 796 #define I915_OVERLAY_DEPTH_MASK 0xff00
796 #define I915_OVERLAY_RGB24 0x1000 797 #define I915_OVERLAY_RGB24 0x1000
797 #define I915_OVERLAY_RGB16 0x2000 798 #define I915_OVERLAY_RGB16 0x2000
798 #define I915_OVERLAY_RGB15 0x3000 799 #define I915_OVERLAY_RGB15 0x3000
799 #define I915_OVERLAY_YUV422 0x0100 800 #define I915_OVERLAY_YUV422 0x0100
800 #define I915_OVERLAY_YUV411 0x0200 801 #define I915_OVERLAY_YUV411 0x0200
801 #define I915_OVERLAY_YUV420 0x0300 802 #define I915_OVERLAY_YUV420 0x0300
802 #define I915_OVERLAY_YUV410 0x0400 803 #define I915_OVERLAY_YUV410 0x0400
803 804
804 #define I915_OVERLAY_SWAP_MASK 0xff0000 805 #define I915_OVERLAY_SWAP_MASK 0xff0000
805 #define I915_OVERLAY_NO_SWAP 0x000000 806 #define I915_OVERLAY_NO_SWAP 0x000000
806 #define I915_OVERLAY_UV_SWAP 0x010000 807 #define I915_OVERLAY_UV_SWAP 0x010000
807 #define I915_OVERLAY_Y_SWAP 0x020000 808 #define I915_OVERLAY_Y_SWAP 0x020000
808 #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 809 #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
809 810
810 #define I915_OVERLAY_FLAGS_MASK 0xff000000 811 #define I915_OVERLAY_FLAGS_MASK 0xff000000
811 #define I915_OVERLAY_ENABLE 0x01000000 812 #define I915_OVERLAY_ENABLE 0x01000000
812 813
813 struct drm_intel_overlay_put_image { 814 struct drm_intel_overlay_put_image {
814 /* various flags and src format description */ 815 /* various flags and src format description */
815 __u32 flags; 816 __u32 flags;
816 /* source picture description */ 817 /* source picture description */
817 __u32 bo_handle; 818 __u32 bo_handle;
818 /* stride values and offsets are in bytes, buffer relative */ 819 /* stride values and offsets are in bytes, buffer relative */
819 __u16 stride_Y; /* stride for packed formats */ 820 __u16 stride_Y; /* stride for packed formats */
820 __u16 stride_UV; 821 __u16 stride_UV;
821 __u32 offset_Y; /* offset for packed formats */ 822 __u32 offset_Y; /* offset for packed formats */
822 __u32 offset_U; 823 __u32 offset_U;
823 __u32 offset_V; 824 __u32 offset_V;
824 /* in pixels */ 825 /* in pixels */
825 __u16 src_width; 826 __u16 src_width;
826 __u16 src_height; 827 __u16 src_height;
827 /* to compensate the scaling factors for partially covered surfaces */ 828 /* to compensate the scaling factors for partially covered surfaces */
828 __u16 src_scan_width; 829 __u16 src_scan_width;
829 __u16 src_scan_height; 830 __u16 src_scan_height;
830 /* output crtc description */ 831 /* output crtc description */
831 __u32 crtc_id; 832 __u32 crtc_id;
832 __u16 dst_x; 833 __u16 dst_x;
833 __u16 dst_y; 834 __u16 dst_y;
834 __u16 dst_width; 835 __u16 dst_width;
835 __u16 dst_height; 836 __u16 dst_height;
836 }; 837 };
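A rough sketch of displaying a packed YUV 4:2:2 image from a GEM buffer on the overlay; the geometry parameters and the assumption that the image starts at offset 0 of the buffer are purely illustrative.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int show_overlay(int fd, uint32_t bo_handle, uint32_t crtc_id,
			uint16_t src_w, uint16_t src_h, uint16_t stride,
			uint16_t dst_x, uint16_t dst_y,
			uint16_t dst_w, uint16_t dst_h)
{
	struct drm_intel_overlay_put_image img;

	memset(&img, 0, sizeof(img));
	img.flags = I915_OVERLAY_ENABLE |
		    I915_OVERLAY_YUV_PACKED | I915_OVERLAY_YUV422;
	img.bo_handle = bo_handle;
	img.stride_Y = stride;		/* bytes per line of the packed image */
	img.offset_Y = 0;		/* image starts at the top of the bo */
	img.src_width = src_w;
	img.src_height = src_h;
	img.src_scan_width = src_w;	/* whole surface is visible */
	img.src_scan_height = src_h;
	img.crtc_id = crtc_id;
	img.dst_x = dst_x;
	img.dst_y = dst_y;
	img.dst_width = dst_w;
	img.dst_height = dst_h;

	return ioctl(fd, DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, &img);
}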
837 838
838 /* flags */ 839 /* flags */
839 #define I915_OVERLAY_UPDATE_ATTRS (1<<0) 840 #define I915_OVERLAY_UPDATE_ATTRS (1<<0)
840 #define I915_OVERLAY_UPDATE_GAMMA (1<<1) 841 #define I915_OVERLAY_UPDATE_GAMMA (1<<1)
841 struct drm_intel_overlay_attrs { 842 struct drm_intel_overlay_attrs {
842 __u32 flags; 843 __u32 flags;
843 __u32 color_key; 844 __u32 color_key;
844 __s32 brightness; 845 __s32 brightness;
845 __u32 contrast; 846 __u32 contrast;
846 __u32 saturation; 847 __u32 saturation;
847 __u32 gamma0; 848 __u32 gamma0;
848 __u32 gamma1; 849 __u32 gamma1;
849 __u32 gamma2; 850 __u32 gamma2;
850 __u32 gamma3; 851 __u32 gamma3;
851 __u32 gamma4; 852 __u32 gamma4;
852 __u32 gamma5; 853 __u32 gamma5;
853 }; 854 };
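A sketch of adjusting one attribute while leaving the rest alone, assuming that calling the attrs ioctl without I915_OVERLAY_UPDATE_ATTRS set returns the current values (read back first, then write).

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_overlay_brightness(int fd, int brightness)
{
	struct drm_intel_overlay_attrs attrs;

	/* Read the current attributes so only brightness changes. */
	memset(&attrs, 0, sizeof(attrs));
	if (ioctl(fd, DRM_IOCTL_I915_OVERLAY_ATTRS, &attrs))
		return -1;

	attrs.flags = I915_OVERLAY_UPDATE_ATTRS;
	attrs.brightness = brightness;	/* signed; 0 is assumed neutral */
	return ioctl(fd, DRM_IOCTL_I915_OVERLAY_ATTRS, &attrs);
}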
854 855
855 /* 856 /*
856 * Intel sprite handling 857 * Intel sprite handling
857 * 858 *
858 * Color keying works with a min/mask/max tuple. Both source and destination 859 * Color keying works with a min/mask/max tuple. Both source and destination
859 * color keying is allowed. 860 * color keying is allowed.
860 * 861 *
861 * Source keying: 862 * Source keying:
862 * Sprite pixels within the min & max values, masked against the color channels 863 * Sprite pixels within the min & max values, masked against the color channels
863 * specified in the mask field, will be transparent. All other pixels will 864 * specified in the mask field, will be transparent. All other pixels will
864 * be displayed on top of the primary plane. For RGB surfaces, only the min 865 * be displayed on top of the primary plane. For RGB surfaces, only the min
865 * and mask fields will be used; ranged compares are not allowed. 866 * and mask fields will be used; ranged compares are not allowed.
866 * 867 *
867 * Destination keying: 868 * Destination keying:
868 * Primary plane pixels that match the min value, masked against the color 869 * Primary plane pixels that match the min value, masked against the color
869 * channels specified in the mask field, will be replaced by corresponding 870 * channels specified in the mask field, will be replaced by corresponding
870 * pixels from the sprite plane. 871 * pixels from the sprite plane.
871 * 872 *
872 * Note that source & destination keying are exclusive; only one can be 873 * Note that source & destination keying are exclusive; only one can be
873 * active on a given plane. 874 * active on a given plane.
874 */ 875 */
875 876
876 #define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ 877 #define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
877 #define I915_SET_COLORKEY_DESTINATION (1<<1) 878 #define I915_SET_COLORKEY_DESTINATION (1<<1)
878 #define I915_SET_COLORKEY_SOURCE (1<<2) 879 #define I915_SET_COLORKEY_SOURCE (1<<2)
879 struct drm_intel_sprite_colorkey { 880 struct drm_intel_sprite_colorkey {
880 __u32 plane_id; 881 __u32 plane_id;
881 __u32 min_value; 882 __u32 min_value;
882 __u32 channel_mask; 883 __u32 channel_mask;
883 __u32 max_value; 884 __u32 max_value;
884 __u32 flags; 885 __u32 flags;
885 }; 886 };
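Finally, a sketch of enabling source keying on a sprite plane: sprite pixels matching the key become transparent. The channel mask covering the low 24 bits is an assumption suited to an RGB surface, where only min and mask are consulted.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_source_key(int fd, uint32_t plane_id, uint32_t key)
{
	struct drm_intel_sprite_colorkey ckey;

	memset(&ckey, 0, sizeof(ckey));
	ckey.plane_id = plane_id;
	ckey.min_value = key;
	ckey.max_value = key;		/* ranged compares not used for RGB */
	ckey.channel_mask = 0x00ffffff;	/* compare the R, G and B channels */
	ckey.flags = I915_SET_COLORKEY_SOURCE;

	return ioctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &ckey);
}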
886 887
887 #endif /* _I915_DRM_H_ */ 888 #endif /* _I915_DRM_H_ */
888 889