Commit b897e6fbc49dd84b2634bca664344d503b907ce9
Exists in master and in 4 other branches
Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel
* 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel:
  drm/i915: fix scheduling while holding the new active list spinlock
  drm/i915: Allow tiling of objects with bit 17 swizzling by the CPU.
  drm/i915: Correctly set the write flag for get_user_pages in pread.
  drm/i915: Fix use of uninitialized var in 40a5f0de
  drm/i915: indicate framebuffer restore key in SysRq help message
  drm/i915: sync hdmi detection by hdmi identifier with 2D
  drm/i915: Fix a mismerge of the IGD patch (new .find_pll hooks missed)
  drm/i915: Implement batch and ring buffer dumping
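Background for the bit-17 work that dominates this merge: on the affected 9xx-class chipsets the memory controller folds bit 17 of a page's physical address into the bit-6 swizzle it applies to tiled surfaces. GEM objects are backed by shmem pages whose physical placement can change every time the object is unbound and rebound, so the kernel must remember bit 17 for each page and let the CPU re-swizzle page contents when the bit flips on rebind. That is what the new bit_17 bitmap and the do/save swizzle helpers below implement; the pread/pwrite paths grow matching CPU-side swizzling so userland never has to see the raw bit-17 modes.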
Showing 9 changed files (side-by-side diff)
- drivers/gpu/drm/i915/i915_drv.h
- drivers/gpu/drm/i915/i915_gem.c
- drivers/gpu/drm/i915/i915_gem_debugfs.c
- drivers/gpu/drm/i915/i915_gem_tiling.c
- drivers/gpu/drm/i915/intel_display.c
- drivers/gpu/drm/i915/intel_fb.c
- drivers/gpu/drm/i915/intel_hdmi.c
- drivers/gpu/drm/i915/intel_sdvo.c
- include/drm/i915_drm.h
drivers/gpu/drm/i915/i915_drv.h
... | ... | @@ -446,6 +446,9 @@ |
446 | 446 | uint32_t tiling_mode; |
447 | 447 | uint32_t stride; |
448 | 448 | |
449 | + /** Record of address bit 17 of each page at last unbind. */ | |
450 | + long *bit_17; | |
451 | + | |
449 | 452 | /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ |
450 | 453 | uint32_t agp_type; |
451 | 454 | |
452 | 455 | |
... | ... | @@ -635,9 +638,13 @@ |
635 | 638 | void i915_gem_detach_phys_object(struct drm_device *dev, |
636 | 639 | struct drm_gem_object *obj); |
637 | 640 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
641 | +int i915_gem_object_get_pages(struct drm_gem_object *obj); | |
642 | +void i915_gem_object_put_pages(struct drm_gem_object *obj); | |
638 | 643 | |
639 | 644 | /* i915_gem_tiling.c */ |
640 | 645 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
646 | +void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); | |
647 | +void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); | |
641 | 648 | |
642 | 649 | /* i915_gem_debug.c */ |
643 | 650 | void i915_gem_dump_object(struct drm_gem_object *obj, int len, |
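The bit_17 pointer added above is used as a standard Linux bitmap, one bit per backing page. A minimal sketch of the idiom with illustrative names (the patch's actual allocation appears in i915_gem_object_save_bit_17_swizzle() further down; it uses a plain long *, kmalloc, and BITS_TO_LONGS):

    #include <linux/bitops.h>
    #include <linux/slab.h>

    /* One bit of state per page, packed into an array of longs. */
    static unsigned long *alloc_page_bitmap(int page_count)
    {
            /* BITS_TO_LONGS() rounds the bit count up to whole longs. */
            return kcalloc(BITS_TO_LONGS(page_count), sizeof(long),
                           GFP_KERNEL);
    }

    static void record_page_bit(unsigned long *bitmap, int page, int bit)
    {
            if (bit)
                    __set_bit(page, bitmap);   /* non-atomic set; the    */
            else
                    __clear_bit(page, bitmap); /* caller holds the lock  */
            /* later: test_bit(page, bitmap) reads the snapshot back */
    }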
drivers/gpu/drm/i915/i915_gem.c
... | ... | @@ -43,8 +43,6 @@ |
43 | 43 | uint64_t offset, |
44 | 44 | uint64_t size); |
45 | 45 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); |
46 | -static int i915_gem_object_get_pages(struct drm_gem_object *obj); | |
47 | -static void i915_gem_object_put_pages(struct drm_gem_object *obj); | |
48 | 46 | static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); |
49 | 47 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, |
50 | 48 | unsigned alignment); |
51 | 49 | |
52 | 50 | |
53 | 51 | |
... | ... | @@ -143,17 +141,29 @@ |
143 | 141 | int length) |
144 | 142 | { |
145 | 143 | char __iomem *vaddr; |
146 | - int ret; | |
144 | + int unwritten; | |
147 | 145 | |
148 | 146 | vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); |
149 | 147 | if (vaddr == NULL) |
150 | 148 | return -ENOMEM; |
151 | - ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); | |
149 | + unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length); | |
152 | 150 | kunmap_atomic(vaddr, KM_USER0); |
153 | 151 | |
154 | - return ret; | |
152 | + if (unwritten) | |
153 | + return -EFAULT; | |
154 | + | |
155 | + return 0; | |
155 | 156 | } |
156 | 157 | |
158 | +static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) | |
159 | +{ | |
160 | + drm_i915_private_t *dev_priv = obj->dev->dev_private; | |
161 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | |
162 | + | |
163 | + return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && | |
164 | + obj_priv->tiling_mode != I915_TILING_NONE; | |
165 | +} | |
166 | + | |
157 | 167 | static inline int |
158 | 168 | slow_shmem_copy(struct page *dst_page, |
159 | 169 | int dst_offset, |
... | ... | @@ -181,6 +191,64 @@ |
181 | 191 | return 0; |
182 | 192 | } |
183 | 193 | |
194 | +static inline int | |
195 | +slow_shmem_bit17_copy(struct page *gpu_page, | |
196 | + int gpu_offset, | |
197 | + struct page *cpu_page, | |
198 | + int cpu_offset, | |
199 | + int length, | |
200 | + int is_read) | |
201 | +{ | |
202 | + char *gpu_vaddr, *cpu_vaddr; | |
203 | + | |
204 | + /* Use the unswizzled path if this page isn't affected. */ | |
205 | + if ((page_to_phys(gpu_page) & (1 << 17)) == 0) { | |
206 | + if (is_read) | |
207 | + return slow_shmem_copy(cpu_page, cpu_offset, | |
208 | + gpu_page, gpu_offset, length); | |
209 | + else | |
210 | + return slow_shmem_copy(gpu_page, gpu_offset, | |
211 | + cpu_page, cpu_offset, length); | |
212 | + } | |
213 | + | |
214 | + gpu_vaddr = kmap_atomic(gpu_page, KM_USER0); | |
215 | + if (gpu_vaddr == NULL) | |
216 | + return -ENOMEM; | |
217 | + | |
218 | + cpu_vaddr = kmap_atomic(cpu_page, KM_USER1); | |
219 | + if (cpu_vaddr == NULL) { | |
220 | + kunmap_atomic(gpu_vaddr, KM_USER0); | |
221 | + return -ENOMEM; | |
222 | + } | |
223 | + | |
224 | + /* Copy the data, XORing A6 with A17 (1). The user already knows he's | |
225 | + * XORing with the other bits (A9 for Y, A9 and A10 for X) | |
226 | + */ | |
227 | + while (length > 0) { | |
228 | + int cacheline_end = ALIGN(gpu_offset + 1, 64); | |
229 | + int this_length = min(cacheline_end - gpu_offset, length); | |
230 | + int swizzled_gpu_offset = gpu_offset ^ 64; | |
231 | + | |
232 | + if (is_read) { | |
233 | + memcpy(cpu_vaddr + cpu_offset, | |
234 | + gpu_vaddr + swizzled_gpu_offset, | |
235 | + this_length); | |
236 | + } else { | |
237 | + memcpy(gpu_vaddr + swizzled_gpu_offset, | |
238 | + cpu_vaddr + cpu_offset, | |
239 | + this_length); | |
240 | + } | |
241 | + cpu_offset += this_length; | |
242 | + gpu_offset += this_length; | |
243 | + length -= this_length; | |
244 | + } | |
245 | + | |
246 | + kunmap_atomic(cpu_vaddr, KM_USER1); | |
247 | + kunmap_atomic(gpu_vaddr, KM_USER0); | |
248 | + | |
249 | + return 0; | |
250 | +} | |
251 | + | |
184 | 252 | /** |
185 | 253 | * This is the fast shmem pread path, which attempts to copy_from_user directly |
186 | 254 | * from the backing pages of the object to the user's address space. On a |
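Why gpu_offset ^ 64 in the copy above: when bit 17 changes the swizzle result, the GPU effectively sees the two 64-byte halves of every 128-byte span of the page exchanged, so the CPU copy walks in cacheline-sized chunks and XORs the offset with 64 to compensate. A tiny standalone check of that remapping (illustration only):

    #include <assert.h>

    int main(void)
    {
            /* Each 128-byte span swaps its two 64-byte halves. */
            assert((0 ^ 64) == 64);    /* byte 0 comes from byte 64 */
            assert((64 ^ 64) == 0);    /* and byte 64 from byte 0   */
            assert((130 ^ 64) == 194); /* low 6 bits are preserved  */
            return 0;
    }

This is also why the ioctl entry points below send bit-17 objects straight to the slow path: the fast inatomic copies move raw bytes and have no opportunity to apply this compensation.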
... | ... | @@ -269,6 +337,7 @@ |
269 | 337 | int page_length; |
270 | 338 | int ret; |
271 | 339 | uint64_t data_ptr = args->data_ptr; |
340 | + int do_bit17_swizzling; | |
272 | 341 | |
273 | 342 | remain = args->size; |
274 | 343 | |
275 | 344 | |
... | ... | @@ -286,13 +355,15 @@ |
286 | 355 | |
287 | 356 | down_read(&mm->mmap_sem); |
288 | 357 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, |
289 | - num_pages, 0, 0, user_pages, NULL); | |
358 | + num_pages, 1, 0, user_pages, NULL); | |
290 | 359 | up_read(&mm->mmap_sem); |
291 | 360 | if (pinned_pages < num_pages) { |
292 | 361 | ret = -EFAULT; |
293 | 362 | goto fail_put_user_pages; |
294 | 363 | } |
295 | 364 | |
365 | + do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | |
366 | + | |
296 | 367 | mutex_lock(&dev->struct_mutex); |
297 | 368 | |
298 | 369 | ret = i915_gem_object_get_pages(obj); |
... | ... | @@ -327,11 +398,20 @@ |
327 | 398 | if ((data_page_offset + page_length) > PAGE_SIZE) |
328 | 399 | page_length = PAGE_SIZE - data_page_offset; |
329 | 400 | |
330 | - ret = slow_shmem_copy(user_pages[data_page_index], | |
331 | - data_page_offset, | |
332 | - obj_priv->pages[shmem_page_index], | |
333 | - shmem_page_offset, | |
334 | - page_length); | |
401 | + if (do_bit17_swizzling) { | |
402 | + ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], | |
403 | + shmem_page_offset, | |
404 | + user_pages[data_page_index], | |
405 | + data_page_offset, | |
406 | + page_length, | |
407 | + 1); | |
408 | + } else { | |
409 | + ret = slow_shmem_copy(user_pages[data_page_index], | |
410 | + data_page_offset, | |
411 | + obj_priv->pages[shmem_page_index], | |
412 | + shmem_page_offset, | |
413 | + page_length); | |
414 | + } | |
335 | 415 | if (ret) |
336 | 416 | goto fail_put_pages; |
337 | 417 | |
338 | 418 | |
... | ... | @@ -383,9 +463,14 @@ |
383 | 463 | return -EINVAL; |
384 | 464 | } |
385 | 465 | |
386 | - ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); | |
387 | - if (ret != 0) | |
466 | + if (i915_gem_object_needs_bit17_swizzle(obj)) { | |
388 | 467 | ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); |
468 | + } else { | |
469 | + ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); | |
470 | + if (ret != 0) | |
471 | + ret = i915_gem_shmem_pread_slow(dev, obj, args, | |
472 | + file_priv); | |
473 | + } | |
389 | 474 | |
390 | 475 | drm_gem_object_unreference(obj); |
391 | 476 | |
... | ... | @@ -727,6 +812,7 @@ |
727 | 812 | int page_length; |
728 | 813 | int ret; |
729 | 814 | uint64_t data_ptr = args->data_ptr; |
815 | + int do_bit17_swizzling; | |
730 | 816 | |
731 | 817 | remain = args->size; |
732 | 818 | |
... | ... | @@ -751,6 +837,8 @@ |
751 | 837 | goto fail_put_user_pages; |
752 | 838 | } |
753 | 839 | |
840 | + do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | |
841 | + | |
754 | 842 | mutex_lock(&dev->struct_mutex); |
755 | 843 | |
756 | 844 | ret = i915_gem_object_get_pages(obj); |
... | ... | @@ -785,11 +873,20 @@ |
785 | 873 | if ((data_page_offset + page_length) > PAGE_SIZE) |
786 | 874 | page_length = PAGE_SIZE - data_page_offset; |
787 | 875 | |
788 | - ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], | |
789 | - shmem_page_offset, | |
790 | - user_pages[data_page_index], | |
791 | - data_page_offset, | |
792 | - page_length); | |
876 | + if (do_bit17_swizzling) { | |
877 | + ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], | |
878 | + shmem_page_offset, | |
879 | + user_pages[data_page_index], | |
880 | + data_page_offset, | |
881 | + page_length, | |
882 | + 0); | |
883 | + } else { | |
884 | + ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], | |
885 | + shmem_page_offset, | |
886 | + user_pages[data_page_index], | |
887 | + data_page_offset, | |
888 | + page_length); | |
889 | + } | |
793 | 890 | if (ret) |
794 | 891 | goto fail_put_pages; |
795 | 892 | |
... | ... | @@ -854,6 +951,8 @@ |
854 | 951 | ret = i915_gem_gtt_pwrite_slow(dev, obj, args, |
855 | 952 | file_priv); |
856 | 953 | } |
954 | + } else if (i915_gem_object_needs_bit17_swizzle(obj)) { | |
955 | + ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv); | |
857 | 956 | } else { |
858 | 957 | ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); |
859 | 958 | if (ret == -EFAULT) { |
... | ... | @@ -1285,7 +1384,7 @@ |
1285 | 1384 | return 0; |
1286 | 1385 | } |
1287 | 1386 | |
1288 | -static void | |
1387 | +void | |
1289 | 1388 | i915_gem_object_put_pages(struct drm_gem_object *obj) |
1290 | 1389 | { |
1291 | 1390 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
... | ... | @@ -1297,6 +1396,9 @@ |
1297 | 1396 | if (--obj_priv->pages_refcount != 0) |
1298 | 1397 | return; |
1299 | 1398 | |
1399 | + if (obj_priv->tiling_mode != I915_TILING_NONE) | |
1400 | + i915_gem_object_save_bit_17_swizzle(obj); | |
1401 | + | |
1300 | 1402 | for (i = 0; i < page_count; i++) |
1301 | 1403 | if (obj_priv->pages[i] != NULL) { |
1302 | 1404 | if (obj_priv->dirty) |
1303 | 1405 | |
... | ... | @@ -1494,8 +1596,19 @@ |
1494 | 1596 | |
1495 | 1597 | if (obj->write_domain != 0) |
1496 | 1598 | i915_gem_object_move_to_flushing(obj); |
1497 | - else | |
1599 | + else { | |
1600 | + /* Take a reference on the object so it won't be | |
1601 | + * freed while the spinlock is held. The list | |
1602 | + * protection for this spinlock is safe when breaking | |
1603 | + * the lock like this since the next thing we do | |
1604 | + * is just get the head of the list again. | |
1605 | + */ | |
1606 | + drm_gem_object_reference(obj); | |
1498 | 1607 | i915_gem_object_move_to_inactive(obj); |
1608 | + spin_unlock(&dev_priv->mm.active_list_lock); | |
1609 | + drm_gem_object_unreference(obj); | |
1610 | + spin_lock(&dev_priv->mm.active_list_lock); | |
1611 | + } | |
1499 | 1612 | } |
1500 | 1613 | out: |
1501 | 1614 | spin_unlock(&dev_priv->mm.active_list_lock); |
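The hunk above is the "fix scheduling while holding the new active list spinlock" change: unreferencing the object can free it, which may sleep, so the code pins the object with a reference first, drops the spinlock around the unreference, and reacquires it before re-reading the list head. A generic sketch of the same pattern under assumed kref-based types (names are illustrative, not the driver's):

    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct item {
            struct kref ref;
            struct list_head node;
    };

    static void item_release(struct kref *ref)
    {
            kfree(container_of(ref, struct item, ref));
    }

    static void drain(struct list_head *head, spinlock_t *lock)
    {
            spin_lock(lock);
            while (!list_empty(head)) {
                    struct item *it =
                            list_first_entry(head, struct item, node);

                    kref_get(&it->ref);        /* pin across the unlock */
                    list_del(&it->node);
                    spin_unlock(lock);
                    kref_put(&it->ref, item_release); /* may free/sleep */
                    spin_lock(lock);           /* re-read head next pass */
            }
            spin_unlock(lock);
    }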
... | ... | @@ -1884,7 +1997,7 @@ |
1884 | 1997 | return ret; |
1885 | 1998 | } |
1886 | 1999 | |
1887 | -static int | |
2000 | +int | |
1888 | 2001 | i915_gem_object_get_pages(struct drm_gem_object *obj) |
1889 | 2002 | { |
1890 | 2003 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
... | ... | @@ -1922,6 +2035,10 @@ |
1922 | 2035 | } |
1923 | 2036 | obj_priv->pages[i] = page; |
1924 | 2037 | } |
2038 | + | |
2039 | + if (obj_priv->tiling_mode != I915_TILING_NONE) | |
2040 | + i915_gem_object_do_bit_17_swizzle(obj); | |
2041 | + | |
1925 | 2042 | return 0; |
1926 | 2043 | } |
1927 | 2044 | |
1928 | 2045 | |
... | ... | @@ -3002,13 +3119,13 @@ |
3002 | 3119 | drm_free(*relocs, reloc_count * sizeof(**relocs), |
3003 | 3120 | DRM_MEM_DRIVER); |
3004 | 3121 | *relocs = NULL; |
3005 | - return ret; | |
3122 | + return -EFAULT; | |
3006 | 3123 | } |
3007 | 3124 | |
3008 | 3125 | reloc_index += exec_list[i].relocation_count; |
3009 | 3126 | } |
3010 | 3127 | |
3011 | - return ret; | |
3128 | + return 0; | |
3012 | 3129 | } |
3013 | 3130 | |
3014 | 3131 | static int |
3015 | 3132 | |
3016 | 3133 | |
3017 | 3134 | |
... | ... | @@ -3017,23 +3134,28 @@ |
3017 | 3134 | struct drm_i915_gem_relocation_entry *relocs) |
3018 | 3135 | { |
3019 | 3136 | uint32_t reloc_count = 0, i; |
3020 | - int ret; | |
3137 | + int ret = 0; | |
3021 | 3138 | |
3022 | 3139 | for (i = 0; i < buffer_count; i++) { |
3023 | 3140 | struct drm_i915_gem_relocation_entry __user *user_relocs; |
3141 | + int unwritten; | |
3024 | 3142 | |
3025 | 3143 | user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; |
3026 | 3144 | |
3027 | - if (ret == 0) { | |
3028 | - ret = copy_to_user(user_relocs, | |
3029 | - &relocs[reloc_count], | |
3030 | - exec_list[i].relocation_count * | |
3031 | - sizeof(*relocs)); | |
3145 | + unwritten = copy_to_user(user_relocs, | |
3146 | + &relocs[reloc_count], | |
3147 | + exec_list[i].relocation_count * | |
3148 | + sizeof(*relocs)); | |
3149 | + | |
3150 | + if (unwritten) { | |
3151 | + ret = -EFAULT; | |
3152 | + goto err; | |
3032 | 3153 | } |
3033 | 3154 | |
3034 | 3155 | reloc_count += exec_list[i].relocation_count; |
3035 | 3156 | } |
3036 | 3157 | |
3158 | +err: | |
3037 | 3159 | drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER); |
3038 | 3160 | |
3039 | 3161 | return ret; |
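The two hunks above belong to "Fix use of uninitialized var in 40a5f0de": ret was only assigned on some paths, so the functions could return stack garbage, and copy_to_user()'s nonzero return value (the count of bytes not copied) was being propagated as if it were an errno. Distilled to its essence with a hypothetical helper:

    #include <linux/uaccess.h>

    static int copy_out(void __user *dst, const void *src, size_t n)
    {
            int ret = 0;            /* was declared but never initialized */

            if (copy_to_user(dst, src, n))
                    ret = -EFAULT;  /* translate the leftover-byte count
                                     * into a real error code */
            return ret;
    }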
... | ... | @@ -3243,7 +3365,7 @@ |
3243 | 3365 | exec_offset = exec_list[args->buffer_count - 1].offset; |
3244 | 3366 | |
3245 | 3367 | #if WATCH_EXEC |
3246 | - i915_gem_dump_object(object_list[args->buffer_count - 1], | |
3368 | + i915_gem_dump_object(batch_obj, | |
3247 | 3369 | args->batch_len, |
3248 | 3370 | __func__, |
3249 | 3371 | ~0); |
3250 | 3372 | |
... | ... | @@ -3308,10 +3430,12 @@ |
3308 | 3430 | (uintptr_t) args->buffers_ptr, |
3309 | 3431 | exec_list, |
3310 | 3432 | sizeof(*exec_list) * args->buffer_count); |
3311 | - if (ret) | |
3433 | + if (ret) { | |
3434 | + ret = -EFAULT; | |
3312 | 3435 | DRM_ERROR("failed to copy %d exec entries " |
3313 | 3436 | "back to user (%d)\n", |
3314 | 3437 | args->buffer_count, ret); |
3438 | + } | |
3315 | 3439 | } |
3316 | 3440 | |
3317 | 3441 | /* Copy the updated relocations out regardless of current error |
... | ... | @@ -3593,6 +3717,7 @@ |
3593 | 3717 | i915_gem_free_mmap_offset(obj); |
3594 | 3718 | |
3595 | 3719 | drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); |
3720 | + kfree(obj_priv->bit_17); | |
3596 | 3721 | drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); |
3597 | 3722 | } |
3598 | 3723 |
drivers/gpu/drm/i915/i915_gem_debugfs.c
... | ... | @@ -234,6 +234,96 @@ |
234 | 234 | return 0; |
235 | 235 | } |
236 | 236 | |
237 | +static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count) | |
238 | +{ | |
239 | + int page, i; | |
240 | + uint32_t *mem; | |
241 | + | |
242 | + for (page = 0; page < page_count; page++) { | |
243 | + mem = kmap(pages[page]); | |
244 | + for (i = 0; i < PAGE_SIZE; i += 4) | |
245 | + seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); | |
246 | + kunmap(pages[page]); | |
247 | + } | |
248 | +} | |
249 | + | |
250 | +static int i915_batchbuffer_info(struct seq_file *m, void *data) | |
251 | +{ | |
252 | + struct drm_info_node *node = (struct drm_info_node *) m->private; | |
253 | + struct drm_device *dev = node->minor->dev; | |
254 | + drm_i915_private_t *dev_priv = dev->dev_private; | |
255 | + struct drm_gem_object *obj; | |
256 | + struct drm_i915_gem_object *obj_priv; | |
257 | + int ret; | |
258 | + | |
259 | + spin_lock(&dev_priv->mm.active_list_lock); | |
260 | + | |
261 | + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { | |
262 | + obj = obj_priv->obj; | |
263 | + if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { | |
264 | + ret = i915_gem_object_get_pages(obj); | |
265 | + if (ret) { | |
266 | + DRM_ERROR("Failed to get pages: %d\n", ret); | |
267 | + spin_unlock(&dev_priv->mm.active_list_lock); | |
268 | + return ret; | |
269 | + } | |
270 | + | |
271 | + seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset); | |
272 | + i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE); | |
273 | + | |
274 | + i915_gem_object_put_pages(obj); | |
275 | + } | |
276 | + } | |
277 | + | |
278 | + spin_unlock(&dev_priv->mm.active_list_lock); | |
279 | + | |
280 | + return 0; | |
281 | +} | |
282 | + | |
283 | +static int i915_ringbuffer_data(struct seq_file *m, void *data) | |
284 | +{ | |
285 | + struct drm_info_node *node = (struct drm_info_node *) m->private; | |
286 | + struct drm_device *dev = node->minor->dev; | |
287 | + drm_i915_private_t *dev_priv = dev->dev_private; | |
288 | + u8 *virt; | |
289 | + uint32_t *ptr, off; | |
290 | + | |
291 | + if (!dev_priv->ring.ring_obj) { | |
292 | + seq_printf(m, "No ringbuffer setup\n"); | |
293 | + return 0; | |
294 | + } | |
295 | + | |
296 | + virt = dev_priv->ring.virtual_start; | |
297 | + | |
298 | + for (off = 0; off < dev_priv->ring.Size; off += 4) { | |
299 | + ptr = (uint32_t *)(virt + off); | |
300 | + seq_printf(m, "%08x : %08x\n", off, *ptr); | |
301 | + } | |
302 | + | |
303 | + return 0; | |
304 | +} | |
305 | + | |
306 | +static int i915_ringbuffer_info(struct seq_file *m, void *data) | |
307 | +{ | |
308 | + struct drm_info_node *node = (struct drm_info_node *) m->private; | |
309 | + struct drm_device *dev = node->minor->dev; | |
310 | + drm_i915_private_t *dev_priv = dev->dev_private; | |
311 | + unsigned int head, tail, mask; | |
312 | + | |
313 | + head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | |
314 | + tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; | |
315 | + mask = dev_priv->ring.tail_mask; | |
316 | + | |
317 | + seq_printf(m, "RingHead : %08x\n", head); | |
318 | + seq_printf(m, "RingTail : %08x\n", tail); | |
319 | + seq_printf(m, "RingMask : %08x\n", mask); | |
320 | + seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size); | |
321 | + seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD)); | |
322 | + | |
323 | + return 0; | |
324 | +} | |
325 | + | |
326 | + | |
237 | 327 | static struct drm_info_list i915_gem_debugfs_list[] = { |
238 | 328 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, |
239 | 329 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, |
... | ... | @@ -243,6 +333,9 @@ |
243 | 333 | {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, |
244 | 334 | {"i915_gem_interrupt", i915_interrupt_info, 0}, |
245 | 335 | {"i915_gem_hws", i915_hws_info, 0}, |
336 | + {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, | |
337 | + {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, | |
338 | + {"i915_batchbuffers", i915_batchbuffer_info, 0}, | |
246 | 339 | }; |
247 | 340 | #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) |
248 | 341 |
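Once registered, the three new entries read like any other debugfs file. A hypothetical userspace reader, assuming debugfs is mounted at /sys/kernel/debug and the GPU is DRM minor 0 (both are common defaults, not guaranteed):

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/sys/kernel/debug/dri/0/i915_ringbuffer_info",
                            "r");

            if (f == NULL) {
                    perror("open debugfs entry");
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }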
drivers/gpu/drm/i915/i915_gem_tiling.c
... | ... | @@ -25,6 +25,8 @@ |
25 | 25 | * |
26 | 26 | */ |
27 | 27 | |
28 | +#include "linux/string.h" | |
29 | +#include "linux/bitops.h" | |
28 | 30 | #include "drmP.h" |
29 | 31 | #include "drm.h" |
30 | 32 | #include "i915_drm.h" |
... | ... | @@ -127,8 +129,8 @@ |
127 | 129 | swizzle_y = I915_BIT_6_SWIZZLE_9_11; |
128 | 130 | } else { |
129 | 131 | /* Bit 17 swizzling by the CPU in addition. */ |
130 | - swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | |
131 | - swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | |
132 | + swizzle_x = I915_BIT_6_SWIZZLE_9_10_17; | |
133 | + swizzle_y = I915_BIT_6_SWIZZLE_9_17; | |
132 | 134 | } |
133 | 135 | break; |
134 | 136 | } |
... | ... | @@ -288,6 +290,19 @@ |
288 | 290 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; |
289 | 291 | else |
290 | 292 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; |
293 | + | |
294 | + /* Hide bit 17 swizzling from the user. This prevents old Mesa | |
295 | + * from aborting the application on sw fallbacks to bit 17, | |
296 | + * and we use the pread/pwrite bit17 paths to swizzle for it. | |
297 | + * If there was a user that was relying on the swizzle | |
298 | + * information for drm_intel_bo_map()ed reads/writes this would | |
299 | + * break it, but we don't have any of those. | |
300 | + */ | |
301 | + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) | |
302 | + args->swizzle_mode = I915_BIT_6_SWIZZLE_9; | |
303 | + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) | |
304 | + args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; | |
305 | + | |
291 | 306 | /* If we can't handle the swizzling, make it untiled. */ |
292 | 307 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { |
293 | 308 | args->tiling_mode = I915_TILING_NONE; |
294 | 309 | |
... | ... | @@ -354,9 +369,101 @@ |
354 | 369 | DRM_ERROR("unknown tiling mode\n"); |
355 | 370 | } |
356 | 371 | |
372 | + /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ | |
373 | + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) | |
374 | + args->swizzle_mode = I915_BIT_6_SWIZZLE_9; | |
375 | + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) | |
376 | + args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; | |
377 | + | |
357 | 378 | drm_gem_object_unreference(obj); |
358 | 379 | mutex_unlock(&dev->struct_mutex); |
359 | 380 | |
360 | 381 | return 0; |
382 | +} | |
383 | + | |
384 | +/** | |
385 | + * Swap every 64 bytes of this page around, to account for it having a new | |
386 | + * bit 17 of its physical address and therefore being interpreted differently | |
387 | + * by the GPU. | |
388 | + */ | |
389 | +static int | |
390 | +i915_gem_swizzle_page(struct page *page) | |
391 | +{ | |
392 | + char *vaddr; | |
393 | + int i; | |
394 | + char temp[64]; | |
395 | + | |
396 | + vaddr = kmap(page); | |
397 | + if (vaddr == NULL) | |
398 | + return -ENOMEM; | |
399 | + | |
400 | + for (i = 0; i < PAGE_SIZE; i += 128) { | |
401 | + memcpy(temp, &vaddr[i], 64); | |
402 | + memcpy(&vaddr[i], &vaddr[i + 64], 64); | |
403 | + memcpy(&vaddr[i + 64], temp, 64); | |
404 | + } | |
405 | + | |
406 | + kunmap(page); | |
407 | + | |
408 | + return 0; | |
409 | +} | |
410 | + | |
411 | +void | |
412 | +i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) | |
413 | +{ | |
414 | + struct drm_device *dev = obj->dev; | |
415 | + drm_i915_private_t *dev_priv = dev->dev_private; | |
416 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | |
417 | + int page_count = obj->size >> PAGE_SHIFT; | |
418 | + int i; | |
419 | + | |
420 | + if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) | |
421 | + return; | |
422 | + | |
423 | + if (obj_priv->bit_17 == NULL) | |
424 | + return; | |
425 | + | |
426 | + for (i = 0; i < page_count; i++) { | |
427 | + char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; | |
428 | + if ((new_bit_17 & 0x1) != | |
429 | + (test_bit(i, obj_priv->bit_17) != 0)) { | |
430 | + int ret = i915_gem_swizzle_page(obj_priv->pages[i]); | |
431 | + if (ret != 0) { | |
432 | + DRM_ERROR("Failed to swizzle page\n"); | |
433 | + return; | |
434 | + } | |
435 | + set_page_dirty(obj_priv->pages[i]); | |
436 | + } | |
437 | + } | |
438 | +} | |
439 | + | |
440 | +void | |
441 | +i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) | |
442 | +{ | |
443 | + struct drm_device *dev = obj->dev; | |
444 | + drm_i915_private_t *dev_priv = dev->dev_private; | |
445 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | |
446 | + int page_count = obj->size >> PAGE_SHIFT; | |
447 | + int i; | |
448 | + | |
449 | + if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) | |
450 | + return; | |
451 | + | |
452 | + if (obj_priv->bit_17 == NULL) { | |
453 | + obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * | |
454 | + sizeof(long), GFP_KERNEL); | |
455 | + if (obj_priv->bit_17 == NULL) { | |
456 | + DRM_ERROR("Failed to allocate memory for bit 17 " | |
457 | + "record\n"); | |
458 | + return; | |
459 | + } | |
460 | + } | |
461 | + | |
462 | + for (i = 0; i < page_count; i++) { | |
463 | + if (page_to_phys(obj_priv->pages[i]) & (1 << 17)) | |
464 | + __set_bit(i, obj_priv->bit_17); | |
465 | + else | |
466 | + __clear_bit(i, obj_priv->bit_17); | |
467 | + } | |
361 | 468 | } |
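Taken together: the save helper snapshots bit 17 of every page's physical address into the bitmap when the pages are released, and the do helper compares that snapshot against each page's new address when pages are reacquired, swapping the 64-byte halves only where the bit flipped. A standalone model of that per-page decision (illustrative types):

    #include <stdbool.h>
    #include <stdint.h>

    /* True if a page's contents must be re-swizzled after rebinding. */
    static bool page_needs_reswizzle(uint64_t new_phys_addr,
                                     bool recorded_bit17)
    {
            bool new_bit17 = (new_phys_addr >> 17) & 1;

            return new_bit17 != recorded_bit17;
    }

Note that both helpers return early unless the X-channel swizzle is 9_10_17, matching the detection change at the top of this file.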
drivers/gpu/drm/i915/intel_display.c
... | ... | @@ -367,6 +367,7 @@ |
367 | 367 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, |
368 | 368 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, |
369 | 369 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, |
370 | + .find_pll = intel_find_best_PLL, | |
370 | 371 | }, |
371 | 372 | { /* INTEL_LIMIT_IGD_LVDS */ |
372 | 373 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, |
... | ... | @@ -380,6 +381,7 @@ |
380 | 381 | /* IGD only supports single-channel mode. */ |
381 | 382 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, |
382 | 383 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, |
384 | + .find_pll = intel_find_best_PLL, | |
383 | 385 | }, |
384 | 386 | |
385 | 387 | }; |
drivers/gpu/drm/i915/intel_fb.c
... | ... | @@ -864,8 +864,8 @@ |
864 | 864 | |
865 | 865 | static struct sysrq_key_op sysrq_intelfb_restore_op = { |
866 | 866 | .handler = intelfb_sysrq, |
867 | - .help_msg = "force fb", | |
868 | - .action_msg = "force restore of fb console", | |
867 | + .help_msg = "force-fb(G)", | |
868 | + .action_msg = "Restore framebuffer console", | |
869 | 869 | }; |
870 | 870 | |
871 | 871 | int intelfb_probe(struct drm_device *dev) |
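The new help_msg follows the sysrq convention of flagging the trigger key in parentheses, hence "force-fb(G)". For reference, a minimal sketch of wiring up such a handler against the 2.6.30-era sysrq API (the key letter and all names here are illustrative assumptions, not taken from this diff):

    #include <linux/init.h>
    #include <linux/sysrq.h>
    #include <linux/tty.h>

    static void demo_sysrq(int key, struct tty_struct *tty)
    {
            /* kick off the forced-restore work here */
    }

    static struct sysrq_key_op demo_sysrq_op = {
            .handler    = demo_sysrq,
            .help_msg   = "demo-action(G)",
            .action_msg = "Running demo action",
    };

    static int __init demo_init(void)
    {
            /* sysrq handlers register against a single key */
            return register_sysrq_key('g', &demo_sysrq_op);
    }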
drivers/gpu/drm/i915/intel_hdmi.c
... | ... | @@ -38,7 +38,7 @@ |
38 | 38 | struct intel_hdmi_priv { |
39 | 39 | u32 sdvox_reg; |
40 | 40 | u32 save_SDVOX; |
41 | - int has_hdmi_sink; | |
41 | + bool has_hdmi_sink; | |
42 | 42 | }; |
43 | 43 | |
44 | 44 | static void intel_hdmi_mode_set(struct drm_encoder *encoder, |
... | ... | @@ -128,6 +128,22 @@ |
128 | 128 | return true; |
129 | 129 | } |
130 | 130 | |
131 | +static void | |
132 | +intel_hdmi_sink_detect(struct drm_connector *connector) | |
133 | +{ | |
134 | + struct intel_output *intel_output = to_intel_output(connector); | |
135 | + struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | |
136 | + struct edid *edid = NULL; | |
137 | + | |
138 | + edid = drm_get_edid(&intel_output->base, | |
139 | + &intel_output->ddc_bus->adapter); | |
140 | + if (edid != NULL) { | |
141 | + hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); | |
142 | + kfree(edid); | |
143 | + intel_output->base.display_info.raw_edid = NULL; | |
144 | + } | |
145 | +} | |
146 | + | |
131 | 147 | static enum drm_connector_status |
132 | 148 | intel_hdmi_detect(struct drm_connector *connector) |
133 | 149 | { |
134 | 150 | |
... | ... | @@ -158,9 +174,10 @@ |
158 | 174 | return connector_status_unknown; |
159 | 175 | } |
160 | 176 | |
161 | - if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) | |
177 | + if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) { | |
178 | + intel_hdmi_sink_detect(connector); | |
162 | 179 | return connector_status_connected; |
163 | - else | |
180 | + } else | |
164 | 181 | return connector_status_disconnected; |
165 | 182 | } |
166 | 183 |
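Design note: detect() now refreshes has_hdmi_sink from a fresh EDID read whenever hotplug reports a connected device, so the kernel's HDMI-versus-DVI decision tracks the same EDID-based identification the 2D driver uses (the "sync hdmi detection by hdmi identifier with 2D" commit). Clearing display_info.raw_edid after the kfree() also keeps the connector from holding a stale pointer to the freed EDID block.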
drivers/gpu/drm/i915/intel_sdvo.c
... | ... | @@ -1357,6 +1357,23 @@ |
1357 | 1357 | intel_sdvo_read_response(intel_output, &response, 2); |
1358 | 1358 | } |
1359 | 1359 | |
1360 | +static void | |
1361 | +intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | |
1362 | +{ | |
1363 | + struct intel_output *intel_output = to_intel_output(connector); | |
1364 | + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | |
1365 | + struct edid *edid = NULL; | |
1366 | + | |
1367 | + intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); | |
1368 | + edid = drm_get_edid(&intel_output->base, | |
1369 | + &intel_output->ddc_bus->adapter); | |
1370 | + if (edid != NULL) { | |
1371 | + sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); | |
1372 | + kfree(edid); | |
1373 | + intel_output->base.display_info.raw_edid = NULL; | |
1374 | + } | |
1375 | +} | |
1376 | + | |
1360 | 1377 | static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) |
1361 | 1378 | { |
1362 | 1379 | u8 response[2]; |
1363 | 1380 | |
... | ... | @@ -1371,9 +1388,10 @@ |
1371 | 1388 | if (status != SDVO_CMD_STATUS_SUCCESS) |
1372 | 1389 | return connector_status_unknown; |
1373 | 1390 | |
1374 | - if ((response[0] != 0) || (response[1] != 0)) | |
1391 | + if ((response[0] != 0) || (response[1] != 0)) { | |
1392 | + intel_sdvo_hdmi_sink_detect(connector); | |
1375 | 1393 | return connector_status_connected; |
1376 | - else | |
1394 | + } else | |
1377 | 1395 | return connector_status_disconnected; |
1378 | 1396 | } |
1379 | 1397 |
include/drm/i915_drm.h
... | ... | @@ -594,6 +594,9 @@ |
594 | 594 | #define I915_BIT_6_SWIZZLE_9_10_11 4 |
595 | 595 | /* Not seen by userland */ |
596 | 596 | #define I915_BIT_6_SWIZZLE_UNKNOWN 5 |
597 | +/* Seen by userland. */ | |
598 | +#define I915_BIT_6_SWIZZLE_9_17 6 | |
599 | +#define I915_BIT_6_SWIZZLE_9_10_17 7 | |
597 | 600 | |
598 | 601 | struct drm_i915_gem_set_tiling { |
599 | 602 | /** Handle of the buffer to have its tiling state updated */ |
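For orientation, the mode names encode which address bits get XORed into bit 6 of a tiled offset; the two new modes additionally fold in bit 17 of the physical address, which is exactly why the tiling ioctls above mask them down to 9 and 9_10 before userland sees them. A hedged reference model of the formulas (this mirrors the CPU-side fallback logic commonly used by userland drivers; an illustration, not a kernel interface):

    #include <stdint.h>
    #include <drm/i915_drm.h>   /* the I915_BIT_6_SWIZZLE_* defines above */

    /* Returns the value to XOR into bit 6, or -1 for unknown modes. */
    static int bit6_swizzle(uint32_t mode, uint64_t addr)
    {
            int b9  = (addr >> 9)  & 1;
            int b10 = (addr >> 10) & 1;
            int b11 = (addr >> 11) & 1;
            int b17 = (addr >> 17) & 1; /* physical address bit */

            switch (mode) {
            case I915_BIT_6_SWIZZLE_NONE:    return 0;
            case I915_BIT_6_SWIZZLE_9:       return b9;
            case I915_BIT_6_SWIZZLE_9_10:    return b9 ^ b10;
            case I915_BIT_6_SWIZZLE_9_11:    return b9 ^ b11;
            case I915_BIT_6_SWIZZLE_9_10_11: return b9 ^ b10 ^ b11;
            case I915_BIT_6_SWIZZLE_9_17:    return b9 ^ b17;
            case I915_BIT_6_SWIZZLE_9_10_17: return b9 ^ b10 ^ b17;
            default:                         return -1;
            }
    }

    /* Usage for known modes:
     * addr ^= (uint64_t)bit6_swizzle(mode, addr) << 6;
     */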