Commit af13e867133a084aea536870ce39843e862c8aaa
Exists in
ti-lsk-linux-4.1.y
and in
10 other branches
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie: "Radeon, imx, msm, and i915 fixes. The msm, imx and i915 ones are fairly run of the mill. Radeon had some DP audio and posting reads for irq fixes, along with a fix for 32-bit kernels with new cards, we were using unsigned long to represent GPU side memory space, but since that changed size on 32 vs 64 cards with lots of VRAM failed, so the change has no effect on x86-64, just moves to using uint64_t instead" * 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (35 commits) drm/msm: kexec fixes drm/msm/mdp5: fix cursor blending drm/msm/mdp5: fix cursor ROI drm/msm/atomic: Don't leak atomic commit object when commit fails drm/msm/mdp5: Avoid flushing registers when CRTC is disabled drm/msm: update generated headers (add 6th lm.base entry) drm/msm/mdp5: fixup "drm/msm: fix fallout of atomic dpms changes" drm/ttm: device address space != CPU address space drm/mm: Support 4 GiB and larger ranges drm/i915: gen4: work around hang during hibernation drm/i915: Check for driver readyness before handling an underrun interrupt drm/radeon: fix interlaced modes on DCE8 drm/radeon: fix DRM_IOCTL_RADEON_CS oops drm/radeon: do a posting read in cik_set_irq drm/radeon: do a posting read in si_set_irq drm/radeon: do a posting read in evergreen_set_irq drm/radeon: do a posting read in r600_set_irq drm/radeon: do a posting read in rs600_set_irq drm/radeon: do a posting read in r100_set_irq radeon/audio: fix DP audio on DCE6 ...
Showing 34 changed files Side-by-side Diff
- drivers/gpu/drm/drm_mm.c
- drivers/gpu/drm/i915/i915_debugfs.c
- drivers/gpu/drm/i915/i915_drv.c
- drivers/gpu/drm/i915/i915_gem_gtt.c
- drivers/gpu/drm/i915/intel_fifo_underrun.c
- drivers/gpu/drm/imx/dw_hdmi-imx.c
- drivers/gpu/drm/imx/imx-ldb.c
- drivers/gpu/drm/imx/parallel-display.c
- drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
- drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
- drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
- drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
- drivers/gpu/drm/msm/msm_atomic.c
- drivers/gpu/drm/nouveau/nouveau_fbcon.c
- drivers/gpu/drm/radeon/atombios_crtc.c
- drivers/gpu/drm/radeon/atombios_encoders.c
- drivers/gpu/drm/radeon/cik.c
- drivers/gpu/drm/radeon/dce6_afmt.c
- drivers/gpu/drm/radeon/evergreen.c
- drivers/gpu/drm/radeon/evergreen_hdmi.c
- drivers/gpu/drm/radeon/r100.c
- drivers/gpu/drm/radeon/r600.c
- drivers/gpu/drm/radeon/r600_hdmi.c
- drivers/gpu/drm/radeon/radeon_audio.c
- drivers/gpu/drm/radeon/radeon_cs.c
- drivers/gpu/drm/radeon/rs600.c
- drivers/gpu/drm/radeon/si.c
- drivers/gpu/drm/radeon/sid.h
- drivers/gpu/drm/ttm/ttm_bo.c
- drivers/gpu/ipu-v3/ipu-di.c
- include/drm/drm_mm.h
- include/drm/ttm/ttm_bo_api.h
- include/drm/ttm/ttm_bo_driver.h
drivers/gpu/drm/drm_mm.c
... | ... | @@ -91,29 +91,29 @@ |
91 | 91 | */ |
92 | 92 | |
93 | 93 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, |
94 | - unsigned long size, | |
94 | + u64 size, | |
95 | 95 | unsigned alignment, |
96 | 96 | unsigned long color, |
97 | 97 | enum drm_mm_search_flags flags); |
98 | 98 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, |
99 | - unsigned long size, | |
99 | + u64 size, | |
100 | 100 | unsigned alignment, |
101 | 101 | unsigned long color, |
102 | - unsigned long start, | |
103 | - unsigned long end, | |
102 | + u64 start, | |
103 | + u64 end, | |
104 | 104 | enum drm_mm_search_flags flags); |
105 | 105 | |
106 | 106 | static void drm_mm_insert_helper(struct drm_mm_node *hole_node, |
107 | 107 | struct drm_mm_node *node, |
108 | - unsigned long size, unsigned alignment, | |
108 | + u64 size, unsigned alignment, | |
109 | 109 | unsigned long color, |
110 | 110 | enum drm_mm_allocator_flags flags) |
111 | 111 | { |
112 | 112 | struct drm_mm *mm = hole_node->mm; |
113 | - unsigned long hole_start = drm_mm_hole_node_start(hole_node); | |
114 | - unsigned long hole_end = drm_mm_hole_node_end(hole_node); | |
115 | - unsigned long adj_start = hole_start; | |
116 | - unsigned long adj_end = hole_end; | |
113 | + u64 hole_start = drm_mm_hole_node_start(hole_node); | |
114 | + u64 hole_end = drm_mm_hole_node_end(hole_node); | |
115 | + u64 adj_start = hole_start; | |
116 | + u64 adj_end = hole_end; | |
117 | 117 | |
118 | 118 | BUG_ON(node->allocated); |
119 | 119 | |
120 | 120 | |
121 | 121 | |
... | ... | @@ -124,12 +124,15 @@ |
124 | 124 | adj_start = adj_end - size; |
125 | 125 | |
126 | 126 | if (alignment) { |
127 | - unsigned tmp = adj_start % alignment; | |
128 | - if (tmp) { | |
127 | + u64 tmp = adj_start; | |
128 | + unsigned rem; | |
129 | + | |
130 | + rem = do_div(tmp, alignment); | |
131 | + if (rem) { | |
129 | 132 | if (flags & DRM_MM_CREATE_TOP) |
130 | - adj_start -= tmp; | |
133 | + adj_start -= rem; | |
131 | 134 | else |
132 | - adj_start += alignment - tmp; | |
135 | + adj_start += alignment - rem; | |
133 | 136 | } |
134 | 137 | } |
135 | 138 | |
... | ... | @@ -176,9 +179,9 @@ |
176 | 179 | int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) |
177 | 180 | { |
178 | 181 | struct drm_mm_node *hole; |
179 | - unsigned long end = node->start + node->size; | |
180 | - unsigned long hole_start; | |
181 | - unsigned long hole_end; | |
182 | + u64 end = node->start + node->size; | |
183 | + u64 hole_start; | |
184 | + u64 hole_end; | |
182 | 185 | |
183 | 186 | BUG_ON(node == NULL); |
184 | 187 | |
... | ... | @@ -227,7 +230,7 @@ |
227 | 230 | * 0 on success, -ENOSPC if there's no suitable hole. |
228 | 231 | */ |
229 | 232 | int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, |
230 | - unsigned long size, unsigned alignment, | |
233 | + u64 size, unsigned alignment, | |
231 | 234 | unsigned long color, |
232 | 235 | enum drm_mm_search_flags sflags, |
233 | 236 | enum drm_mm_allocator_flags aflags) |
234 | 237 | |
235 | 238 | |
... | ... | @@ -246,16 +249,16 @@ |
246 | 249 | |
247 | 250 | static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, |
248 | 251 | struct drm_mm_node *node, |
249 | - unsigned long size, unsigned alignment, | |
252 | + u64 size, unsigned alignment, | |
250 | 253 | unsigned long color, |
251 | - unsigned long start, unsigned long end, | |
254 | + u64 start, u64 end, | |
252 | 255 | enum drm_mm_allocator_flags flags) |
253 | 256 | { |
254 | 257 | struct drm_mm *mm = hole_node->mm; |
255 | - unsigned long hole_start = drm_mm_hole_node_start(hole_node); | |
256 | - unsigned long hole_end = drm_mm_hole_node_end(hole_node); | |
257 | - unsigned long adj_start = hole_start; | |
258 | - unsigned long adj_end = hole_end; | |
258 | + u64 hole_start = drm_mm_hole_node_start(hole_node); | |
259 | + u64 hole_end = drm_mm_hole_node_end(hole_node); | |
260 | + u64 adj_start = hole_start; | |
261 | + u64 adj_end = hole_end; | |
259 | 262 | |
260 | 263 | BUG_ON(!hole_node->hole_follows || node->allocated); |
261 | 264 | |
262 | 265 | |
263 | 266 | |
... | ... | @@ -271,12 +274,15 @@ |
271 | 274 | mm->color_adjust(hole_node, color, &adj_start, &adj_end); |
272 | 275 | |
273 | 276 | if (alignment) { |
274 | - unsigned tmp = adj_start % alignment; | |
275 | - if (tmp) { | |
277 | + u64 tmp = adj_start; | |
278 | + unsigned rem; | |
279 | + | |
280 | + rem = do_div(tmp, alignment); | |
281 | + if (rem) { | |
276 | 282 | if (flags & DRM_MM_CREATE_TOP) |
277 | - adj_start -= tmp; | |
283 | + adj_start -= rem; | |
278 | 284 | else |
279 | - adj_start += alignment - tmp; | |
285 | + adj_start += alignment - rem; | |
280 | 286 | } |
281 | 287 | } |
282 | 288 | |
283 | 289 | |
... | ... | @@ -324,9 +330,9 @@ |
324 | 330 | * 0 on success, -ENOSPC if there's no suitable hole. |
325 | 331 | */ |
326 | 332 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, |
327 | - unsigned long size, unsigned alignment, | |
333 | + u64 size, unsigned alignment, | |
328 | 334 | unsigned long color, |
329 | - unsigned long start, unsigned long end, | |
335 | + u64 start, u64 end, | |
330 | 336 | enum drm_mm_search_flags sflags, |
331 | 337 | enum drm_mm_allocator_flags aflags) |
332 | 338 | { |
333 | 339 | |
334 | 340 | |
335 | 341 | |
336 | 342 | |
... | ... | @@ -387,32 +393,34 @@ |
387 | 393 | } |
388 | 394 | EXPORT_SYMBOL(drm_mm_remove_node); |
389 | 395 | |
390 | -static int check_free_hole(unsigned long start, unsigned long end, | |
391 | - unsigned long size, unsigned alignment) | |
396 | +static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment) | |
392 | 397 | { |
393 | 398 | if (end - start < size) |
394 | 399 | return 0; |
395 | 400 | |
396 | 401 | if (alignment) { |
397 | - unsigned tmp = start % alignment; | |
402 | + u64 tmp = start; | |
403 | + unsigned rem; | |
404 | + | |
405 | + rem = do_div(tmp, alignment); | |
398 | 406 | if (rem) |
399 | - start += alignment - tmp; | |
407 | + start += alignment - rem; | |
400 | 408 | } |
401 | 409 | |
402 | 410 | return end >= start + size; |
403 | 411 | } |
404 | 412 | |
405 | 413 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, |
406 | - unsigned long size, | |
414 | + u64 size, | |
407 | 415 | unsigned alignment, |
408 | 416 | unsigned long color, |
409 | 417 | enum drm_mm_search_flags flags) |
410 | 418 | { |
411 | 419 | struct drm_mm_node *entry; |
412 | 420 | struct drm_mm_node *best; |
413 | - unsigned long adj_start; | |
414 | - unsigned long adj_end; | |
415 | - unsigned long best_size; | |
421 | + u64 adj_start; | |
422 | + u64 adj_end; | |
423 | + u64 best_size; | |
416 | 424 | |
417 | 425 | BUG_ON(mm->scanned_blocks); |
418 | 426 | |
... | ... | @@ -421,7 +429,7 @@ |
421 | 429 | |
422 | 430 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, |
423 | 431 | flags & DRM_MM_SEARCH_BELOW) { |
424 | - unsigned long hole_size = adj_end - adj_start; | |
432 | + u64 hole_size = adj_end - adj_start; | |
425 | 433 | |
426 | 434 | if (mm->color_adjust) { |
427 | 435 | mm->color_adjust(entry, color, &adj_start, &adj_end); |
428 | 436 | |
429 | 437 | |
... | ... | @@ -445,18 +453,18 @@ |
445 | 453 | } |
446 | 454 | |
447 | 455 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, |
448 | - unsigned long size, | |
456 | + u64 size, | |
449 | 457 | unsigned alignment, |
450 | 458 | unsigned long color, |
451 | - unsigned long start, | |
452 | - unsigned long end, | |
459 | + u64 start, | |
460 | + u64 end, | |
453 | 461 | enum drm_mm_search_flags flags) |
454 | 462 | { |
455 | 463 | struct drm_mm_node *entry; |
456 | 464 | struct drm_mm_node *best; |
457 | - unsigned long adj_start; | |
458 | - unsigned long adj_end; | |
459 | - unsigned long best_size; | |
465 | + u64 adj_start; | |
466 | + u64 adj_end; | |
467 | + u64 best_size; | |
460 | 468 | |
461 | 469 | BUG_ON(mm->scanned_blocks); |
462 | 470 | |
... | ... | @@ -465,7 +473,7 @@ |
465 | 473 | |
466 | 474 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, |
467 | 475 | flags & DRM_MM_SEARCH_BELOW) { |
468 | - unsigned long hole_size = adj_end - adj_start; | |
476 | + u64 hole_size = adj_end - adj_start; | |
469 | 477 | |
470 | 478 | if (adj_start < start) |
471 | 479 | adj_start = start; |
... | ... | @@ -561,7 +569,7 @@ |
561 | 569 | * adding/removing nodes to/from the scan list are allowed. |
562 | 570 | */ |
563 | 571 | void drm_mm_init_scan(struct drm_mm *mm, |
564 | - unsigned long size, | |
572 | + u64 size, | |
565 | 573 | unsigned alignment, |
566 | 574 | unsigned long color) |
567 | 575 | { |
568 | 576 | |
... | ... | @@ -594,11 +602,11 @@ |
594 | 602 | * adding/removing nodes to/from the scan list are allowed. |
595 | 603 | */ |
596 | 604 | void drm_mm_init_scan_with_range(struct drm_mm *mm, |
597 | - unsigned long size, | |
605 | + u64 size, | |
598 | 606 | unsigned alignment, |
599 | 607 | unsigned long color, |
600 | - unsigned long start, | |
601 | - unsigned long end) | |
608 | + u64 start, | |
609 | + u64 end) | |
602 | 610 | { |
603 | 611 | mm->scan_color = color; |
604 | 612 | mm->scan_alignment = alignment; |
... | ... | @@ -627,8 +635,8 @@ |
627 | 635 | { |
628 | 636 | struct drm_mm *mm = node->mm; |
629 | 637 | struct drm_mm_node *prev_node; |
630 | - unsigned long hole_start, hole_end; | |
631 | - unsigned long adj_start, adj_end; | |
638 | + u64 hole_start, hole_end; | |
639 | + u64 adj_start, adj_end; | |
632 | 640 | |
633 | 641 | mm->scanned_blocks++; |
634 | 642 | |
... | ... | @@ -731,7 +739,7 @@ |
731 | 739 | * |
732 | 740 | * Note that @mm must be cleared to 0 before calling this function. |
733 | 741 | */ |
734 | -void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) | |
742 | +void drm_mm_init(struct drm_mm * mm, u64 start, u64 size) | |
735 | 743 | { |
736 | 744 | INIT_LIST_HEAD(&mm->hole_stack); |
737 | 745 | mm->scanned_blocks = 0; |
738 | 746 | |
739 | 747 | |
... | ... | @@ -766,18 +774,17 @@ |
766 | 774 | } |
767 | 775 | EXPORT_SYMBOL(drm_mm_takedown); |
768 | 776 | |
769 | -static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry, | |
770 | - const char *prefix) | |
777 | +static u64 drm_mm_debug_hole(struct drm_mm_node *entry, | |
778 | + const char *prefix) | |
771 | 779 | { |
772 | - unsigned long hole_start, hole_end, hole_size; | |
780 | + u64 hole_start, hole_end, hole_size; | |
773 | 781 | |
774 | 782 | if (entry->hole_follows) { |
775 | 783 | hole_start = drm_mm_hole_node_start(entry); |
776 | 784 | hole_end = drm_mm_hole_node_end(entry); |
777 | 785 | hole_size = hole_end - hole_start; |
778 | - printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n", | |
779 | - prefix, hole_start, hole_end, | |
780 | - hole_size); | |
786 | + pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start, | |
787 | + hole_end, hole_size); | |
781 | 788 | return hole_size; |
782 | 789 | } |
783 | 790 | |
784 | 791 | |
785 | 792 | |
786 | 793 | |
787 | 794 | |
788 | 795 | |
... | ... | @@ -792,35 +799,34 @@ |
792 | 799 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) |
793 | 800 | { |
794 | 801 | struct drm_mm_node *entry; |
795 | - unsigned long total_used = 0, total_free = 0, total = 0; | |
802 | + u64 total_used = 0, total_free = 0, total = 0; | |
796 | 803 | |
797 | 804 | total_free += drm_mm_debug_hole(&mm->head_node, prefix); |
798 | 805 | |
799 | 806 | drm_mm_for_each_node(entry, mm) { |
800 | - printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n", | |
801 | - prefix, entry->start, entry->start + entry->size, | |
802 | - entry->size); | |
807 | + pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start, | |
808 | + entry->start + entry->size, entry->size); | |
803 | 809 | total_used += entry->size; |
804 | 810 | total_free += drm_mm_debug_hole(entry, prefix); |
805 | 811 | } |
806 | 812 | total = total_free + total_used; |
807 | 813 | |
808 | - printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total, | |
809 | - total_used, total_free); | |
814 | + pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total, | |
815 | + total_used, total_free); | |
810 | 816 | } |
811 | 817 | EXPORT_SYMBOL(drm_mm_debug_table); |
812 | 818 | |
813 | 819 | #if defined(CONFIG_DEBUG_FS) |
814 | -static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) | |
820 | +static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) | |
815 | 821 | { |
816 | - unsigned long hole_start, hole_end, hole_size; | |
822 | + u64 hole_start, hole_end, hole_size; | |
817 | 823 | |
818 | 824 | if (entry->hole_follows) { |
819 | 825 | hole_start = drm_mm_hole_node_start(entry); |
820 | 826 | hole_end = drm_mm_hole_node_end(entry); |
821 | 827 | hole_size = hole_end - hole_start; |
822 | - seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", | |
823 | - hole_start, hole_end, hole_size); | |
828 | + seq_printf(m, "%#llx-%#llx: %llu: free\n", hole_start, | |
829 | + hole_end, hole_size); | |
824 | 830 | return hole_size; |
825 | 831 | } |
826 | 832 | |
827 | 833 | |
828 | 834 | |
... | ... | @@ -835,20 +841,20 @@ |
835 | 841 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) |
836 | 842 | { |
837 | 843 | struct drm_mm_node *entry; |
838 | - unsigned long total_used = 0, total_free = 0, total = 0; | |
844 | + u64 total_used = 0, total_free = 0, total = 0; | |
839 | 845 | |
840 | 846 | total_free += drm_mm_dump_hole(m, &mm->head_node); |
841 | 847 | |
842 | 848 | drm_mm_for_each_node(entry, mm) { |
843 | - seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", | |
844 | - entry->start, entry->start + entry->size, | |
845 | - entry->size); | |
849 | + seq_printf(m, "%#016llx-%#016llx: %llu: used\n", entry->start, | |
850 | + entry->start + entry->size, entry->size); | |
846 | 851 | total_used += entry->size; |
847 | 852 | total_free += drm_mm_dump_hole(m, entry); |
848 | 853 | } |
849 | 854 | total = total_free + total_used; |
850 | 855 | |
851 | - seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free); | |
856 | + seq_printf(m, "total: %llu, used %llu free %llu\n", total, | |
857 | + total_used, total_free); | |
852 | 858 | return 0; |
853 | 859 | } |
854 | 860 | EXPORT_SYMBOL(drm_mm_dump_table); |
drivers/gpu/drm/i915/i915_debugfs.c
... | ... | @@ -152,12 +152,12 @@ |
152 | 152 | seq_puts(m, " (pp"); |
153 | 153 | else |
154 | 154 | seq_puts(m, " (g"); |
155 | - seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)", | |
155 | + seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)", | |
156 | 156 | vma->node.start, vma->node.size, |
157 | 157 | vma->ggtt_view.type); |
158 | 158 | } |
159 | 159 | if (obj->stolen) |
160 | - seq_printf(m, " (stolen: %08lx)", obj->stolen->start); | |
160 | + seq_printf(m, " (stolen: %08llx)", obj->stolen->start); | |
161 | 161 | if (obj->pin_mappable || obj->fault_mappable) { |
162 | 162 | char s[3], *t = s; |
163 | 163 | if (obj->pin_mappable) |
drivers/gpu/drm/i915/i915_drv.c
... | ... | @@ -622,7 +622,7 @@ |
622 | 622 | return 0; |
623 | 623 | } |
624 | 624 | |
625 | -static int i915_drm_suspend_late(struct drm_device *drm_dev) | |
625 | +static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) | |
626 | 626 | { |
627 | 627 | struct drm_i915_private *dev_priv = drm_dev->dev_private; |
628 | 628 | int ret; |
... | ... | @@ -636,7 +636,17 @@ |
636 | 636 | } |
637 | 637 | |
638 | 638 | pci_disable_device(drm_dev->pdev); |
639 | - pci_set_power_state(drm_dev->pdev, PCI_D3hot); | |
639 | + /* | |
640 | + * During hibernation on some GEN4 platforms the BIOS may try to access | |
641 | + * the device even though it's already in D3 and hang the machine. So | |
642 | + * leave the device in D0 on those platforms and hope the BIOS will | |
643 | + * power down the device properly. Platforms where this was seen: | |
644 | + * Lenovo Thinkpad X301, X61s | |
645 | + */ | |
646 | + if (!(hibernation && | |
647 | + drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO && | |
648 | + INTEL_INFO(dev_priv)->gen == 4)) | |
649 | + pci_set_power_state(drm_dev->pdev, PCI_D3hot); | |
640 | 650 | |
641 | 651 | return 0; |
642 | 652 | } |
... | ... | @@ -662,7 +672,7 @@ |
662 | 672 | if (error) |
663 | 673 | return error; |
664 | 674 | |
665 | - return i915_drm_suspend_late(dev); | |
675 | + return i915_drm_suspend_late(dev, false); | |
666 | 676 | } |
667 | 677 | |
668 | 678 | static int i915_drm_resume(struct drm_device *dev) |
669 | 679 | |
... | ... | @@ -950,9 +960,19 @@ |
950 | 960 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
951 | 961 | return 0; |
952 | 962 | |
953 | - return i915_drm_suspend_late(drm_dev); | |
963 | + return i915_drm_suspend_late(drm_dev, false); | |
954 | 964 | } |
955 | 965 | |
966 | +static int i915_pm_poweroff_late(struct device *dev) | |
967 | +{ | |
968 | + struct drm_device *drm_dev = dev_to_i915(dev)->dev; | |
969 | + | |
970 | + if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | |
971 | + return 0; | |
972 | + | |
973 | + return i915_drm_suspend_late(drm_dev, true); | |
974 | +} | |
975 | + | |
956 | 976 | static int i915_pm_resume_early(struct device *dev) |
957 | 977 | { |
958 | 978 | struct drm_device *drm_dev = dev_to_i915(dev)->dev; |
... | ... | @@ -1520,7 +1540,7 @@ |
1520 | 1540 | .thaw_early = i915_pm_resume_early, |
1521 | 1541 | .thaw = i915_pm_resume, |
1522 | 1542 | .poweroff = i915_pm_suspend, |
1523 | - .poweroff_late = i915_pm_suspend_late, | |
1543 | + .poweroff_late = i915_pm_poweroff_late, | |
1524 | 1544 | .restore_early = i915_pm_resume_early, |
1525 | 1545 | .restore = i915_pm_resume, |
1526 | 1546 |
drivers/gpu/drm/i915/i915_gem_gtt.c
... | ... | @@ -1145,7 +1145,7 @@ |
1145 | 1145 | |
1146 | 1146 | ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); |
1147 | 1147 | |
1148 | - DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n", | |
1148 | + DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n", | |
1149 | 1149 | ppgtt->node.size >> 20, |
1150 | 1150 | ppgtt->node.start / PAGE_SIZE); |
1151 | 1151 | |
... | ... | @@ -1713,8 +1713,8 @@ |
1713 | 1713 | |
1714 | 1714 | static void i915_gtt_color_adjust(struct drm_mm_node *node, |
1715 | 1715 | unsigned long color, |
1716 | - unsigned long *start, | |
1717 | - unsigned long *end) | |
1716 | + u64 *start, | |
1717 | + u64 *end) | |
1718 | 1718 | { |
1719 | 1719 | if (node->color != color) |
1720 | 1720 | *start += 4096; |
drivers/gpu/drm/i915/intel_fifo_underrun.c
... | ... | @@ -282,16 +282,6 @@ |
282 | 282 | return ret; |
283 | 283 | } |
284 | 284 | |
285 | -static bool | |
286 | -__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv, | |
287 | - enum pipe pipe) | |
288 | -{ | |
289 | - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | |
290 | - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
291 | - | |
292 | - return !intel_crtc->cpu_fifo_underrun_disabled; | |
293 | -} | |
294 | - | |
295 | 285 | /** |
296 | 286 | * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state |
297 | 287 | * @dev_priv: i915 device instance |
298 | 288 | |
... | ... | @@ -352,9 +342,15 @@ |
352 | 342 | void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, |
353 | 343 | enum pipe pipe) |
354 | 344 | { |
345 | + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | |
346 | + | |
347 | + /* We may be called too early in init, thanks BIOS! */ | |
348 | + if (crtc == NULL) | |
349 | + return; | |
350 | + | |
355 | 351 | /* GMCH can't disable fifo underruns, filter them. */ |
356 | 352 | if (HAS_GMCH_DISPLAY(dev_priv->dev) && |
357 | - !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe)) | |
353 | + to_intel_crtc(crtc)->cpu_fifo_underrun_disabled) | |
358 | 354 | return; |
359 | 355 | |
360 | 356 | if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) |
drivers/gpu/drm/imx/dw_hdmi-imx.c
... | ... | @@ -70,7 +70,9 @@ |
70 | 70 | 118800000, { 0x091c, 0x091c, 0x06dc }, |
71 | 71 | }, { |
72 | 72 | 216000000, { 0x06dc, 0x0b5c, 0x091c }, |
73 | - } | |
73 | + }, { | |
74 | + ~0UL, { 0x0000, 0x0000, 0x0000 }, | |
75 | + }, | |
74 | 76 | }; |
75 | 77 | |
76 | 78 | static const struct dw_hdmi_sym_term imx_sym_term[] = { |
77 | 79 | |
... | ... | @@ -136,11 +138,34 @@ |
136 | 138 | .destroy = drm_encoder_cleanup, |
137 | 139 | }; |
138 | 140 | |
141 | +static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con, | |
142 | + struct drm_display_mode *mode) | |
143 | +{ | |
144 | + if (mode->clock < 13500) | |
145 | + return MODE_CLOCK_LOW; | |
146 | + if (mode->clock > 266000) | |
147 | + return MODE_CLOCK_HIGH; | |
148 | + | |
149 | + return MODE_OK; | |
150 | +} | |
151 | + | |
152 | +static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con, | |
153 | + struct drm_display_mode *mode) | |
154 | +{ | |
155 | + if (mode->clock < 13500) | |
156 | + return MODE_CLOCK_LOW; | |
157 | + if (mode->clock > 270000) | |
158 | + return MODE_CLOCK_HIGH; | |
159 | + | |
160 | + return MODE_OK; | |
161 | +} | |
162 | + | |
139 | 163 | static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = { |
140 | - .mpll_cfg = imx_mpll_cfg, | |
141 | - .cur_ctr = imx_cur_ctr, | |
142 | - .sym_term = imx_sym_term, | |
143 | - .dev_type = IMX6Q_HDMI, | |
164 | + .mpll_cfg = imx_mpll_cfg, | |
165 | + .cur_ctr = imx_cur_ctr, | |
166 | + .sym_term = imx_sym_term, | |
167 | + .dev_type = IMX6Q_HDMI, | |
168 | + .mode_valid = imx6q_hdmi_mode_valid, | |
144 | 169 | }; |
145 | 170 | |
146 | 171 | static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { |
... | ... | @@ -148,6 +173,7 @@ |
148 | 173 | .cur_ctr = imx_cur_ctr, |
149 | 174 | .sym_term = imx_sym_term, |
150 | 175 | .dev_type = IMX6DL_HDMI, |
176 | + .mode_valid = imx6dl_hdmi_mode_valid, | |
151 | 177 | }; |
152 | 178 | |
153 | 179 | static const struct of_device_id dw_hdmi_imx_dt_ids[] = { |
drivers/gpu/drm/imx/imx-ldb.c
... | ... | @@ -163,23 +163,8 @@ |
163 | 163 | { |
164 | 164 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); |
165 | 165 | struct imx_ldb *ldb = imx_ldb_ch->ldb; |
166 | - struct drm_display_mode *mode = &encoder->crtc->hwmode; | |
167 | 166 | u32 pixel_fmt; |
168 | - unsigned long serial_clk; | |
169 | - unsigned long di_clk = mode->clock * 1000; | |
170 | - int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); | |
171 | 167 | |
172 | - if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) { | |
173 | - /* dual channel LVDS mode */ | |
174 | - serial_clk = 3500UL * mode->clock; | |
175 | - imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk); | |
176 | - imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk); | |
177 | - } else { | |
178 | - serial_clk = 7000UL * mode->clock; | |
179 | - imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk, | |
180 | - di_clk); | |
181 | - } | |
182 | - | |
183 | 168 | switch (imx_ldb_ch->chno) { |
184 | 169 | case 0: |
185 | 170 | pixel_fmt = (ldb->ldb_ctrl & LDB_DATA_WIDTH_CH0_24) ? |
... | ... | @@ -247,6 +232,9 @@ |
247 | 232 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); |
248 | 233 | struct imx_ldb *ldb = imx_ldb_ch->ldb; |
249 | 234 | int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; |
235 | + unsigned long serial_clk; | |
236 | + unsigned long di_clk = mode->clock * 1000; | |
237 | + int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); | |
250 | 238 | |
251 | 239 | if (mode->clock > 170000) { |
252 | 240 | dev_warn(ldb->dev, |
... | ... | @@ -255,6 +243,16 @@ |
255 | 243 | if (mode->clock > 85000 && !dual) { |
256 | 244 | dev_warn(ldb->dev, |
257 | 245 | "%s: mode exceeds 85 MHz pixel clock\n", __func__); |
246 | + } | |
247 | + | |
248 | + if (dual) { | |
249 | + serial_clk = 3500UL * mode->clock; | |
250 | + imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk); | |
251 | + imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk); | |
252 | + } else { | |
253 | + serial_clk = 7000UL * mode->clock; | |
254 | + imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk, | |
255 | + di_clk); | |
258 | 256 | } |
259 | 257 | |
260 | 258 | /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */ |
drivers/gpu/drm/imx/parallel-display.c
... | ... | @@ -236,8 +236,11 @@ |
236 | 236 | } |
237 | 237 | |
238 | 238 | panel_node = of_parse_phandle(np, "fsl,panel", 0); |
239 | - if (panel_node) | |
239 | + if (panel_node) { | |
240 | 240 | imxpd->panel = of_drm_find_panel(panel_node); |
241 | + if (!imxpd->panel) | |
242 | + return -EPROBE_DEFER; | |
243 | + } | |
241 | 244 | |
242 | 245 | imxpd->dev = dev; |
243 | 246 |
drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
... | ... | @@ -32,7 +32,10 @@ |
32 | 32 | void mdp4_irq_preinstall(struct msm_kms *kms) |
33 | 33 | { |
34 | 34 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
35 | + mdp4_enable(mdp4_kms); | |
35 | 36 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); |
37 | + mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); | |
38 | + mdp4_disable(mdp4_kms); | |
36 | 39 | } |
37 | 40 | |
38 | 41 | int mdp4_irq_postinstall(struct msm_kms *kms) |
39 | 42 | |
... | ... | @@ -53,7 +56,9 @@ |
53 | 56 | void mdp4_irq_uninstall(struct msm_kms *kms) |
54 | 57 | { |
55 | 58 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
59 | + mdp4_enable(mdp4_kms); | |
56 | 60 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); |
61 | + mdp4_disable(mdp4_kms); | |
57 | 62 | } |
58 | 63 | |
59 | 64 | irqreturn_t mdp4_irq(struct msm_kms *kms) |
drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
... | ... | @@ -8,17 +8,9 @@ |
8 | 8 | git clone https://github.com/freedreno/envytools.git |
9 | 9 | |
10 | 10 | The rules-ng-ng source files this header was generated from are: |
11 | -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) | |
12 | -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | |
13 | -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) | |
14 | -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) | |
15 | -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) | |
16 | -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | |
17 | -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | |
18 | -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) | |
19 | -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | |
20 | -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) | |
21 | -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) | |
11 | +- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41) | |
12 | +- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15) | |
13 | +- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19) | |
22 | 14 | |
23 | 15 | Copyright (C) 2013-2015 by the following authors: |
24 | 16 | - Rob Clark <robdclark@gmail.com> (robclark) |
... | ... | @@ -910,6 +902,7 @@ |
910 | 902 | case 2: return (mdp5_cfg->lm.base[2]); |
911 | 903 | case 3: return (mdp5_cfg->lm.base[3]); |
912 | 904 | case 4: return (mdp5_cfg->lm.base[4]); |
905 | + case 5: return (mdp5_cfg->lm.base[5]); | |
913 | 906 | default: return INVALID_IDX(idx); |
914 | 907 | } |
915 | 908 | } |
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
... | ... | @@ -62,8 +62,8 @@ |
62 | 62 | |
63 | 63 | /* current cursor being scanned out: */ |
64 | 64 | struct drm_gem_object *scanout_bo; |
65 | - uint32_t width; | |
66 | - uint32_t height; | |
65 | + uint32_t width, height; | |
66 | + uint32_t x, y; | |
67 | 67 | } cursor; |
68 | 68 | }; |
69 | 69 | #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) |
... | ... | @@ -103,8 +103,8 @@ |
103 | 103 | struct drm_plane *plane; |
104 | 104 | uint32_t flush_mask = 0; |
105 | 105 | |
106 | - /* we could have already released CTL in the disable path: */ | |
107 | - if (!mdp5_crtc->ctl) | |
106 | + /* this should not happen: */ | |
107 | + if (WARN_ON(!mdp5_crtc->ctl)) | |
108 | 108 | return; |
109 | 109 | |
110 | 110 | drm_atomic_crtc_for_each_plane(plane, crtc) { |
... | ... | @@ -143,6 +143,11 @@ |
143 | 143 | drm_atomic_crtc_for_each_plane(plane, crtc) { |
144 | 144 | mdp5_plane_complete_flip(plane); |
145 | 145 | } |
146 | + | |
147 | + if (mdp5_crtc->ctl && !crtc->state->enable) { | |
148 | + mdp5_ctl_release(mdp5_crtc->ctl); | |
149 | + mdp5_crtc->ctl = NULL; | |
150 | + } | |
146 | 151 | } |
147 | 152 | |
148 | 153 | static void unref_cursor_worker(struct drm_flip_work *work, void *val) |
149 | 154 | |
... | ... | @@ -386,14 +391,17 @@ |
386 | 391 | mdp5_crtc->event = crtc->state->event; |
387 | 392 | spin_unlock_irqrestore(&dev->event_lock, flags); |
388 | 393 | |
394 | + /* | |
395 | + * If no CTL has been allocated in mdp5_crtc_atomic_check(), | |
396 | + * it means we are trying to flush a CRTC whose state is disabled: | |
397 | + * nothing else needs to be done. | |
398 | + */ | |
399 | + if (unlikely(!mdp5_crtc->ctl)) | |
400 | + return; | |
401 | + | |
389 | 402 | blend_setup(crtc); |
390 | 403 | crtc_flush_all(crtc); |
391 | 404 | request_pending(crtc, PENDING_FLIP); |
392 | - | |
393 | - if (mdp5_crtc->ctl && !crtc->state->enable) { | |
394 | - mdp5_ctl_release(mdp5_crtc->ctl); | |
395 | - mdp5_crtc->ctl = NULL; | |
396 | - } | |
397 | 405 | } |
398 | 406 | |
399 | 407 | static int mdp5_crtc_set_property(struct drm_crtc *crtc, |
... | ... | @@ -403,6 +411,32 @@ |
403 | 411 | return -EINVAL; |
404 | 412 | } |
405 | 413 | |
414 | +static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h) | |
415 | +{ | |
416 | + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | |
417 | + uint32_t xres = crtc->mode.hdisplay; | |
418 | + uint32_t yres = crtc->mode.vdisplay; | |
419 | + | |
420 | + /* | |
421 | + * Cursor Region Of Interest (ROI) is a plane read from cursor | |
422 | + * buffer to render. The ROI region is determined by the visibility of | |
423 | + * the cursor point. In the default Cursor image the cursor point will | |
424 | + * be at the top left of the cursor image, unless it is specified | |
425 | + * otherwise using hotspot feature. | |
426 | + * | |
427 | + * If the cursor point reaches the right (xres - x < cursor.width) or | |
428 | + * bottom (yres - y < cursor.height) boundary of the screen, then ROI | |
429 | + * width and ROI height need to be evaluated to crop the cursor image | |
430 | + * accordingly. | |
431 | + * (xres-x) will be new cursor width when x > (xres - cursor.width) | |
432 | + * (yres-y) will be new cursor height when y > (yres - cursor.height) | |
433 | + */ | |
434 | + *roi_w = min(mdp5_crtc->cursor.width, xres - | |
435 | + mdp5_crtc->cursor.x); | |
436 | + *roi_h = min(mdp5_crtc->cursor.height, yres - | |
437 | + mdp5_crtc->cursor.y); | |
438 | +} | |
439 | + | |
406 | 440 | static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, |
407 | 441 | struct drm_file *file, uint32_t handle, |
408 | 442 | uint32_t width, uint32_t height) |
... | ... | @@ -416,6 +450,7 @@ |
416 | 450 | unsigned int depth; |
417 | 451 | enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; |
418 | 452 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); |
453 | + uint32_t roi_w, roi_h; | |
419 | 454 | unsigned long flags; |
420 | 455 | |
421 | 456 | if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { |
... | ... | @@ -446,6 +481,12 @@ |
446 | 481 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); |
447 | 482 | old_bo = mdp5_crtc->cursor.scanout_bo; |
448 | 483 | |
484 | + mdp5_crtc->cursor.scanout_bo = cursor_bo; | |
485 | + mdp5_crtc->cursor.width = width; | |
486 | + mdp5_crtc->cursor.height = height; | |
487 | + | |
488 | + get_roi(crtc, &roi_w, &roi_h); | |
489 | + | |
449 | 490 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); |
450 | 491 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), |
451 | 492 | MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); |
452 | 493 | |
453 | 494 | |
454 | 495 | |
... | ... | @@ -453,19 +494,14 @@ |
453 | 494 | MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | |
454 | 495 | MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); |
455 | 496 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), |
456 | - MDP5_LM_CURSOR_SIZE_ROI_H(height) | | |
457 | - MDP5_LM_CURSOR_SIZE_ROI_W(width)); | |
497 | + MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | | |
498 | + MDP5_LM_CURSOR_SIZE_ROI_W(roi_w)); | |
458 | 499 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr); |
459 | 500 | |
460 | - | |
461 | 501 | blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; |
462 | - blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN; | |
463 | 502 | blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); |
464 | 503 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); |
465 | 504 | |
466 | - mdp5_crtc->cursor.scanout_bo = cursor_bo; | |
467 | - mdp5_crtc->cursor.width = width; | |
468 | - mdp5_crtc->cursor.height = height; | |
469 | 505 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); |
470 | 506 | |
471 | 507 | ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); |
472 | 508 | |
473 | 509 | |
474 | 510 | |
... | ... | @@ -489,32 +525,19 @@ |
489 | 525 | struct mdp5_kms *mdp5_kms = get_kms(crtc); |
490 | 526 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); |
491 | 527 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); |
492 | - uint32_t xres = crtc->mode.hdisplay; | |
493 | - uint32_t yres = crtc->mode.vdisplay; | |
494 | 528 | uint32_t roi_w; |
495 | 529 | uint32_t roi_h; |
496 | 530 | unsigned long flags; |
497 | 531 | |
498 | - x = (x > 0) ? x : 0; | |
499 | - y = (y > 0) ? y : 0; | |
532 | + /* In case the CRTC is disabled, just drop the cursor update */ | |
533 | + if (unlikely(!crtc->state->enable)) | |
534 | + return 0; | |
500 | 535 | |
501 | - /* | |
502 | - * Cursor Region Of Interest (ROI) is a plane read from cursor | |
503 | - * buffer to render. The ROI region is determined by the visiblity of | |
504 | - * the cursor point. In the default Cursor image the cursor point will | |
505 | - * be at the top left of the cursor image, unless it is specified | |
506 | - * otherwise using hotspot feature. | |
507 | - * | |
508 | - * If the cursor point reaches the right (xres - x < cursor.width) or | |
509 | - * bottom (yres - y < cursor.height) boundary of the screen, then ROI | |
510 | - * width and ROI height need to be evaluated to crop the cursor image | |
511 | - * accordingly. | |
512 | - * (xres-x) will be new cursor width when x > (xres - cursor.width) | |
513 | - * (yres-y) will be new cursor height when y > (yres - cursor.height) | |
514 | - */ | |
515 | - roi_w = min(mdp5_crtc->cursor.width, xres - x); | |
516 | - roi_h = min(mdp5_crtc->cursor.height, yres - y); | |
536 | + mdp5_crtc->cursor.x = x = max(x, 0); | |
537 | + mdp5_crtc->cursor.y = y = max(y, 0); | |
517 | 538 | |
539 | + get_roi(crtc, &roi_w, &roi_h); | |
540 | + | |
518 | 541 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); |
519 | 542 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), |
520 | 543 | MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | |
... | ... | @@ -544,8 +567,8 @@ |
544 | 567 | static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { |
545 | 568 | .mode_fixup = mdp5_crtc_mode_fixup, |
546 | 569 | .mode_set_nofb = mdp5_crtc_mode_set_nofb, |
547 | - .prepare = mdp5_crtc_disable, | |
548 | - .commit = mdp5_crtc_enable, | |
570 | + .disable = mdp5_crtc_disable, | |
571 | + .enable = mdp5_crtc_enable, | |
549 | 572 | .atomic_check = mdp5_crtc_atomic_check, |
550 | 573 | .atomic_begin = mdp5_crtc_atomic_begin, |
551 | 574 | .atomic_flush = mdp5_crtc_atomic_flush, |
drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
... | ... | @@ -267,14 +267,14 @@ |
267 | 267 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); |
268 | 268 | spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); |
269 | 269 | |
270 | - mdp5_encoder->enabled = false; | |
270 | + mdp5_encoder->enabled = true; | |
271 | 271 | } |
272 | 272 | |
273 | 273 | static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { |
274 | 274 | .mode_fixup = mdp5_encoder_mode_fixup, |
275 | 275 | .mode_set = mdp5_encoder_mode_set, |
276 | - .prepare = mdp5_encoder_disable, | |
277 | - .commit = mdp5_encoder_enable, | |
276 | + .disable = mdp5_encoder_disable, | |
277 | + .enable = mdp5_encoder_enable, | |
278 | 278 | }; |
279 | 279 | |
280 | 280 | /* initialize encoder */ |
drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
... | ... | @@ -34,7 +34,10 @@ |
34 | 34 | void mdp5_irq_preinstall(struct msm_kms *kms) |
35 | 35 | { |
36 | 36 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); |
37 | + mdp5_enable(mdp5_kms); | |
37 | 38 | mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); |
39 | + mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); | |
40 | + mdp5_disable(mdp5_kms); | |
38 | 41 | } |
39 | 42 | |
40 | 43 | int mdp5_irq_postinstall(struct msm_kms *kms) |
41 | 44 | |
... | ... | @@ -57,7 +60,9 @@ |
57 | 60 | void mdp5_irq_uninstall(struct msm_kms *kms) |
58 | 61 | { |
59 | 62 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); |
63 | + mdp5_enable(mdp5_kms); | |
60 | 64 | mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); |
65 | + mdp5_disable(mdp5_kms); | |
61 | 66 | } |
62 | 67 | |
63 | 68 | static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) |
drivers/gpu/drm/msm/msm_atomic.c
... | ... | @@ -219,8 +219,10 @@ |
219 | 219 | * mark our set of crtc's as busy: |
220 | 220 | */ |
221 | 221 | ret = start_atomic(dev->dev_private, c->crtc_mask); |
222 | - if (ret) | |
222 | + if (ret) { | |
223 | + kfree(c); | |
223 | 224 | return ret; |
225 | + } | |
224 | 226 | |
225 | 227 | /* |
226 | 228 | * This is the point of no return - everything below never fails except |
drivers/gpu/drm/nouveau/nouveau_fbcon.c
... | ... | @@ -418,7 +418,7 @@ |
418 | 418 | nouveau_fbcon_zfill(dev, fbcon); |
419 | 419 | |
420 | 420 | /* To allow resizeing without swapping buffers */ |
421 | - NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n", | |
421 | + NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n", | |
422 | 422 | nouveau_fb->base.width, nouveau_fb->base.height, |
423 | 423 | nvbo->bo.offset, nvbo); |
424 | 424 |
drivers/gpu/drm/radeon/atombios_crtc.c
... | ... | @@ -1405,6 +1405,9 @@ |
1405 | 1405 | (x << 16) | y); |
1406 | 1406 | viewport_w = crtc->mode.hdisplay; |
1407 | 1407 | viewport_h = (crtc->mode.vdisplay + 1) & ~1; |
1408 | + if ((rdev->family >= CHIP_BONAIRE) && | |
1409 | + (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)) | |
1410 | + viewport_h *= 2; | |
1408 | 1411 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, |
1409 | 1412 | (viewport_w << 16) | viewport_h); |
1410 | 1413 |
drivers/gpu/drm/radeon/atombios_encoders.c
... | ... | @@ -1626,7 +1626,6 @@ |
1626 | 1626 | struct radeon_connector *radeon_connector = NULL; |
1627 | 1627 | struct radeon_connector_atom_dig *radeon_dig_connector = NULL; |
1628 | 1628 | bool travis_quirk = false; |
1629 | - int encoder_mode; | |
1630 | 1629 | |
1631 | 1630 | if (connector) { |
1632 | 1631 | radeon_connector = to_radeon_connector(connector); |
... | ... | @@ -1722,13 +1721,6 @@ |
1722 | 1721 | } |
1723 | 1722 | break; |
1724 | 1723 | } |
1725 | - | |
1726 | - encoder_mode = atombios_get_encoder_mode(encoder); | |
1727 | - if (connector && (radeon_audio != 0) && | |
1728 | - ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | |
1729 | - (ENCODER_MODE_IS_DP(encoder_mode) && | |
1730 | - drm_detect_monitor_audio(radeon_connector_edid(connector))))) | |
1731 | - radeon_audio_dpms(encoder, mode); | |
1732 | 1724 | } |
1733 | 1725 | |
1734 | 1726 | static void |
1735 | 1727 | |
... | ... | @@ -1737,10 +1729,19 @@ |
1737 | 1729 | struct drm_device *dev = encoder->dev; |
1738 | 1730 | struct radeon_device *rdev = dev->dev_private; |
1739 | 1731 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1732 | + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | |
1733 | + int encoder_mode = atombios_get_encoder_mode(encoder); | |
1740 | 1734 | |
1741 | 1735 | DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", |
1742 | 1736 | radeon_encoder->encoder_id, mode, radeon_encoder->devices, |
1743 | 1737 | radeon_encoder->active_device); |
1738 | + | |
1739 | + if (connector && (radeon_audio != 0) && | |
1740 | + ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | |
1741 | + (ENCODER_MODE_IS_DP(encoder_mode) && | |
1742 | + drm_detect_monitor_audio(radeon_connector_edid(connector))))) | |
1743 | + radeon_audio_dpms(encoder, mode); | |
1744 | + | |
1744 | 1745 | switch (radeon_encoder->encoder_id) { |
1745 | 1746 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: |
1746 | 1747 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: |
... | ... | @@ -2170,12 +2171,6 @@ |
2170 | 2171 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: |
2171 | 2172 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
2172 | 2173 | /* handled in dpms */ |
2173 | - encoder_mode = atombios_get_encoder_mode(encoder); | |
2174 | - if (connector && (radeon_audio != 0) && | |
2175 | - ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | |
2176 | - (ENCODER_MODE_IS_DP(encoder_mode) && | |
2177 | - drm_detect_monitor_audio(radeon_connector_edid(connector))))) | |
2178 | - radeon_audio_mode_set(encoder, adjusted_mode); | |
2179 | 2174 | break; |
2180 | 2175 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
2181 | 2176 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
... | ... | @@ -2197,6 +2192,13 @@ |
2197 | 2192 | } |
2198 | 2193 | |
2199 | 2194 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
2195 | + | |
2196 | + encoder_mode = atombios_get_encoder_mode(encoder); | |
2197 | + if (connector && (radeon_audio != 0) && | |
2198 | + ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | |
2199 | + (ENCODER_MODE_IS_DP(encoder_mode) && | |
2200 | + drm_detect_monitor_audio(radeon_connector_edid(connector))))) | |
2201 | + radeon_audio_mode_set(encoder, adjusted_mode); | |
2200 | 2202 | } |
2201 | 2203 | |
2202 | 2204 | static bool |
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/dce6_afmt.c
... | ... | @@ -26,6 +26,9 @@ |
26 | 26 | #include "radeon_audio.h" |
27 | 27 | #include "sid.h" |
28 | 28 | |
29 | +#define DCE8_DCCG_AUDIO_DTO1_PHASE 0x05b8 | |
30 | +#define DCE8_DCCG_AUDIO_DTO1_MODULE 0x05bc | |
31 | + | |
29 | 32 | u32 dce6_endpoint_rreg(struct radeon_device *rdev, |
30 | 33 | u32 block_offset, u32 reg) |
31 | 34 | { |
32 | 35 | |
33 | 36 | |
34 | 37 | |
35 | 38 | |
36 | 39 | |
37 | 40 | |
38 | 41 | |
39 | 42 | |
40 | 43 | |
41 | 44 | |
... | ... | @@ -252,72 +255,67 @@ |
252 | 255 | void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, |
253 | 256 | struct radeon_crtc *crtc, unsigned int clock) |
254 | 257 | { |
255 | - /* Two dtos; generally use dto0 for HDMI */ | |
258 | + /* Two dtos; generally use dto0 for HDMI */ | |
256 | 259 | u32 value = 0; |
257 | 260 | |
258 | - if (crtc) | |
261 | + if (crtc) | |
259 | 262 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); |
260 | 263 | |
261 | 264 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); |
262 | 265 | |
263 | - /* Express [24MHz / target pixel clock] as an exact rational | |
264 | - * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE | |
265 | - * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | |
266 | - */ | |
267 | - WREG32(DCCG_AUDIO_DTO0_PHASE, 24000); | |
268 | - WREG32(DCCG_AUDIO_DTO0_MODULE, clock); | |
266 | + /* Express [24MHz / target pixel clock] as an exact rational | |
267 | + * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE | |
268 | + * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | |
269 | + */ | |
270 | + WREG32(DCCG_AUDIO_DTO0_PHASE, 24000); | |
271 | + WREG32(DCCG_AUDIO_DTO0_MODULE, clock); | |
269 | 272 | } |
270 | 273 | |
271 | 274 | void dce6_dp_audio_set_dto(struct radeon_device *rdev, |
272 | 275 | struct radeon_crtc *crtc, unsigned int clock) |
273 | 276 | { |
274 | - /* Two dtos; generally use dto1 for DP */ | |
277 | + /* Two dtos; generally use dto1 for DP */ | |
275 | 278 | u32 value = 0; |
276 | 279 | value |= DCCG_AUDIO_DTO_SEL; |
277 | 280 | |
278 | - if (crtc) | |
281 | + if (crtc) | |
279 | 282 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); |
280 | 283 | |
281 | 284 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); |
282 | 285 | |
283 | - /* Express [24MHz / target pixel clock] as an exact rational | |
284 | - * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE | |
285 | - * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | |
286 | - */ | |
287 | - WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | |
288 | - WREG32(DCCG_AUDIO_DTO1_MODULE, clock); | |
286 | + /* Express [24MHz / target pixel clock] as an exact rational | |
287 | + * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE | |
288 | + * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | |
289 | + */ | |
290 | + if (ASIC_IS_DCE8(rdev)) { | |
291 | + WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000); | |
292 | + WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock); | |
293 | + } else { | |
294 | + WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | |
295 | + WREG32(DCCG_AUDIO_DTO1_MODULE, clock); | |
296 | + } | |
289 | 297 | } |
290 | 298 | |
291 | -void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) | |
299 | +void dce6_dp_enable(struct drm_encoder *encoder, bool enable) | |
292 | 300 | { |
293 | 301 | struct drm_device *dev = encoder->dev; |
294 | 302 | struct radeon_device *rdev = dev->dev_private; |
295 | 303 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
296 | 304 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
297 | - uint32_t offset; | |
298 | 305 | |
299 | 306 | if (!dig || !dig->afmt) |
300 | 307 | return; |
301 | 308 | |
302 | - offset = dig->afmt->offset; | |
303 | - | |
304 | 309 | if (enable) { |
305 | - if (dig->afmt->enabled) | |
306 | - return; | |
307 | - | |
308 | - WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); | |
309 | - WREG32(EVERGREEN_DP_SEC_CNTL + offset, | |
310 | - EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ | |
311 | - EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ | |
312 | - EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ | |
313 | - EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ | |
314 | - radeon_audio_enable(rdev, dig->afmt->pin, true); | |
310 | + WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, | |
311 | + EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); | |
312 | + WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, | |
313 | + EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ | |
314 | + EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ | |
315 | + EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ | |
316 | + EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ | |
315 | 317 | } else { |
316 | - if (!dig->afmt->enabled) | |
317 | - return; | |
318 | - | |
319 | - WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0); | |
320 | - radeon_audio_enable(rdev, dig->afmt->pin, false); | |
318 | + WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); | |
321 | 319 | } |
322 | 320 | |
323 | 321 | dig->afmt->enabled = enable; |
drivers/gpu/drm/radeon/evergreen.c
... | ... | @@ -4593,6 +4593,9 @@ |
4593 | 4593 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); |
4594 | 4594 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); |
4595 | 4595 | |
4596 | + /* posting read */ | |
4597 | + RREG32(SRBM_STATUS); | |
4598 | + | |
4596 | 4599 | return 0; |
4597 | 4600 | } |
4598 | 4601 |
drivers/gpu/drm/radeon/evergreen_hdmi.c
... | ... | @@ -272,7 +272,7 @@ |
272 | 272 | } |
273 | 273 | |
274 | 274 | void dce4_dp_audio_set_dto(struct radeon_device *rdev, |
275 | - struct radeon_crtc *crtc, unsigned int clock) | |
275 | + struct radeon_crtc *crtc, unsigned int clock) | |
276 | 276 | { |
277 | 277 | u32 value; |
278 | 278 | |
... | ... | @@ -294,7 +294,7 @@ |
294 | 294 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
295 | 295 | */ |
296 | 296 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); |
297 | - WREG32(DCCG_AUDIO_DTO1_MODULE, rdev->clock.max_pixel_clock * 10); | |
297 | + WREG32(DCCG_AUDIO_DTO1_MODULE, clock); | |
298 | 298 | } |
299 | 299 | |
300 | 300 | void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset) |
301 | 301 | |
... | ... | @@ -350,20 +350,9 @@ |
350 | 350 | struct drm_device *dev = encoder->dev; |
351 | 351 | struct radeon_device *rdev = dev->dev_private; |
352 | 352 | |
353 | - WREG32(HDMI_INFOFRAME_CONTROL0 + offset, | |
354 | - HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ | |
355 | - HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ | |
356 | - | |
357 | 353 | WREG32(AFMT_INFOFRAME_CONTROL0 + offset, |
358 | 354 | AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ |
359 | 355 | |
360 | - WREG32(HDMI_INFOFRAME_CONTROL1 + offset, | |
361 | - HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ | |
362 | - | |
363 | - WREG32(HDMI_AUDIO_PACKET_CONTROL + offset, | |
364 | - HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ | |
365 | - HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ | |
366 | - | |
367 | 356 | WREG32(AFMT_60958_0 + offset, |
368 | 357 | AFMT_60958_CS_CHANNEL_NUMBER_L(1)); |
369 | 358 | |
370 | 359 | |
... | ... | @@ -408,15 +397,19 @@ |
408 | 397 | if (!dig || !dig->afmt) |
409 | 398 | return; |
410 | 399 | |
411 | - /* Silent, r600_hdmi_enable will raise WARN for us */ | |
412 | - if (enable && dig->afmt->enabled) | |
413 | - return; | |
414 | - if (!enable && !dig->afmt->enabled) | |
415 | - return; | |
400 | + if (enable) { | |
401 | + WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, | |
402 | + HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ | |
416 | 403 | |
417 | - if (!enable && dig->afmt->pin) { | |
418 | - radeon_audio_enable(rdev, dig->afmt->pin, 0); | |
419 | - dig->afmt->pin = NULL; | |
404 | + WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, | |
405 | + HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ | |
406 | + HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ | |
407 | + | |
408 | + WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, | |
409 | + HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ | |
410 | + HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ | |
411 | + } else { | |
412 | + WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0); | |
420 | 413 | } |
421 | 414 | |
422 | 415 | dig->afmt->enabled = enable; |
423 | 416 | |
424 | 417 | |
425 | 418 | |
426 | 419 | |
427 | 420 | |
... | ... | @@ -425,33 +418,28 @@ |
425 | 418 | enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id); |
426 | 419 | } |
427 | 420 | |
428 | -void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) | |
421 | +void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) | |
429 | 422 | { |
430 | 423 | struct drm_device *dev = encoder->dev; |
431 | 424 | struct radeon_device *rdev = dev->dev_private; |
432 | 425 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
433 | 426 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
434 | - uint32_t offset; | |
435 | 427 | |
436 | 428 | if (!dig || !dig->afmt) |
437 | 429 | return; |
438 | 430 | |
439 | - offset = dig->afmt->offset; | |
440 | - | |
441 | 431 | if (enable) { |
442 | 432 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
443 | 433 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
444 | 434 | struct radeon_connector_atom_dig *dig_connector; |
445 | 435 | uint32_t val; |
446 | 436 | |
447 | - if (dig->afmt->enabled) | |
448 | - return; | |
437 | + WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, | |
438 | + EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); | |
449 | 439 | |
450 | - WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); | |
451 | - | |
452 | 440 | if (radeon_connector->con_priv) { |
453 | 441 | dig_connector = radeon_connector->con_priv; |
454 | - val = RREG32(EVERGREEN_DP_SEC_AUD_N + offset); | |
442 | + val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset); | |
455 | 443 | val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); |
456 | 444 | |
457 | 445 | if (dig_connector->dp_clock == 162000) |
458 | 446 | |
459 | 447 | |
460 | 448 | |
... | ... | @@ -459,21 +447,16 @@ |
459 | 447 | else |
460 | 448 | val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5); |
461 | 449 | |
462 | - WREG32(EVERGREEN_DP_SEC_AUD_N + offset, val); | |
450 | + WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val); | |
463 | 451 | } |
464 | 452 | |
465 | - WREG32(EVERGREEN_DP_SEC_CNTL + offset, | |
453 | + WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, | |
466 | 454 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ |
467 | 455 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ |
468 | 456 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ |
469 | 457 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ |
470 | - radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | |
471 | 458 | } else { |
472 | - if (!dig->afmt->enabled) | |
473 | - return; | |
474 | - | |
475 | - WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0); | |
476 | - radeon_audio_enable(rdev, dig->afmt->pin, 0); | |
459 | + WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); | |
477 | 460 | } |
478 | 461 | |
479 | 462 | dig->afmt->enabled = enable; |
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_hdmi.c
... | ... | @@ -476,17 +476,6 @@ |
476 | 476 | if (!dig || !dig->afmt) |
477 | 477 | return; |
478 | 478 | |
479 | - /* Silent, r600_hdmi_enable will raise WARN for us */ | |
480 | - if (enable && dig->afmt->enabled) | |
481 | - return; | |
482 | - if (!enable && !dig->afmt->enabled) | |
483 | - return; | |
484 | - | |
485 | - if (!enable && dig->afmt->pin) { | |
486 | - radeon_audio_enable(rdev, dig->afmt->pin, 0); | |
487 | - dig->afmt->pin = NULL; | |
488 | - } | |
489 | - | |
490 | 479 | /* Older chipsets require setting HDMI and routing manually */ |
491 | 480 | if (!ASIC_IS_DCE3(rdev)) { |
492 | 481 | if (enable) |
drivers/gpu/drm/radeon/radeon_audio.c
... | ... | @@ -101,8 +101,8 @@ |
101 | 101 | struct drm_display_mode *mode); |
102 | 102 | void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); |
103 | 103 | void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); |
104 | -void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable); | |
105 | -void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable); | |
104 | +void evergreen_dp_enable(struct drm_encoder *encoder, bool enable); | |
105 | +void dce6_dp_enable(struct drm_encoder *encoder, bool enable); | |
106 | 106 | |
107 | 107 | static const u32 pin_offsets[7] = |
108 | 108 | { |
... | ... | @@ -210,7 +210,7 @@ |
210 | 210 | .set_avi_packet = evergreen_set_avi_packet, |
211 | 211 | .set_audio_packet = dce4_set_audio_packet, |
212 | 212 | .mode_set = radeon_audio_dp_mode_set, |
213 | - .dpms = evergreen_enable_dp_audio_packets, | |
213 | + .dpms = evergreen_dp_enable, | |
214 | 214 | }; |
215 | 215 | |
216 | 216 | static struct radeon_audio_funcs dce6_hdmi_funcs = { |
... | ... | @@ -240,7 +240,7 @@ |
240 | 240 | .set_avi_packet = evergreen_set_avi_packet, |
241 | 241 | .set_audio_packet = dce4_set_audio_packet, |
242 | 242 | .mode_set = radeon_audio_dp_mode_set, |
243 | - .dpms = dce6_enable_dp_audio_packets, | |
243 | + .dpms = dce6_dp_enable, | |
244 | 244 | }; |
245 | 245 | |
246 | 246 | static void radeon_audio_interface_init(struct radeon_device *rdev) |
... | ... | @@ -452,7 +452,7 @@ |
452 | 452 | } |
453 | 453 | |
454 | 454 | void radeon_audio_detect(struct drm_connector *connector, |
455 | - enum drm_connector_status status) | |
455 | + enum drm_connector_status status) | |
456 | 456 | { |
457 | 457 | struct radeon_device *rdev; |
458 | 458 | struct radeon_encoder *radeon_encoder; |
459 | 459 | |
... | ... | @@ -483,14 +483,11 @@ |
483 | 483 | else |
484 | 484 | radeon_encoder->audio = rdev->audio.hdmi_funcs; |
485 | 485 | |
486 | - radeon_audio_write_speaker_allocation(connector->encoder); | |
487 | - radeon_audio_write_sad_regs(connector->encoder); | |
488 | - if (connector->encoder->crtc) | |
489 | - radeon_audio_write_latency_fields(connector->encoder, | |
490 | - &connector->encoder->crtc->mode); | |
486 | + dig->afmt->pin = radeon_audio_get_pin(connector->encoder); | |
491 | 487 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); |
492 | 488 | } else { |
493 | 489 | radeon_audio_enable(rdev, dig->afmt->pin, 0); |
490 | + dig->afmt->pin = NULL; | |
494 | 491 | } |
495 | 492 | } |
496 | 493 | |
497 | 494 | |
498 | 495 | |
499 | 496 | |
500 | 497 | |
... | ... | @@ -694,23 +691,22 @@ |
694 | 691 | * update the info frames with the data from the current display mode |
695 | 692 | */ |
696 | 693 | static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, |
697 | - struct drm_display_mode *mode) | |
694 | + struct drm_display_mode *mode) | |
698 | 695 | { |
699 | - struct radeon_device *rdev = encoder->dev->dev_private; | |
700 | 696 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
701 | 697 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
702 | 698 | |
703 | 699 | if (!dig || !dig->afmt) |
704 | 700 | return; |
705 | 701 | |
706 | - /* disable audio prior to setting up hw */ | |
707 | - dig->afmt->pin = radeon_audio_get_pin(encoder); | |
708 | - radeon_audio_enable(rdev, dig->afmt->pin, 0); | |
702 | + radeon_audio_set_mute(encoder, true); | |
709 | 703 | |
704 | + radeon_audio_write_speaker_allocation(encoder); | |
705 | + radeon_audio_write_sad_regs(encoder); | |
706 | + radeon_audio_write_latency_fields(encoder, mode); | |
710 | 707 | radeon_audio_set_dto(encoder, mode->clock); |
711 | 708 | radeon_audio_set_vbi_packet(encoder); |
712 | 709 | radeon_hdmi_set_color_depth(encoder); |
713 | - radeon_audio_set_mute(encoder, false); | |
714 | 710 | radeon_audio_update_acr(encoder, mode->clock); |
715 | 711 | radeon_audio_set_audio_packet(encoder); |
716 | 712 | radeon_audio_select_pin(encoder); |
... | ... | @@ -718,8 +714,7 @@ |
718 | 714 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) |
719 | 715 | return; |
720 | 716 | |
721 | - /* enable audio after to setting up hw */ | |
722 | - radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | |
717 | + radeon_audio_set_mute(encoder, false); | |
723 | 718 | } |
724 | 719 | |
725 | 720 | static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, |
726 | 721 | |
727 | 722 | |
... | ... | @@ -729,23 +724,26 @@ |
729 | 724 | struct radeon_device *rdev = dev->dev_private; |
730 | 725 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
731 | 726 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
727 | + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | |
728 | + struct radeon_connector *radeon_connector = to_radeon_connector(connector); | |
729 | + struct radeon_connector_atom_dig *dig_connector = | |
730 | + radeon_connector->con_priv; | |
732 | 731 | |
733 | 732 | if (!dig || !dig->afmt) |
734 | 733 | return; |
735 | 734 | |
736 | - /* disable audio prior to setting up hw */ | |
737 | - dig->afmt->pin = radeon_audio_get_pin(encoder); | |
738 | - radeon_audio_enable(rdev, dig->afmt->pin, 0); | |
739 | - | |
740 | - radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); | |
735 | + radeon_audio_write_speaker_allocation(encoder); | |
736 | + radeon_audio_write_sad_regs(encoder); | |
737 | + radeon_audio_write_latency_fields(encoder, mode); | |
738 | + if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev)) | |
739 | + radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); | |
740 | + else | |
741 | + radeon_audio_set_dto(encoder, dig_connector->dp_clock); | |
741 | 742 | radeon_audio_set_audio_packet(encoder); |
742 | 743 | radeon_audio_select_pin(encoder); |
743 | 744 | |
744 | 745 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) |
745 | 746 | return; |
746 | - | |
747 | - /* enable audio after to setting up hw */ | |
748 | - radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | |
749 | 747 | } |
750 | 748 | |
751 | 749 | void radeon_audio_mode_set(struct drm_encoder *encoder, |
drivers/gpu/drm/radeon/radeon_cs.c
... | ... | @@ -256,11 +256,13 @@ |
256 | 256 | u32 ring = RADEON_CS_RING_GFX; |
257 | 257 | s32 priority = 0; |
258 | 258 | |
259 | + INIT_LIST_HEAD(&p->validated); | |
260 | + | |
259 | 261 | if (!cs->num_chunks) { |
260 | 262 | return 0; |
261 | 263 | } |
264 | + | |
262 | 265 | /* get chunks */ |
263 | - INIT_LIST_HEAD(&p->validated); | |
264 | 266 | p->idx = 0; |
265 | 267 | p->ib.sa_bo = NULL; |
266 | 268 | p->const_ib.sa_bo = NULL; |
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/sid.h
... | ... | @@ -912,8 +912,8 @@ |
912 | 912 | |
913 | 913 | #define DCCG_AUDIO_DTO0_PHASE 0x05b0 |
914 | 914 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 |
915 | -#define DCCG_AUDIO_DTO1_PHASE 0x05b8 | |
916 | -#define DCCG_AUDIO_DTO1_MODULE 0x05bc | |
915 | +#define DCCG_AUDIO_DTO1_PHASE 0x05c0 | |
916 | +#define DCCG_AUDIO_DTO1_MODULE 0x05c4 | |
917 | 917 | |
918 | 918 | #define AFMT_AUDIO_SRC_CONTROL 0x713c |
919 | 919 | #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0) |
drivers/gpu/drm/ttm/ttm_bo.c
... | ... | @@ -74,7 +74,7 @@ |
74 | 74 | pr_err(" has_type: %d\n", man->has_type); |
75 | 75 | pr_err(" use_type: %d\n", man->use_type); |
76 | 76 | pr_err(" flags: 0x%08X\n", man->flags); |
77 | - pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset); | |
77 | + pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset); | |
78 | 78 | pr_err(" size: %llu\n", man->size); |
79 | 79 | pr_err(" available_caching: 0x%08X\n", man->available_caching); |
80 | 80 | pr_err(" default_caching: 0x%08X\n", man->default_caching); |
drivers/gpu/ipu-v3/ipu-di.c
include/drm/drm_mm.h
... | ... | @@ -68,8 +68,8 @@ |
68 | 68 | unsigned scanned_preceeds_hole : 1; |
69 | 69 | unsigned allocated : 1; |
70 | 70 | unsigned long color; |
71 | - unsigned long start; | |
72 | - unsigned long size; | |
71 | + u64 start; | |
72 | + u64 size; | |
73 | 73 | struct drm_mm *mm; |
74 | 74 | }; |
75 | 75 | |
76 | 76 | |
77 | 77 | |
... | ... | @@ -82,16 +82,16 @@ |
82 | 82 | unsigned int scan_check_range : 1; |
83 | 83 | unsigned scan_alignment; |
84 | 84 | unsigned long scan_color; |
85 | - unsigned long scan_size; | |
86 | - unsigned long scan_hit_start; | |
87 | - unsigned long scan_hit_end; | |
85 | + u64 scan_size; | |
86 | + u64 scan_hit_start; | |
87 | + u64 scan_hit_end; | |
88 | 88 | unsigned scanned_blocks; |
89 | - unsigned long scan_start; | |
90 | - unsigned long scan_end; | |
89 | + u64 scan_start; | |
90 | + u64 scan_end; | |
91 | 91 | struct drm_mm_node *prev_scanned_node; |
92 | 92 | |
93 | 93 | void (*color_adjust)(struct drm_mm_node *node, unsigned long color, |
94 | - unsigned long *start, unsigned long *end); | |
94 | + u64 *start, u64 *end); | |
95 | 95 | }; |
96 | 96 | |
97 | 97 | /** |
... | ... | @@ -124,7 +124,7 @@ |
124 | 124 | return mm->hole_stack.next; |
125 | 125 | } |
126 | 126 | |
127 | -static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node) | |
127 | +static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node) | |
128 | 128 | { |
129 | 129 | return hole_node->start + hole_node->size; |
130 | 130 | } |
131 | 131 | |
... | ... | @@ -140,13 +140,13 @@ |
140 | 140 | * Returns: |
141 | 141 | * Start of the subsequent hole. |
142 | 142 | */ |
143 | -static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node) | |
143 | +static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node) | |
144 | 144 | { |
145 | 145 | BUG_ON(!hole_node->hole_follows); |
146 | 146 | return __drm_mm_hole_node_start(hole_node); |
147 | 147 | } |
148 | 148 | |
149 | -static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node) | |
149 | +static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) | |
150 | 150 | { |
151 | 151 | return list_entry(hole_node->node_list.next, |
152 | 152 | struct drm_mm_node, node_list)->start; |
... | ... | @@ -163,7 +163,7 @@ |
163 | 163 | * Returns: |
164 | 164 | * End of the subsequent hole. |
165 | 165 | */ |
166 | -static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) | |
166 | +static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) | |
167 | 167 | { |
168 | 168 | return __drm_mm_hole_node_end(hole_node); |
169 | 169 | } |
... | ... | @@ -222,7 +222,7 @@ |
222 | 222 | |
223 | 223 | int drm_mm_insert_node_generic(struct drm_mm *mm, |
224 | 224 | struct drm_mm_node *node, |
225 | - unsigned long size, | |
225 | + u64 size, | |
226 | 226 | unsigned alignment, |
227 | 227 | unsigned long color, |
228 | 228 | enum drm_mm_search_flags sflags, |
... | ... | @@ -245,7 +245,7 @@ |
245 | 245 | */ |
246 | 246 | static inline int drm_mm_insert_node(struct drm_mm *mm, |
247 | 247 | struct drm_mm_node *node, |
248 | - unsigned long size, | |
248 | + u64 size, | |
249 | 249 | unsigned alignment, |
250 | 250 | enum drm_mm_search_flags flags) |
251 | 251 | { |
252 | 252 | |
... | ... | @@ -255,11 +255,11 @@ |
255 | 255 | |
256 | 256 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, |
257 | 257 | struct drm_mm_node *node, |
258 | - unsigned long size, | |
258 | + u64 size, | |
259 | 259 | unsigned alignment, |
260 | 260 | unsigned long color, |
261 | - unsigned long start, | |
262 | - unsigned long end, | |
261 | + u64 start, | |
262 | + u64 end, | |
263 | 263 | enum drm_mm_search_flags sflags, |
264 | 264 | enum drm_mm_allocator_flags aflags); |
265 | 265 | /** |
266 | 266 | |
... | ... | @@ -282,10 +282,10 @@ |
282 | 282 | */ |
283 | 283 | static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, |
284 | 284 | struct drm_mm_node *node, |
285 | - unsigned long size, | |
285 | + u64 size, | |
286 | 286 | unsigned alignment, |
287 | - unsigned long start, | |
288 | - unsigned long end, | |
287 | + u64 start, | |
288 | + u64 end, | |
289 | 289 | enum drm_mm_search_flags flags) |
290 | 290 | { |
291 | 291 | return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, |
292 | 292 | |
293 | 293 | |
294 | 294 | |
... | ... | @@ -296,21 +296,21 @@ |
296 | 296 | void drm_mm_remove_node(struct drm_mm_node *node); |
297 | 297 | void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); |
298 | 298 | void drm_mm_init(struct drm_mm *mm, |
299 | - unsigned long start, | |
300 | - unsigned long size); | |
299 | + u64 start, | |
300 | + u64 size); | |
301 | 301 | void drm_mm_takedown(struct drm_mm *mm); |
302 | 302 | bool drm_mm_clean(struct drm_mm *mm); |
303 | 303 | |
304 | 304 | void drm_mm_init_scan(struct drm_mm *mm, |
305 | - unsigned long size, | |
305 | + u64 size, | |
306 | 306 | unsigned alignment, |
307 | 307 | unsigned long color); |
308 | 308 | void drm_mm_init_scan_with_range(struct drm_mm *mm, |
309 | - unsigned long size, | |
309 | + u64 size, | |
310 | 310 | unsigned alignment, |
311 | 311 | unsigned long color, |
312 | - unsigned long start, | |
313 | - unsigned long end); | |
312 | + u64 start, | |
313 | + u64 end); | |
314 | 314 | bool drm_mm_scan_add_block(struct drm_mm_node *node); |
315 | 315 | bool drm_mm_scan_remove_block(struct drm_mm_node *node); |
316 | 316 |
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
... | ... | @@ -277,7 +277,7 @@ |
277 | 277 | bool has_type; |
278 | 278 | bool use_type; |
279 | 279 | uint32_t flags; |
280 | - unsigned long gpu_offset; | |
280 | + uint64_t gpu_offset; /* GPU address space is independent of CPU word size */ | |
281 | 281 | uint64_t size; |
282 | 282 | uint32_t available_caching; |
283 | 283 | uint32_t default_caching; |