Commit 6f19e7e5ae29b3df9061f9141ac138f60b8f416d
Exists in
master
and in
13 other branches
Merge tag 'drm-intel-fixes-2014-04-25' of git://anongit.freedesktop.org/drm-intel into drm-next
Fix regression with DVI and fix warns, and GM45 boot regression. * tag 'drm-intel-fixes-2014-04-25' of git://anongit.freedesktop.org/drm-intel: drm/i915: Move all ring resets before setting the HWS page drm/i915: Don't WARN nor handle unexpected hpd interrupts on gmch platforms drm/i915: Allow full PPGTT with param override drm/i915: Discard BIOS framebuffers too small to accommodate chosen mode drm/i915: get power domain in case the BIOS enabled eDP VDD drm/i915: Don't check gmch state on inherited configs drm/i915: Allow user modes to exceed DVI 165MHz limit
Showing 10 changed files Inline Diff
- drivers/gpu/drm/i915/i915_gem_gtt.c
- drivers/gpu/drm/i915/i915_irq.c
- drivers/gpu/drm/i915/i915_reg.h
- drivers/gpu/drm/i915/intel_display.c
- drivers/gpu/drm/i915/intel_dp.c
- drivers/gpu/drm/i915/intel_drv.h
- drivers/gpu/drm/i915/intel_fbdev.c
- drivers/gpu/drm/i915/intel_hdmi.c
- drivers/gpu/drm/i915/intel_ringbuffer.c
- drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/i915_gem_gtt.c
1 | /* | 1 | /* |
2 | * Copyright © 2010 Daniel Vetter | 2 | * Copyright © 2010 Daniel Vetter |
3 | * Copyright © 2011-2014 Intel Corporation | 3 | * Copyright © 2011-2014 Intel Corporation |
4 | * | 4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a | 5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the "Software"), | 6 | * copy of this software and associated documentation files (the "Software"), |
7 | * to deal in the Software without restriction, including without limitation | 7 | * to deal in the Software without restriction, including without limitation |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * and/or sell copies of the Software, and to permit persons to whom the | 9 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * Software is furnished to do so, subject to the following conditions: | 10 | * Software is furnished to do so, subject to the following conditions: |
11 | * | 11 | * |
12 | * The above copyright notice and this permission notice (including the next | 12 | * The above copyright notice and this permission notice (including the next |
13 | * paragraph) shall be included in all copies or substantial portions of the | 13 | * paragraph) shall be included in all copies or substantial portions of the |
14 | * Software. | 14 | * Software. |
15 | * | 15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | 19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | 21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
22 | * IN THE SOFTWARE. | 22 | * IN THE SOFTWARE. |
23 | * | 23 | * |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/seq_file.h> | 26 | #include <linux/seq_file.h> |
27 | #include <drm/drmP.h> | 27 | #include <drm/drmP.h> |
28 | #include <drm/i915_drm.h> | 28 | #include <drm/i915_drm.h> |
29 | #include "i915_drv.h" | 29 | #include "i915_drv.h" |
30 | #include "i915_trace.h" | 30 | #include "i915_trace.h" |
31 | #include "intel_drv.h" | 31 | #include "intel_drv.h" |
32 | 32 | ||
33 | static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv); | 33 | static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv); |
34 | 34 | ||
35 | bool intel_enable_ppgtt(struct drm_device *dev, bool full) | 35 | bool intel_enable_ppgtt(struct drm_device *dev, bool full) |
36 | { | 36 | { |
37 | if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) | 37 | if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) |
38 | return false; | 38 | return false; |
39 | 39 | ||
40 | if (i915.enable_ppgtt == 1 && full) | 40 | if (i915.enable_ppgtt == 1 && full) |
41 | return false; | 41 | return false; |
42 | 42 | ||
43 | #ifdef CONFIG_INTEL_IOMMU | 43 | #ifdef CONFIG_INTEL_IOMMU |
44 | /* Disable ppgtt on SNB if VT-d is on. */ | 44 | /* Disable ppgtt on SNB if VT-d is on. */ |
45 | if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { | 45 | if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { |
46 | DRM_INFO("Disabling PPGTT because VT-d is on\n"); | 46 | DRM_INFO("Disabling PPGTT because VT-d is on\n"); |
47 | return false; | 47 | return false; |
48 | } | 48 | } |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | /* Full ppgtt disabled by default for now due to issues. */ | 51 | /* Full ppgtt disabled by default for now due to issues. */ |
52 | if (full) | 52 | if (full) |
53 | return false; /* HAS_PPGTT(dev) */ | 53 | return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2); |
54 | else | 54 | else |
55 | return HAS_ALIASING_PPGTT(dev); | 55 | return HAS_ALIASING_PPGTT(dev); |
56 | } | 56 | } |
57 | 57 | ||
#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
/* GEN8 PTEs and PDEs are both 64-bit words. */
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;

/* PPGTT stuff */
/* Fold physical address bits 39:32 into PTE bits 11:4 (HSW uses 10:4). */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/* Cacheability Control is a 4-bit value. The low three bits are stored in *
 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)

/* 512 64-bit entries per 4K page for both PTE and PDE pages. */
#define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))

/* GEN8 legacy style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 | 11:0
 * PDPE  | PDE   | PTE   | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 */
#define GEN8_PDPE_SHIFT			30
#define GEN8_PDPE_MASK			0x3
#define GEN8_PDE_SHIFT			21
#define GEN8_PDE_MASK			0x1ff
#define GEN8_PTE_SHIFT			12
#define GEN8_PTE_MASK			0x1ff

/* PPAT (page attribute table) index encodings reused from x86 PAT bits. */
#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */

/* Forward declarations for the vma bind/unbind and enable callbacks. */
static void ppgtt_bind_vma(struct i915_vma *vma,
			   enum i915_cache_level cache_level,
			   u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);
static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
117 | 117 | ||
118 | static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, | 118 | static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, |
119 | enum i915_cache_level level, | 119 | enum i915_cache_level level, |
120 | bool valid) | 120 | bool valid) |
121 | { | 121 | { |
122 | gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0; | 122 | gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0; |
123 | pte |= addr; | 123 | pte |= addr; |
124 | if (level != I915_CACHE_NONE) | 124 | if (level != I915_CACHE_NONE) |
125 | pte |= PPAT_CACHED_INDEX; | 125 | pte |= PPAT_CACHED_INDEX; |
126 | else | 126 | else |
127 | pte |= PPAT_UNCACHED_INDEX; | 127 | pte |= PPAT_UNCACHED_INDEX; |
128 | return pte; | 128 | return pte; |
129 | } | 129 | } |
130 | 130 | ||
131 | static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev, | 131 | static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev, |
132 | dma_addr_t addr, | 132 | dma_addr_t addr, |
133 | enum i915_cache_level level) | 133 | enum i915_cache_level level) |
134 | { | 134 | { |
135 | gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW; | 135 | gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW; |
136 | pde |= addr; | 136 | pde |= addr; |
137 | if (level != I915_CACHE_NONE) | 137 | if (level != I915_CACHE_NONE) |
138 | pde |= PPAT_CACHED_PDE_INDEX; | 138 | pde |= PPAT_CACHED_PDE_INDEX; |
139 | else | 139 | else |
140 | pde |= PPAT_UNCACHED_INDEX; | 140 | pde |= PPAT_UNCACHED_INDEX; |
141 | return pde; | 141 | return pde; |
142 | } | 142 | } |
143 | 143 | ||
/* Sandybridge PTE encoding: SNB has no L3 cache control, so both LLC
 * levels map to the same cacheable encoding. */
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		/* Unknown cache level: leave the PTE uncached bits clear
		 * but complain loudly. */
		WARN_ON(1);
	}

	return pte;
}
165 | 165 | ||
/* Ivybridge PTE encoding: unlike SNB, gen7 distinguishes L3+LLC caching
 * from plain LLC caching. */
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		/* Unknown cache level: warn and fall through uncached-less. */
		WARN_ON(1);
	}

	return pte;
}
189 | 189 | ||
190 | #define BYT_PTE_WRITEABLE (1 << 1) | 190 | #define BYT_PTE_WRITEABLE (1 << 1) |
191 | #define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) | 191 | #define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) |
192 | 192 | ||
193 | static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, | 193 | static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, |
194 | enum i915_cache_level level, | 194 | enum i915_cache_level level, |
195 | bool valid) | 195 | bool valid) |
196 | { | 196 | { |
197 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; | 197 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; |
198 | pte |= GEN6_PTE_ADDR_ENCODE(addr); | 198 | pte |= GEN6_PTE_ADDR_ENCODE(addr); |
199 | 199 | ||
200 | /* Mark the page as writeable. Other platforms don't have a | 200 | /* Mark the page as writeable. Other platforms don't have a |
201 | * setting for read-only/writable, so this matches that behavior. | 201 | * setting for read-only/writable, so this matches that behavior. |
202 | */ | 202 | */ |
203 | pte |= BYT_PTE_WRITEABLE; | 203 | pte |= BYT_PTE_WRITEABLE; |
204 | 204 | ||
205 | if (level != I915_CACHE_NONE) | 205 | if (level != I915_CACHE_NONE) |
206 | pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; | 206 | pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; |
207 | 207 | ||
208 | return pte; | 208 | return pte; |
209 | } | 209 | } |
210 | 210 | ||
211 | static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, | 211 | static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, |
212 | enum i915_cache_level level, | 212 | enum i915_cache_level level, |
213 | bool valid) | 213 | bool valid) |
214 | { | 214 | { |
215 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; | 215 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; |
216 | pte |= HSW_PTE_ADDR_ENCODE(addr); | 216 | pte |= HSW_PTE_ADDR_ENCODE(addr); |
217 | 217 | ||
218 | if (level != I915_CACHE_NONE) | 218 | if (level != I915_CACHE_NONE) |
219 | pte |= HSW_WB_LLC_AGE3; | 219 | pte |= HSW_WB_LLC_AGE3; |
220 | 220 | ||
221 | return pte; | 221 | return pte; |
222 | } | 222 | } |
223 | 223 | ||
224 | static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, | 224 | static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, |
225 | enum i915_cache_level level, | 225 | enum i915_cache_level level, |
226 | bool valid) | 226 | bool valid) |
227 | { | 227 | { |
228 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; | 228 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; |
229 | pte |= HSW_PTE_ADDR_ENCODE(addr); | 229 | pte |= HSW_PTE_ADDR_ENCODE(addr); |
230 | 230 | ||
231 | switch (level) { | 231 | switch (level) { |
232 | case I915_CACHE_NONE: | 232 | case I915_CACHE_NONE: |
233 | break; | 233 | break; |
234 | case I915_CACHE_WT: | 234 | case I915_CACHE_WT: |
235 | pte |= HSW_WT_ELLC_LLC_AGE3; | 235 | pte |= HSW_WT_ELLC_LLC_AGE3; |
236 | break; | 236 | break; |
237 | default: | 237 | default: |
238 | pte |= HSW_WB_ELLC_LLC_AGE3; | 238 | pte |= HSW_WB_ELLC_LLC_AGE3; |
239 | break; | 239 | break; |
240 | } | 240 | } |
241 | 241 | ||
242 | return pte; | 242 | return pte; |
243 | } | 243 | } |
244 | 244 | ||
/* Broadwell Page Directory Pointer Descriptors */
/* Write one 64-bit PDP value into ring-specific registers, either
 * directly (synchronous) or via two LRI commands emitted to the ring. */
static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
			  uint64_t val, bool synchronous)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	/* Only four PDP slots exist per ring (2-bit PDPE field). */
	BUG_ON(entry >= 4);

	if (synchronous) {
		/* Direct register writes; upper dword first, matching the
		 * LRI sequence below. */
		I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
		I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
		return 0;
	}

	/* 6 dwords: two LOAD_REGISTER_IMM packets of (cmd, reg, value). */
	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
	intel_ring_emit(ring, (u32)(val >> 32));
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
	intel_ring_emit(ring, (u32)(val));
	intel_ring_advance(ring);

	return 0;
}
274 | 274 | ||
/* Load all used page-directory addresses into the ring's PDP registers
 * so the ring translates through this PPGTT. */
static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_ring_buffer *ring,
			  bool synchronous)
{
	int i, ret;

	/* bit of a hack to find the actual last used pd */
	int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;

	/* Program highest PDP entry first, stopping on the first error. */
	for (i = used_pd - 1; i >= 0; i--) {
		dma_addr_t addr = ppgtt->pd_dma_addr[i];
		ret = gen8_write_pdp(ring, i, addr, synchronous);
		if (ret)
			return ret;
	}

	return 0;
}
293 | 293 | ||
/* Point every PTE in [start, start+length) at the scratch page.
 * @use_scratch selects whether the scratch PTE is written as valid. */
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_gtt_pte_t *pt_vaddr, scratch_pte;
	/* Decompose the start address into the 3-level indices (see the
	 * GEN8 layout comment near the GEN8_*_SHIFT defines). */
	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned last_pte, i;

	scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
				      I915_CACHE_LLC, use_scratch);

	while (num_entries) {
		struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];

		/* Clamp the write to the end of the current page table. */
		last_pte = pte + num_entries;
		if (last_pte > GEN8_PTES_PER_PAGE)
			last_pte = GEN8_PTES_PER_PAGE;

		pt_vaddr = kmap_atomic(page_table);

		for (i = pte; i < last_pte; i++) {
			pt_vaddr[i] = scratch_pte;
			num_entries--;
		}

		kunmap_atomic(pt_vaddr);

		/* Advance to the next page table, carrying into the PDPE. */
		pte = 0;
		if (++pde == GEN8_PDES_PER_PAGE) {
			pdpe++;
			pde = 0;
		}
	}
}
334 | 334 | ||
/* Write PTEs for every page of @pages starting at GPU address @start,
 * walking the 3-level GEN8 page tables and mapping one table at a time. */
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_gtt_pte_t *pt_vaddr;
	/* Decompose @start into PDPE/PDE/PTE indices. */
	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
	struct sg_page_iter sg_iter;

	/* Lazily mapped: NULL means the current page table isn't mapped. */
	pt_vaddr = NULL;

	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		/* Running past the last PDP would walk off the table. */
		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
			break;

		if (pt_vaddr == NULL)
			pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);

		pt_vaddr[pte] =
			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
					cache_level, true);
		/* On page-table overflow, unmap and carry into PDE/PDPE. */
		if (++pte == GEN8_PTES_PER_PAGE) {
			kunmap_atomic(pt_vaddr);
			pt_vaddr = NULL;
			if (++pde == GEN8_PDES_PER_PAGE) {
				pdpe++;
				pde = 0;
			}
			pte = 0;
		}
	}
	/* Drop the mapping left over from a partially-filled final table. */
	if (pt_vaddr)
		kunmap_atomic(pt_vaddr);
}
373 | 373 | ||
374 | static void gen8_free_page_tables(struct page **pt_pages) | 374 | static void gen8_free_page_tables(struct page **pt_pages) |
375 | { | 375 | { |
376 | int i; | 376 | int i; |
377 | 377 | ||
378 | if (pt_pages == NULL) | 378 | if (pt_pages == NULL) |
379 | return; | 379 | return; |
380 | 380 | ||
381 | for (i = 0; i < GEN8_PDES_PER_PAGE; i++) | 381 | for (i = 0; i < GEN8_PDES_PER_PAGE; i++) |
382 | if (pt_pages[i]) | 382 | if (pt_pages[i]) |
383 | __free_pages(pt_pages[i], 0); | 383 | __free_pages(pt_pages[i], 0); |
384 | } | 384 | } |
385 | 385 | ||
/* Release all CPU-side memory of a GEN8 PPGTT: per-directory page
 * tables, their bookkeeping arrays, and the page-directory pages.
 * DMA mappings must already have been torn down (gen8_ppgtt_unmap_pages). */
static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
		kfree(ppgtt->gen8_pt_pages[i]);
		kfree(ppgtt->gen8_pt_dma_addr[i]);
	}

	/* The page directories were allocated as one higher-order block. */
	__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}
398 | 398 | ||
/* Undo the PCI DMA mappings of every page directory and page table.
 * Safe on partially-mapped PPGTTs: zero addresses are skipped. */
static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
	struct pci_dev *hwdev = ppgtt->base.dev->pdev;
	int i, j;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		/* TODO: In the future we'll support sparse mappings, so this
		 * will have to change. */
		if (!ppgtt->pd_dma_addr[i])
			continue;

		pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);

		/* Then unmap each page table hanging off this directory. */
		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
			if (addr)
				pci_unmap_page(hwdev, addr, PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
		}
	}
}
421 | 421 | ||
/* Address-space cleanup callback: unlink the VM, tear down its drm_mm
 * allocator, then drop DMA mappings and backing memory. */
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	list_del(&vm->global_link);
	drm_mm_takedown(&vm->mm);

	/* Unmap before freeing so no DMA address outlives its page. */
	gen8_ppgtt_unmap_pages(ppgtt);
	gen8_ppgtt_free(ppgtt);
}
433 | 433 | ||
434 | static struct page **__gen8_alloc_page_tables(void) | 434 | static struct page **__gen8_alloc_page_tables(void) |
435 | { | 435 | { |
436 | struct page **pt_pages; | 436 | struct page **pt_pages; |
437 | int i; | 437 | int i; |
438 | 438 | ||
439 | pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL); | 439 | pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL); |
440 | if (!pt_pages) | 440 | if (!pt_pages) |
441 | return ERR_PTR(-ENOMEM); | 441 | return ERR_PTR(-ENOMEM); |
442 | 442 | ||
443 | for (i = 0; i < GEN8_PDES_PER_PAGE; i++) { | 443 | for (i = 0; i < GEN8_PDES_PER_PAGE; i++) { |
444 | pt_pages[i] = alloc_page(GFP_KERNEL); | 444 | pt_pages[i] = alloc_page(GFP_KERNEL); |
445 | if (!pt_pages[i]) | 445 | if (!pt_pages[i]) |
446 | goto bail; | 446 | goto bail; |
447 | } | 447 | } |
448 | 448 | ||
449 | return pt_pages; | 449 | return pt_pages; |
450 | 450 | ||
451 | bail: | 451 | bail: |
452 | gen8_free_page_tables(pt_pages); | 452 | gen8_free_page_tables(pt_pages); |
453 | kfree(pt_pages); | 453 | kfree(pt_pages); |
454 | return ERR_PTR(-ENOMEM); | 454 | return ERR_PTR(-ENOMEM); |
455 | } | 455 | } |
456 | 456 | ||
/* Allocate page tables for @max_pdp page directories, all-or-nothing:
 * on any failure everything allocated here is freed and an errno returned. */
static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
					   const int max_pdp)
{
	struct page **pt_pages[GEN8_LEGACY_PDPS];
	int i, ret;

	for (i = 0; i < max_pdp; i++) {
		pt_pages[i] = __gen8_alloc_page_tables();
		if (IS_ERR(pt_pages[i])) {
			ret = PTR_ERR(pt_pages[i]);
			goto unwind_out;
		}
	}

	/* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
	 * "atomic" - for cleanup purposes.
	 */
	for (i = 0; i < max_pdp; i++)
		ppgtt->gen8_pt_pages[i] = pt_pages[i];

	return 0;

unwind_out:
	/* Free only the directories allocated before the failure. */
	while (i--) {
		gen8_free_page_tables(pt_pages[i]);
		kfree(pt_pages[i]);
	}

	return ret;
}
487 | 487 | ||
488 | static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt) | 488 | static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt) |
489 | { | 489 | { |
490 | int i; | 490 | int i; |
491 | 491 | ||
492 | for (i = 0; i < ppgtt->num_pd_pages; i++) { | 492 | for (i = 0; i < ppgtt->num_pd_pages; i++) { |
493 | ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE, | 493 | ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE, |
494 | sizeof(dma_addr_t), | 494 | sizeof(dma_addr_t), |
495 | GFP_KERNEL); | 495 | GFP_KERNEL); |
496 | if (!ppgtt->gen8_pt_dma_addr[i]) | 496 | if (!ppgtt->gen8_pt_dma_addr[i]) |
497 | return -ENOMEM; | 497 | return -ENOMEM; |
498 | } | 498 | } |
499 | 499 | ||
500 | return 0; | 500 | return 0; |
501 | } | 501 | } |
502 | 502 | ||
503 | static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt, | 503 | static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt, |
504 | const int max_pdp) | 504 | const int max_pdp) |
505 | { | 505 | { |
506 | ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT)); | 506 | ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT)); |
507 | if (!ppgtt->pd_pages) | 507 | if (!ppgtt->pd_pages) |
508 | return -ENOMEM; | 508 | return -ENOMEM; |
509 | 509 | ||
510 | ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT); | 510 | ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT); |
511 | BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS); | 511 | BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS); |
512 | 512 | ||
513 | return 0; | 513 | return 0; |
514 | } | 514 | } |
515 | 515 | ||
/*
 * Allocate all CPU-side backing storage for a GEN8 PPGTT: the page
 * directory pages, the per-directory page tables and the dma_addr_t
 * bookkeeping arrays. No DMA mappings are created here. Returns 0 on
 * success; on failure everything allocated so far is released.
 */
static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
			    const int max_pdp)
{
	int ret;

	ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
	if (ret)
		return ret;

	ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
	if (ret) {
		/* Only the directory pages exist at this point; free them
		 * directly rather than going through gen8_ppgtt_free(). */
		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
		return ret;
	}

	/* Total number of PDEs across all page directories. Set before the
	 * calls below — NOTE(review): gen8_ppgtt_free() is defined elsewhere
	 * and presumably walks this count; confirm before reordering. */
	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;

	ret = gen8_ppgtt_allocate_dma(ppgtt);
	if (ret)
		gen8_ppgtt_free(ppgtt);

	return ret;
}
539 | 539 | ||
540 | static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt, | 540 | static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt, |
541 | const int pd) | 541 | const int pd) |
542 | { | 542 | { |
543 | dma_addr_t pd_addr; | 543 | dma_addr_t pd_addr; |
544 | int ret; | 544 | int ret; |
545 | 545 | ||
546 | pd_addr = pci_map_page(ppgtt->base.dev->pdev, | 546 | pd_addr = pci_map_page(ppgtt->base.dev->pdev, |
547 | &ppgtt->pd_pages[pd], 0, | 547 | &ppgtt->pd_pages[pd], 0, |
548 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 548 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
549 | 549 | ||
550 | ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr); | 550 | ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr); |
551 | if (ret) | 551 | if (ret) |
552 | return ret; | 552 | return ret; |
553 | 553 | ||
554 | ppgtt->pd_dma_addr[pd] = pd_addr; | 554 | ppgtt->pd_dma_addr[pd] = pd_addr; |
555 | 555 | ||
556 | return 0; | 556 | return 0; |
557 | } | 557 | } |
558 | 558 | ||
559 | static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt, | 559 | static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt, |
560 | const int pd, | 560 | const int pd, |
561 | const int pt) | 561 | const int pt) |
562 | { | 562 | { |
563 | dma_addr_t pt_addr; | 563 | dma_addr_t pt_addr; |
564 | struct page *p; | 564 | struct page *p; |
565 | int ret; | 565 | int ret; |
566 | 566 | ||
567 | p = ppgtt->gen8_pt_pages[pd][pt]; | 567 | p = ppgtt->gen8_pt_pages[pd][pt]; |
568 | pt_addr = pci_map_page(ppgtt->base.dev->pdev, | 568 | pt_addr = pci_map_page(ppgtt->base.dev->pdev, |
569 | p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 569 | p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
570 | ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr); | 570 | ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr); |
571 | if (ret) | 571 | if (ret) |
572 | return ret; | 572 | return ret; |
573 | 573 | ||
574 | ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr; | 574 | ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr; |
575 | 575 | ||
576 | return 0; | 576 | return 0; |
577 | } | 577 | } |
578 | 578 | ||
/**
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
 *
 * FIXME: split allocation into smaller pieces. For now we only ever do this
 * once, but with full PPGTT, the multiple contiguous allocations will be bad.
 * TODO: Do something with the size parameter
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
{
	/* One PDP per 1GB of address space, rounded up. */
	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
	const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
	int i, j, ret;

	if (size % (1<<30))
		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);

	/* 1. Do all our allocations for page directories and page tables. */
	ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
	if (ret)
		return ret;

	/*
	 * 2. Create DMA mappings for the page directories and page tables.
	 */
	for (i = 0; i < max_pdp; i++) {
		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
		if (ret)
			goto bail;

		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
			if (ret)
				goto bail;
		}
	}

	/*
	 * 3. Map all the page directory entries to point to the page tables
	 * we've allocated.
	 *
	 * For now, the PPGTT helper functions all require that the PDEs are
	 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
	 * will never need to touch the PDEs again.
	 */
	for (i = 0; i < max_pdp; i++) {
		gen8_ppgtt_pde_t *pd_vaddr;
		pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
						      I915_CACHE_LLC);
		}
		kunmap_atomic(pd_vaddr);
	}

	/* Hook up the vfuncs and address-space limits now that the tables
	 * are in place. */
	ppgtt->enable = gen8_ppgtt_enable;
	ppgtt->switch_mm = gen8_mm_switch;
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;

	/* Point every PTE at the scratch page to start with. */
	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);

	DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
			 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
			 ppgtt->num_pd_entries,
			 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
	return 0;

bail:
	gen8_ppgtt_unmap_pages(ppgtt);
	gen8_ppgtt_free(ppgtt);
	return ret;
}
659 | 659 | ||
/*
 * Dump the gen6 PPGTT state into a seq_file (debugfs). For each PDE we
 * print its value, cross-checking it against the encoding expected from
 * the page table's recorded DMA address, then print every group of four
 * PTEs that contains at least one entry not pointing at scratch.
 */
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
	struct i915_address_space *vm = &ppgtt->base;
	gen6_gtt_pte_t __iomem *pd_addr;
	gen6_gtt_pte_t scratch_pte;
	uint32_t pd_entry;
	int pte, pde;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);

	/* The PDEs live inside the global GTT (gsm) at pd_offset. */
	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);

	seq_printf(m, "  VM %p (pd_offset %x-%x):\n", vm,
		   ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
	for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
		u32 expected;
		gen6_gtt_pte_t *pt_vaddr;
		dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
		pd_entry = readl(pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
		/* Scan PTEs four at a time; skip groups that are all scratch. */
		for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
			unsigned long va =
				(pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, "  SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_atomic(pt_vaddr);
	}
}
715 | 715 | ||
/*
 * Write every PDE of this ppgtt into its slot in the global GTT so each
 * one points at the corresponding page table's DMA address, then flush
 * the writes with a posting read.
 */
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	WARN_ON(ppgtt->pd_offset & 0x3f); /* must be 64B (cacheline) aligned */
	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	/* Posting read: make sure all PDE writes have landed. */
	readl(pd_addr);
}
737 | 737 | ||
738 | static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) | 738 | static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) |
739 | { | 739 | { |
740 | BUG_ON(ppgtt->pd_offset & 0x3f); | 740 | BUG_ON(ppgtt->pd_offset & 0x3f); |
741 | 741 | ||
742 | return (ppgtt->pd_offset / 64) << 16; | 742 | return (ppgtt->pd_offset / 64) << 16; |
743 | } | 743 | } |
744 | 744 | ||
/*
 * Load this ppgtt's page directory into @ring's PP_DIR registers
 * (Haswell). When @synchronous, or while a GPU reset is in progress,
 * the registers are written directly via MMIO; otherwise the load is
 * emitted into the ring with MI_LOAD_REGISTER_IMM so it takes effect in
 * submission order. Returns 0 on success or a negative error code.
 */
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct intel_ring_buffer *ring,
			 bool synchronous)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* If we're in reset, we can assume the GPU is sufficiently idle to
	 * manually frob these bits. Ideally we could use the ring functions,
	 * except our error handling makes it quite difficult (can't use
	 * intel_ring_begin, ring->flush, or intel_ring_advance)
	 *
	 * FIXME: We should try not to special case reset
	 */
	if (synchronous ||
	    i915_reset_in_progress(&dev_priv->gpu_error)) {
		/* Only the aliasing PPGTT is expected on the MMIO path. */
		WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
		POSTING_READ(RING_PP_DIR_BASE(ring));
		return 0;
	}

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* One LRI writing both DCLV and the PD base, padded with a NOOP. */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
788 | 788 | ||
/*
 * Load this ppgtt's page directory into @ring's PP_DIR registers (gen7:
 * IVB/VLV). Same structure as hsw_mm_switch(), but non-render rings
 * need an extra flush after the LRI to invalidate their TLBs. Returns 0
 * on success or a negative error code.
 */
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_ring_buffer *ring,
			  bool synchronous)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* If we're in reset, we can assume the GPU is sufficiently idle to
	 * manually frob these bits. Ideally we could use the ring functions,
	 * except our error handling makes it quite difficult (can't use
	 * intel_ring_begin, ring->flush, or intel_ring_advance)
	 *
	 * FIXME: We should try not to special case reset
	 */
	if (synchronous ||
	    i915_reset_in_progress(&dev_priv->gpu_error)) {
		/* Only the aliasing PPGTT is expected on the MMIO path. */
		WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
		POSTING_READ(RING_PP_DIR_BASE(ring));
		return 0;
	}

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* One LRI writing both DCLV and the PD base, padded with a NOOP. */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	/* XXX: RCS is the only one to auto invalidate the TLBs? */
	if (ring->id != RCS) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
		if (ret)
			return ret;
	}

	return 0;
}
839 | 839 | ||
/*
 * Gen6 page-directory load. Only the synchronous MMIO path is
 * implemented; an asynchronous request is a successful no-op.
 */
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_ring_buffer *ring,
			  bool synchronous)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!synchronous)
		return 0;

	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));

	/* Posting read: make sure the register writes have landed. */
	POSTING_READ(RING_PP_DIR_DCLV(ring));

	return 0;
}
857 | 857 | ||
/*
 * Enable GEN8 PPGTT: set the PPGTT-enable bit in every ring's mode
 * register and, for aliasing PPGTT, do the single synchronous
 * page-directory switch. On error the enable bit is cleared again on
 * all rings and the error is returned.
 */
static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int j, ret;

	for_each_ring(ring, dev_priv, j) {
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		/* We promise to do a switch later with FULL PPGTT. If this is
		 * aliasing, this is the one and only switch we'll do */
		if (USES_FULL_PPGTT(dev))
			continue;

		ret = ppgtt->switch_mm(ppgtt, ring, true);
		if (ret)
			goto err_out;
	}

	return 0;

err_out:
	/* Undo the enable on every ring, not just the ones done so far. */
	for_each_ring(ring, dev_priv, j)
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
	return ret;
}
887 | 887 | ||
/*
 * Enable GEN7 PPGTT: program the GAC_ECO_BITS/GAM_ECOCHK PPGTT cache
 * control bits (Haswell uses the WB setting, other gen7 parts the LLC
 * setting with GFDT cleared), then set the PPGTT-enable bit per ring
 * and, unless full PPGTT will do its own switch later, load the page
 * directory on each ring.
 */
static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	uint32_t ecochk, ecobits;
	int i;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_ring(ring, dev_priv, i) {
		int ret;
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		/* We promise to do a switch later with FULL PPGTT. If this is
		 * aliasing, this is the one and only switch we'll do */
		if (USES_FULL_PPGTT(dev))
			continue;

		ret = ppgtt->switch_mm(ppgtt, ring, true);
		if (ret)
			return ret;
	}

	return 0;
}
926 | 926 | ||
/*
 * Enable GEN6 PPGTT: set the required bits in GAC_ECO_BITS, GAB_CTL and
 * GAM_ECOCHK, flip the global GFX_MODE PPGTT enable (GFX_MODE is global
 * on gen6, unlike gen7+), then do the synchronous page-directory load
 * on every ring.
 */
static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	uint32_t ecochk, gab_ctl, ecobits;
	int i;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

	for_each_ring(ring, dev_priv, i) {
		int ret = ppgtt->switch_mm(ppgtt, ring, true);
		if (ret)
			return ret;
	}

	return 0;
}
955 | 955 | ||
/* PPGTT support for Sandybridge/Gen6 and later */
/*
 * Point every PTE in [start, start + length) at the scratch page so a
 * stale entry can never reach real memory.  The range is walked one
 * page table (I915_PPGTT_PT_ENTRIES entries) at a time because each
 * table must be kmap_atomic()-mapped before it can be written.
 * NOTE(review): @use_scratch is ignored here; gen6 always writes the
 * scratch PTE.
 */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;	/* table index */
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; /* entry in table */
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);

	while (num_entries) {
		/* Clamp this batch to the end of the current page table. */
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;	/* subsequent tables start at entry 0 */
		act_pt++;
	}
}
990 | 990 | ||
/*
 * Write one PTE per page of @pages starting at GTT offset @start.  The
 * current page table is kept kmap_atomic()-mapped until it fills up
 * (I915_PPGTT_PT_ENTRIES) and is only then exchanged for the next one,
 * so each table is mapped at most once per call.
 */
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = NULL;
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		if (pt_vaddr == NULL)
			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		pt_vaddr[act_pte] =
			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
				       cache_level, true);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			pt_vaddr = NULL;
			act_pt++;
			act_pte = 0;
		}
	}
	/* Drop the mapping of a partially-filled final table. */
	if (pt_vaddr)
		kunmap_atomic(pt_vaddr);
}
1022 | 1022 | ||
1023 | static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt) | 1023 | static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt) |
1024 | { | 1024 | { |
1025 | int i; | 1025 | int i; |
1026 | 1026 | ||
1027 | if (ppgtt->pt_dma_addr) { | 1027 | if (ppgtt->pt_dma_addr) { |
1028 | for (i = 0; i < ppgtt->num_pd_entries; i++) | 1028 | for (i = 0; i < ppgtt->num_pd_entries; i++) |
1029 | pci_unmap_page(ppgtt->base.dev->pdev, | 1029 | pci_unmap_page(ppgtt->base.dev->pdev, |
1030 | ppgtt->pt_dma_addr[i], | 1030 | ppgtt->pt_dma_addr[i], |
1031 | 4096, PCI_DMA_BIDIRECTIONAL); | 1031 | 4096, PCI_DMA_BIDIRECTIONAL); |
1032 | } | 1032 | } |
1033 | } | 1033 | } |
1034 | 1034 | ||
1035 | static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt) | 1035 | static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt) |
1036 | { | 1036 | { |
1037 | int i; | 1037 | int i; |
1038 | 1038 | ||
1039 | kfree(ppgtt->pt_dma_addr); | 1039 | kfree(ppgtt->pt_dma_addr); |
1040 | for (i = 0; i < ppgtt->num_pd_entries; i++) | 1040 | for (i = 0; i < ppgtt->num_pd_entries; i++) |
1041 | __free_page(ppgtt->pt_pages[i]); | 1041 | __free_page(ppgtt->pt_pages[i]); |
1042 | kfree(ppgtt->pt_pages); | 1042 | kfree(ppgtt->pt_pages); |
1043 | } | 1043 | } |
1044 | 1044 | ||
/*
 * i915_address_space::cleanup for gen6/7 PPGTTs: unlink the VM from the
 * global list, tear down its range manager and the GGTT node holding
 * the page directory, then unmap and free the page tables.
 */
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	list_del(&vm->global_link);
	drm_mm_takedown(&ppgtt->base.mm);
	drm_mm_remove_node(&ppgtt->node);

	gen6_ppgtt_unmap_pages(ppgtt);
	gen6_ppgtt_free(ppgtt);
}
1057 | 1057 | ||
/*
 * Reserve GGTT address space for this PPGTT's page directory (the PDEs
 * live in the GGTT).  On -ENOSPC, evict something and retry exactly
 * once.  Returns 0 on success or a negative error code.
 */
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool retried = false;
	int ret;

	/* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
alloc:
	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
						  &ppgtt->node, GEN6_PD_SIZE,
						  GEN6_PD_ALIGN, 0,
						  0, dev_priv->gtt.base.total,
						  DRM_MM_SEARCH_DEFAULT,
						  DRM_MM_CREATE_DEFAULT);
	if (ret == -ENOSPC && !retried) {
		/* Make room, then retry the reservation a single time. */
		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
					       I915_CACHE_NONE, 0);
		if (ret)
			return ret;

		retried = true;
		goto alloc;
	}

	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
		DRM_DEBUG("Forced to use aperture for PDEs\n");

	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
	return ret;
}
1096 | 1096 | ||
1097 | static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt) | 1097 | static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt) |
1098 | { | 1098 | { |
1099 | int i; | 1099 | int i; |
1100 | 1100 | ||
1101 | ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *), | 1101 | ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *), |
1102 | GFP_KERNEL); | 1102 | GFP_KERNEL); |
1103 | 1103 | ||
1104 | if (!ppgtt->pt_pages) | 1104 | if (!ppgtt->pt_pages) |
1105 | return -ENOMEM; | 1105 | return -ENOMEM; |
1106 | 1106 | ||
1107 | for (i = 0; i < ppgtt->num_pd_entries; i++) { | 1107 | for (i = 0; i < ppgtt->num_pd_entries; i++) { |
1108 | ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); | 1108 | ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); |
1109 | if (!ppgtt->pt_pages[i]) { | 1109 | if (!ppgtt->pt_pages[i]) { |
1110 | gen6_ppgtt_free(ppgtt); | 1110 | gen6_ppgtt_free(ppgtt); |
1111 | return -ENOMEM; | 1111 | return -ENOMEM; |
1112 | } | 1112 | } |
1113 | } | 1113 | } |
1114 | 1114 | ||
1115 | return 0; | 1115 | return 0; |
1116 | } | 1116 | } |
1117 | 1117 | ||
/*
 * Allocate everything a gen6/7 PPGTT needs before DMA mapping: the GGTT
 * node for the page directory, the page-table pages and the array that
 * will hold their DMA addresses.  Each failure path unwinds what the
 * earlier steps set up.
 */
static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
	int ret;

	ret = gen6_ppgtt_allocate_page_directories(ppgtt);
	if (ret)
		return ret;

	ret = gen6_ppgtt_allocate_page_tables(ppgtt);
	if (ret) {
		drm_mm_remove_node(&ppgtt->node);
		return ret;
	}

	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr) {
		drm_mm_remove_node(&ppgtt->node);
		gen6_ppgtt_free(ppgtt);
		return -ENOMEM;
	}

	return 0;
}
1142 | 1142 | ||
/*
 * Create a DMA mapping for every page-table page and record the bus
 * address in pt_dma_addr[].  Returns 0 on success or -EIO if a mapping
 * fails.
 * NOTE(review): the error path unmaps via gen6_ppgtt_unmap_pages(),
 * which walks all num_pd_entries slots including those never mapped
 * (dma addr 0) -- confirm pci_unmap_page() tolerates that.
 */
static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	int i;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			gen6_ppgtt_unmap_pages(ppgtt);
			return -EIO;
		}

		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	return 0;
}
1164 | 1164 | ||
/*
 * Gen6/7 PPGTT constructor: pick the per-platform enable/switch_mm
 * hooks, allocate and DMA-map the page tables, then fill in the
 * i915_address_space vfuncs and limits.  Finishes by scrubbing the
 * whole range to the scratch page.  Returns 0 or a negative errno.
 */
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
	/* Haswell is checked before the generic gen7 case because it is
	 * itself a gen7 part but needs its own mm-switch routine. */
	if (IS_GEN6(dev)) {
		ppgtt->enable = gen6_ppgtt_enable;
		ppgtt->switch_mm = gen6_mm_switch;
	} else if (IS_HASWELL(dev)) {
		ppgtt->enable = gen7_ppgtt_enable;
		ppgtt->switch_mm = hsw_mm_switch;
	} else if (IS_GEN7(dev)) {
		ppgtt->enable = gen7_ppgtt_enable;
		ppgtt->switch_mm = gen7_mm_switch;
	} else
		BUG();

	ret = gen6_ppgtt_alloc(ppgtt);
	if (ret)
		return ret;

	ret = gen6_ppgtt_setup_page_tables(ppgtt);
	if (ret) {
		gen6_ppgtt_free(ppgtt);
		return ret;
	}

	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
	ppgtt->debug_dump = gen6_dump_ppgtt;

	/* Offset of the PD within the GGTT, expressed in PTE-sized units. */
	ppgtt->pd_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);

	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);

	DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
			 ppgtt->node.size >> 20,
			 ppgtt->node.start / PAGE_SIZE);

	return 0;
}
1212 | 1212 | ||
1213 | int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) | 1213 | int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) |
1214 | { | 1214 | { |
1215 | struct drm_i915_private *dev_priv = dev->dev_private; | 1215 | struct drm_i915_private *dev_priv = dev->dev_private; |
1216 | int ret = 0; | 1216 | int ret = 0; |
1217 | 1217 | ||
1218 | ppgtt->base.dev = dev; | 1218 | ppgtt->base.dev = dev; |
1219 | ppgtt->base.scratch = dev_priv->gtt.base.scratch; | 1219 | ppgtt->base.scratch = dev_priv->gtt.base.scratch; |
1220 | 1220 | ||
1221 | if (INTEL_INFO(dev)->gen < 8) | 1221 | if (INTEL_INFO(dev)->gen < 8) |
1222 | ret = gen6_ppgtt_init(ppgtt); | 1222 | ret = gen6_ppgtt_init(ppgtt); |
1223 | else if (IS_GEN8(dev)) | 1223 | else if (IS_GEN8(dev)) |
1224 | ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); | 1224 | ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); |
1225 | else | 1225 | else |
1226 | BUG(); | 1226 | BUG(); |
1227 | 1227 | ||
1228 | if (!ret) { | 1228 | if (!ret) { |
1229 | struct drm_i915_private *dev_priv = dev->dev_private; | 1229 | struct drm_i915_private *dev_priv = dev->dev_private; |
1230 | kref_init(&ppgtt->ref); | 1230 | kref_init(&ppgtt->ref); |
1231 | drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, | 1231 | drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, |
1232 | ppgtt->base.total); | 1232 | ppgtt->base.total); |
1233 | i915_init_vm(dev_priv, &ppgtt->base); | 1233 | i915_init_vm(dev_priv, &ppgtt->base); |
1234 | if (INTEL_INFO(dev)->gen < 8) { | 1234 | if (INTEL_INFO(dev)->gen < 8) { |
1235 | gen6_write_pdes(ppgtt); | 1235 | gen6_write_pdes(ppgtt); |
1236 | DRM_DEBUG("Adding PPGTT at offset %x\n", | 1236 | DRM_DEBUG("Adding PPGTT at offset %x\n", |
1237 | ppgtt->pd_offset << 10); | 1237 | ppgtt->pd_offset << 10); |
1238 | } | 1238 | } |
1239 | } | 1239 | } |
1240 | 1240 | ||
1241 | return ret; | 1241 | return ret; |
1242 | } | 1242 | } |
1243 | 1243 | ||
1244 | static void | 1244 | static void |
1245 | ppgtt_bind_vma(struct i915_vma *vma, | 1245 | ppgtt_bind_vma(struct i915_vma *vma, |
1246 | enum i915_cache_level cache_level, | 1246 | enum i915_cache_level cache_level, |
1247 | u32 flags) | 1247 | u32 flags) |
1248 | { | 1248 | { |
1249 | vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, | 1249 | vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, |
1250 | cache_level); | 1250 | cache_level); |
1251 | } | 1251 | } |
1252 | 1252 | ||
1253 | static void ppgtt_unbind_vma(struct i915_vma *vma) | 1253 | static void ppgtt_unbind_vma(struct i915_vma *vma) |
1254 | { | 1254 | { |
1255 | vma->vm->clear_range(vma->vm, | 1255 | vma->vm->clear_range(vma->vm, |
1256 | vma->node.start, | 1256 | vma->node.start, |
1257 | vma->obj->base.size, | 1257 | vma->obj->base.size, |
1258 | true); | 1258 | true); |
1259 | } | 1259 | } |
1260 | 1260 | ||
1261 | extern int intel_iommu_gfx_mapped; | 1261 | extern int intel_iommu_gfx_mapped; |
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
1265 | static inline bool needs_idle_maps(struct drm_device *dev) | 1265 | static inline bool needs_idle_maps(struct drm_device *dev) |
1266 | { | 1266 | { |
1267 | #ifdef CONFIG_INTEL_IOMMU | 1267 | #ifdef CONFIG_INTEL_IOMMU |
1268 | /* Query intel_iommu to see if we need the workaround. Presumably that | 1268 | /* Query intel_iommu to see if we need the workaround. Presumably that |
1269 | * was loaded first. | 1269 | * was loaded first. |
1270 | */ | 1270 | */ |
1271 | if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped) | 1271 | if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped) |
1272 | return true; | 1272 | return true; |
1273 | #endif | 1273 | #endif |
1274 | return false; | 1274 | return false; |
1275 | } | 1275 | } |
1276 | 1276 | ||
/*
 * On platforms with do_idle_maps set, idle the GPU before GTT unmaps.
 * Temporarily forces mm.interruptible to false and returns the previous
 * value so undo_idling() can restore it afterwards.
 */
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}
1292 | 1292 | ||
1293 | static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) | 1293 | static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) |
1294 | { | 1294 | { |
1295 | if (unlikely(dev_priv->gtt.do_idle_maps)) | 1295 | if (unlikely(dev_priv->gtt.do_idle_maps)) |
1296 | dev_priv->mm.interruptible = interruptible; | 1296 | dev_priv->mm.interruptible = interruptible; |
1297 | } | 1297 | } |
1298 | 1298 | ||
1299 | void i915_check_and_clear_faults(struct drm_device *dev) | 1299 | void i915_check_and_clear_faults(struct drm_device *dev) |
1300 | { | 1300 | { |
1301 | struct drm_i915_private *dev_priv = dev->dev_private; | 1301 | struct drm_i915_private *dev_priv = dev->dev_private; |
1302 | struct intel_ring_buffer *ring; | 1302 | struct intel_ring_buffer *ring; |
1303 | int i; | 1303 | int i; |
1304 | 1304 | ||
1305 | if (INTEL_INFO(dev)->gen < 6) | 1305 | if (INTEL_INFO(dev)->gen < 6) |
1306 | return; | 1306 | return; |
1307 | 1307 | ||
1308 | for_each_ring(ring, dev_priv, i) { | 1308 | for_each_ring(ring, dev_priv, i) { |
1309 | u32 fault_reg; | 1309 | u32 fault_reg; |
1310 | fault_reg = I915_READ(RING_FAULT_REG(ring)); | 1310 | fault_reg = I915_READ(RING_FAULT_REG(ring)); |
1311 | if (fault_reg & RING_FAULT_VALID) { | 1311 | if (fault_reg & RING_FAULT_VALID) { |
1312 | DRM_DEBUG_DRIVER("Unexpected fault\n" | 1312 | DRM_DEBUG_DRIVER("Unexpected fault\n" |
1313 | "\tAddr: 0x%08lx\\n" | 1313 | "\tAddr: 0x%08lx\\n" |
1314 | "\tAddress space: %s\n" | 1314 | "\tAddress space: %s\n" |
1315 | "\tSource ID: %d\n" | 1315 | "\tSource ID: %d\n" |
1316 | "\tType: %d\n", | 1316 | "\tType: %d\n", |
1317 | fault_reg & PAGE_MASK, | 1317 | fault_reg & PAGE_MASK, |
1318 | fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", | 1318 | fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", |
1319 | RING_FAULT_SRCID(fault_reg), | 1319 | RING_FAULT_SRCID(fault_reg), |
1320 | RING_FAULT_FAULT_TYPE(fault_reg)); | 1320 | RING_FAULT_FAULT_TYPE(fault_reg)); |
1321 | I915_WRITE(RING_FAULT_REG(ring), | 1321 | I915_WRITE(RING_FAULT_REG(ring), |
1322 | fault_reg & ~RING_FAULT_VALID); | 1322 | fault_reg & ~RING_FAULT_VALID); |
1323 | } | 1323 | } |
1324 | } | 1324 | } |
1325 | POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); | 1325 | POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); |
1326 | } | 1326 | } |
1327 | 1327 | ||
/*
 * Called on suspend: clear any pending GPU faults, then point the whole
 * GGTT at the scratch page so nothing dangles across the power cycle.
 */
void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_INFO(dev)->gen < 6)
		return;

	i915_check_and_clear_faults(dev);

	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start,
				       dev_priv->gtt.base.total,
				       true);
}
1345 | 1345 | ||
/*
 * Called on resume: GTT contents were lost, so rebuild them.  Scrub the
 * whole GGTT to scratch, re-bind every bound object, then rewrite every
 * PPGTT's PDEs (gen8 instead reprograms its private PAT) and flush.
 */
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	i915_check_and_clear_faults(dev);

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start,
				       dev_priv->gtt.base.total,
				       true);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
							   &dev_priv->gtt.base);
		if (!vma)
			continue;

		i915_gem_clflush_object(obj, obj->pin_display);
		/* The bind_vma code tries to be smart about tracking mappings.
		 * Unfortunately above, we've just wiped out the mappings
		 * without telling our object about it. So we need to fake it.
		 */
		obj->has_global_gtt_mapping = 0;
		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
	}


	if (INTEL_INFO(dev)->gen >= 8) {
		/* Gen8 has no GGTT-resident PDEs to rewrite; restore the
		 * private PAT instead and we're done. */
		gen8_setup_private_ppat(dev_priv);
		return;
	}

	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		/* TODO: Perhaps it shouldn't be gen6 specific */
		if (i915_is_ggtt(vm)) {
			if (dev_priv->mm.aliasing_ppgtt)
				gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
			continue;
		}

		gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
	}

	i915_gem_chipset_flush(dev);
}
1394 | 1394 | ||
1395 | int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) | 1395 | int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) |
1396 | { | 1396 | { |
1397 | if (obj->has_dma_mapping) | 1397 | if (obj->has_dma_mapping) |
1398 | return 0; | 1398 | return 0; |
1399 | 1399 | ||
1400 | if (!dma_map_sg(&obj->base.dev->pdev->dev, | 1400 | if (!dma_map_sg(&obj->base.dev->pdev->dev, |
1401 | obj->pages->sgl, obj->pages->nents, | 1401 | obj->pages->sgl, obj->pages->nents, |
1402 | PCI_DMA_BIDIRECTIONAL)) | 1402 | PCI_DMA_BIDIRECTIONAL)) |
1403 | return -ENOSPC; | 1403 | return -ENOSPC; |
1404 | 1404 | ||
1405 | return 0; | 1405 | return 0; |
1406 | } | 1406 | } |
1407 | 1407 | ||
/*
 * Write one 64-bit gen8 GTT PTE.  Uses writeq where the arch provides
 * it; otherwise falls back to two 32-bit writes (low dword first),
 * which is not a single atomic update of the entry.
 */
static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
{
#ifdef writeq
	writeq(pte, addr);
#else
	iowrite32((u32)pte, addr);
	iowrite32(pte >> 32, addr + 4);
#endif
}
1417 | 1417 | ||
/*
 * Write gen8 (64-bit) GGTT PTEs for every page of @st starting at GTT
 * offset @start, read the last entry back as a posting read, then flush
 * so the GPU observes the updates.
 */
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	gen8_gtt_pte_t __iomem *gtt_entries =
		(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_dma_address(sg_iter.sg) +
			(sg_iter.sg_pgoffset << PAGE_SHIFT);
		gen8_set_pte(&gtt_entries[i],
			     gen8_pte_encode(addr, level, true));
		i++;
	}

	/*
	 * XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readq(&gtt_entries[i-1])
			!= gen8_pte_encode(addr, level, true));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
1457 | 1457 | ||
/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	/* GTT offsets are page granular: convert the byte offset to a PTE index. */
	unsigned first_entry = start >> PAGE_SHIFT;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	/* Write one PTE per backing page of the object's sg list. */
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1]) !=
			vm->pte_encode(addr, level, true));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
1500 | 1500 | ||
/*
 * Point every GGTT PTE in [start, start + length) at the scratch page
 * (or zero the PTEs when use_scratch is false), gen8 variant.
 */
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	/* Clamp to the PTEs that actually exist past first_entry. */
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen8_pte_encode(vm->scratch.addr,
				      I915_CACHE_LLC,
				      use_scratch);
	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
	readl(gtt_base);	/* posting read so the PTE writes land before return */
}
1526 | 1526 | ||
/*
 * Point every GGTT PTE in [start, start + length) at the scratch page
 * (or zero the PTEs when use_scratch is false), gen6 variant.
 */
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	/* Clamp to the PTEs that actually exist past first_entry. */
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);	/* posting read so the PTE writes land before return */
}
1551 | 1551 | ||
1552 | 1552 | ||
/*
 * Legacy (intel-gtt / pre-gen6) bind path: map the object's pages into
 * the global GTT via the intel-gtt helper library.
 */
static void i915_ggtt_bind_vma(struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 unused)
{
	const unsigned long entry = vma->node.start >> PAGE_SHIFT;
	/* Translate the i915 cache level into the legacy AGP flags. */
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	/* This path only handles the global GTT; no PPGTT exists here. */
	BUG_ON(!i915_is_ggtt(vma->vm));
	intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
	vma->obj->has_global_gtt_mapping = 1;
}
1565 | 1565 | ||
1566 | static void i915_ggtt_clear_range(struct i915_address_space *vm, | 1566 | static void i915_ggtt_clear_range(struct i915_address_space *vm, |
1567 | uint64_t start, | 1567 | uint64_t start, |
1568 | uint64_t length, | 1568 | uint64_t length, |
1569 | bool unused) | 1569 | bool unused) |
1570 | { | 1570 | { |
1571 | unsigned first_entry = start >> PAGE_SHIFT; | 1571 | unsigned first_entry = start >> PAGE_SHIFT; |
1572 | unsigned num_entries = length >> PAGE_SHIFT; | 1572 | unsigned num_entries = length >> PAGE_SHIFT; |
1573 | intel_gtt_clear_range(first_entry, num_entries); | 1573 | intel_gtt_clear_range(first_entry, num_entries); |
1574 | } | 1574 | } |
1575 | 1575 | ||
1576 | static void i915_ggtt_unbind_vma(struct i915_vma *vma) | 1576 | static void i915_ggtt_unbind_vma(struct i915_vma *vma) |
1577 | { | 1577 | { |
1578 | const unsigned int first = vma->node.start >> PAGE_SHIFT; | 1578 | const unsigned int first = vma->node.start >> PAGE_SHIFT; |
1579 | const unsigned int size = vma->obj->base.size >> PAGE_SHIFT; | 1579 | const unsigned int size = vma->obj->base.size >> PAGE_SHIFT; |
1580 | 1580 | ||
1581 | BUG_ON(!i915_is_ggtt(vma->vm)); | 1581 | BUG_ON(!i915_is_ggtt(vma->vm)); |
1582 | vma->obj->has_global_gtt_mapping = 0; | 1582 | vma->obj->has_global_gtt_mapping = 0; |
1583 | intel_gtt_clear_range(first, size); | 1583 | intel_gtt_clear_range(first, size); |
1584 | } | 1584 | } |
1585 | 1585 | ||
/*
 * Modern (gen6+) bind path: install PTEs in the global GTT and/or the
 * aliasing PPGTT, tracking which mappings the object currently holds.
 */
static void ggtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;

	/* If there is no aliasing PPGTT, or the caller needs a global mapping,
	 * or we have a global mapping already but the cacheability flags have
	 * changed, set the global PTEs.
	 *
	 * If there is an aliasing PPGTT it is anecdotally faster, so use that
	 * instead if none of the above hold true.
	 *
	 * NB: A global mapping should only be needed for special regions like
	 * "gtt mappable", SNB errata, or if specified via special execbuf
	 * flags. At all other times, the GPU will use the aliasing PPGTT.
	 */
	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
		if (!obj->has_global_gtt_mapping ||
		    (cache_level != obj->cache_level)) {
			vma->vm->insert_entries(vma->vm, obj->pages,
						vma->node.start,
						cache_level);
			obj->has_global_gtt_mapping = 1;
		}
	}

	/* Mirror the mapping into the aliasing PPGTT when one exists and its
	 * PTEs are missing or stale (the cache level changed). */
	if (dev_priv->mm.aliasing_ppgtt &&
	    (!obj->has_aliasing_ppgtt_mapping ||
	     (cache_level != obj->cache_level))) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
		appgtt->base.insert_entries(&appgtt->base,
					    vma->obj->pages,
					    vma->node.start,
					    cache_level);
		vma->obj->has_aliasing_ppgtt_mapping = 1;
	}
}
1626 | 1626 | ||
/*
 * Modern (gen6+) unbind path: tear down whichever of the global GTT and
 * aliasing PPGTT mappings the object holds, pointing the PTEs back at
 * the scratch page.
 */
static void ggtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;

	if (obj->has_global_gtt_mapping) {
		vma->vm->clear_range(vma->vm,
				     vma->node.start,
				     obj->base.size,
				     true);
		obj->has_global_gtt_mapping = 0;
	}

	/* Likewise for the aliasing PPGTT copy of the mapping, if any. */
	if (obj->has_aliasing_ppgtt_mapping) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
		appgtt->base.clear_range(&appgtt->base,
					 vma->node.start,
					 obj->base.size,
					 true);
		obj->has_aliasing_ppgtt_mapping = 0;
	}
}
1650 | 1650 | ||
/*
 * Release the DMA mapping of an object's backing pages.
 * NOTE(review): do_idling()/undo_idling() presumably quiesce the GPU around
 * the unmap (their definitions are elsewhere in this file) — confirm.
 */
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	/* Objects flagged has_dma_mapping were never mapped by us. */
	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}
1666 | 1666 | ||
/*
 * drm_mm color_adjust callback: keep a one-page (4096 byte) gap between
 * neighbouring nodes of differing "color" by shrinking the usable hole
 * at whichever end abuts a differently-colored node.
 */
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	/* The node preceding the hole: pad the front if colors differ. */
	if (node->color != color)
		*start += 4096;

	/* The node following the hole (if any): pad the back likewise. */
	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}
1683 | 1683 | ||
void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
			       unsigned long end)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
		int ret;
		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

		/* Paranoia: bound-list objects must not already own GGTT nodes. */
		WARN_ON(i915_gem_obj_ggtt_bound(obj));
		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
		if (ret)
			DRM_DEBUG_KMS("Reservation failed\n");
		obj->has_global_gtt_mapping = 1;
	}

	dev_priv->gtt.base.start = start;
	dev_priv->gtt.base.total = end - start;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt_vm->clear_range(ggtt_vm, hole_start,
				     hole_end - hole_start, true);
	}

	/* And finally clear the reserved guard page */
	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
}
1739 | 1739 | ||
1740 | void i915_gem_init_global_gtt(struct drm_device *dev) | 1740 | void i915_gem_init_global_gtt(struct drm_device *dev) |
1741 | { | 1741 | { |
1742 | struct drm_i915_private *dev_priv = dev->dev_private; | 1742 | struct drm_i915_private *dev_priv = dev->dev_private; |
1743 | unsigned long gtt_size, mappable_size; | 1743 | unsigned long gtt_size, mappable_size; |
1744 | 1744 | ||
1745 | gtt_size = dev_priv->gtt.base.total; | 1745 | gtt_size = dev_priv->gtt.base.total; |
1746 | mappable_size = dev_priv->gtt.mappable_end; | 1746 | mappable_size = dev_priv->gtt.mappable_end; |
1747 | 1747 | ||
1748 | i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); | 1748 | i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); |
1749 | } | 1749 | } |
1750 | 1750 | ||
1751 | static int setup_scratch_page(struct drm_device *dev) | 1751 | static int setup_scratch_page(struct drm_device *dev) |
1752 | { | 1752 | { |
1753 | struct drm_i915_private *dev_priv = dev->dev_private; | 1753 | struct drm_i915_private *dev_priv = dev->dev_private; |
1754 | struct page *page; | 1754 | struct page *page; |
1755 | dma_addr_t dma_addr; | 1755 | dma_addr_t dma_addr; |
1756 | 1756 | ||
1757 | page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); | 1757 | page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); |
1758 | if (page == NULL) | 1758 | if (page == NULL) |
1759 | return -ENOMEM; | 1759 | return -ENOMEM; |
1760 | get_page(page); | 1760 | get_page(page); |
1761 | set_pages_uc(page, 1); | 1761 | set_pages_uc(page, 1); |
1762 | 1762 | ||
1763 | #ifdef CONFIG_INTEL_IOMMU | 1763 | #ifdef CONFIG_INTEL_IOMMU |
1764 | dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE, | 1764 | dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE, |
1765 | PCI_DMA_BIDIRECTIONAL); | 1765 | PCI_DMA_BIDIRECTIONAL); |
1766 | if (pci_dma_mapping_error(dev->pdev, dma_addr)) | 1766 | if (pci_dma_mapping_error(dev->pdev, dma_addr)) |
1767 | return -EINVAL; | 1767 | return -EINVAL; |
1768 | #else | 1768 | #else |
1769 | dma_addr = page_to_phys(page); | 1769 | dma_addr = page_to_phys(page); |
1770 | #endif | 1770 | #endif |
1771 | dev_priv->gtt.base.scratch.page = page; | 1771 | dev_priv->gtt.base.scratch.page = page; |
1772 | dev_priv->gtt.base.scratch.addr = dma_addr; | 1772 | dev_priv->gtt.base.scratch.addr = dma_addr; |
1773 | 1773 | ||
1774 | return 0; | 1774 | return 0; |
1775 | } | 1775 | } |
1776 | 1776 | ||
/*
 * Reverse setup_scratch_page(): restore the write-back cache attribute,
 * drop the DMA mapping, then release both page references taken at setup.
 */
static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page = dev_priv->gtt.base.scratch.page;

	set_pages_wb(page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(page);
	__free_page(page);
}
1788 | 1788 | ||
1789 | static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) | 1789 | static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) |
1790 | { | 1790 | { |
1791 | snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; | 1791 | snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; |
1792 | snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; | 1792 | snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; |
1793 | return snb_gmch_ctl << 20; | 1793 | return snb_gmch_ctl << 20; |
1794 | } | 1794 | } |
1795 | 1795 | ||
1796 | static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) | 1796 | static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) |
1797 | { | 1797 | { |
1798 | bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; | 1798 | bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; |
1799 | bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; | 1799 | bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; |
1800 | if (bdw_gmch_ctl) | 1800 | if (bdw_gmch_ctl) |
1801 | bdw_gmch_ctl = 1 << bdw_gmch_ctl; | 1801 | bdw_gmch_ctl = 1 << bdw_gmch_ctl; |
1802 | return bdw_gmch_ctl << 20; | 1802 | return bdw_gmch_ctl << 20; |
1803 | } | 1803 | } |
1804 | 1804 | ||
1805 | static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) | 1805 | static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) |
1806 | { | 1806 | { |
1807 | snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; | 1807 | snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; |
1808 | snb_gmch_ctl &= SNB_GMCH_GMS_MASK; | 1808 | snb_gmch_ctl &= SNB_GMCH_GMS_MASK; |
1809 | return snb_gmch_ctl << 25; /* 32 MB units */ | 1809 | return snb_gmch_ctl << 25; /* 32 MB units */ |
1810 | } | 1810 | } |
1811 | 1811 | ||
1812 | static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl) | 1812 | static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl) |
1813 | { | 1813 | { |
1814 | bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; | 1814 | bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; |
1815 | bdw_gmch_ctl &= BDW_GMCH_GMS_MASK; | 1815 | bdw_gmch_ctl &= BDW_GMCH_GMS_MASK; |
1816 | return bdw_gmch_ctl << 25; /* 32 MB units */ | 1816 | return bdw_gmch_ctl << 25; /* 32 MB units */ |
1817 | } | 1817 | } |
1818 | 1818 | ||
/*
 * Map the GTT page-table half of BAR 0 and allocate the scratch page.
 * Returns 0 on success or a negative errno.
 */
static int ggtt_probe_common(struct drm_device *dev,
			     size_t gtt_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_phys_addr;
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	/* Write-combining mapping: PTE writes are posted, hence the posting
	 * reads in the insert/clear paths above. */
	dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(dev_priv->gtt.gsm);
	}

	return ret;
}
1845 | 1845 | ||
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases. */
static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
{
/* PPAT entry encoding: bits 0-1 memory type, bits 2-3 cache target,
 * bits 4-5 age; GEN8_PPAT(i, x) places entry x at index i of the
 * 8-entry, 8-bits-per-entry 64-bit PAT value. */
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_WT (2<<0)
#define GEN8_PPAT_WB (3<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
/* FIXME(BDW): Bspec is completely confused about cache control bits. */
#define GEN8_PPAT_LLC (1<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_AGE(x) (x<<4)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
	uint64_t pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
	 * write would work. */
	I915_WRITE(GEN8_PRIVATE_PAT, pat);
	I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
}
1878 | 1878 | ||
/* Probe the gen8 (BDW) GMCH: report the CPU-mappable aperture (PCI BAR 2),
 * the stolen-memory size and the total GTT size, program the private PAT
 * and install the gen8 GGTT clear/insert hooks.
 *
 * Returns the result of ggtt_probe_common() (0 on success, negative errno
 * on failure). */
static int gen8_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* Request a 39-bit DMA mask; the coherent mask is only widened when
	 * the streaming mask was accepted (pci_set_dma_mask returned 0). */
	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));

	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	/* Both the stolen size and the GTT size are encoded in the GMCH
	 * control word read above. */
	*stolen = gen8_get_stolen_size(snb_gmch_ctl);

	gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;

	gen8_setup_private_ppat(dev_priv);

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;

	return ret;
}
1913 | 1913 | ||
/* Probe the gen6/7 GMCH: report the CPU-mappable aperture (PCI BAR 2),
 * the stolen-memory size and the total GTT size, and install the gen6
 * GGTT clear/insert hooks.
 *
 * Returns 0 on success, -ENXIO for an implausible aperture size, or the
 * result of ggtt_probe_common(). */
static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	/* Request a 40-bit DMA mask; the coherent mask is only widened when
	 * the streaming mask was accepted (pci_set_dma_mask returned 0). */
	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	/* Both the stolen size and the GTT size are encoded in the GMCH
	 * control word read above. */
	*stolen = gen6_get_stolen_size(snb_gmch_ctl);

	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;

	return ret;
}
1953 | 1953 | ||
/* Tear down a gen6+ GGTT address space: release the drm_mm range
 * allocator, unmap the GSM mapping created at probe time and free the
 * scratch page. */
static void gen6_gmch_remove(struct i915_address_space *vm)
{

	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);

	drm_mm_takedown(&vm->mm);
	iounmap(gtt->gsm);
	teardown_scratch_page(vm->dev);
}
1963 | 1963 | ||
/* Probe the GMCH on gen5 and earlier through the intel-gtt library.
 * intel_gmch_probe() returns nonzero on success (note: the opposite of
 * the usual 0-on-success kernel convention); the sizes it discovered are
 * then fetched with intel_gtt_get().
 *
 * Returns 0 on success or -EIO if the library probe failed. */
static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	/* needs_idle_maps() decides whether the Ironlake/intel_iommu
	 * workaround below is required. */
	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;

	if (unlikely(dev_priv->gtt.do_idle_maps))
		DRM_INFO("applying Ironlake quirks for intel_iommu\n");

	return 0;
}
1989 | 1989 | ||
/* Counterpart of i915_gmch_probe(): release the intel-gtt library
 * resources. The vm argument is unused; the library tracks its own
 * state. */
static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}
1994 | 1994 | ||
1995 | int i915_gem_gtt_init(struct drm_device *dev) | 1995 | int i915_gem_gtt_init(struct drm_device *dev) |
1996 | { | 1996 | { |
1997 | struct drm_i915_private *dev_priv = dev->dev_private; | 1997 | struct drm_i915_private *dev_priv = dev->dev_private; |
1998 | struct i915_gtt *gtt = &dev_priv->gtt; | 1998 | struct i915_gtt *gtt = &dev_priv->gtt; |
1999 | int ret; | 1999 | int ret; |
2000 | 2000 | ||
2001 | if (INTEL_INFO(dev)->gen <= 5) { | 2001 | if (INTEL_INFO(dev)->gen <= 5) { |
2002 | gtt->gtt_probe = i915_gmch_probe; | 2002 | gtt->gtt_probe = i915_gmch_probe; |
2003 | gtt->base.cleanup = i915_gmch_remove; | 2003 | gtt->base.cleanup = i915_gmch_remove; |
2004 | } else if (INTEL_INFO(dev)->gen < 8) { | 2004 | } else if (INTEL_INFO(dev)->gen < 8) { |
2005 | gtt->gtt_probe = gen6_gmch_probe; | 2005 | gtt->gtt_probe = gen6_gmch_probe; |
2006 | gtt->base.cleanup = gen6_gmch_remove; | 2006 | gtt->base.cleanup = gen6_gmch_remove; |
2007 | if (IS_HASWELL(dev) && dev_priv->ellc_size) | 2007 | if (IS_HASWELL(dev) && dev_priv->ellc_size) |
2008 | gtt->base.pte_encode = iris_pte_encode; | 2008 | gtt->base.pte_encode = iris_pte_encode; |
2009 | else if (IS_HASWELL(dev)) | 2009 | else if (IS_HASWELL(dev)) |
2010 | gtt->base.pte_encode = hsw_pte_encode; | 2010 | gtt->base.pte_encode = hsw_pte_encode; |
2011 | else if (IS_VALLEYVIEW(dev)) | 2011 | else if (IS_VALLEYVIEW(dev)) |
2012 | gtt->base.pte_encode = byt_pte_encode; | 2012 | gtt->base.pte_encode = byt_pte_encode; |
2013 | else if (INTEL_INFO(dev)->gen >= 7) | 2013 | else if (INTEL_INFO(dev)->gen >= 7) |
2014 | gtt->base.pte_encode = ivb_pte_encode; | 2014 | gtt->base.pte_encode = ivb_pte_encode; |
2015 | else | 2015 | else |
2016 | gtt->base.pte_encode = snb_pte_encode; | 2016 | gtt->base.pte_encode = snb_pte_encode; |
2017 | } else { | 2017 | } else { |
2018 | dev_priv->gtt.gtt_probe = gen8_gmch_probe; | 2018 | dev_priv->gtt.gtt_probe = gen8_gmch_probe; |
2019 | dev_priv->gtt.base.cleanup = gen6_gmch_remove; | 2019 | dev_priv->gtt.base.cleanup = gen6_gmch_remove; |
2020 | } | 2020 | } |
2021 | 2021 | ||
2022 | ret = gtt->gtt_probe(dev, >t->base.total, >t->stolen_size, | 2022 | ret = gtt->gtt_probe(dev, >t->base.total, >t->stolen_size, |
2023 | >t->mappable_base, >t->mappable_end); | 2023 | >t->mappable_base, >t->mappable_end); |
2024 | if (ret) | 2024 | if (ret) |
2025 | return ret; | 2025 | return ret; |
2026 | 2026 | ||
2027 | gtt->base.dev = dev; | 2027 | gtt->base.dev = dev; |
2028 | 2028 | ||
2029 | /* GMADR is the PCI mmio aperture into the global GTT. */ | 2029 | /* GMADR is the PCI mmio aperture into the global GTT. */ |
2030 | DRM_INFO("Memory usable by graphics device = %zdM\n", | 2030 | DRM_INFO("Memory usable by graphics device = %zdM\n", |
2031 | gtt->base.total >> 20); | 2031 | gtt->base.total >> 20); |
2032 | DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); | 2032 | DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); |
2033 | DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); | 2033 | DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); |
2034 | 2034 | ||
2035 | return 0; | 2035 | return 0; |
2036 | } | 2036 | } |
2037 | 2037 | ||
2038 | static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, | 2038 | static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, |
2039 | struct i915_address_space *vm) | 2039 | struct i915_address_space *vm) |
2040 | { | 2040 | { |
2041 | struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); | 2041 | struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); |
2042 | if (vma == NULL) | 2042 | if (vma == NULL) |
2043 | return ERR_PTR(-ENOMEM); | 2043 | return ERR_PTR(-ENOMEM); |
2044 | 2044 | ||
2045 | INIT_LIST_HEAD(&vma->vma_link); | 2045 | INIT_LIST_HEAD(&vma->vma_link); |
2046 | INIT_LIST_HEAD(&vma->mm_list); | 2046 | INIT_LIST_HEAD(&vma->mm_list); |
2047 | INIT_LIST_HEAD(&vma->exec_list); | 2047 | INIT_LIST_HEAD(&vma->exec_list); |
2048 | vma->vm = vm; | 2048 | vma->vm = vm; |
2049 | vma->obj = obj; | 2049 | vma->obj = obj; |
2050 | 2050 | ||
2051 | switch (INTEL_INFO(vm->dev)->gen) { | 2051 | switch (INTEL_INFO(vm->dev)->gen) { |
2052 | case 8: | 2052 | case 8: |
2053 | case 7: | 2053 | case 7: |
2054 | case 6: | 2054 | case 6: |
2055 | if (i915_is_ggtt(vm)) { | 2055 | if (i915_is_ggtt(vm)) { |
2056 | vma->unbind_vma = ggtt_unbind_vma; | 2056 | vma->unbind_vma = ggtt_unbind_vma; |
2057 | vma->bind_vma = ggtt_bind_vma; | 2057 | vma->bind_vma = ggtt_bind_vma; |
2058 | } else { | 2058 | } else { |
2059 | vma->unbind_vma = ppgtt_unbind_vma; | 2059 | vma->unbind_vma = ppgtt_unbind_vma; |
2060 | vma->bind_vma = ppgtt_bind_vma; | 2060 | vma->bind_vma = ppgtt_bind_vma; |
2061 | } | 2061 | } |
2062 | break; | 2062 | break; |
2063 | case 5: | 2063 | case 5: |
2064 | case 4: | 2064 | case 4: |
2065 | case 3: | 2065 | case 3: |
2066 | case 2: | 2066 | case 2: |
2067 | BUG_ON(!i915_is_ggtt(vm)); | 2067 | BUG_ON(!i915_is_ggtt(vm)); |
2068 | vma->unbind_vma = i915_ggtt_unbind_vma; | 2068 | vma->unbind_vma = i915_ggtt_unbind_vma; |
2069 | vma->bind_vma = i915_ggtt_bind_vma; | 2069 | vma->bind_vma = i915_ggtt_bind_vma; |
2070 | break; | 2070 | break; |
2071 | default: | 2071 | default: |
2072 | BUG(); | 2072 | BUG(); |
2073 | } | 2073 | } |
2074 | 2074 | ||
2075 | /* Keep GGTT vmas first to make debug easier */ | 2075 | /* Keep GGTT vmas first to make debug easier */ |
2076 | if (i915_is_ggtt(vm)) | 2076 | if (i915_is_ggtt(vm)) |
2077 | list_add(&vma->vma_link, &obj->vma_list); | 2077 | list_add(&vma->vma_link, &obj->vma_list); |
2078 | else | 2078 | else |
2079 | list_add_tail(&vma->vma_link, &obj->vma_list); | 2079 | list_add_tail(&vma->vma_link, &obj->vma_list); |
2080 | 2080 | ||
2081 | return vma; | 2081 | return vma; |
2082 | } | 2082 | } |
2083 | 2083 | ||
/* Return the existing VMA for (@obj, @vm) if one exists, otherwise
 * create a fresh one.  May return an ERR_PTR from the create path. */
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm)
{
	struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);

	return vma ? vma : __i915_gem_vma_create(obj, vm);
}
2096 | 2096 |
drivers/gpu/drm/i915/i915_irq.c
1 | /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- | 1 | /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- |
2 | */ | 2 | */ |
3 | /* | 3 | /* |
4 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. | 4 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
5 | * All Rights Reserved. | 5 | * All Rights Reserved. |
6 | * | 6 | * |
7 | * Permission is hereby granted, free of charge, to any person obtaining a | 7 | * Permission is hereby granted, free of charge, to any person obtaining a |
8 | * copy of this software and associated documentation files (the | 8 | * copy of this software and associated documentation files (the |
9 | * "Software"), to deal in the Software without restriction, including | 9 | * "Software"), to deal in the Software without restriction, including |
10 | * without limitation the rights to use, copy, modify, merge, publish, | 10 | * without limitation the rights to use, copy, modify, merge, publish, |
11 | * distribute, sub license, and/or sell copies of the Software, and to | 11 | * distribute, sub license, and/or sell copies of the Software, and to |
12 | * permit persons to whom the Software is furnished to do so, subject to | 12 | * permit persons to whom the Software is furnished to do so, subject to |
13 | * the following conditions: | 13 | * the following conditions: |
14 | * | 14 | * |
15 | * The above copyright notice and this permission notice (including the | 15 | * The above copyright notice and this permission notice (including the |
16 | * next paragraph) shall be included in all copies or substantial portions | 16 | * next paragraph) shall be included in all copies or substantial portions |
17 | * of the Software. | 17 | * of the Software. |
18 | * | 18 | * |
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | 19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
20 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 20 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
21 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | 21 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
22 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | 22 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR |
23 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | 23 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
24 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | 24 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
25 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 25 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
26 | * | 26 | * |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
30 | 30 | ||
31 | #include <linux/sysrq.h> | 31 | #include <linux/sysrq.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/circ_buf.h> | 33 | #include <linux/circ_buf.h> |
34 | #include <drm/drmP.h> | 34 | #include <drm/drmP.h> |
35 | #include <drm/i915_drm.h> | 35 | #include <drm/i915_drm.h> |
36 | #include "i915_drv.h" | 36 | #include "i915_drv.h" |
37 | #include "i915_trace.h" | 37 | #include "i915_trace.h" |
38 | #include "intel_drv.h" | 38 | #include "intel_drv.h" |
39 | 39 | ||
/* Hotplug interrupt bit tables, indexed by HPD_* pin. */

/* IBX-family PCH hotplug bits (SDE_* register layout) */
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

/* CPT-family PCH hotplug bits (SDE_*_CPT register layout) */
static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

/* i915-class hotplug interrupt *enable* bits */
static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

/* G4X hotplug interrupt *status* bits */
static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
82 | 82 | ||
/* For display hotplug interrupt */
/* Unmask @mask in DEIMR (display engine interrupt mask register).
 * Must be called with dev_priv->irq_lock held. */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	/* While interrupts are disabled (pm.irqs_disabled), only update the
	 * saved DEIMR image that will be restored when they come back. */
	if (dev_priv->pm.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.deimr &= ~mask;
		return;
	}

	/* Write DEIMR only if some of the requested bits are still masked */
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
101 | 101 | ||
/* Mask @mask in DEIMR (display engine interrupt mask register).
 * Must be called with dev_priv->irq_lock held. */
static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	/* While interrupts are disabled (pm.irqs_disabled), only update the
	 * saved DEIMR image that will be restored when they come back. */
	if (dev_priv->pm.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.deimr |= mask;
		return;
	}

	/* Write DEIMR only if some of the requested bits are not yet masked */
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
119 | 119 | ||
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Bits in @interrupt_mask are unmasked when they are also set in
 * @enabled_irq_mask and masked otherwise.  Must be called with
 * dev_priv->irq_lock held.
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	/* While interrupts are disabled (pm.irqs_disabled), only update the
	 * saved GTIMR image that will be restored when they come back. */
	if (dev_priv->pm.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pm.regsave.gtimr |= (~enabled_irq_mask &
					       interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}
145 | 145 | ||
/* Unmask @mask in GTIMR; caller must hold dev_priv->irq_lock. */
void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}
150 | 150 | ||
/* Mask @mask in GTIMR; caller must hold dev_priv->irq_lock. */
void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
155 | 155 | ||
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Bits in @interrupt_mask are unmasked when they are also set in
 * @enabled_irq_mask and masked otherwise.  Must be called with
 * dev_priv->irq_lock held.
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	/* While interrupts are disabled (pm.irqs_disabled), only update the
	 * saved PMIMR image that will be restored when they come back. */
	if (dev_priv->pm.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pm.regsave.gen6_pmimr |= (~enabled_irq_mask &
						    interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* Skip the MMIO write (and posting read) when nothing changed */
	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}
188 | 188 | ||
/* Unmask @mask in GEN6_PMIMR; caller must hold dev_priv->irq_lock. */
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}
193 | 193 | ||
/* Mask @mask in GEN6_PMIMR; caller must hold dev_priv->irq_lock. */
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
198 | 198 | ||
199 | static bool ivb_can_enable_err_int(struct drm_device *dev) | 199 | static bool ivb_can_enable_err_int(struct drm_device *dev) |
200 | { | 200 | { |
201 | struct drm_i915_private *dev_priv = dev->dev_private; | 201 | struct drm_i915_private *dev_priv = dev->dev_private; |
202 | struct intel_crtc *crtc; | 202 | struct intel_crtc *crtc; |
203 | enum pipe pipe; | 203 | enum pipe pipe; |
204 | 204 | ||
205 | assert_spin_locked(&dev_priv->irq_lock); | 205 | assert_spin_locked(&dev_priv->irq_lock); |
206 | 206 | ||
207 | for_each_pipe(pipe) { | 207 | for_each_pipe(pipe) { |
208 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | 208 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
209 | 209 | ||
210 | if (crtc->cpu_fifo_underrun_disabled) | 210 | if (crtc->cpu_fifo_underrun_disabled) |
211 | return false; | 211 | return false; |
212 | } | 212 | } |
213 | 213 | ||
214 | return true; | 214 | return true; |
215 | } | 215 | } |
216 | 216 | ||
217 | static bool cpt_can_enable_serr_int(struct drm_device *dev) | 217 | static bool cpt_can_enable_serr_int(struct drm_device *dev) |
218 | { | 218 | { |
219 | struct drm_i915_private *dev_priv = dev->dev_private; | 219 | struct drm_i915_private *dev_priv = dev->dev_private; |
220 | enum pipe pipe; | 220 | enum pipe pipe; |
221 | struct intel_crtc *crtc; | 221 | struct intel_crtc *crtc; |
222 | 222 | ||
223 | assert_spin_locked(&dev_priv->irq_lock); | 223 | assert_spin_locked(&dev_priv->irq_lock); |
224 | 224 | ||
225 | for_each_pipe(pipe) { | 225 | for_each_pipe(pipe) { |
226 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | 226 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
227 | 227 | ||
228 | if (crtc->pch_fifo_underrun_disabled) | 228 | if (crtc->pch_fifo_underrun_disabled) |
229 | return false; | 229 | return false; |
230 | } | 230 | } |
231 | 231 | ||
232 | return true; | 232 | return true; |
233 | } | 233 | } |
234 | 234 | ||
/* Acknowledge a FIFO underrun on @pipe for gmch platforms.  The read
 * value is masked to the enable half of PIPESTAT (0x7fff0000) before
 * OR-ing in the underrun status bit — presumably the low status bits
 * are write-one-to-clear, so this avoids clearing unrelated events
 * (NOTE(review): confirm against the PIPESTAT register description).
 * Caller must hold dev_priv->irq_lock. */
static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
	POSTING_READ(reg);
}
246 | 246 | ||
247 | static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, | 247 | static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, |
248 | enum pipe pipe, bool enable) | 248 | enum pipe pipe, bool enable) |
249 | { | 249 | { |
250 | struct drm_i915_private *dev_priv = dev->dev_private; | 250 | struct drm_i915_private *dev_priv = dev->dev_private; |
251 | uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : | 251 | uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : |
252 | DE_PIPEB_FIFO_UNDERRUN; | 252 | DE_PIPEB_FIFO_UNDERRUN; |
253 | 253 | ||
254 | if (enable) | 254 | if (enable) |
255 | ironlake_enable_display_irq(dev_priv, bit); | 255 | ironlake_enable_display_irq(dev_priv, bit); |
256 | else | 256 | else |
257 | ironlake_disable_display_irq(dev_priv, bit); | 257 | ironlake_disable_display_irq(dev_priv, bit); |
258 | } | 258 | } |
259 | 259 | ||
/*
 * IVB/HSW share a single ERR_INT interrupt enable bit (DE_ERR_INT_IVB)
 * across all pipes, so enabling here is gated on ivb_can_enable_err_int()
 * and disabling checks for underruns that would otherwise be lost.
 */
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		/* Clear any stale underrun status before unmasking. */
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		/* Shared enable bit: only unmask if every pipe allows it. */
		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		/*
		 * If the interrupt was already masked but an underrun is
		 * pending, it would never have been reported; log it.
		 */
		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}
284 | 284 | ||
285 | static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, | 285 | static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, |
286 | enum pipe pipe, bool enable) | 286 | enum pipe pipe, bool enable) |
287 | { | 287 | { |
288 | struct drm_i915_private *dev_priv = dev->dev_private; | 288 | struct drm_i915_private *dev_priv = dev->dev_private; |
289 | 289 | ||
290 | assert_spin_locked(&dev_priv->irq_lock); | 290 | assert_spin_locked(&dev_priv->irq_lock); |
291 | 291 | ||
292 | if (enable) | 292 | if (enable) |
293 | dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; | 293 | dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; |
294 | else | 294 | else |
295 | dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; | 295 | dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; |
296 | I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); | 296 | I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); |
297 | POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); | 297 | POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); |
298 | } | 298 | } |
299 | 299 | ||
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * The bits selected by @interrupt_mask are updated: those also set in
 * @enabled_irq_mask get unmasked in SDEIMR, the rest get masked.
 * Caller must hold dev_priv->irq_lock.
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	/*
	 * While runtime PM has interrupts disabled, hotplug bits must not be
	 * touched in hardware: warn and only update the saved register image
	 * (presumably restored when IRQs are re-enabled — see pm.regsave).
	 */
	if (dev_priv->pm.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pm.regsave.sdeimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
/* Convenience wrappers: unmask (enable) or mask (disable) @bits in SDEIMR. */
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)
332 | 332 | ||
333 | static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, | 333 | static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, |
334 | enum transcoder pch_transcoder, | 334 | enum transcoder pch_transcoder, |
335 | bool enable) | 335 | bool enable) |
336 | { | 336 | { |
337 | struct drm_i915_private *dev_priv = dev->dev_private; | 337 | struct drm_i915_private *dev_priv = dev->dev_private; |
338 | uint32_t bit = (pch_transcoder == TRANSCODER_A) ? | 338 | uint32_t bit = (pch_transcoder == TRANSCODER_A) ? |
339 | SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; | 339 | SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; |
340 | 340 | ||
341 | if (enable) | 341 | if (enable) |
342 | ibx_enable_display_interrupt(dev_priv, bit); | 342 | ibx_enable_display_interrupt(dev_priv, bit); |
343 | else | 343 | else |
344 | ibx_disable_display_interrupt(dev_priv, bit); | 344 | ibx_disable_display_interrupt(dev_priv, bit); |
345 | } | 345 | } |
346 | 346 | ||
/*
 * CPT/PPT share a single SERR interrupt enable bit (SDE_ERROR_CPT) across
 * all transcoders, so enabling is gated on cpt_can_enable_serr_int() and
 * disabling checks for underruns that would otherwise be lost.
 */
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		/* Clear any stale underrun status before unmasking. */
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		/* Shared enable bit: only unmask if all transcoders allow it. */
		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		/*
		 * If the interrupt was already masked but an underrun is
		 * pending, it would never have been reported; log it.
		 */
		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}
375 | 375 | ||
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Caller must hold dev_priv->irq_lock.
 *
 * Returns the previous state of underrun reporting.
 */
bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool ret;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Previous state: reporting was enabled iff not marked disabled. */
	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	/* Nothing to do if no state change was requested. */
	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	/*
	 * Gen2-4 and VLV have no interrupt enable to flip — on enable we
	 * only clear any pending status; later gens dispatch to the
	 * platform-specific enable/disable implementation.
	 */
	if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
		i9xx_clear_fifo_underrun(dev, pipe);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	return ret;
}
419 | 419 | ||
420 | bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, | 420 | bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, |
421 | enum pipe pipe, bool enable) | 421 | enum pipe pipe, bool enable) |
422 | { | 422 | { |
423 | struct drm_i915_private *dev_priv = dev->dev_private; | 423 | struct drm_i915_private *dev_priv = dev->dev_private; |
424 | unsigned long flags; | 424 | unsigned long flags; |
425 | bool ret; | 425 | bool ret; |
426 | 426 | ||
427 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 427 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
428 | ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable); | 428 | ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable); |
429 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 429 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
430 | 430 | ||
431 | return ret; | 431 | return ret; |
432 | } | 432 | } |
433 | 433 | ||
434 | static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev, | 434 | static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev, |
435 | enum pipe pipe) | 435 | enum pipe pipe) |
436 | { | 436 | { |
437 | struct drm_i915_private *dev_priv = dev->dev_private; | 437 | struct drm_i915_private *dev_priv = dev->dev_private; |
438 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 438 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
439 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 439 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
440 | 440 | ||
441 | return !intel_crtc->cpu_fifo_underrun_disabled; | 441 | return !intel_crtc->cpu_fifo_underrun_disabled; |
442 | } | 442 | } |
443 | 443 | ||
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	/* Previous state: reporting was enabled iff not marked disabled. */
	ret = !intel_crtc->pch_fifo_underrun_disabled;

	/* Nothing to do if no state change was requested. */
	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	/* IBX has per-transcoder bits; CPT/PPT share a single one. */
	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}
495 | 495 | ||
496 | 496 | ||
/*
 * Low-level pipestat enable: sets @enable_mask and acknowledges the bits
 * in @status_mask for @pipe, and records the status bits in
 * pipestat_irq_mask. Caller must hold dev_priv->irq_lock.
 */
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	/* Current enable half of the register; status bits are dropped. */
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Reject bits outside the architectural enable/status fields. */
	if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
			 status_mask & ~PIPESTAT_INT_STATUS_MASK))
		return;

	/* Nothing to do if all requested enable bits are already set. */
	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
520 | 520 | ||
/*
 * Low-level pipestat disable: clears @enable_mask for @pipe and removes
 * the bits in @status_mask from pipestat_irq_mask.
 * Caller must hold dev_priv->irq_lock.
 */
static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	/* Current enable half of the register; status bits are dropped. */
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Reject bits outside the architectural enable/status fields. */
	if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
			 status_mask & ~PIPESTAT_INT_STATUS_MASK))
		return;

	/* Nothing to do if none of the requested enable bits are set. */
	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
543 | 543 | ||
/*
 * Compute the PIPESTAT enable bits matching @status_mask on VLV.
 *
 * The generic rule (enable bit == status bit << 16) does not hold for all
 * bits: the FIFO underrun status has no enable bit, and the sprite
 * flip-done events use dedicated VLV-specific enable bits.
 */
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet, on pipe B the
	 * same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;

	/* Drop the bits that don't follow the <<16 rule... */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	/* ...and substitute the VLV-specific sprite flip-done enables. */
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
565 | 565 | ||
566 | void | 566 | void |
567 | i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, | 567 | i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, |
568 | u32 status_mask) | 568 | u32 status_mask) |
569 | { | 569 | { |
570 | u32 enable_mask; | 570 | u32 enable_mask; |
571 | 571 | ||
572 | if (IS_VALLEYVIEW(dev_priv->dev)) | 572 | if (IS_VALLEYVIEW(dev_priv->dev)) |
573 | enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, | 573 | enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, |
574 | status_mask); | 574 | status_mask); |
575 | else | 575 | else |
576 | enable_mask = status_mask << 16; | 576 | enable_mask = status_mask << 16; |
577 | __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); | 577 | __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); |
578 | } | 578 | } |
579 | 579 | ||
580 | void | 580 | void |
581 | i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, | 581 | i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, |
582 | u32 status_mask) | 582 | u32 status_mask) |
583 | { | 583 | { |
584 | u32 enable_mask; | 584 | u32 enable_mask; |
585 | 585 | ||
586 | if (IS_VALLEYVIEW(dev_priv->dev)) | 586 | if (IS_VALLEYVIEW(dev_priv->dev)) |
587 | enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, | 587 | enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, |
588 | status_mask); | 588 | status_mask); |
589 | else | 589 | else |
590 | enable_mask = status_mask << 16; | 590 | enable_mask = status_mask << 16; |
591 | __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); | 591 | __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); |
592 | } | 592 | } |
593 | 593 | ||
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 *
 * Enables the legacy backlight (BLC) event pipestat interrupts used by the
 * ACPI OpRegion ASLE mechanism. No-op when there is no ASLE opregion or on
 * non-mobile platforms.
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	/* Gen4+ additionally gets the BLC event on pipe A. */
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
614 | 614 | ||
615 | /** | 615 | /** |
616 | * i915_pipe_enabled - check if a pipe is enabled | 616 | * i915_pipe_enabled - check if a pipe is enabled |
617 | * @dev: DRM device | 617 | * @dev: DRM device |
618 | * @pipe: pipe to check | 618 | * @pipe: pipe to check |
619 | * | 619 | * |
620 | * Reading certain registers when the pipe is disabled can hang the chip. | 620 | * Reading certain registers when the pipe is disabled can hang the chip. |
621 | * Use this routine to make sure the PLL is running and the pipe is active | 621 | * Use this routine to make sure the PLL is running and the pipe is active |
622 | * before reading such registers if unsure. | 622 | * before reading such registers if unsure. |
623 | */ | 623 | */ |
624 | static int | 624 | static int |
625 | i915_pipe_enabled(struct drm_device *dev, int pipe) | 625 | i915_pipe_enabled(struct drm_device *dev, int pipe) |
626 | { | 626 | { |
627 | struct drm_i915_private *dev_priv = dev->dev_private; | 627 | struct drm_i915_private *dev_priv = dev->dev_private; |
628 | 628 | ||
629 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 629 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
630 | /* Locking is horribly broken here, but whatever. */ | 630 | /* Locking is horribly broken here, but whatever. */ |
631 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 631 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
632 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 632 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
633 | 633 | ||
634 | return intel_crtc->active; | 634 | return intel_crtc->active; |
635 | } else { | 635 | } else { |
636 | return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; | 636 | return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; |
637 | } | 637 | } |
638 | } | 638 | } |
639 | 639 | ||
640 | static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe) | 640 | static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe) |
641 | { | 641 | { |
642 | /* Gen2 doesn't have a hardware frame counter */ | 642 | /* Gen2 doesn't have a hardware frame counter */ |
643 | return 0; | 643 | return 0; |
644 | } | 644 | } |
645 | 645 | ||
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	/* Reading these registers with the pipe off could hang the chip. */
	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/*
	 * Determine the pixel position of vblank start for the frame-count
	 * fixup below: from sw state under KMS, or from the hw timing
	 * registers under UMS.
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
704 | 704 | ||
705 | static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | 705 | static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) |
706 | { | 706 | { |
707 | struct drm_i915_private *dev_priv = dev->dev_private; | 707 | struct drm_i915_private *dev_priv = dev->dev_private; |
708 | int reg = PIPE_FRMCOUNT_GM45(pipe); | 708 | int reg = PIPE_FRMCOUNT_GM45(pipe); |
709 | 709 | ||
710 | if (!i915_pipe_enabled(dev, pipe)) { | 710 | if (!i915_pipe_enabled(dev, pipe)) { |
711 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " | 711 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
712 | "pipe %c\n", pipe_name(pipe)); | 712 | "pipe %c\n", pipe_name(pipe)); |
713 | return 0; | 713 | return 0; |
714 | } | 714 | } |
715 | 715 | ||
716 | return I915_READ(reg); | 716 | return I915_READ(reg); |
717 | } | 717 | } |
718 | 718 | ||
/* raw reads, only for fast reads of display block, no need for forcewake etc.
 * Bypasses the normal I915_READ machinery (no forcewake, no tracing) —
 * intended for timing-critical paths such as scanout position queries. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
721 | 721 | ||
722 | static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe) | 722 | static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe) |
723 | { | 723 | { |
724 | struct drm_i915_private *dev_priv = dev->dev_private; | 724 | struct drm_i915_private *dev_priv = dev->dev_private; |
725 | uint32_t status; | 725 | uint32_t status; |
726 | int reg; | 726 | int reg; |
727 | 727 | ||
728 | if (INTEL_INFO(dev)->gen >= 8) { | 728 | if (INTEL_INFO(dev)->gen >= 8) { |
729 | status = GEN8_PIPE_VBLANK; | 729 | status = GEN8_PIPE_VBLANK; |
730 | reg = GEN8_DE_PIPE_ISR(pipe); | 730 | reg = GEN8_DE_PIPE_ISR(pipe); |
731 | } else if (INTEL_INFO(dev)->gen >= 7) { | 731 | } else if (INTEL_INFO(dev)->gen >= 7) { |
732 | status = DE_PIPE_VBLANK_IVB(pipe); | 732 | status = DE_PIPE_VBLANK_IVB(pipe); |
733 | reg = DEISR; | 733 | reg = DEISR; |
734 | } else { | 734 | } else { |
735 | status = DE_PIPE_VBLANK(pipe); | 735 | status = DE_PIPE_VBLANK(pipe); |
736 | reg = DEISR; | 736 | reg = DEISR; |
737 | } | 737 | } |
738 | 738 | ||
739 | return __raw_i915_read32(dev_priv, reg) & status; | 739 | return __raw_i915_read32(dev_priv, reg) & status; |
740 | } | 740 | } |
741 | 741 | ||
/*
 * Query the current scanout position of a pipe.
 *
 * @dev:   drm device
 * @pipe:  pipe to query
 * @flags: DRM_CALLED_FROM_VBLIRQ etc., influences vblank-edge fixups
 * @vpos:  out, vertical position relative to vblank end (negative in vblank)
 * @hpos:  out, horizontal position (0 when only a scanline counter exists)
 * @stime: optional out, system timestamp taken just before the query
 * @etime: optional out, system timestamp taken just after the query
 *
 * Returns 0 for a disabled crtc, otherwise a DRM_SCANOUTPOS_* flag mask.
 * The register sampling is done under uncore.lock with timestamps taken
 * immediately around it, so the caller can bound the sampling error.
 */
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Cache the mode timings we need for the position math below. */
	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	/* Interlaced modes scan out one field (half the lines) at a time. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		if (HAS_DDI(dev)) {
			/*
			 * On HSW HDMI outputs there seems to be a 2 line
			 * difference, whereas eDP has the normal 1 line
			 * difference that earlier platforms have. External
			 * DP is unknown. For now just check for the 2 line
			 * difference case on all output types on HSW+.
			 *
			 * This might misinterpret the scanline counter being
			 * one line too far along on eDP, but that's less
			 * dangerous than the alternative since that would lead
			 * the vblank timestamp code astray when it sees a
			 * scanline count before vblank_start during a vblank
			 * interrupt.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && (position == vbl_start - 2 ||
					position == vbl_start - 1)) ||
			    (!in_vbl && (position == vbl_end - 2 ||
					 position == vbl_end - 1)))
				position = (position + 2) % vtotal;
		} else if (HAS_PCH_SPLIT(dev)) {
			/*
			 * The scanline counter increments at the leading edge
			 * of hsync, ie. it completely misses the active portion
			 * of the line. Fix up the counter at both edges of vblank
			 * to get a more accurate picture whether we're in vblank
			 * or not.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && position == vbl_start - 1) ||
			    (!in_vbl && position == vbl_end - 1))
				position = (position + 1) % vtotal;
		} else {
			/*
			 * ISR vblank status bits don't work the way we'd want
			 * them to work on non-PCH platforms (for
			 * ilk_pipe_in_vblank_locked()), and there doesn't
			 * appear any other way to determine if we're currently
			 * in vblank.
			 *
			 * Instead let's assume that we're already in vblank if
			 * we got called from the vblank interrupt and the
			 * scanline counter value indicates that we're on the
			 * line just prior to vblank start. This should result
			 * in the correct answer, unless the vblank interrupt
			 * delivery really got delayed for almost exactly one
			 * full frame/field.
			 */
			if (flags & DRM_CALLED_FROM_VBLIRQ &&
			    position == vbl_start - 1) {
				position = (position + 1) % vtotal;

				/* Signal this correction as "applied". */
				ret |= 0x8;
			}
		}
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts, so vbl_start/end/vtotal and
		 * position share the same unit for the math below. */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* Scanline counter only: no horizontal resolution. */
		*vpos = position;
		*hpos = 0;
	} else {
		/* Pixel counter: split into line and pixel-within-line. */
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
902 | 902 | ||
903 | static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, | 903 | static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, |
904 | int *max_error, | 904 | int *max_error, |
905 | struct timeval *vblank_time, | 905 | struct timeval *vblank_time, |
906 | unsigned flags) | 906 | unsigned flags) |
907 | { | 907 | { |
908 | struct drm_crtc *crtc; | 908 | struct drm_crtc *crtc; |
909 | 909 | ||
910 | if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { | 910 | if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { |
911 | DRM_ERROR("Invalid crtc %d\n", pipe); | 911 | DRM_ERROR("Invalid crtc %d\n", pipe); |
912 | return -EINVAL; | 912 | return -EINVAL; |
913 | } | 913 | } |
914 | 914 | ||
915 | /* Get drm_crtc to timestamp: */ | 915 | /* Get drm_crtc to timestamp: */ |
916 | crtc = intel_get_crtc_for_pipe(dev, pipe); | 916 | crtc = intel_get_crtc_for_pipe(dev, pipe); |
917 | if (crtc == NULL) { | 917 | if (crtc == NULL) { |
918 | DRM_ERROR("Invalid crtc %d\n", pipe); | 918 | DRM_ERROR("Invalid crtc %d\n", pipe); |
919 | return -EINVAL; | 919 | return -EINVAL; |
920 | } | 920 | } |
921 | 921 | ||
922 | if (!crtc->enabled) { | 922 | if (!crtc->enabled) { |
923 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); | 923 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); |
924 | return -EBUSY; | 924 | return -EBUSY; |
925 | } | 925 | } |
926 | 926 | ||
927 | /* Helper routine in DRM core does all the work: */ | 927 | /* Helper routine in DRM core does all the work: */ |
928 | return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, | 928 | return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, |
929 | vblank_time, flags, | 929 | vblank_time, flags, |
930 | crtc, | 930 | crtc, |
931 | &to_intel_crtc(crtc)->config.adjusted_mode); | 931 | &to_intel_crtc(crtc)->config.adjusted_mode); |
932 | } | 932 | } |
933 | 933 | ||
934 | static bool intel_hpd_irq_event(struct drm_device *dev, | 934 | static bool intel_hpd_irq_event(struct drm_device *dev, |
935 | struct drm_connector *connector) | 935 | struct drm_connector *connector) |
936 | { | 936 | { |
937 | enum drm_connector_status old_status; | 937 | enum drm_connector_status old_status; |
938 | 938 | ||
939 | WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); | 939 | WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); |
940 | old_status = connector->status; | 940 | old_status = connector->status; |
941 | 941 | ||
942 | connector->status = connector->funcs->detect(connector, false); | 942 | connector->status = connector->funcs->detect(connector, false); |
943 | if (old_status == connector->status) | 943 | if (old_status == connector->status) |
944 | return false; | 944 | return false; |
945 | 945 | ||
946 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", | 946 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", |
947 | connector->base.id, | 947 | connector->base.id, |
948 | drm_get_connector_name(connector), | 948 | drm_get_connector_name(connector), |
949 | drm_get_connector_status_name(old_status), | 949 | drm_get_connector_status_name(old_status), |
950 | drm_get_connector_status_name(connector->status)); | 950 | drm_get_connector_status_name(connector->status)); |
951 | 951 | ||
952 | return true; | 952 | return true; |
953 | } | 953 | } |
954 | 954 | ||
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
/* How long a storm-disabled HPD pin stays on polling before the
 * reenable timer switches it back to interrupts: 2 minutes, in ms. */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
959 | 959 | ||
/*
 * Deferred hotplug handler (scheduled from the HPD interrupt). Two passes
 * over the connector list:
 *  1) under irq_lock: snapshot+clear hpd_event_bits, and demote any pin
 *     marked HPD_MARK_DISABLED (interrupt storm) from HPD to polling;
 *  2) without irq_lock (but under mode_config.mutex): run the encoders'
 *     hot_plug hooks and re-detect the affected connectors.
 * Sends a KMS hotplug uevent if any connector status changed.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	/* Snapshot and clear the pending event bits while holding irq_lock,
	 * so events arriving after this point get a fresh work invocation. */
	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			/* Storm detection flagged this pin: fall back to
			 * periodic polling instead of interrupts. */
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		/* Arm the timer that restores HPD on the disabled pins. */
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Second pass, outside irq_lock: detect may sleep / take locks. */
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
1030 | 1030 | ||
/*
 * Tear down the HPD storm re-enable timer on irq uninstall.
 * del_timer_sync() guarantees the timer callback is not running
 * (and will not run) once this returns.
 */
static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
{
	del_timer_sync(&dev_priv->hotplug_reenable_timer);
}
1035 | 1035 | ||
1036 | static void ironlake_rps_change_irq_handler(struct drm_device *dev) | 1036 | static void ironlake_rps_change_irq_handler(struct drm_device *dev) |
1037 | { | 1037 | { |
1038 | struct drm_i915_private *dev_priv = dev->dev_private; | 1038 | struct drm_i915_private *dev_priv = dev->dev_private; |
1039 | u32 busy_up, busy_down, max_avg, min_avg; | 1039 | u32 busy_up, busy_down, max_avg, min_avg; |
1040 | u8 new_delay; | 1040 | u8 new_delay; |
1041 | 1041 | ||
1042 | spin_lock(&mchdev_lock); | 1042 | spin_lock(&mchdev_lock); |
1043 | 1043 | ||
1044 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); | 1044 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); |
1045 | 1045 | ||
1046 | new_delay = dev_priv->ips.cur_delay; | 1046 | new_delay = dev_priv->ips.cur_delay; |
1047 | 1047 | ||
1048 | I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); | 1048 | I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); |
1049 | busy_up = I915_READ(RCPREVBSYTUPAVG); | 1049 | busy_up = I915_READ(RCPREVBSYTUPAVG); |
1050 | busy_down = I915_READ(RCPREVBSYTDNAVG); | 1050 | busy_down = I915_READ(RCPREVBSYTDNAVG); |
1051 | max_avg = I915_READ(RCBMAXAVG); | 1051 | max_avg = I915_READ(RCBMAXAVG); |
1052 | min_avg = I915_READ(RCBMINAVG); | 1052 | min_avg = I915_READ(RCBMINAVG); |
1053 | 1053 | ||
1054 | /* Handle RCS change request from hw */ | 1054 | /* Handle RCS change request from hw */ |
1055 | if (busy_up > max_avg) { | 1055 | if (busy_up > max_avg) { |
1056 | if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) | 1056 | if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) |
1057 | new_delay = dev_priv->ips.cur_delay - 1; | 1057 | new_delay = dev_priv->ips.cur_delay - 1; |
1058 | if (new_delay < dev_priv->ips.max_delay) | 1058 | if (new_delay < dev_priv->ips.max_delay) |
1059 | new_delay = dev_priv->ips.max_delay; | 1059 | new_delay = dev_priv->ips.max_delay; |
1060 | } else if (busy_down < min_avg) { | 1060 | } else if (busy_down < min_avg) { |
1061 | if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) | 1061 | if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) |
1062 | new_delay = dev_priv->ips.cur_delay + 1; | 1062 | new_delay = dev_priv->ips.cur_delay + 1; |
1063 | if (new_delay > dev_priv->ips.min_delay) | 1063 | if (new_delay > dev_priv->ips.min_delay) |
1064 | new_delay = dev_priv->ips.min_delay; | 1064 | new_delay = dev_priv->ips.min_delay; |
1065 | } | 1065 | } |
1066 | 1066 | ||
1067 | if (ironlake_set_drps(dev, new_delay)) | 1067 | if (ironlake_set_drps(dev, new_delay)) |
1068 | dev_priv->ips.cur_delay = new_delay; | 1068 | dev_priv->ips.cur_delay = new_delay; |
1069 | 1069 | ||
1070 | spin_unlock(&mchdev_lock); | 1070 | spin_unlock(&mchdev_lock); |
1071 | 1071 | ||
1072 | return; | 1072 | return; |
1073 | } | 1073 | } |
1074 | 1074 | ||
1075 | static void notify_ring(struct drm_device *dev, | 1075 | static void notify_ring(struct drm_device *dev, |
1076 | struct intel_ring_buffer *ring) | 1076 | struct intel_ring_buffer *ring) |
1077 | { | 1077 | { |
1078 | if (ring->obj == NULL) | 1078 | if (ring->obj == NULL) |
1079 | return; | 1079 | return; |
1080 | 1080 | ||
1081 | trace_i915_gem_request_complete(ring); | 1081 | trace_i915_gem_request_complete(ring); |
1082 | 1082 | ||
1083 | wake_up_all(&ring->irq_queue); | 1083 | wake_up_all(&ring->irq_queue); |
1084 | i915_queue_hangcheck(dev); | 1084 | i915_queue_hangcheck(dev); |
1085 | } | 1085 | } |
1086 | 1086 | ||
/*
 * Deferred RPS (render P-state) work for gen6+: consume the PM interrupt
 * bits queued by the irq handler and pick a new GPU frequency.
 *
 * Up-threshold events grow the step (adj doubles while trending up) and
 * snap to the efficient frequency (RPe) from below; down-timeout drops
 * to RPe or the soft minimum; down-threshold steps down with a growing
 * negative step. The result is clamped to the sysfs soft limits before
 * being programmed.
 */
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	/* Snapshot and clear the queued PM bits, then re-enable the PM
	 * interrupts, all under irq_lock. */
	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	/* last_adj carries momentum between invocations. */
	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	/* Recompute the effective adjustment after clamping. */
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
1155 | 1155 | ||
1156 | 1156 | ||
1157 | /** | 1157 | /** |
1158 | * ivybridge_parity_work - Workqueue called when a parity error interrupt | 1158 | * ivybridge_parity_work - Workqueue called when a parity error interrupt |
1159 | * occurred. | 1159 | * occurred. |
1160 | * @work: workqueue struct | 1160 | * @work: workqueue struct |
1161 | * | 1161 | * |
1162 | * Doesn't actually do anything except notify userspace. As a consequence of | 1162 | * Doesn't actually do anything except notify userspace. As a consequence of |
1163 | * this event, userspace should try to remap the bad rows since statistically | 1163 | * this event, userspace should try to remap the bad rows since statistically |
1164 | * it is likely the same row is more likely to go bad again. | 1164 | * it is likely the same row is more likely to go bad again. |
1165 | */ | 1165 | */ |
1166 | static void ivybridge_parity_work(struct work_struct *work) | 1166 | static void ivybridge_parity_work(struct work_struct *work) |
1167 | { | 1167 | { |
1168 | struct drm_i915_private *dev_priv = | 1168 | struct drm_i915_private *dev_priv = |
1169 | container_of(work, struct drm_i915_private, l3_parity.error_work); | 1169 | container_of(work, struct drm_i915_private, l3_parity.error_work); |
1170 | u32 error_status, row, bank, subbank; | 1170 | u32 error_status, row, bank, subbank; |
1171 | char *parity_event[6]; | 1171 | char *parity_event[6]; |
1172 | uint32_t misccpctl; | 1172 | uint32_t misccpctl; |
1173 | unsigned long flags; | 1173 | unsigned long flags; |
1174 | uint8_t slice = 0; | 1174 | uint8_t slice = 0; |
1175 | 1175 | ||
1176 | /* We must turn off DOP level clock gating to access the L3 registers. | 1176 | /* We must turn off DOP level clock gating to access the L3 registers. |
1177 | * In order to prevent a get/put style interface, acquire struct mutex | 1177 | * In order to prevent a get/put style interface, acquire struct mutex |
1178 | * any time we access those registers. | 1178 | * any time we access those registers. |
1179 | */ | 1179 | */ |
1180 | mutex_lock(&dev_priv->dev->struct_mutex); | 1180 | mutex_lock(&dev_priv->dev->struct_mutex); |
1181 | 1181 | ||
1182 | /* If we've screwed up tracking, just let the interrupt fire again */ | 1182 | /* If we've screwed up tracking, just let the interrupt fire again */ |
1183 | if (WARN_ON(!dev_priv->l3_parity.which_slice)) | 1183 | if (WARN_ON(!dev_priv->l3_parity.which_slice)) |
1184 | goto out; | 1184 | goto out; |
1185 | 1185 | ||
1186 | misccpctl = I915_READ(GEN7_MISCCPCTL); | 1186 | misccpctl = I915_READ(GEN7_MISCCPCTL); |
1187 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | 1187 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); |
1188 | POSTING_READ(GEN7_MISCCPCTL); | 1188 | POSTING_READ(GEN7_MISCCPCTL); |
1189 | 1189 | ||
1190 | while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { | 1190 | while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { |
1191 | u32 reg; | 1191 | u32 reg; |
1192 | 1192 | ||
1193 | slice--; | 1193 | slice--; |
1194 | if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) | 1194 | if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) |
1195 | break; | 1195 | break; |
1196 | 1196 | ||
1197 | dev_priv->l3_parity.which_slice &= ~(1<<slice); | 1197 | dev_priv->l3_parity.which_slice &= ~(1<<slice); |
1198 | 1198 | ||
1199 | reg = GEN7_L3CDERRST1 + (slice * 0x200); | 1199 | reg = GEN7_L3CDERRST1 + (slice * 0x200); |
1200 | 1200 | ||
1201 | error_status = I915_READ(reg); | 1201 | error_status = I915_READ(reg); |
1202 | row = GEN7_PARITY_ERROR_ROW(error_status); | 1202 | row = GEN7_PARITY_ERROR_ROW(error_status); |
1203 | bank = GEN7_PARITY_ERROR_BANK(error_status); | 1203 | bank = GEN7_PARITY_ERROR_BANK(error_status); |
1204 | subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); | 1204 | subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); |
1205 | 1205 | ||
1206 | I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); | 1206 | I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); |
1207 | POSTING_READ(reg); | 1207 | POSTING_READ(reg); |
1208 | 1208 | ||
1209 | parity_event[0] = I915_L3_PARITY_UEVENT "=1"; | 1209 | parity_event[0] = I915_L3_PARITY_UEVENT "=1"; |
1210 | parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); | 1210 | parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); |
1211 | parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); | 1211 | parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); |
1212 | parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); | 1212 | parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); |
1213 | parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); | 1213 | parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); |
1214 | parity_event[5] = NULL; | 1214 | parity_event[5] = NULL; |
1215 | 1215 | ||
1216 | kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, | 1216 | kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, |
1217 | KOBJ_CHANGE, parity_event); | 1217 | KOBJ_CHANGE, parity_event); |
1218 | 1218 | ||
1219 | DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", | 1219 | DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", |
1220 | slice, row, bank, subbank); | 1220 | slice, row, bank, subbank); |
1221 | 1221 | ||
1222 | kfree(parity_event[4]); | 1222 | kfree(parity_event[4]); |
1223 | kfree(parity_event[3]); | 1223 | kfree(parity_event[3]); |
1224 | kfree(parity_event[2]); | 1224 | kfree(parity_event[2]); |
1225 | kfree(parity_event[1]); | 1225 | kfree(parity_event[1]); |
1226 | } | 1226 | } |
1227 | 1227 | ||
1228 | I915_WRITE(GEN7_MISCCPCTL, misccpctl); | 1228 | I915_WRITE(GEN7_MISCCPCTL, misccpctl); |
1229 | 1229 | ||
1230 | out: | 1230 | out: |
1231 | WARN_ON(dev_priv->l3_parity.which_slice); | 1231 | WARN_ON(dev_priv->l3_parity.which_slice); |
1232 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 1232 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
1233 | ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); | 1233 | ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); |
1234 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 1234 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
1235 | 1235 | ||
1236 | mutex_unlock(&dev_priv->dev->struct_mutex); | 1236 | mutex_unlock(&dev_priv->dev->struct_mutex); |
1237 | } | 1237 | } |
1238 | 1238 | ||
1239 | static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) | 1239 | static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) |
1240 | { | 1240 | { |
1241 | struct drm_i915_private *dev_priv = dev->dev_private; | 1241 | struct drm_i915_private *dev_priv = dev->dev_private; |
1242 | 1242 | ||
1243 | if (!HAS_L3_DPF(dev)) | 1243 | if (!HAS_L3_DPF(dev)) |
1244 | return; | 1244 | return; |
1245 | 1245 | ||
1246 | spin_lock(&dev_priv->irq_lock); | 1246 | spin_lock(&dev_priv->irq_lock); |
1247 | ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); | 1247 | ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); |
1248 | spin_unlock(&dev_priv->irq_lock); | 1248 | spin_unlock(&dev_priv->irq_lock); |
1249 | 1249 | ||
1250 | iir &= GT_PARITY_ERROR(dev); | 1250 | iir &= GT_PARITY_ERROR(dev); |
1251 | if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) | 1251 | if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) |
1252 | dev_priv->l3_parity.which_slice |= 1 << 1; | 1252 | dev_priv->l3_parity.which_slice |= 1 << 1; |
1253 | 1253 | ||
1254 | if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) | 1254 | if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) |
1255 | dev_priv->l3_parity.which_slice |= 1 << 0; | 1255 | dev_priv->l3_parity.which_slice |= 1 << 0; |
1256 | 1256 | ||
1257 | queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); | 1257 | queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); |
1258 | } | 1258 | } |
1259 | 1259 | ||
1260 | static void ilk_gt_irq_handler(struct drm_device *dev, | 1260 | static void ilk_gt_irq_handler(struct drm_device *dev, |
1261 | struct drm_i915_private *dev_priv, | 1261 | struct drm_i915_private *dev_priv, |
1262 | u32 gt_iir) | 1262 | u32 gt_iir) |
1263 | { | 1263 | { |
1264 | if (gt_iir & | 1264 | if (gt_iir & |
1265 | (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) | 1265 | (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) |
1266 | notify_ring(dev, &dev_priv->ring[RCS]); | 1266 | notify_ring(dev, &dev_priv->ring[RCS]); |
1267 | if (gt_iir & ILK_BSD_USER_INTERRUPT) | 1267 | if (gt_iir & ILK_BSD_USER_INTERRUPT) |
1268 | notify_ring(dev, &dev_priv->ring[VCS]); | 1268 | notify_ring(dev, &dev_priv->ring[VCS]); |
1269 | } | 1269 | } |
1270 | 1270 | ||
1271 | static void snb_gt_irq_handler(struct drm_device *dev, | 1271 | static void snb_gt_irq_handler(struct drm_device *dev, |
1272 | struct drm_i915_private *dev_priv, | 1272 | struct drm_i915_private *dev_priv, |
1273 | u32 gt_iir) | 1273 | u32 gt_iir) |
1274 | { | 1274 | { |
1275 | 1275 | ||
1276 | if (gt_iir & | 1276 | if (gt_iir & |
1277 | (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) | 1277 | (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) |
1278 | notify_ring(dev, &dev_priv->ring[RCS]); | 1278 | notify_ring(dev, &dev_priv->ring[RCS]); |
1279 | if (gt_iir & GT_BSD_USER_INTERRUPT) | 1279 | if (gt_iir & GT_BSD_USER_INTERRUPT) |
1280 | notify_ring(dev, &dev_priv->ring[VCS]); | 1280 | notify_ring(dev, &dev_priv->ring[VCS]); |
1281 | if (gt_iir & GT_BLT_USER_INTERRUPT) | 1281 | if (gt_iir & GT_BLT_USER_INTERRUPT) |
1282 | notify_ring(dev, &dev_priv->ring[BCS]); | 1282 | notify_ring(dev, &dev_priv->ring[BCS]); |
1283 | 1283 | ||
1284 | if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | | 1284 | if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | |
1285 | GT_BSD_CS_ERROR_INTERRUPT | | 1285 | GT_BSD_CS_ERROR_INTERRUPT | |
1286 | GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { | 1286 | GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { |
1287 | i915_handle_error(dev, false, "GT error interrupt 0x%08x", | 1287 | i915_handle_error(dev, false, "GT error interrupt 0x%08x", |
1288 | gt_iir); | 1288 | gt_iir); |
1289 | } | 1289 | } |
1290 | 1290 | ||
1291 | if (gt_iir & GT_PARITY_ERROR(dev)) | 1291 | if (gt_iir & GT_PARITY_ERROR(dev)) |
1292 | ivybridge_parity_error_irq_handler(dev, gt_iir); | 1292 | ivybridge_parity_error_irq_handler(dev, gt_iir); |
1293 | } | 1293 | } |
1294 | 1294 | ||
1295 | static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, | 1295 | static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, |
1296 | struct drm_i915_private *dev_priv, | 1296 | struct drm_i915_private *dev_priv, |
1297 | u32 master_ctl) | 1297 | u32 master_ctl) |
1298 | { | 1298 | { |
1299 | u32 rcs, bcs, vcs; | 1299 | u32 rcs, bcs, vcs; |
1300 | uint32_t tmp = 0; | 1300 | uint32_t tmp = 0; |
1301 | irqreturn_t ret = IRQ_NONE; | 1301 | irqreturn_t ret = IRQ_NONE; |
1302 | 1302 | ||
1303 | if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { | 1303 | if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { |
1304 | tmp = I915_READ(GEN8_GT_IIR(0)); | 1304 | tmp = I915_READ(GEN8_GT_IIR(0)); |
1305 | if (tmp) { | 1305 | if (tmp) { |
1306 | ret = IRQ_HANDLED; | 1306 | ret = IRQ_HANDLED; |
1307 | rcs = tmp >> GEN8_RCS_IRQ_SHIFT; | 1307 | rcs = tmp >> GEN8_RCS_IRQ_SHIFT; |
1308 | bcs = tmp >> GEN8_BCS_IRQ_SHIFT; | 1308 | bcs = tmp >> GEN8_BCS_IRQ_SHIFT; |
1309 | if (rcs & GT_RENDER_USER_INTERRUPT) | 1309 | if (rcs & GT_RENDER_USER_INTERRUPT) |
1310 | notify_ring(dev, &dev_priv->ring[RCS]); | 1310 | notify_ring(dev, &dev_priv->ring[RCS]); |
1311 | if (bcs & GT_RENDER_USER_INTERRUPT) | 1311 | if (bcs & GT_RENDER_USER_INTERRUPT) |
1312 | notify_ring(dev, &dev_priv->ring[BCS]); | 1312 | notify_ring(dev, &dev_priv->ring[BCS]); |
1313 | I915_WRITE(GEN8_GT_IIR(0), tmp); | 1313 | I915_WRITE(GEN8_GT_IIR(0), tmp); |
1314 | } else | 1314 | } else |
1315 | DRM_ERROR("The master control interrupt lied (GT0)!\n"); | 1315 | DRM_ERROR("The master control interrupt lied (GT0)!\n"); |
1316 | } | 1316 | } |
1317 | 1317 | ||
1318 | if (master_ctl & GEN8_GT_VCS1_IRQ) { | 1318 | if (master_ctl & GEN8_GT_VCS1_IRQ) { |
1319 | tmp = I915_READ(GEN8_GT_IIR(1)); | 1319 | tmp = I915_READ(GEN8_GT_IIR(1)); |
1320 | if (tmp) { | 1320 | if (tmp) { |
1321 | ret = IRQ_HANDLED; | 1321 | ret = IRQ_HANDLED; |
1322 | vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; | 1322 | vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; |
1323 | if (vcs & GT_RENDER_USER_INTERRUPT) | 1323 | if (vcs & GT_RENDER_USER_INTERRUPT) |
1324 | notify_ring(dev, &dev_priv->ring[VCS]); | 1324 | notify_ring(dev, &dev_priv->ring[VCS]); |
1325 | I915_WRITE(GEN8_GT_IIR(1), tmp); | 1325 | I915_WRITE(GEN8_GT_IIR(1), tmp); |
1326 | } else | 1326 | } else |
1327 | DRM_ERROR("The master control interrupt lied (GT1)!\n"); | 1327 | DRM_ERROR("The master control interrupt lied (GT1)!\n"); |
1328 | } | 1328 | } |
1329 | 1329 | ||
1330 | if (master_ctl & GEN8_GT_VECS_IRQ) { | 1330 | if (master_ctl & GEN8_GT_VECS_IRQ) { |
1331 | tmp = I915_READ(GEN8_GT_IIR(3)); | 1331 | tmp = I915_READ(GEN8_GT_IIR(3)); |
1332 | if (tmp) { | 1332 | if (tmp) { |
1333 | ret = IRQ_HANDLED; | 1333 | ret = IRQ_HANDLED; |
1334 | vcs = tmp >> GEN8_VECS_IRQ_SHIFT; | 1334 | vcs = tmp >> GEN8_VECS_IRQ_SHIFT; |
1335 | if (vcs & GT_RENDER_USER_INTERRUPT) | 1335 | if (vcs & GT_RENDER_USER_INTERRUPT) |
1336 | notify_ring(dev, &dev_priv->ring[VECS]); | 1336 | notify_ring(dev, &dev_priv->ring[VECS]); |
1337 | I915_WRITE(GEN8_GT_IIR(3), tmp); | 1337 | I915_WRITE(GEN8_GT_IIR(3), tmp); |
1338 | } else | 1338 | } else |
1339 | DRM_ERROR("The master control interrupt lied (GT3)!\n"); | 1339 | DRM_ERROR("The master control interrupt lied (GT3)!\n"); |
1340 | } | 1340 | } |
1341 | 1341 | ||
1342 | return ret; | 1342 | return ret; |
1343 | } | 1343 | } |
1344 | 1344 | ||
1345 | #define HPD_STORM_DETECT_PERIOD 1000 | 1345 | #define HPD_STORM_DETECT_PERIOD 1000 |
1346 | #define HPD_STORM_THRESHOLD 5 | 1346 | #define HPD_STORM_THRESHOLD 5 |
1347 | 1347 | ||
1348 | static inline void intel_hpd_irq_handler(struct drm_device *dev, | 1348 | static inline void intel_hpd_irq_handler(struct drm_device *dev, |
1349 | u32 hotplug_trigger, | 1349 | u32 hotplug_trigger, |
1350 | const u32 *hpd) | 1350 | const u32 *hpd) |
1351 | { | 1351 | { |
1352 | struct drm_i915_private *dev_priv = dev->dev_private; | 1352 | struct drm_i915_private *dev_priv = dev->dev_private; |
1353 | int i; | 1353 | int i; |
1354 | bool storm_detected = false; | 1354 | bool storm_detected = false; |
1355 | 1355 | ||
1356 | if (!hotplug_trigger) | 1356 | if (!hotplug_trigger) |
1357 | return; | 1357 | return; |
1358 | 1358 | ||
1359 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | 1359 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", |
1360 | hotplug_trigger); | 1360 | hotplug_trigger); |
1361 | 1361 | ||
1362 | spin_lock(&dev_priv->irq_lock); | 1362 | spin_lock(&dev_priv->irq_lock); |
1363 | for (i = 1; i < HPD_NUM_PINS; i++) { | 1363 | for (i = 1; i < HPD_NUM_PINS; i++) { |
1364 | 1364 | ||
1365 | WARN_ONCE(hpd[i] & hotplug_trigger && | 1365 | if (hpd[i] & hotplug_trigger && |
1366 | dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED, | 1366 | dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { |
1367 | "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", | 1367 | /* |
1368 | hotplug_trigger, i, hpd[i]); | 1368 | * On GMCH platforms the interrupt mask bits only |
1369 | * prevent irq generation, not the setting of the | ||
1370 | * hotplug bits itself. So only WARN about unexpected | ||
1371 | * interrupts on saner platforms. | ||
1372 | */ | ||
1373 | WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), | ||
1374 | "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", | ||
1375 | hotplug_trigger, i, hpd[i]); | ||
1376 | |||
1377 | continue; | ||
1378 | } | ||
1369 | 1379 | ||
1370 | if (!(hpd[i] & hotplug_trigger) || | 1380 | if (!(hpd[i] & hotplug_trigger) || |
1371 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) | 1381 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) |
1372 | continue; | 1382 | continue; |
1373 | 1383 | ||
1374 | dev_priv->hpd_event_bits |= (1 << i); | 1384 | dev_priv->hpd_event_bits |= (1 << i); |
1375 | if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, | 1385 | if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, |
1376 | dev_priv->hpd_stats[i].hpd_last_jiffies | 1386 | dev_priv->hpd_stats[i].hpd_last_jiffies |
1377 | + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { | 1387 | + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { |
1378 | dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; | 1388 | dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; |
1379 | dev_priv->hpd_stats[i].hpd_cnt = 0; | 1389 | dev_priv->hpd_stats[i].hpd_cnt = 0; |
1380 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); | 1390 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); |
1381 | } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { | 1391 | } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { |
1382 | dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; | 1392 | dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; |
1383 | dev_priv->hpd_event_bits &= ~(1 << i); | 1393 | dev_priv->hpd_event_bits &= ~(1 << i); |
1384 | DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); | 1394 | DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); |
1385 | storm_detected = true; | 1395 | storm_detected = true; |
1386 | } else { | 1396 | } else { |
1387 | dev_priv->hpd_stats[i].hpd_cnt++; | 1397 | dev_priv->hpd_stats[i].hpd_cnt++; |
1388 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, | 1398 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, |
1389 | dev_priv->hpd_stats[i].hpd_cnt); | 1399 | dev_priv->hpd_stats[i].hpd_cnt); |
1390 | } | 1400 | } |
1391 | } | 1401 | } |
1392 | 1402 | ||
1393 | if (storm_detected) | 1403 | if (storm_detected) |
1394 | dev_priv->display.hpd_irq_setup(dev); | 1404 | dev_priv->display.hpd_irq_setup(dev); |
1395 | spin_unlock(&dev_priv->irq_lock); | 1405 | spin_unlock(&dev_priv->irq_lock); |
1396 | 1406 | ||
1397 | /* | 1407 | /* |
1398 | * Our hotplug handler can grab modeset locks (by calling down into the | 1408 | * Our hotplug handler can grab modeset locks (by calling down into the |
1399 | * fb helpers). Hence it must not be run on our own dev-priv->wq work | 1409 | * fb helpers). Hence it must not be run on our own dev-priv->wq work |
1400 | * queue for otherwise the flush_work in the pageflip code will | 1410 | * queue for otherwise the flush_work in the pageflip code will |
1401 | * deadlock. | 1411 | * deadlock. |
1402 | */ | 1412 | */ |
1403 | schedule_work(&dev_priv->hotplug_work); | 1413 | schedule_work(&dev_priv->hotplug_work); |
1404 | } | 1414 | } |
1405 | 1415 | ||
1406 | static void gmbus_irq_handler(struct drm_device *dev) | 1416 | static void gmbus_irq_handler(struct drm_device *dev) |
1407 | { | 1417 | { |
1408 | struct drm_i915_private *dev_priv = dev->dev_private; | 1418 | struct drm_i915_private *dev_priv = dev->dev_private; |
1409 | 1419 | ||
1410 | wake_up_all(&dev_priv->gmbus_wait_queue); | 1420 | wake_up_all(&dev_priv->gmbus_wait_queue); |
1411 | } | 1421 | } |
1412 | 1422 | ||
1413 | static void dp_aux_irq_handler(struct drm_device *dev) | 1423 | static void dp_aux_irq_handler(struct drm_device *dev) |
1414 | { | 1424 | { |
1415 | struct drm_i915_private *dev_priv = dev->dev_private; | 1425 | struct drm_i915_private *dev_priv = dev->dev_private; |
1416 | 1426 | ||
1417 | wake_up_all(&dev_priv->gmbus_wait_queue); | 1427 | wake_up_all(&dev_priv->gmbus_wait_queue); |
1418 | } | 1428 | } |
1419 | 1429 | ||
1420 | #if defined(CONFIG_DEBUG_FS) | 1430 | #if defined(CONFIG_DEBUG_FS) |
1421 | static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, | 1431 | static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, |
1422 | uint32_t crc0, uint32_t crc1, | 1432 | uint32_t crc0, uint32_t crc1, |
1423 | uint32_t crc2, uint32_t crc3, | 1433 | uint32_t crc2, uint32_t crc3, |
1424 | uint32_t crc4) | 1434 | uint32_t crc4) |
1425 | { | 1435 | { |
1426 | struct drm_i915_private *dev_priv = dev->dev_private; | 1436 | struct drm_i915_private *dev_priv = dev->dev_private; |
1427 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; | 1437 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; |
1428 | struct intel_pipe_crc_entry *entry; | 1438 | struct intel_pipe_crc_entry *entry; |
1429 | int head, tail; | 1439 | int head, tail; |
1430 | 1440 | ||
1431 | spin_lock(&pipe_crc->lock); | 1441 | spin_lock(&pipe_crc->lock); |
1432 | 1442 | ||
1433 | if (!pipe_crc->entries) { | 1443 | if (!pipe_crc->entries) { |
1434 | spin_unlock(&pipe_crc->lock); | 1444 | spin_unlock(&pipe_crc->lock); |
1435 | DRM_ERROR("spurious interrupt\n"); | 1445 | DRM_ERROR("spurious interrupt\n"); |
1436 | return; | 1446 | return; |
1437 | } | 1447 | } |
1438 | 1448 | ||
1439 | head = pipe_crc->head; | 1449 | head = pipe_crc->head; |
1440 | tail = pipe_crc->tail; | 1450 | tail = pipe_crc->tail; |
1441 | 1451 | ||
1442 | if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { | 1452 | if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { |
1443 | spin_unlock(&pipe_crc->lock); | 1453 | spin_unlock(&pipe_crc->lock); |
1444 | DRM_ERROR("CRC buffer overflowing\n"); | 1454 | DRM_ERROR("CRC buffer overflowing\n"); |
1445 | return; | 1455 | return; |
1446 | } | 1456 | } |
1447 | 1457 | ||
1448 | entry = &pipe_crc->entries[head]; | 1458 | entry = &pipe_crc->entries[head]; |
1449 | 1459 | ||
1450 | entry->frame = dev->driver->get_vblank_counter(dev, pipe); | 1460 | entry->frame = dev->driver->get_vblank_counter(dev, pipe); |
1451 | entry->crc[0] = crc0; | 1461 | entry->crc[0] = crc0; |
1452 | entry->crc[1] = crc1; | 1462 | entry->crc[1] = crc1; |
1453 | entry->crc[2] = crc2; | 1463 | entry->crc[2] = crc2; |
1454 | entry->crc[3] = crc3; | 1464 | entry->crc[3] = crc3; |
1455 | entry->crc[4] = crc4; | 1465 | entry->crc[4] = crc4; |
1456 | 1466 | ||
1457 | head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); | 1467 | head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); |
1458 | pipe_crc->head = head; | 1468 | pipe_crc->head = head; |
1459 | 1469 | ||
1460 | spin_unlock(&pipe_crc->lock); | 1470 | spin_unlock(&pipe_crc->lock); |
1461 | 1471 | ||
1462 | wake_up_interruptible(&pipe_crc->wq); | 1472 | wake_up_interruptible(&pipe_crc->wq); |
1463 | } | 1473 | } |
1464 | #else | 1474 | #else |
1465 | static inline void | 1475 | static inline void |
1466 | display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, | 1476 | display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, |
1467 | uint32_t crc0, uint32_t crc1, | 1477 | uint32_t crc0, uint32_t crc1, |
1468 | uint32_t crc2, uint32_t crc3, | 1478 | uint32_t crc2, uint32_t crc3, |
1469 | uint32_t crc4) {} | 1479 | uint32_t crc4) {} |
1470 | #endif | 1480 | #endif |
1471 | 1481 | ||
1472 | 1482 | ||
1473 | static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) | 1483 | static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) |
1474 | { | 1484 | { |
1475 | struct drm_i915_private *dev_priv = dev->dev_private; | 1485 | struct drm_i915_private *dev_priv = dev->dev_private; |
1476 | 1486 | ||
1477 | display_pipe_crc_irq_handler(dev, pipe, | 1487 | display_pipe_crc_irq_handler(dev, pipe, |
1478 | I915_READ(PIPE_CRC_RES_1_IVB(pipe)), | 1488 | I915_READ(PIPE_CRC_RES_1_IVB(pipe)), |
1479 | 0, 0, 0, 0); | 1489 | 0, 0, 0, 0); |
1480 | } | 1490 | } |
1481 | 1491 | ||
1482 | static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) | 1492 | static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) |
1483 | { | 1493 | { |
1484 | struct drm_i915_private *dev_priv = dev->dev_private; | 1494 | struct drm_i915_private *dev_priv = dev->dev_private; |
1485 | 1495 | ||
1486 | display_pipe_crc_irq_handler(dev, pipe, | 1496 | display_pipe_crc_irq_handler(dev, pipe, |
1487 | I915_READ(PIPE_CRC_RES_1_IVB(pipe)), | 1497 | I915_READ(PIPE_CRC_RES_1_IVB(pipe)), |
1488 | I915_READ(PIPE_CRC_RES_2_IVB(pipe)), | 1498 | I915_READ(PIPE_CRC_RES_2_IVB(pipe)), |
1489 | I915_READ(PIPE_CRC_RES_3_IVB(pipe)), | 1499 | I915_READ(PIPE_CRC_RES_3_IVB(pipe)), |
1490 | I915_READ(PIPE_CRC_RES_4_IVB(pipe)), | 1500 | I915_READ(PIPE_CRC_RES_4_IVB(pipe)), |
1491 | I915_READ(PIPE_CRC_RES_5_IVB(pipe))); | 1501 | I915_READ(PIPE_CRC_RES_5_IVB(pipe))); |
1492 | } | 1502 | } |
1493 | 1503 | ||
1494 | static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) | 1504 | static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) |
1495 | { | 1505 | { |
1496 | struct drm_i915_private *dev_priv = dev->dev_private; | 1506 | struct drm_i915_private *dev_priv = dev->dev_private; |
1497 | uint32_t res1, res2; | 1507 | uint32_t res1, res2; |
1498 | 1508 | ||
1499 | if (INTEL_INFO(dev)->gen >= 3) | 1509 | if (INTEL_INFO(dev)->gen >= 3) |
1500 | res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); | 1510 | res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); |
1501 | else | 1511 | else |
1502 | res1 = 0; | 1512 | res1 = 0; |
1503 | 1513 | ||
1504 | if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) | 1514 | if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) |
1505 | res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); | 1515 | res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); |
1506 | else | 1516 | else |
1507 | res2 = 0; | 1517 | res2 = 0; |
1508 | 1518 | ||
1509 | display_pipe_crc_irq_handler(dev, pipe, | 1519 | display_pipe_crc_irq_handler(dev, pipe, |
1510 | I915_READ(PIPE_CRC_RES_RED(pipe)), | 1520 | I915_READ(PIPE_CRC_RES_RED(pipe)), |
1511 | I915_READ(PIPE_CRC_RES_GREEN(pipe)), | 1521 | I915_READ(PIPE_CRC_RES_GREEN(pipe)), |
1512 | I915_READ(PIPE_CRC_RES_BLUE(pipe)), | 1522 | I915_READ(PIPE_CRC_RES_BLUE(pipe)), |
1513 | res1, res2); | 1523 | res1, res2); |
1514 | } | 1524 | } |
1515 | 1525 | ||
1516 | /* The RPS events need forcewake, so we add them to a work queue and mask their | 1526 | /* The RPS events need forcewake, so we add them to a work queue and mask their |
1517 | * IMR bits until the work is done. Other interrupts can be processed without | 1527 | * IMR bits until the work is done. Other interrupts can be processed without |
1518 | * the work queue. */ | 1528 | * the work queue. */ |
1519 | static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) | 1529 | static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) |
1520 | { | 1530 | { |
1521 | if (pm_iir & dev_priv->pm_rps_events) { | 1531 | if (pm_iir & dev_priv->pm_rps_events) { |
1522 | spin_lock(&dev_priv->irq_lock); | 1532 | spin_lock(&dev_priv->irq_lock); |
1523 | dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; | 1533 | dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; |
1524 | snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); | 1534 | snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); |
1525 | spin_unlock(&dev_priv->irq_lock); | 1535 | spin_unlock(&dev_priv->irq_lock); |
1526 | 1536 | ||
1527 | queue_work(dev_priv->wq, &dev_priv->rps.work); | 1537 | queue_work(dev_priv->wq, &dev_priv->rps.work); |
1528 | } | 1538 | } |
1529 | 1539 | ||
1530 | if (HAS_VEBOX(dev_priv->dev)) { | 1540 | if (HAS_VEBOX(dev_priv->dev)) { |
1531 | if (pm_iir & PM_VEBOX_USER_INTERRUPT) | 1541 | if (pm_iir & PM_VEBOX_USER_INTERRUPT) |
1532 | notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); | 1542 | notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); |
1533 | 1543 | ||
1534 | if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { | 1544 | if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { |
1535 | i915_handle_error(dev_priv->dev, false, | 1545 | i915_handle_error(dev_priv->dev, false, |
1536 | "VEBOX CS error interrupt 0x%08x", | 1546 | "VEBOX CS error interrupt 0x%08x", |
1537 | pm_iir); | 1547 | pm_iir); |
1538 | } | 1548 | } |
1539 | } | 1549 | } |
1540 | } | 1550 | } |
1541 | 1551 | ||
1542 | static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) | 1552 | static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) |
1543 | { | 1553 | { |
1544 | struct drm_i915_private *dev_priv = dev->dev_private; | 1554 | struct drm_i915_private *dev_priv = dev->dev_private; |
1545 | u32 pipe_stats[I915_MAX_PIPES] = { }; | 1555 | u32 pipe_stats[I915_MAX_PIPES] = { }; |
1546 | int pipe; | 1556 | int pipe; |
1547 | 1557 | ||
1548 | spin_lock(&dev_priv->irq_lock); | 1558 | spin_lock(&dev_priv->irq_lock); |
1549 | for_each_pipe(pipe) { | 1559 | for_each_pipe(pipe) { |
1550 | int reg; | 1560 | int reg; |
1551 | u32 mask, iir_bit = 0; | 1561 | u32 mask, iir_bit = 0; |
1552 | 1562 | ||
1553 | /* | 1563 | /* |
1554 | * PIPESTAT bits get signalled even when the interrupt is | 1564 | * PIPESTAT bits get signalled even when the interrupt is |
1555 | * disabled with the mask bits, and some of the status bits do | 1565 | * disabled with the mask bits, and some of the status bits do |
1556 | * not generate interrupts at all (like the underrun bit). Hence | 1566 | * not generate interrupts at all (like the underrun bit). Hence |
1557 | * we need to be careful that we only handle what we want to | 1567 | * we need to be careful that we only handle what we want to |
1558 | * handle. | 1568 | * handle. |
1559 | */ | 1569 | */ |
1560 | mask = 0; | 1570 | mask = 0; |
1561 | if (__cpu_fifo_underrun_reporting_enabled(dev, pipe)) | 1571 | if (__cpu_fifo_underrun_reporting_enabled(dev, pipe)) |
1562 | mask |= PIPE_FIFO_UNDERRUN_STATUS; | 1572 | mask |= PIPE_FIFO_UNDERRUN_STATUS; |
1563 | 1573 | ||
1564 | switch (pipe) { | 1574 | switch (pipe) { |
1565 | case PIPE_A: | 1575 | case PIPE_A: |
1566 | iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; | 1576 | iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; |
1567 | break; | 1577 | break; |
1568 | case PIPE_B: | 1578 | case PIPE_B: |
1569 | iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | 1579 | iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; |
1570 | break; | 1580 | break; |
1571 | } | 1581 | } |
1572 | if (iir & iir_bit) | 1582 | if (iir & iir_bit) |
1573 | mask |= dev_priv->pipestat_irq_mask[pipe]; | 1583 | mask |= dev_priv->pipestat_irq_mask[pipe]; |
1574 | 1584 | ||
1575 | if (!mask) | 1585 | if (!mask) |
1576 | continue; | 1586 | continue; |
1577 | 1587 | ||
1578 | reg = PIPESTAT(pipe); | 1588 | reg = PIPESTAT(pipe); |
1579 | mask |= PIPESTAT_INT_ENABLE_MASK; | 1589 | mask |= PIPESTAT_INT_ENABLE_MASK; |
1580 | pipe_stats[pipe] = I915_READ(reg) & mask; | 1590 | pipe_stats[pipe] = I915_READ(reg) & mask; |
1581 | 1591 | ||
1582 | /* | 1592 | /* |
1583 | * Clear the PIPE*STAT regs before the IIR | 1593 | * Clear the PIPE*STAT regs before the IIR |
1584 | */ | 1594 | */ |
1585 | if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | | 1595 | if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | |
1586 | PIPESTAT_INT_STATUS_MASK)) | 1596 | PIPESTAT_INT_STATUS_MASK)) |
1587 | I915_WRITE(reg, pipe_stats[pipe]); | 1597 | I915_WRITE(reg, pipe_stats[pipe]); |
1588 | } | 1598 | } |
1589 | spin_unlock(&dev_priv->irq_lock); | 1599 | spin_unlock(&dev_priv->irq_lock); |
1590 | 1600 | ||
1591 | for_each_pipe(pipe) { | 1601 | for_each_pipe(pipe) { |
1592 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) | 1602 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) |
1593 | drm_handle_vblank(dev, pipe); | 1603 | drm_handle_vblank(dev, pipe); |
1594 | 1604 | ||
1595 | if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { | 1605 | if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { |
1596 | intel_prepare_page_flip(dev, pipe); | 1606 | intel_prepare_page_flip(dev, pipe); |
1597 | intel_finish_page_flip(dev, pipe); | 1607 | intel_finish_page_flip(dev, pipe); |
1598 | } | 1608 | } |
1599 | 1609 | ||
1600 | if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) | 1610 | if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) |
1601 | i9xx_pipe_crc_irq_handler(dev, pipe); | 1611 | i9xx_pipe_crc_irq_handler(dev, pipe); |
1602 | 1612 | ||
1603 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && | 1613 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && |
1604 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) | 1614 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) |
1605 | DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); | 1615 | DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); |
1606 | } | 1616 | } |
1607 | 1617 | ||
1608 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) | 1618 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) |
1609 | gmbus_irq_handler(dev); | 1619 | gmbus_irq_handler(dev); |
1610 | } | 1620 | } |
1611 | 1621 | ||
/*
 * Top-level interrupt handler for Valleyview: loops draining the display
 * (VLV_IIR), GT (GTIIR) and PM (GEN6_PMIIR) interrupt identity registers
 * until all three read back zero.
 *
 * Returns IRQ_HANDLED if any source was serviced, IRQ_NONE otherwise.
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		/* Snapshot all three IIR registers before dispatching. */
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		/* Nothing pending anywhere: we're done. */
		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		valleyview_pipestat_irq_handler(dev, iir);

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
				dp_aux_irq_handler(dev);

			/* Write-to-clear the hotplug status, then post the write. */
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}


		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		/* Ack everything we processed so new events can raise IIR bits. */
		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
1659 | 1669 | ||
1660 | static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) | 1670 | static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) |
1661 | { | 1671 | { |
1662 | struct drm_i915_private *dev_priv = dev->dev_private; | 1672 | struct drm_i915_private *dev_priv = dev->dev_private; |
1663 | int pipe; | 1673 | int pipe; |
1664 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; | 1674 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; |
1665 | 1675 | ||
1666 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); | 1676 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); |
1667 | 1677 | ||
1668 | if (pch_iir & SDE_AUDIO_POWER_MASK) { | 1678 | if (pch_iir & SDE_AUDIO_POWER_MASK) { |
1669 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> | 1679 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> |
1670 | SDE_AUDIO_POWER_SHIFT); | 1680 | SDE_AUDIO_POWER_SHIFT); |
1671 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | 1681 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", |
1672 | port_name(port)); | 1682 | port_name(port)); |
1673 | } | 1683 | } |
1674 | 1684 | ||
1675 | if (pch_iir & SDE_AUX_MASK) | 1685 | if (pch_iir & SDE_AUX_MASK) |
1676 | dp_aux_irq_handler(dev); | 1686 | dp_aux_irq_handler(dev); |
1677 | 1687 | ||
1678 | if (pch_iir & SDE_GMBUS) | 1688 | if (pch_iir & SDE_GMBUS) |
1679 | gmbus_irq_handler(dev); | 1689 | gmbus_irq_handler(dev); |
1680 | 1690 | ||
1681 | if (pch_iir & SDE_AUDIO_HDCP_MASK) | 1691 | if (pch_iir & SDE_AUDIO_HDCP_MASK) |
1682 | DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); | 1692 | DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); |
1683 | 1693 | ||
1684 | if (pch_iir & SDE_AUDIO_TRANS_MASK) | 1694 | if (pch_iir & SDE_AUDIO_TRANS_MASK) |
1685 | DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); | 1695 | DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); |
1686 | 1696 | ||
1687 | if (pch_iir & SDE_POISON) | 1697 | if (pch_iir & SDE_POISON) |
1688 | DRM_ERROR("PCH poison interrupt\n"); | 1698 | DRM_ERROR("PCH poison interrupt\n"); |
1689 | 1699 | ||
1690 | if (pch_iir & SDE_FDI_MASK) | 1700 | if (pch_iir & SDE_FDI_MASK) |
1691 | for_each_pipe(pipe) | 1701 | for_each_pipe(pipe) |
1692 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", | 1702 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", |
1693 | pipe_name(pipe), | 1703 | pipe_name(pipe), |
1694 | I915_READ(FDI_RX_IIR(pipe))); | 1704 | I915_READ(FDI_RX_IIR(pipe))); |
1695 | 1705 | ||
1696 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) | 1706 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) |
1697 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); | 1707 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); |
1698 | 1708 | ||
1699 | if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) | 1709 | if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) |
1700 | DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); | 1710 | DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); |
1701 | 1711 | ||
1702 | if (pch_iir & SDE_TRANSA_FIFO_UNDER) | 1712 | if (pch_iir & SDE_TRANSA_FIFO_UNDER) |
1703 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, | 1713 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, |
1704 | false)) | 1714 | false)) |
1705 | DRM_ERROR("PCH transcoder A FIFO underrun\n"); | 1715 | DRM_ERROR("PCH transcoder A FIFO underrun\n"); |
1706 | 1716 | ||
1707 | if (pch_iir & SDE_TRANSB_FIFO_UNDER) | 1717 | if (pch_iir & SDE_TRANSB_FIFO_UNDER) |
1708 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, | 1718 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, |
1709 | false)) | 1719 | false)) |
1710 | DRM_ERROR("PCH transcoder B FIFO underrun\n"); | 1720 | DRM_ERROR("PCH transcoder B FIFO underrun\n"); |
1711 | } | 1721 | } |
1712 | 1722 | ||
/*
 * Handle the Ivybridge/Haswell error interrupt register (GEN7_ERR_INT):
 * poison, per-pipe FIFO underrun and per-pipe CRC-done events.  The
 * register is write-to-clear and is acked at the end with the value read.
 */
static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
			/* Only log once: reporting is turned off on first hit. */
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_ERROR("Pipe %c FIFO underrun\n",
					  pipe_name(pipe));
		}

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			/* IVB and HSW latch pipe CRCs through different paths. */
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}
1740 | 1750 | ||
/*
 * Handle the CPT PCH south error interrupt register (SERR_INT): poison
 * plus transcoder A/B/C FIFO underruns.  SERR_INT is write-to-clear and
 * is acked at the end with the value read.
 */
static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	/* Each underrun is logged only if reporting was still enabled. */
	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_ERROR("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_ERROR("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_ERROR("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}
1766 | 1776 | ||
/*
 * Service CPT-generation south display engine (PCH) interrupts reported
 * in @pch_iir: hotplug, audio power, DP AUX, GMBUS, audio content
 * protection, FDI, and (via SDE_ERROR_CPT) the south error register.
 */
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		/* ffs() maps the lowest set audio-power bit to a port index. */
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
1803 | 1813 | ||
/*
 * Dispatch Ironlake/Sandybridge display engine interrupts from @de_iir:
 * AUX, GSE/opregion, poison, per-pipe vblank/underrun/CRC/flip events,
 * chained PCH events, and (gen5 only) the PCU RPS-change event.
 */
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("Pipe %c FIFO underrun\n",
					  pipe_name(pipe));

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}
1853 | 1863 | ||
/*
 * Dispatch Ivybridge+ display engine interrupts from @de_iir: the error
 * interrupt register, AUX, GSE/opregion, per-pipe vblank/flip events and
 * chained PCH events (skipped on PCH_NOP platforms).
 */
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			drm_handle_vblank(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
1889 | 1899 | ||
/*
 * Top-level interrupt handler for Ironlake through Haswell.  Masks the
 * display-engine master interrupt and the south (PCH) interrupts up
 * front, drains GT, display and (gen6+) PM IIRs, then restores both
 * enable registers so any still-pending sources re-raise the IRQ.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		/* gen6+ and gen5 use different GT interrupt layouts. */
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			gen6_rps_irq_handler(dev_priv, pm_iir);
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	/* Re-enable the master and south interrupts we masked above. */
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
1955 | 1965 | ||
1956 | static irqreturn_t gen8_irq_handler(int irq, void *arg) | 1966 | static irqreturn_t gen8_irq_handler(int irq, void *arg) |
1957 | { | 1967 | { |
1958 | struct drm_device *dev = arg; | 1968 | struct drm_device *dev = arg; |
1959 | struct drm_i915_private *dev_priv = dev->dev_private; | 1969 | struct drm_i915_private *dev_priv = dev->dev_private; |
1960 | u32 master_ctl; | 1970 | u32 master_ctl; |
1961 | irqreturn_t ret = IRQ_NONE; | 1971 | irqreturn_t ret = IRQ_NONE; |
1962 | uint32_t tmp = 0; | 1972 | uint32_t tmp = 0; |
1963 | enum pipe pipe; | 1973 | enum pipe pipe; |
1964 | 1974 | ||
1965 | master_ctl = I915_READ(GEN8_MASTER_IRQ); | 1975 | master_ctl = I915_READ(GEN8_MASTER_IRQ); |
1966 | master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; | 1976 | master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; |
1967 | if (!master_ctl) | 1977 | if (!master_ctl) |
1968 | return IRQ_NONE; | 1978 | return IRQ_NONE; |
1969 | 1979 | ||
1970 | I915_WRITE(GEN8_MASTER_IRQ, 0); | 1980 | I915_WRITE(GEN8_MASTER_IRQ, 0); |
1971 | POSTING_READ(GEN8_MASTER_IRQ); | 1981 | POSTING_READ(GEN8_MASTER_IRQ); |
1972 | 1982 | ||
1973 | ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); | 1983 | ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); |
1974 | 1984 | ||
1975 | if (master_ctl & GEN8_DE_MISC_IRQ) { | 1985 | if (master_ctl & GEN8_DE_MISC_IRQ) { |
1976 | tmp = I915_READ(GEN8_DE_MISC_IIR); | 1986 | tmp = I915_READ(GEN8_DE_MISC_IIR); |
1977 | if (tmp & GEN8_DE_MISC_GSE) | 1987 | if (tmp & GEN8_DE_MISC_GSE) |
1978 | intel_opregion_asle_intr(dev); | 1988 | intel_opregion_asle_intr(dev); |
1979 | else if (tmp) | 1989 | else if (tmp) |
1980 | DRM_ERROR("Unexpected DE Misc interrupt\n"); | 1990 | DRM_ERROR("Unexpected DE Misc interrupt\n"); |
1981 | else | 1991 | else |
1982 | DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); | 1992 | DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); |
1983 | 1993 | ||
1984 | if (tmp) { | 1994 | if (tmp) { |
1985 | I915_WRITE(GEN8_DE_MISC_IIR, tmp); | 1995 | I915_WRITE(GEN8_DE_MISC_IIR, tmp); |
1986 | ret = IRQ_HANDLED; | 1996 | ret = IRQ_HANDLED; |
1987 | } | 1997 | } |
1988 | } | 1998 | } |
1989 | 1999 | ||
1990 | if (master_ctl & GEN8_DE_PORT_IRQ) { | 2000 | if (master_ctl & GEN8_DE_PORT_IRQ) { |
1991 | tmp = I915_READ(GEN8_DE_PORT_IIR); | 2001 | tmp = I915_READ(GEN8_DE_PORT_IIR); |
1992 | if (tmp & GEN8_AUX_CHANNEL_A) | 2002 | if (tmp & GEN8_AUX_CHANNEL_A) |
1993 | dp_aux_irq_handler(dev); | 2003 | dp_aux_irq_handler(dev); |
1994 | else if (tmp) | 2004 | else if (tmp) |
1995 | DRM_ERROR("Unexpected DE Port interrupt\n"); | 2005 | DRM_ERROR("Unexpected DE Port interrupt\n"); |
1996 | else | 2006 | else |
1997 | DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); | 2007 | DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); |
1998 | 2008 | ||
1999 | if (tmp) { | 2009 | if (tmp) { |
2000 | I915_WRITE(GEN8_DE_PORT_IIR, tmp); | 2010 | I915_WRITE(GEN8_DE_PORT_IIR, tmp); |
2001 | ret = IRQ_HANDLED; | 2011 | ret = IRQ_HANDLED; |
2002 | } | 2012 | } |
2003 | } | 2013 | } |
2004 | 2014 | ||
2005 | for_each_pipe(pipe) { | 2015 | for_each_pipe(pipe) { |
2006 | uint32_t pipe_iir; | 2016 | uint32_t pipe_iir; |
2007 | 2017 | ||
2008 | if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) | 2018 | if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) |
2009 | continue; | 2019 | continue; |
2010 | 2020 | ||
2011 | pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); | 2021 | pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); |
2012 | if (pipe_iir & GEN8_PIPE_VBLANK) | 2022 | if (pipe_iir & GEN8_PIPE_VBLANK) |
2013 | drm_handle_vblank(dev, pipe); | 2023 | drm_handle_vblank(dev, pipe); |
2014 | 2024 | ||
2015 | if (pipe_iir & GEN8_PIPE_FLIP_DONE) { | 2025 | if (pipe_iir & GEN8_PIPE_FLIP_DONE) { |
2016 | intel_prepare_page_flip(dev, pipe); | 2026 | intel_prepare_page_flip(dev, pipe); |
2017 | intel_finish_page_flip_plane(dev, pipe); | 2027 | intel_finish_page_flip_plane(dev, pipe); |
2018 | } | 2028 | } |
2019 | 2029 | ||
2020 | if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) | 2030 | if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) |
2021 | hsw_pipe_crc_irq_handler(dev, pipe); | 2031 | hsw_pipe_crc_irq_handler(dev, pipe); |
2022 | 2032 | ||
2023 | if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { | 2033 | if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { |
2024 | if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, | 2034 | if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, |
2025 | false)) | 2035 | false)) |
2026 | DRM_ERROR("Pipe %c FIFO underrun\n", | 2036 | DRM_ERROR("Pipe %c FIFO underrun\n", |
2027 | pipe_name(pipe)); | 2037 | pipe_name(pipe)); |
2028 | } | 2038 | } |
2029 | 2039 | ||
2030 | if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { | 2040 | if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { |
2031 | DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", | 2041 | DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", |
2032 | pipe_name(pipe), | 2042 | pipe_name(pipe), |
2033 | pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); | 2043 | pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); |
2034 | } | 2044 | } |
2035 | 2045 | ||
2036 | if (pipe_iir) { | 2046 | if (pipe_iir) { |
2037 | ret = IRQ_HANDLED; | 2047 | ret = IRQ_HANDLED; |
2038 | I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); | 2048 | I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); |
2039 | } else | 2049 | } else |
2040 | DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); | 2050 | DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); |
2041 | } | 2051 | } |
2042 | 2052 | ||
2043 | if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { | 2053 | if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { |
2044 | /* | 2054 | /* |
2045 | * FIXME(BDW): Assume for now that the new interrupt handling | 2055 | * FIXME(BDW): Assume for now that the new interrupt handling |
2046 | * scheme also closed the SDE interrupt handling race we've seen | 2056 | * scheme also closed the SDE interrupt handling race we've seen |
2047 | * on older pch-split platforms. But this needs testing. | 2057 | * on older pch-split platforms. But this needs testing. |
2048 | */ | 2058 | */ |
2049 | u32 pch_iir = I915_READ(SDEIIR); | 2059 | u32 pch_iir = I915_READ(SDEIIR); |
2050 | 2060 | ||
2051 | cpt_irq_handler(dev, pch_iir); | 2061 | cpt_irq_handler(dev, pch_iir); |
2052 | 2062 | ||
2053 | if (pch_iir) { | 2063 | if (pch_iir) { |
2054 | I915_WRITE(SDEIIR, pch_iir); | 2064 | I915_WRITE(SDEIIR, pch_iir); |
2055 | ret = IRQ_HANDLED; | 2065 | ret = IRQ_HANDLED; |
2056 | } | 2066 | } |
2057 | } | 2067 | } |
2058 | 2068 | ||
2059 | I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); | 2069 | I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); |
2060 | POSTING_READ(GEN8_MASTER_IRQ); | 2070 | POSTING_READ(GEN8_MASTER_IRQ); |
2061 | 2071 | ||
2062 | return ret; | 2072 | return ret; |
2063 | } | 2073 | } |
2064 | 2074 | ||
/*
 * i915_error_wake_up - wake every task that may be blocked waiting on the GPU
 * @dev_priv: i915 device private
 * @reset_completed: also wake tasks waiting for a pending reset to finish
 *
 * Called both when a hang is first declared (reset_completed == false, so
 * waiters can back off and drop their locks) and again after the reset work
 * has finished (reset_completed == true).
 */
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_ring_buffer *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}
2092 | 2102 | ||
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct embedded in struct i915_gpu_error
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.  If a reset is pending (and the GPU is not already
 * terminally wedged), perform the GPU reset here in process context,
 * bump the reset counter on success or mark the GPU wedged on failure,
 * and wake all reset waiters.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	struct drm_i915_private *dev_priv =
		container_of(error, struct drm_i915_private, gpu_error);
	struct drm_device *dev = dev_priv->dev;
	/* uevent payloads: error detected / reset started / reset finished */
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			/* Reset failed: mark the GPU terminally wedged. */
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the update value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}
2166 | 2176 | ||
/*
 * i915_report_and_clear_eir - dump and acknowledge the Error Identity Register
 * @dev: drm device
 *
 * Reads EIR, prints a per-error-bit register dump to the kernel log, and
 * writes back the individual error source registers (IPEIR/PGTBL_ER) and
 * EIR itself to acknowledge them.  Any error bits that remain set after the
 * ack are considered stuck and get masked in EMR so they stop raising
 * interrupts.
 */
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			/* Write back to ack; POSTING_READ flushes the write. */
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		/* pre-gen4 and gen4+ expose the IP error regs at different
		 * offsets (IPEIR vs IPEIR_I965). */
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	/* Ack all reported errors and re-read to catch stuck bits. */
	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
2258 | 2268 | ||
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 * @wedged: if true, declare the GPU hung and schedule a reset
 * @fmt: printf-style format string describing the error, recorded with the
 *	 captured error state
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}
2311 | 2321 | ||
/*
 * i915_pageflip_stall_check - detect a page flip whose completion IRQ was lost
 * @dev: drm device
 * @pipe: pipe to check
 *
 * Compares the display surface base register against the pending flip
 * object's GGTT offset; if the hardware already scans out the new buffer
 * but the flip is still marked pending, assume the flip-done interrupt was
 * missed and complete the flip manually.  Currently marked __always_unused
 * (no caller wired up).
 */
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	/* event_lock protects intel_crtc->unpin_work. */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					i915_gem_obj_ggtt_offset(obj);
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		/* pre-gen4: DSPADDR holds base + the (x, y) panning offset. */
		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
							crtc->y * crtc->primary->fb->pitches[0] +
							crtc->x * crtc->primary->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
2357 | 2367 | ||
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 *
 * Enables vblank interrupt delivery for @pipe on gen2-4 (pre-PCH)
 * platforms.  Returns 0 on success, -EINVAL if the pipe is not enabled.
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	/* gen4+ uses the vblank-start status bit, older parts the plain
	 * vblank status bit. */
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);

	/* maintain vblank delivery even in deep C-states */
	if (INTEL_INFO(dev)->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
2384 | 2394 | ||
2385 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) | 2395 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) |
2386 | { | 2396 | { |
2387 | struct drm_i915_private *dev_priv = dev->dev_private; | 2397 | struct drm_i915_private *dev_priv = dev->dev_private; |
2388 | unsigned long irqflags; | 2398 | unsigned long irqflags; |
2389 | uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : | 2399 | uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : |
2390 | DE_PIPE_VBLANK(pipe); | 2400 | DE_PIPE_VBLANK(pipe); |
2391 | 2401 | ||
2392 | if (!i915_pipe_enabled(dev, pipe)) | 2402 | if (!i915_pipe_enabled(dev, pipe)) |
2393 | return -EINVAL; | 2403 | return -EINVAL; |
2394 | 2404 | ||
2395 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 2405 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
2396 | ironlake_enable_display_irq(dev_priv, bit); | 2406 | ironlake_enable_display_irq(dev_priv, bit); |
2397 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 2407 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2398 | 2408 | ||
2399 | return 0; | 2409 | return 0; |
2400 | } | 2410 | } |
2401 | 2411 | ||
/*
 * valleyview_enable_vblank - enable vblank interrupts on VLV pipes
 * @dev: drm device
 * @pipe: pipe index (from drm core 'crtc')
 *
 * Returns 0 on success, -EINVAL if the pipe is not enabled.
 */
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
2417 | 2427 | ||
/*
 * gen8_enable_vblank - enable vblank interrupts on BDW+ pipes
 * @dev: drm device
 * @pipe: pipe index (from drm core 'crtc')
 *
 * Clears the vblank bit in the cached per-pipe IRQ mask and writes it to
 * the hardware IMR.  Returns 0 on success, -EINVAL if the pipe is disabled.
 */
static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	/* Keep the software mask cache in sync with the hardware IMR. */
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	return 0;
}
2433 | 2443 | ||
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 *
 * Disables vblank interrupt delivery for @pipe on gen2-4 (pre-PCH)
 * platforms; also restores the gen3 AGPBUSY behaviour that
 * i915_enable_vblank() changed.
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	/* Clear both status bits; only one was set by the enable path. */
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
2451 | 2461 | ||
/*
 * ironlake_disable_vblank - disable vblank interrupts on ILK/SNB/IVB+ pipes
 * @dev: drm device
 * @pipe: pipe index (from drm core 'crtc')
 *
 * Note: unlike the enable path there is no i915_pipe_enabled() check here;
 * presumably masking the bit is harmless for a disabled pipe — verify
 * against the other *_disable_vblank() implementations if changing this.
 */
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	/* gen7+ relocated the per-pipe vblank bits in the DE registers. */
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
2463 | 2473 | ||
/*
 * valleyview_disable_vblank - disable vblank interrupts on VLV pipes
 * @dev: drm device
 * @pipe: pipe index (from drm core 'crtc')
 */
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
2474 | 2484 | ||
/*
 * gen8_disable_vblank - disable vblank interrupts on BDW+ pipes
 * @dev: drm device
 * @pipe: pipe index (from drm core 'crtc')
 *
 * Sets the vblank bit in the cached per-pipe IRQ mask and writes it to
 * the hardware IMR.  Silently returns if the pipe is not enabled.
 */
static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	/* Keep the software mask cache in sync with the hardware IMR. */
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
2489 | 2499 | ||
2490 | static u32 | 2500 | static u32 |
2491 | ring_last_seqno(struct intel_ring_buffer *ring) | 2501 | ring_last_seqno(struct intel_ring_buffer *ring) |
2492 | { | 2502 | { |
2493 | return list_entry(ring->request_list.prev, | 2503 | return list_entry(ring->request_list.prev, |
2494 | struct drm_i915_gem_request, list)->seqno; | 2504 | struct drm_i915_gem_request, list)->seqno; |
2495 | } | 2505 | } |
2496 | 2506 | ||
2497 | static bool | 2507 | static bool |
2498 | ring_idle(struct intel_ring_buffer *ring, u32 seqno) | 2508 | ring_idle(struct intel_ring_buffer *ring, u32 seqno) |
2499 | { | 2509 | { |
2500 | return (list_empty(&ring->request_list) || | 2510 | return (list_empty(&ring->request_list) || |
2501 | i915_seqno_passed(seqno, ring_last_seqno(ring))); | 2511 | i915_seqno_passed(seqno, ring_last_seqno(ring))); |
2502 | } | 2512 | } |
2503 | 2513 | ||
/*
 * If @ring is stalled on a MI_SEMAPHORE_MBOX wait, work out which ring
 * it is waiting on and the seqno it is waiting for.
 *
 * Returns the signalling ring and stores the awaited seqno in *seqno,
 * or returns NULL if the ring is not parked on a semaphore command.
 */
static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	int i;

	/* IPEHR holds the instruction dword the parser stalled on */
	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * dwords. Note that we don't care about ACTHD here since that might
	 * point at at batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	for (i = 4; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	/* Loop counter exhausted without finding the MBOX command */
	if (!i)
		return NULL;

	/* The dword following the MBOX command is the seqno to wait for */
	*seqno = ioread32(ring->virtual_start + head + 4) + 1;
	/*
	 * ipehr bit 17 selects which of the other rings is signalling.
	 * NOTE(review): the "% 3" assumes exactly three rings take part in
	 * the semaphore scheme — confirm when more rings are added.
	 */
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}
2547 | 2557 | ||
/*
 * Check whether the semaphore @ring is waiting on should already have
 * been signalled. Returns 1 if the wait should have completed, 0 if the
 * signaller is still making progress, and -1 on an apparent deadlock.
 */
static int semaphore_passed(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_ring_buffer *signaller;
	u32 seqno, ctl;

	/*
	 * Mark this ring before recursing below so that a cycle of rings
	 * waiting on each other trips the deadlock test and returns -1.
	 */
	ring->hangcheck.deadlock = true;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL || signaller->hangcheck.deadlock)
		return -1;

	/* cursory check for an unkickable deadlock */
	ctl = I915_READ_CTL(signaller);
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
		return -1;

	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
}
2567 | 2577 | ||
2568 | static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) | 2578 | static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) |
2569 | { | 2579 | { |
2570 | struct intel_ring_buffer *ring; | 2580 | struct intel_ring_buffer *ring; |
2571 | int i; | 2581 | int i; |
2572 | 2582 | ||
2573 | for_each_ring(ring, dev_priv, i) | 2583 | for_each_ring(ring, dev_priv, i) |
2574 | ring->hangcheck.deadlock = false; | 2584 | ring->hangcheck.deadlock = false; |
2575 | } | 2585 | } |
2576 | 2586 | ||
/*
 * Classify why @ring has made no seqno progress since the last check.
 * If ACTHD is still moving the ring is merely slow; otherwise try to
 * distinguish a kickable WAIT_FOR_EVENT or semaphore stall from a real
 * hang.
 */
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	/* Execution address still advancing: busy, not hung */
	if (ring->hangcheck.acthd != acthd)
		return HANGCHECK_ACTIVE;

	/* Gen2 has none of the kick facilities used below */
	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		/* Writing CTL back kicks the ring out of the wait */
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default: /* -1: deadlock detected, treat as hung */
			return HANGCHECK_HUNG;
		case 1: /* signalled, but ring didn't notice: kick it */
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0: /* signaller still working: legitimate wait */
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}
2621 | 2631 | ||
2622 | /** | 2632 | /** |
2623 | * This is called when the chip hasn't reported back with completed | 2633 | * This is called when the chip hasn't reported back with completed |
2624 | * batchbuffers in a long time. We keep track per ring seqno progress and | 2634 | * batchbuffers in a long time. We keep track per ring seqno progress and |
2625 | * if there are no progress, hangcheck score for that ring is increased. | 2635 | * if there are no progress, hangcheck score for that ring is increased. |
2626 | * Further, acthd is inspected to see if the ring is stuck. On stuck case | 2636 | * Further, acthd is inspected to see if the ring is stuck. On stuck case |
2627 | * we kick the ring. If we see no progress on three subsequent calls | 2637 | * we kick the ring. If we see no progress on three subsequent calls |
2628 | * we assume chip is wedged and try to fix it by resetting the chip. | 2638 | * we assume chip is wedged and try to fix it by resetting the chip. |
2629 | */ | 2639 | */ |
2630 | static void i915_hangcheck_elapsed(unsigned long data) | 2640 | static void i915_hangcheck_elapsed(unsigned long data) |
2631 | { | 2641 | { |
2632 | struct drm_device *dev = (struct drm_device *)data; | 2642 | struct drm_device *dev = (struct drm_device *)data; |
2633 | struct drm_i915_private *dev_priv = dev->dev_private; | 2643 | struct drm_i915_private *dev_priv = dev->dev_private; |
2634 | struct intel_ring_buffer *ring; | 2644 | struct intel_ring_buffer *ring; |
2635 | int i; | 2645 | int i; |
2636 | int busy_count = 0, rings_hung = 0; | 2646 | int busy_count = 0, rings_hung = 0; |
2637 | bool stuck[I915_NUM_RINGS] = { 0 }; | 2647 | bool stuck[I915_NUM_RINGS] = { 0 }; |
2638 | #define BUSY 1 | 2648 | #define BUSY 1 |
2639 | #define KICK 5 | 2649 | #define KICK 5 |
2640 | #define HUNG 20 | 2650 | #define HUNG 20 |
2641 | 2651 | ||
2642 | if (!i915.enable_hangcheck) | 2652 | if (!i915.enable_hangcheck) |
2643 | return; | 2653 | return; |
2644 | 2654 | ||
2645 | for_each_ring(ring, dev_priv, i) { | 2655 | for_each_ring(ring, dev_priv, i) { |
2646 | u64 acthd; | 2656 | u64 acthd; |
2647 | u32 seqno; | 2657 | u32 seqno; |
2648 | bool busy = true; | 2658 | bool busy = true; |
2649 | 2659 | ||
2650 | semaphore_clear_deadlocks(dev_priv); | 2660 | semaphore_clear_deadlocks(dev_priv); |
2651 | 2661 | ||
2652 | seqno = ring->get_seqno(ring, false); | 2662 | seqno = ring->get_seqno(ring, false); |
2653 | acthd = intel_ring_get_active_head(ring); | 2663 | acthd = intel_ring_get_active_head(ring); |
2654 | 2664 | ||
2655 | if (ring->hangcheck.seqno == seqno) { | 2665 | if (ring->hangcheck.seqno == seqno) { |
2656 | if (ring_idle(ring, seqno)) { | 2666 | if (ring_idle(ring, seqno)) { |
2657 | ring->hangcheck.action = HANGCHECK_IDLE; | 2667 | ring->hangcheck.action = HANGCHECK_IDLE; |
2658 | 2668 | ||
2659 | if (waitqueue_active(&ring->irq_queue)) { | 2669 | if (waitqueue_active(&ring->irq_queue)) { |
2660 | /* Issue a wake-up to catch stuck h/w. */ | 2670 | /* Issue a wake-up to catch stuck h/w. */ |
2661 | if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { | 2671 | if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { |
2662 | if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) | 2672 | if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) |
2663 | DRM_ERROR("Hangcheck timer elapsed... %s idle\n", | 2673 | DRM_ERROR("Hangcheck timer elapsed... %s idle\n", |
2664 | ring->name); | 2674 | ring->name); |
2665 | else | 2675 | else |
2666 | DRM_INFO("Fake missed irq on %s\n", | 2676 | DRM_INFO("Fake missed irq on %s\n", |
2667 | ring->name); | 2677 | ring->name); |
2668 | wake_up_all(&ring->irq_queue); | 2678 | wake_up_all(&ring->irq_queue); |
2669 | } | 2679 | } |
2670 | /* Safeguard against driver failure */ | 2680 | /* Safeguard against driver failure */ |
2671 | ring->hangcheck.score += BUSY; | 2681 | ring->hangcheck.score += BUSY; |
2672 | } else | 2682 | } else |
2673 | busy = false; | 2683 | busy = false; |
2674 | } else { | 2684 | } else { |
2675 | /* We always increment the hangcheck score | 2685 | /* We always increment the hangcheck score |
2676 | * if the ring is busy and still processing | 2686 | * if the ring is busy and still processing |
2677 | * the same request, so that no single request | 2687 | * the same request, so that no single request |
2678 | * can run indefinitely (such as a chain of | 2688 | * can run indefinitely (such as a chain of |
2679 | * batches). The only time we do not increment | 2689 | * batches). The only time we do not increment |
2680 | * the hangcheck score on this ring, if this | 2690 | * the hangcheck score on this ring, if this |
2681 | * ring is in a legitimate wait for another | 2691 | * ring is in a legitimate wait for another |
2682 | * ring. In that case the waiting ring is a | 2692 | * ring. In that case the waiting ring is a |
2683 | * victim and we want to be sure we catch the | 2693 | * victim and we want to be sure we catch the |
2684 | * right culprit. Then every time we do kick | 2694 | * right culprit. Then every time we do kick |
2685 | * the ring, add a small increment to the | 2695 | * the ring, add a small increment to the |
2686 | * score so that we can catch a batch that is | 2696 | * score so that we can catch a batch that is |
2687 | * being repeatedly kicked and so responsible | 2697 | * being repeatedly kicked and so responsible |
2688 | * for stalling the machine. | 2698 | * for stalling the machine. |
2689 | */ | 2699 | */ |
2690 | ring->hangcheck.action = ring_stuck(ring, | 2700 | ring->hangcheck.action = ring_stuck(ring, |
2691 | acthd); | 2701 | acthd); |
2692 | 2702 | ||
2693 | switch (ring->hangcheck.action) { | 2703 | switch (ring->hangcheck.action) { |
2694 | case HANGCHECK_IDLE: | 2704 | case HANGCHECK_IDLE: |
2695 | case HANGCHECK_WAIT: | 2705 | case HANGCHECK_WAIT: |
2696 | break; | 2706 | break; |
2697 | case HANGCHECK_ACTIVE: | 2707 | case HANGCHECK_ACTIVE: |
2698 | ring->hangcheck.score += BUSY; | 2708 | ring->hangcheck.score += BUSY; |
2699 | break; | 2709 | break; |
2700 | case HANGCHECK_KICK: | 2710 | case HANGCHECK_KICK: |
2701 | ring->hangcheck.score += KICK; | 2711 | ring->hangcheck.score += KICK; |
2702 | break; | 2712 | break; |
2703 | case HANGCHECK_HUNG: | 2713 | case HANGCHECK_HUNG: |
2704 | ring->hangcheck.score += HUNG; | 2714 | ring->hangcheck.score += HUNG; |
2705 | stuck[i] = true; | 2715 | stuck[i] = true; |
2706 | break; | 2716 | break; |
2707 | } | 2717 | } |
2708 | } | 2718 | } |
2709 | } else { | 2719 | } else { |
2710 | ring->hangcheck.action = HANGCHECK_ACTIVE; | 2720 | ring->hangcheck.action = HANGCHECK_ACTIVE; |
2711 | 2721 | ||
2712 | /* Gradually reduce the count so that we catch DoS | 2722 | /* Gradually reduce the count so that we catch DoS |
2713 | * attempts across multiple batches. | 2723 | * attempts across multiple batches. |
2714 | */ | 2724 | */ |
2715 | if (ring->hangcheck.score > 0) | 2725 | if (ring->hangcheck.score > 0) |
2716 | ring->hangcheck.score--; | 2726 | ring->hangcheck.score--; |
2717 | } | 2727 | } |
2718 | 2728 | ||
2719 | ring->hangcheck.seqno = seqno; | 2729 | ring->hangcheck.seqno = seqno; |
2720 | ring->hangcheck.acthd = acthd; | 2730 | ring->hangcheck.acthd = acthd; |
2721 | busy_count += busy; | 2731 | busy_count += busy; |
2722 | } | 2732 | } |
2723 | 2733 | ||
2724 | for_each_ring(ring, dev_priv, i) { | 2734 | for_each_ring(ring, dev_priv, i) { |
2725 | if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { | 2735 | if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { |
2726 | DRM_INFO("%s on %s\n", | 2736 | DRM_INFO("%s on %s\n", |
2727 | stuck[i] ? "stuck" : "no progress", | 2737 | stuck[i] ? "stuck" : "no progress", |
2728 | ring->name); | 2738 | ring->name); |
2729 | rings_hung++; | 2739 | rings_hung++; |
2730 | } | 2740 | } |
2731 | } | 2741 | } |
2732 | 2742 | ||
2733 | if (rings_hung) | 2743 | if (rings_hung) |
2734 | return i915_handle_error(dev, true, "Ring hung"); | 2744 | return i915_handle_error(dev, true, "Ring hung"); |
2735 | 2745 | ||
2736 | if (busy_count) | 2746 | if (busy_count) |
2737 | /* Reset timer case chip hangs without another request | 2747 | /* Reset timer case chip hangs without another request |
2738 | * being added */ | 2748 | * being added */ |
2739 | i915_queue_hangcheck(dev); | 2749 | i915_queue_hangcheck(dev); |
2740 | } | 2750 | } |
2741 | 2751 | ||
2742 | void i915_queue_hangcheck(struct drm_device *dev) | 2752 | void i915_queue_hangcheck(struct drm_device *dev) |
2743 | { | 2753 | { |
2744 | struct drm_i915_private *dev_priv = dev->dev_private; | 2754 | struct drm_i915_private *dev_priv = dev->dev_private; |
2745 | if (!i915.enable_hangcheck) | 2755 | if (!i915.enable_hangcheck) |
2746 | return; | 2756 | return; |
2747 | 2757 | ||
2748 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, | 2758 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, |
2749 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | 2759 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); |
2750 | } | 2760 | } |
2751 | 2761 | ||
/*
 * Mask the south display (PCH) interrupts before the handler is
 * installed, while unconditionally enabling all sources in SDEIER.
 */
static void ibx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Platforms without a functional PCH have nothing to do here */
	if (HAS_PCH_NOP(dev))
		return;

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
2770 | 2780 | ||
/* Mask and disable all GT interrupts, plus the PM interrupts on gen6+. */
static void gen5_gt_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	/* Posting read flushes the disables to the hardware */
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		/* and PM */
		I915_WRITE(GEN6_PMIMR, 0xffffffff);
		I915_WRITE(GEN6_PMIER, 0x0);
		POSTING_READ(GEN6_PMIER);
	}
}
2787 | 2797 | ||
2788 | /* drm_dma.h hooks | 2798 | /* drm_dma.h hooks |
2789 | */ | 2799 | */ |
/*
 * drm_dma.h preinstall hook for Ironlake-style platforms: mask all
 * display-engine interrupts, then the GT and PCH blocks.
 */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * Hardware status mask. NOTE(review): the specific 0xeffe value is
	 * inherited magic — confirm against bspec before changing.
	 */
	I915_WRITE(HWSTAM, 0xeffe);

	/* Mask and disable all display-engine interrupts */
	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	gen5_gt_irq_preinstall(dev);

	ibx_irq_preinstall(dev);
}
2804 | 2814 | ||
/*
 * Mask and clear every Valleyview interrupt source before the handler
 * is installed.
 */
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/*
	 * and GT. GTIIR is written back twice, presumably because IIR can
	 * queue up two events (cf. the gen8 preinstall comment) — confirm.
	 */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	gen5_gt_irq_preinstall(dev);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	/* Read-back-and-write clears the latched status bits */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
2833 | 2843 | ||
/*
 * Mask and clear every gen8 interrupt source (GT, per-pipe DE, DE port,
 * DE misc, PCU, and PCH) before the handler is installed.
 */
static void gen8_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/* Gate everything at the master level first */
	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	/* IIR can theoretically queue up two events. Be paranoid */
	/* Helper for indexed IMR/IER/IIR triplets (GT units, pipes) */
#define GEN8_IRQ_INIT_NDX(type, which) do { \
		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IMR(which)); \
		I915_WRITE(GEN8_##type##_IER(which), 0); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IIR(which)); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	} while (0)

	/* Same as above for the non-indexed register triplets */
#define GEN8_IRQ_INIT(type) do { \
		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
		POSTING_READ(GEN8_##type##_IMR); \
		I915_WRITE(GEN8_##type##_IER, 0); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
		POSTING_READ(GEN8_##type##_IIR); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
	} while (0)

	GEN8_IRQ_INIT_NDX(GT, 0);
	GEN8_IRQ_INIT_NDX(GT, 1);
	GEN8_IRQ_INIT_NDX(GT, 2);
	GEN8_IRQ_INIT_NDX(GT, 3);

	for_each_pipe(pipe) {
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
	}

	GEN8_IRQ_INIT(DE_PORT);
	GEN8_IRQ_INIT(DE_MISC);
	GEN8_IRQ_INIT(PCU);
#undef GEN8_IRQ_INIT
#undef GEN8_IRQ_INIT_NDX

	POSTING_READ(GEN8_PCU_IIR);

	ibx_irq_preinstall(dev);
}
2880 | 2890 | ||
/*
 * Unmask PCH hotplug interrupts for every encoder whose hpd pin is
 * currently marked HPD_ENABLED, then enable digital hotplug detection
 * with the required pulse durations.
 */
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	/* IBX and CPT+ PCHs use different hotplug bit layouts */
	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
2915 | 2925 | ||
/*
 * Postinstall hook for the PCH: acknowledge stale interrupts and unmask
 * the sources the driver handles (GMBUS, AUX, and poison/error bits).
 */
static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev)) {
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	} else {
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

		/* Clear any latched south error interrupts (CPT+ only) */
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
	}

	/* Ack pending IIR bits, then unmask the selected sources */
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
}
2935 | 2945 | ||
/*
 * Enable the GT (render/BSD/blitter engine) interrupts for gen5+, and on
 * gen6+ also the PM (RPS, and VEBOX where present) interrupts.  The L3
 * parity interrupt, when the platform has L3 DPF, is kept permanently
 * unmasked via gt_irq_mask.
 */
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	/* Start with all GT sources masked ... */
	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		/* Gen5 has a separate BSD user interrupt bit. */
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	/* Ack stale GT interrupts before programming mask and enable. */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		pm_irqs |= dev_priv->pm_rps_events;

		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		/* All PM sources masked initially; unmasked on demand later. */
		dev_priv->pm_irq_mask = 0xffffffff;
		I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		I915_WRITE(GEN6_PMIER, pm_irqs);
		POSTING_READ(GEN6_PMIER);
	}
}
2976 | 2986 | ||
/*
 * Ironlake/Sandybridge/Ivybridge display interrupt postinstall: build
 * the display-engine interrupt masks (gen7+ uses the _IVB bit layout),
 * program DEIIR/DEIMR/DEIER, then chain into the GT and PCH
 * postinstall helpers.  On mobile Ironlake the PCU event interrupt is
 * additionally enabled.
 *
 * Returns 0 (the drm irq_postinstall hook expects an int).
 */
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	unsigned long irqflags;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);

		/* Ack any stale error interrupts by writing back the value. */
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			     DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
	}

	/* display_mask bits are unmasked now; extra_mask bits stay masked
	 * in DEIMR but are still enabled in DEIER so they can be unmasked
	 * at runtime (e.g. vblank on/off). */
	dev_priv->irq_mask = ~display_mask;

	/* should always can generate irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | extra_mask);
	POSTING_READ(DEIER);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return 0;
}
3027 | 3037 | ||
/*
 * Enable the Valleyview display-side interrupts: clear stale pipestat
 * bits, enable flip-done/CRC pipestat reporting (pipe A additionally
 * carries the GMBUS status bit), then unmask and enable the display
 * sources in VLV_IIR/IMR/IER.  Called with dev_priv->irq_lock held
 * (see callers in this file).
 */
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;

	/* Ack any stale per-pipe status, including FIFO underrun. */
	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
	I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
					       PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	/* Deliberate double IIR write before unmasking/enabling. */
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	POSTING_READ(VLV_IER);
}
3058 | 3068 | ||
/*
 * Disable the Valleyview display-side interrupts: the exact mirror of
 * valleyview_display_irqs_install(), run in reverse order — mask and
 * disable in IMR/IER first, ack IIR, then turn off the pipestat
 * sources and clear residual pipe status.  Called with
 * dev_priv->irq_lock held (see callers in this file).
 */
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	/* Deliberate double IIR ack, matching the install path. */
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
					        PIPE_GMBUS_INTERRUPT_STATUS);
	i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);

	/* Finally clear any remaining per-pipe status bits. */
	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;
	I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
	I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}
3088 | 3098 | ||
3089 | void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) | 3099 | void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) |
3090 | { | 3100 | { |
3091 | assert_spin_locked(&dev_priv->irq_lock); | 3101 | assert_spin_locked(&dev_priv->irq_lock); |
3092 | 3102 | ||
3093 | if (dev_priv->display_irqs_enabled) | 3103 | if (dev_priv->display_irqs_enabled) |
3094 | return; | 3104 | return; |
3095 | 3105 | ||
3096 | dev_priv->display_irqs_enabled = true; | 3106 | dev_priv->display_irqs_enabled = true; |
3097 | 3107 | ||
3098 | if (dev_priv->dev->irq_enabled) | 3108 | if (dev_priv->dev->irq_enabled) |
3099 | valleyview_display_irqs_install(dev_priv); | 3109 | valleyview_display_irqs_install(dev_priv); |
3100 | } | 3110 | } |
3101 | 3111 | ||
3102 | void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) | 3112 | void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) |
3103 | { | 3113 | { |
3104 | assert_spin_locked(&dev_priv->irq_lock); | 3114 | assert_spin_locked(&dev_priv->irq_lock); |
3105 | 3115 | ||
3106 | if (!dev_priv->display_irqs_enabled) | 3116 | if (!dev_priv->display_irqs_enabled) |
3107 | return; | 3117 | return; |
3108 | 3118 | ||
3109 | dev_priv->display_irqs_enabled = false; | 3119 | dev_priv->display_irqs_enabled = false; |
3110 | 3120 | ||
3111 | if (dev_priv->dev->irq_enabled) | 3121 | if (dev_priv->dev->irq_enabled) |
3112 | valleyview_display_irqs_uninstall(dev_priv); | 3122 | valleyview_display_irqs_uninstall(dev_priv); |
3113 | } | 3123 | } |
3114 | 3124 | ||
/*
 * Valleyview interrupt postinstall: start fully masked, install the
 * display interrupts only if they have been requested, set up the GT
 * interrupts, and finally flip the master interrupt enable.
 *
 * Returns 0 (the drm irq_postinstall hook expects an int).
 */
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* Everything masked until the display-irq install unmasks bits. */
	dev_priv->irq_mask = ~0;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	POSTING_READ(VLV_IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Double-clear IIR once more before enabling the GT side. */
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	/* Master enable last, so nothing fires half-configured. */
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
3152 | 3162 | ||
/*
 * Program the four gen8 GT interrupt register banks (GEN8_GT_IIR/IMR/IER
 * indexed 0..3).  Bank 2 is deliberately programmed with 0 here.  A
 * non-zero IIR at this point means the preinstall failed to mask that
 * bank, which is reported but not treated as fatal.
 */
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	int i;

	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
		u32 tmp = I915_READ(GEN8_GT_IIR(i));
		if (tmp)
			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
				  i, tmp);
		I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
		I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
	}
	POSTING_READ(GEN8_GT_IER(0));
}
3178 | 3188 | ||
/*
 * Program the gen8 display-engine interrupts: per-pipe flip-done,
 * CDCLK-CRC-done and fault errors are unmasked; vblank and FIFO
 * underrun are enabled in IER but left masked in de_irq_mask[] so they
 * can be unmasked at runtime.  AUX channel A is enabled on the DE port
 * bank.  A non-zero pipe IIR here means preinstall failed to mask it,
 * which is reported but not fatal.
 */
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
		GEN8_PIPE_CDCLK_CRC_DONE |
		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
		GEN8_PIPE_FIFO_UNDERRUN;
	int pipe;
	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(pipe) {
		u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (tmp)
			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
				  pipe, tmp);
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
	}
	POSTING_READ(GEN8_DE_PIPE_ISR(0));

	I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
	I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
	POSTING_READ(GEN8_DE_PORT_IER);
}
3206 | 3216 | ||
/*
 * Top-level gen8 interrupt postinstall: configure GT, display engine
 * and PCH interrupts first, and only then set the global master
 * interrupt enable so nothing fires while half-configured.
 *
 * Returns 0 (the drm irq_postinstall hook expects an int).
 */
static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	ibx_irq_postinstall(dev);

	/* Master enable last; DE_MASTER_IRQ_CONTROL is the global enable. */
	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
3221 | 3231 | ||
/*
 * Full gen8 interrupt teardown: kill the master enable first, then for
 * every register bank mask all sources, disable all enables and ack any
 * pending bits.  The two local helper macros are scoped to this
 * function only (undef'd at the end).
 */
static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);

/* Mask, disable and ack one indexed register bank (GT, DE_PIPE). */
#define GEN8_IRQ_FINI_NDX(type, which) do { \
		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
		I915_WRITE(GEN8_##type##_IER(which), 0); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	} while (0)

/* Mask, disable and ack one non-indexed bank (DE_PORT, DE_MISC, PCU). */
#define GEN8_IRQ_FINI(type) do { \
		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
		I915_WRITE(GEN8_##type##_IER, 0); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
	} while (0)

	GEN8_IRQ_FINI_NDX(GT, 0);
	GEN8_IRQ_FINI_NDX(GT, 1);
	GEN8_IRQ_FINI_NDX(GT, 2);
	GEN8_IRQ_FINI_NDX(GT, 3);

	for_each_pipe(pipe) {
		GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
	}

	GEN8_IRQ_FINI(DE_PORT);
	GEN8_IRQ_FINI(DE_MISC);
	GEN8_IRQ_FINI(PCU);
#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX

	POSTING_READ(GEN8_PCU_IIR);
}
3261 | 3271 | ||
/*
 * Valleyview interrupt teardown: cancel hotplug work, clear pipe
 * status, hotplug state and HWSTAM, disable the display interrupts
 * (under irq_lock) if they were installed, then mask/disable/ack the
 * VLV interrupt registers.
 */
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	int pipe;

	if (!dev_priv)
		return;

	intel_hpd_irq_uninstall(dev_priv);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	/* Ack hotplug status by writing the read value back. */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	dev_priv->irq_mask = 0;

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
3292 | 3302 | ||
/*
 * Ironlake-family interrupt teardown: cancel hotplug work, then mask,
 * disable and ack the display-engine, GT and (when a PCH exists) south
 * display interrupt registers.  Gen7 additionally acks GEN7_ERR_INT,
 * and CPT/LPT PCHs ack SERR_INT.
 */
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	intel_hpd_irq_uninstall(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	/* No south display engine to tear down on PCH_NOP platforms. */
	if (HAS_PCH_NOP(dev))
		return;

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
3323 | 3333 | ||
3324 | static void i8xx_irq_preinstall(struct drm_device * dev) | 3334 | static void i8xx_irq_preinstall(struct drm_device * dev) |
3325 | { | 3335 | { |
3326 | struct drm_i915_private *dev_priv = dev->dev_private; | 3336 | struct drm_i915_private *dev_priv = dev->dev_private; |
3327 | int pipe; | 3337 | int pipe; |
3328 | 3338 | ||
3329 | for_each_pipe(pipe) | 3339 | for_each_pipe(pipe) |
3330 | I915_WRITE(PIPESTAT(pipe), 0); | 3340 | I915_WRITE(PIPESTAT(pipe), 0); |
3331 | I915_WRITE16(IMR, 0xffff); | 3341 | I915_WRITE16(IMR, 0xffff); |
3332 | I915_WRITE16(IER, 0x0); | 3342 | I915_WRITE16(IER, 0x0); |
3333 | POSTING_READ16(IER); | 3343 | POSTING_READ16(IER); |
3334 | } | 3344 | } |
3335 | 3345 | ||
/*
 * Gen2 interrupt postinstall: program EMR so that only page-table and
 * memory-refresh errors are reported, unmask the pipe-event,
 * flip-pending and command-parser-error sources, enable them (plus the
 * user interrupt) in IER, and turn on CRC-done pipestat reporting.
 *
 * Returns 0 (the drm irq_postinstall hook expects an int).
 */
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	/* Note: the flip-pending bits are unmasked in IMR but not enabled
	 * in IER; flips are detected by polling ISR at vblank instead. */
	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
3369 | 3379 | ||
3370 | /* | 3380 | /* |
3371 | * Returns true when a page flip has completed. | 3381 | * Returns true when a page flip has completed. |
3372 | */ | 3382 | */ |
/*
 * Vblank handler for gen2 (i8xx) hardware.
 *
 * Signals the vblank event to DRM core and, if a page flip was pending on
 * @plane, checks whether the flip has now completed.
 *
 * Returns true when a page flip has completed (and has been finished via
 * intel_finish_page_flip()), false otherwise.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* 16-bit pending-flip bit for this plane (gen2 registers are 16 bit) */
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	/* No flip was pending for this plane; nothing more to do. */
	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
3400 | 3410 | ||
/*
 * Top-level interrupt handler for gen2 (i8xx) hardware.
 *
 * Loops reading the 16-bit IIR until no non-flip-pending bits remain,
 * dispatching render errors, pipe status events (vblank, CRC, FIFO
 * underrun) and user interrupts on each pass.
 *
 * Returns IRQ_NONE if IIR was clear on entry, IRQ_HANDLED otherwise.
 */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	/* Flip-pending bits are handled at vblank, not as IIR loop-exit
	 * conditions, so they are masked out of the loop test below. */
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		/* Latch and clear per-pipe status under the irq lock. */
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		/* Ack everything except flip-pending bits, which are
		 * cleared from flip_mask once the flip completes. */
		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			/* With FBC the plane/pipe mapping is swapped. */
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
3471 | 3481 | ||
/*
 * Tear down interrupt delivery on gen2 (i8xx) hardware: disable and ack
 * all pipe status sources, mask everything in IMR, disable IER and ack
 * any interrupt still latched in IIR.
 */
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	/* Writing the latched value back acks any remaining interrupts. */
	I915_WRITE16(IIR, I915_READ16(IIR));
}
3486 | 3496 | ||
/*
 * Quiesce interrupt sources before installing the gen3 (i915) interrupt
 * handler: clear hotplug enables/status (where supported), mask HWSTAM,
 * clear pipe status and disable everything via IMR/IER.
 */
static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		/* Write back the latched status bits to ack them. */
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
3504 | 3514 | ||
/*
 * Enable interrupt delivery on gen3 (i915) hardware after the handler is
 * installed: program the error mask, unmask/enable the always-wanted
 * sources, optionally enable display-port (hotplug) interrupts, and turn
 * on ASLE and CRC pipestat events.
 *
 * Returns 0 (cannot fail).
 */
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	unsigned long irqflags;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	/* Note: flip-pending interrupts are unmasked in IMR above but NOT
	 * enabled in IER here; they get enabled on demand by the flip code. */
	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
3554 | 3564 | ||
/*
 * Returns true when a page flip has completed.
 *
 * Gen3+ (32-bit register) counterpart of i8xx_handle_vblank(): signals
 * the vblank event to DRM core and finishes a pending page flip on
 * @plane once the hardware reports it no longer pending in ISR.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	/* No flip was pending for this plane; nothing more to do. */
	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
3585 | 3595 | ||
/*
 * Top-level interrupt handler for gen3 (i915) hardware.
 *
 * Iterates until IIR (ignoring flip-pending bits) is clear, each pass:
 * latching and clearing pipe status, consuming hotplug port events,
 * acking IIR, then dispatching user interrupts, vblank/page-flip,
 * legacy backlight (BLC), CRC and FIFO-underrun events.
 *
 * Returns IRQ_HANDLED if at least one interrupt was serviced,
 * IRQ_NONE otherwise.
 */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	/* Flip-pending bits are serviced at vblank, so they are excluded
	 * from the loop-continuation test. */
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		/* Latch and clear per-pipe status under the irq lock. */
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			/* Ack the hotplug status before acking IIR. */
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			/* With FBC the plane/pipe mapping is swapped. */
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
3692 | 3702 | ||
/*
 * Tear down interrupt delivery on gen3 (i915) hardware: cancel hotplug
 * polling state, disable and ack hotplug/pipe status sources, mask
 * HWSTAM/IMR, disable IER and ack anything still latched in IIR.
 */
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	intel_hpd_irq_uninstall(dev_priv);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		/* Write back latched status bits to ack them. */
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
3716 | 3726 | ||
/*
 * Quiesce interrupt sources before installing the gen4 (i965) interrupt
 * handler: disable and ack hotplug, mask HWSTAM, clear pipe status and
 * disable everything via IMR/IER.  Unlike gen3, hotplug is always
 * present, so no I915_HAS_HOTPLUG() check is needed.
 */
static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	/* Write back latched status bits to ack them. */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
3732 | 3742 | ||
/*
 * Enable interrupt delivery on gen4 (i965) hardware after the handler is
 * installed: unmask the always-wanted sources, enable GMBUS/CRC pipestat
 * events, program the error mask (G4X variants where applicable), and
 * enable ASLE.  BSD ring user interrupts are enabled on G4X only.
 *
 * Returns 0 (cannot fail).
 */
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	/* Flip-pending bits stay out of IER; they are enabled on demand
	 * by the page-flip code. */
	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
3791 | 3801 | ||
/*
 * Program the hotplug-detect enable register for gmch platforms.
 *
 * Walks all registered encoders and enables the hotplug bit for each pin
 * currently marked HPD_ENABLED, then sets the CRT detection parameters.
 * Must be called with dev_priv->irq_lock held (asserted below).
 */
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
3822 | 3832 | ||
3823 | static irqreturn_t i965_irq_handler(int irq, void *arg) | 3833 | static irqreturn_t i965_irq_handler(int irq, void *arg) |
3824 | { | 3834 | { |
3825 | struct drm_device *dev = (struct drm_device *) arg; | 3835 | struct drm_device *dev = (struct drm_device *) arg; |
3826 | struct drm_i915_private *dev_priv = dev->dev_private; | 3836 | struct drm_i915_private *dev_priv = dev->dev_private; |
3827 | u32 iir, new_iir; | 3837 | u32 iir, new_iir; |
3828 | u32 pipe_stats[I915_MAX_PIPES]; | 3838 | u32 pipe_stats[I915_MAX_PIPES]; |
3829 | unsigned long irqflags; | 3839 | unsigned long irqflags; |
3830 | int ret = IRQ_NONE, pipe; | 3840 | int ret = IRQ_NONE, pipe; |
3831 | u32 flip_mask = | 3841 | u32 flip_mask = |
3832 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | 3842 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
3833 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | 3843 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; |
3834 | 3844 | ||
3835 | iir = I915_READ(IIR); | 3845 | iir = I915_READ(IIR); |
3836 | 3846 | ||
3837 | for (;;) { | 3847 | for (;;) { |
3838 | bool irq_received = (iir & ~flip_mask) != 0; | 3848 | bool irq_received = (iir & ~flip_mask) != 0; |
3839 | bool blc_event = false; | 3849 | bool blc_event = false; |
3840 | 3850 | ||
3841 | /* Can't rely on pipestat interrupt bit in iir as it might | 3851 | /* Can't rely on pipestat interrupt bit in iir as it might |
3842 | * have been cleared after the pipestat interrupt was received. | 3852 | * have been cleared after the pipestat interrupt was received. |
3843 | * It doesn't set the bit in iir again, but it still produces | 3853 | * It doesn't set the bit in iir again, but it still produces |
3844 | * interrupts (for non-MSI). | 3854 | * interrupts (for non-MSI). |
3845 | */ | 3855 | */ |
3846 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 3856 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
3847 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | 3857 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
3848 | i915_handle_error(dev, false, | 3858 | i915_handle_error(dev, false, |
3849 | "Command parser error, iir 0x%08x", | 3859 | "Command parser error, iir 0x%08x", |
3850 | iir); | 3860 | iir); |
3851 | 3861 | ||
3852 | for_each_pipe(pipe) { | 3862 | for_each_pipe(pipe) { |
3853 | int reg = PIPESTAT(pipe); | 3863 | int reg = PIPESTAT(pipe); |
3854 | pipe_stats[pipe] = I915_READ(reg); | 3864 | pipe_stats[pipe] = I915_READ(reg); |
3855 | 3865 | ||
3856 | /* | 3866 | /* |
3857 | * Clear the PIPE*STAT regs before the IIR | 3867 | * Clear the PIPE*STAT regs before the IIR |
3858 | */ | 3868 | */ |
3859 | if (pipe_stats[pipe] & 0x8000ffff) { | 3869 | if (pipe_stats[pipe] & 0x8000ffff) { |
3860 | I915_WRITE(reg, pipe_stats[pipe]); | 3870 | I915_WRITE(reg, pipe_stats[pipe]); |
3861 | irq_received = true; | 3871 | irq_received = true; |
3862 | } | 3872 | } |
3863 | } | 3873 | } |
3864 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 3874 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
3865 | 3875 | ||
3866 | if (!irq_received) | 3876 | if (!irq_received) |
3867 | break; | 3877 | break; |
3868 | 3878 | ||
3869 | ret = IRQ_HANDLED; | 3879 | ret = IRQ_HANDLED; |
3870 | 3880 | ||
3871 | /* Consume port. Then clear IIR or we'll miss events */ | 3881 | /* Consume port. Then clear IIR or we'll miss events */ |
3872 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { | 3882 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { |
3873 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | 3883 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
3874 | u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? | 3884 | u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? |
3875 | HOTPLUG_INT_STATUS_G4X : | 3885 | HOTPLUG_INT_STATUS_G4X : |
3876 | HOTPLUG_INT_STATUS_I915); | 3886 | HOTPLUG_INT_STATUS_I915); |
3877 | 3887 | ||
3878 | intel_hpd_irq_handler(dev, hotplug_trigger, | 3888 | intel_hpd_irq_handler(dev, hotplug_trigger, |
3879 | IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915); | 3889 | IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915); |
3880 | 3890 | ||
3881 | if (IS_G4X(dev) && | 3891 | if (IS_G4X(dev) && |
3882 | (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)) | 3892 | (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)) |
3883 | dp_aux_irq_handler(dev); | 3893 | dp_aux_irq_handler(dev); |
3884 | 3894 | ||
3885 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 3895 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
3886 | I915_READ(PORT_HOTPLUG_STAT); | 3896 | I915_READ(PORT_HOTPLUG_STAT); |
3887 | } | 3897 | } |
3888 | 3898 | ||
3889 | I915_WRITE(IIR, iir & ~flip_mask); | 3899 | I915_WRITE(IIR, iir & ~flip_mask); |
3890 | new_iir = I915_READ(IIR); /* Flush posted writes */ | 3900 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
3891 | 3901 | ||
3892 | if (iir & I915_USER_INTERRUPT) | 3902 | if (iir & I915_USER_INTERRUPT) |
3893 | notify_ring(dev, &dev_priv->ring[RCS]); | 3903 | notify_ring(dev, &dev_priv->ring[RCS]); |
3894 | if (iir & I915_BSD_USER_INTERRUPT) | 3904 | if (iir & I915_BSD_USER_INTERRUPT) |
3895 | notify_ring(dev, &dev_priv->ring[VCS]); | 3905 | notify_ring(dev, &dev_priv->ring[VCS]); |
3896 | 3906 | ||
3897 | for_each_pipe(pipe) { | 3907 | for_each_pipe(pipe) { |
3898 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && | 3908 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && |
3899 | i915_handle_vblank(dev, pipe, pipe, iir)) | 3909 | i915_handle_vblank(dev, pipe, pipe, iir)) |
3900 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); | 3910 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); |
3901 | 3911 | ||
3902 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | 3912 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) |
3903 | blc_event = true; | 3913 | blc_event = true; |
3904 | 3914 | ||
3905 | if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) | 3915 | if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) |
3906 | i9xx_pipe_crc_irq_handler(dev, pipe); | 3916 | i9xx_pipe_crc_irq_handler(dev, pipe); |
3907 | 3917 | ||
3908 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && | 3918 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && |
3909 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) | 3919 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) |
3910 | DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); | 3920 | DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); |
3911 | } | 3921 | } |
3912 | 3922 | ||
3913 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) | 3923 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) |
3914 | intel_opregion_asle_intr(dev); | 3924 | intel_opregion_asle_intr(dev); |
3915 | 3925 | ||
3916 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) | 3926 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) |
3917 | gmbus_irq_handler(dev); | 3927 | gmbus_irq_handler(dev); |
3918 | 3928 | ||
3919 | /* With MSI, interrupts are only generated when iir | 3929 | /* With MSI, interrupts are only generated when iir |
3920 | * transitions from zero to nonzero. If another bit got | 3930 | * transitions from zero to nonzero. If another bit got |
3921 | * set while we were handling the existing iir bits, then | 3931 | * set while we were handling the existing iir bits, then |
3922 | * we would never get another interrupt. | 3932 | * we would never get another interrupt. |
3923 | * | 3933 | * |
3924 | * This is fine on non-MSI as well, as if we hit this path | 3934 | * This is fine on non-MSI as well, as if we hit this path |
3925 | * we avoid exiting the interrupt handler only to generate | 3935 | * we avoid exiting the interrupt handler only to generate |
3926 | * another one. | 3936 | * another one. |
3927 | * | 3937 | * |
3928 | * Note that for MSI this could cause a stray interrupt report | 3938 | * Note that for MSI this could cause a stray interrupt report |
3929 | * if an interrupt landed in the time between writing IIR and | 3939 | * if an interrupt landed in the time between writing IIR and |
3930 | * the posting read. This should be rare enough to never | 3940 | * the posting read. This should be rare enough to never |
3931 | * trigger the 99% of 100,000 interrupts test for disabling | 3941 | * trigger the 99% of 100,000 interrupts test for disabling |
3932 | * stray interrupts. | 3942 | * stray interrupts. |
3933 | */ | 3943 | */ |
3934 | iir = new_iir; | 3944 | iir = new_iir; |
3935 | } | 3945 | } |
3936 | 3946 | ||
3937 | i915_update_dri1_breadcrumb(dev); | 3947 | i915_update_dri1_breadcrumb(dev); |
3938 | 3948 | ||
3939 | return ret; | 3949 | return ret; |
3940 | } | 3950 | } |
3941 | 3951 | ||
3942 | static void i965_irq_uninstall(struct drm_device * dev) | 3952 | static void i965_irq_uninstall(struct drm_device * dev) |
3943 | { | 3953 | { |
3944 | struct drm_i915_private *dev_priv = dev->dev_private; | 3954 | struct drm_i915_private *dev_priv = dev->dev_private; |
3945 | int pipe; | 3955 | int pipe; |
3946 | 3956 | ||
3947 | if (!dev_priv) | 3957 | if (!dev_priv) |
3948 | return; | 3958 | return; |
3949 | 3959 | ||
3950 | intel_hpd_irq_uninstall(dev_priv); | 3960 | intel_hpd_irq_uninstall(dev_priv); |
3951 | 3961 | ||
3952 | I915_WRITE(PORT_HOTPLUG_EN, 0); | 3962 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
3953 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | 3963 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
3954 | 3964 | ||
3955 | I915_WRITE(HWSTAM, 0xffffffff); | 3965 | I915_WRITE(HWSTAM, 0xffffffff); |
3956 | for_each_pipe(pipe) | 3966 | for_each_pipe(pipe) |
3957 | I915_WRITE(PIPESTAT(pipe), 0); | 3967 | I915_WRITE(PIPESTAT(pipe), 0); |
3958 | I915_WRITE(IMR, 0xffffffff); | 3968 | I915_WRITE(IMR, 0xffffffff); |
3959 | I915_WRITE(IER, 0x0); | 3969 | I915_WRITE(IER, 0x0); |
3960 | 3970 | ||
3961 | for_each_pipe(pipe) | 3971 | for_each_pipe(pipe) |
3962 | I915_WRITE(PIPESTAT(pipe), | 3972 | I915_WRITE(PIPESTAT(pipe), |
3963 | I915_READ(PIPESTAT(pipe)) & 0x8000ffff); | 3973 | I915_READ(PIPESTAT(pipe)) & 0x8000ffff); |
3964 | I915_WRITE(IIR, I915_READ(IIR)); | 3974 | I915_WRITE(IIR, I915_READ(IIR)); |
3965 | } | 3975 | } |
3966 | 3976 | ||
3967 | static void intel_hpd_irq_reenable(unsigned long data) | 3977 | static void intel_hpd_irq_reenable(unsigned long data) |
3968 | { | 3978 | { |
3969 | struct drm_i915_private *dev_priv = (struct drm_i915_private *)data; | 3979 | struct drm_i915_private *dev_priv = (struct drm_i915_private *)data; |
3970 | struct drm_device *dev = dev_priv->dev; | 3980 | struct drm_device *dev = dev_priv->dev; |
3971 | struct drm_mode_config *mode_config = &dev->mode_config; | 3981 | struct drm_mode_config *mode_config = &dev->mode_config; |
3972 | unsigned long irqflags; | 3982 | unsigned long irqflags; |
3973 | int i; | 3983 | int i; |
3974 | 3984 | ||
3975 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 3985 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
3976 | for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { | 3986 | for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { |
3977 | struct drm_connector *connector; | 3987 | struct drm_connector *connector; |
3978 | 3988 | ||
3979 | if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) | 3989 | if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) |
3980 | continue; | 3990 | continue; |
3981 | 3991 | ||
3982 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; | 3992 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; |
3983 | 3993 | ||
3984 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 3994 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
3985 | struct intel_connector *intel_connector = to_intel_connector(connector); | 3995 | struct intel_connector *intel_connector = to_intel_connector(connector); |
3986 | 3996 | ||
3987 | if (intel_connector->encoder->hpd_pin == i) { | 3997 | if (intel_connector->encoder->hpd_pin == i) { |
3988 | if (connector->polled != intel_connector->polled) | 3998 | if (connector->polled != intel_connector->polled) |
3989 | DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", | 3999 | DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", |
3990 | drm_get_connector_name(connector)); | 4000 | drm_get_connector_name(connector)); |
3991 | connector->polled = intel_connector->polled; | 4001 | connector->polled = intel_connector->polled; |
3992 | if (!connector->polled) | 4002 | if (!connector->polled) |
3993 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 4003 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
3994 | } | 4004 | } |
3995 | } | 4005 | } |
3996 | } | 4006 | } |
3997 | if (dev_priv->display.hpd_irq_setup) | 4007 | if (dev_priv->display.hpd_irq_setup) |
3998 | dev_priv->display.hpd_irq_setup(dev); | 4008 | dev_priv->display.hpd_irq_setup(dev); |
3999 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 4009 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
4000 | } | 4010 | } |
4001 | 4011 | ||
4002 | void intel_irq_init(struct drm_device *dev) | 4012 | void intel_irq_init(struct drm_device *dev) |
4003 | { | 4013 | { |
4004 | struct drm_i915_private *dev_priv = dev->dev_private; | 4014 | struct drm_i915_private *dev_priv = dev->dev_private; |
4005 | 4015 | ||
4006 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | 4016 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); |
4007 | INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); | 4017 | INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); |
4008 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); | 4018 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); |
4009 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); | 4019 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); |
4010 | 4020 | ||
4011 | /* Let's track the enabled rps events */ | 4021 | /* Let's track the enabled rps events */ |
4012 | dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; | 4022 | dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; |
4013 | 4023 | ||
4014 | setup_timer(&dev_priv->gpu_error.hangcheck_timer, | 4024 | setup_timer(&dev_priv->gpu_error.hangcheck_timer, |
4015 | i915_hangcheck_elapsed, | 4025 | i915_hangcheck_elapsed, |
4016 | (unsigned long) dev); | 4026 | (unsigned long) dev); |
4017 | setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable, | 4027 | setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable, |
4018 | (unsigned long) dev_priv); | 4028 | (unsigned long) dev_priv); |
4019 | 4029 | ||
4020 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); | 4030 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); |
4021 | 4031 | ||
4022 | if (IS_GEN2(dev)) { | 4032 | if (IS_GEN2(dev)) { |
4023 | dev->max_vblank_count = 0; | 4033 | dev->max_vblank_count = 0; |
4024 | dev->driver->get_vblank_counter = i8xx_get_vblank_counter; | 4034 | dev->driver->get_vblank_counter = i8xx_get_vblank_counter; |
4025 | } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { | 4035 | } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { |
4026 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ | 4036 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ |
4027 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | 4037 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; |
4028 | } else { | 4038 | } else { |
4029 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | 4039 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
4030 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | 4040 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ |
4031 | } | 4041 | } |
4032 | 4042 | ||
4033 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 4043 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
4034 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; | 4044 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; |
4035 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; | 4045 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; |
4036 | } | 4046 | } |
4037 | 4047 | ||
4038 | if (IS_VALLEYVIEW(dev)) { | 4048 | if (IS_VALLEYVIEW(dev)) { |
4039 | dev->driver->irq_handler = valleyview_irq_handler; | 4049 | dev->driver->irq_handler = valleyview_irq_handler; |
4040 | dev->driver->irq_preinstall = valleyview_irq_preinstall; | 4050 | dev->driver->irq_preinstall = valleyview_irq_preinstall; |
4041 | dev->driver->irq_postinstall = valleyview_irq_postinstall; | 4051 | dev->driver->irq_postinstall = valleyview_irq_postinstall; |
4042 | dev->driver->irq_uninstall = valleyview_irq_uninstall; | 4052 | dev->driver->irq_uninstall = valleyview_irq_uninstall; |
4043 | dev->driver->enable_vblank = valleyview_enable_vblank; | 4053 | dev->driver->enable_vblank = valleyview_enable_vblank; |
4044 | dev->driver->disable_vblank = valleyview_disable_vblank; | 4054 | dev->driver->disable_vblank = valleyview_disable_vblank; |
4045 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; | 4055 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
4046 | } else if (IS_GEN8(dev)) { | 4056 | } else if (IS_GEN8(dev)) { |
4047 | dev->driver->irq_handler = gen8_irq_handler; | 4057 | dev->driver->irq_handler = gen8_irq_handler; |
4048 | dev->driver->irq_preinstall = gen8_irq_preinstall; | 4058 | dev->driver->irq_preinstall = gen8_irq_preinstall; |
4049 | dev->driver->irq_postinstall = gen8_irq_postinstall; | 4059 | dev->driver->irq_postinstall = gen8_irq_postinstall; |
4050 | dev->driver->irq_uninstall = gen8_irq_uninstall; | 4060 | dev->driver->irq_uninstall = gen8_irq_uninstall; |
4051 | dev->driver->enable_vblank = gen8_enable_vblank; | 4061 | dev->driver->enable_vblank = gen8_enable_vblank; |
4052 | dev->driver->disable_vblank = gen8_disable_vblank; | 4062 | dev->driver->disable_vblank = gen8_disable_vblank; |
4053 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; | 4063 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; |
4054 | } else if (HAS_PCH_SPLIT(dev)) { | 4064 | } else if (HAS_PCH_SPLIT(dev)) { |
4055 | dev->driver->irq_handler = ironlake_irq_handler; | 4065 | dev->driver->irq_handler = ironlake_irq_handler; |
4056 | dev->driver->irq_preinstall = ironlake_irq_preinstall; | 4066 | dev->driver->irq_preinstall = ironlake_irq_preinstall; |
4057 | dev->driver->irq_postinstall = ironlake_irq_postinstall; | 4067 | dev->driver->irq_postinstall = ironlake_irq_postinstall; |
4058 | dev->driver->irq_uninstall = ironlake_irq_uninstall; | 4068 | dev->driver->irq_uninstall = ironlake_irq_uninstall; |
4059 | dev->driver->enable_vblank = ironlake_enable_vblank; | 4069 | dev->driver->enable_vblank = ironlake_enable_vblank; |
4060 | dev->driver->disable_vblank = ironlake_disable_vblank; | 4070 | dev->driver->disable_vblank = ironlake_disable_vblank; |
4061 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; | 4071 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; |
4062 | } else { | 4072 | } else { |
4063 | if (INTEL_INFO(dev)->gen == 2) { | 4073 | if (INTEL_INFO(dev)->gen == 2) { |
4064 | dev->driver->irq_preinstall = i8xx_irq_preinstall; | 4074 | dev->driver->irq_preinstall = i8xx_irq_preinstall; |
4065 | dev->driver->irq_postinstall = i8xx_irq_postinstall; | 4075 | dev->driver->irq_postinstall = i8xx_irq_postinstall; |
4066 | dev->driver->irq_handler = i8xx_irq_handler; | 4076 | dev->driver->irq_handler = i8xx_irq_handler; |
4067 | dev->driver->irq_uninstall = i8xx_irq_uninstall; | 4077 | dev->driver->irq_uninstall = i8xx_irq_uninstall; |
4068 | } else if (INTEL_INFO(dev)->gen == 3) { | 4078 | } else if (INTEL_INFO(dev)->gen == 3) { |
4069 | dev->driver->irq_preinstall = i915_irq_preinstall; | 4079 | dev->driver->irq_preinstall = i915_irq_preinstall; |
4070 | dev->driver->irq_postinstall = i915_irq_postinstall; | 4080 | dev->driver->irq_postinstall = i915_irq_postinstall; |
4071 | dev->driver->irq_uninstall = i915_irq_uninstall; | 4081 | dev->driver->irq_uninstall = i915_irq_uninstall; |
4072 | dev->driver->irq_handler = i915_irq_handler; | 4082 | dev->driver->irq_handler = i915_irq_handler; |
4073 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; | 4083 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
4074 | } else { | 4084 | } else { |
4075 | dev->driver->irq_preinstall = i965_irq_preinstall; | 4085 | dev->driver->irq_preinstall = i965_irq_preinstall; |
4076 | dev->driver->irq_postinstall = i965_irq_postinstall; | 4086 | dev->driver->irq_postinstall = i965_irq_postinstall; |
4077 | dev->driver->irq_uninstall = i965_irq_uninstall; | 4087 | dev->driver->irq_uninstall = i965_irq_uninstall; |
4078 | dev->driver->irq_handler = i965_irq_handler; | 4088 | dev->driver->irq_handler = i965_irq_handler; |
4079 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; | 4089 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
4080 | } | 4090 | } |
4081 | dev->driver->enable_vblank = i915_enable_vblank; | 4091 | dev->driver->enable_vblank = i915_enable_vblank; |
4082 | dev->driver->disable_vblank = i915_disable_vblank; | 4092 | dev->driver->disable_vblank = i915_disable_vblank; |
4083 | } | 4093 | } |
4084 | } | 4094 | } |
4085 | 4095 | ||
4086 | void intel_hpd_init(struct drm_device *dev) | 4096 | void intel_hpd_init(struct drm_device *dev) |
4087 | { | 4097 | { |
4088 | struct drm_i915_private *dev_priv = dev->dev_private; | 4098 | struct drm_i915_private *dev_priv = dev->dev_private; |
4089 | struct drm_mode_config *mode_config = &dev->mode_config; | 4099 | struct drm_mode_config *mode_config = &dev->mode_config; |
4090 | struct drm_connector *connector; | 4100 | struct drm_connector *connector; |
4091 | unsigned long irqflags; | 4101 | unsigned long irqflags; |
4092 | int i; | 4102 | int i; |
4093 | 4103 | ||
4094 | for (i = 1; i < HPD_NUM_PINS; i++) { | 4104 | for (i = 1; i < HPD_NUM_PINS; i++) { |
4095 | dev_priv->hpd_stats[i].hpd_cnt = 0; | 4105 | dev_priv->hpd_stats[i].hpd_cnt = 0; |
4096 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; | 4106 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; |
4097 | } | 4107 | } |
4098 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 4108 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
4099 | struct intel_connector *intel_connector = to_intel_connector(connector); | 4109 | struct intel_connector *intel_connector = to_intel_connector(connector); |
4100 | connector->polled = intel_connector->polled; | 4110 | connector->polled = intel_connector->polled; |
4101 | if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) | 4111 | if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) |
4102 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 4112 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
4103 | } | 4113 | } |
4104 | 4114 | ||
4105 | /* Interrupt setup is already guaranteed to be single-threaded, this is | 4115 | /* Interrupt setup is already guaranteed to be single-threaded, this is |
4106 | * just to make the assert_spin_locked checks happy. */ | 4116 | * just to make the assert_spin_locked checks happy. */ |
4107 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 4117 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
4108 | if (dev_priv->display.hpd_irq_setup) | 4118 | if (dev_priv->display.hpd_irq_setup) |
4109 | dev_priv->display.hpd_irq_setup(dev); | 4119 | dev_priv->display.hpd_irq_setup(dev); |
4110 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 4120 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
4111 | } | 4121 | } |
4112 | 4122 | ||
4113 | /* Disable interrupts so we can allow runtime PM. */ | 4123 | /* Disable interrupts so we can allow runtime PM. */ |
4114 | void hsw_runtime_pm_disable_interrupts(struct drm_device *dev) | 4124 | void hsw_runtime_pm_disable_interrupts(struct drm_device *dev) |
4115 | { | 4125 | { |
4116 | struct drm_i915_private *dev_priv = dev->dev_private; | 4126 | struct drm_i915_private *dev_priv = dev->dev_private; |
4117 | unsigned long irqflags; | 4127 | unsigned long irqflags; |
4118 | 4128 | ||
4119 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 4129 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
4120 | 4130 | ||
4121 | dev_priv->pm.regsave.deimr = I915_READ(DEIMR); | 4131 | dev_priv->pm.regsave.deimr = I915_READ(DEIMR); |
4122 | dev_priv->pm.regsave.sdeimr = I915_READ(SDEIMR); | 4132 | dev_priv->pm.regsave.sdeimr = I915_READ(SDEIMR); |
4123 | dev_priv->pm.regsave.gtimr = I915_READ(GTIMR); | 4133 | dev_priv->pm.regsave.gtimr = I915_READ(GTIMR); |
4124 | dev_priv->pm.regsave.gtier = I915_READ(GTIER); | 4134 | dev_priv->pm.regsave.gtier = I915_READ(GTIER); |
4125 | dev_priv->pm.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); | 4135 | dev_priv->pm.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); |
4126 | 4136 | ||
4127 | ironlake_disable_display_irq(dev_priv, 0xffffffff); | 4137 | ironlake_disable_display_irq(dev_priv, 0xffffffff); |
4128 | ibx_disable_display_interrupt(dev_priv, 0xffffffff); | 4138 | ibx_disable_display_interrupt(dev_priv, 0xffffffff); |
4129 | ilk_disable_gt_irq(dev_priv, 0xffffffff); | 4139 | ilk_disable_gt_irq(dev_priv, 0xffffffff); |
4130 | snb_disable_pm_irq(dev_priv, 0xffffffff); | 4140 | snb_disable_pm_irq(dev_priv, 0xffffffff); |
4131 | 4141 | ||
4132 | dev_priv->pm.irqs_disabled = true; | 4142 | dev_priv->pm.irqs_disabled = true; |
4133 | 4143 | ||
4134 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 4144 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
4135 | } | 4145 | } |
4136 | 4146 | ||
4137 | /* Restore interrupts so we can recover from runtime PM. */ | 4147 | /* Restore interrupts so we can recover from runtime PM. */ |
4138 | void hsw_runtime_pm_restore_interrupts(struct drm_device *dev) | 4148 | void hsw_runtime_pm_restore_interrupts(struct drm_device *dev) |
4139 | { | 4149 | { |
4140 | struct drm_i915_private *dev_priv = dev->dev_private; | 4150 | struct drm_i915_private *dev_priv = dev->dev_private; |
4141 | unsigned long irqflags; | 4151 | unsigned long irqflags; |
4142 | uint32_t val; | 4152 | uint32_t val; |
4143 | 4153 | ||
4144 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 4154 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
4145 | 4155 | ||
4146 | val = I915_READ(DEIMR); | 4156 | val = I915_READ(DEIMR); |
4147 | WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val); | 4157 | WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val); |
4148 | 4158 | ||
4149 | val = I915_READ(SDEIMR); | 4159 | val = I915_READ(SDEIMR); |
4150 | WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val); | 4160 | WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val); |
4151 | 4161 | ||
4152 | val = I915_READ(GTIMR); | 4162 | val = I915_READ(GTIMR); |
4153 | WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val); | 4163 | WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val); |
4154 | 4164 | ||
4155 | val = I915_READ(GEN6_PMIMR); | 4165 | val = I915_READ(GEN6_PMIMR); |
4156 | WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val); | 4166 | WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val); |
4157 | 4167 | ||
4158 | dev_priv->pm.irqs_disabled = false; | 4168 | dev_priv->pm.irqs_disabled = false; |
4159 | 4169 | ||
4160 | ironlake_enable_display_irq(dev_priv, ~dev_priv->pm.regsave.deimr); | 4170 | ironlake_enable_display_irq(dev_priv, ~dev_priv->pm.regsave.deimr); |
4161 | ibx_enable_display_interrupt(dev_priv, ~dev_priv->pm.regsave.sdeimr); | 4171 | ibx_enable_display_interrupt(dev_priv, ~dev_priv->pm.regsave.sdeimr); |
4162 | ilk_enable_gt_irq(dev_priv, ~dev_priv->pm.regsave.gtimr); | 4172 | ilk_enable_gt_irq(dev_priv, ~dev_priv->pm.regsave.gtimr); |
4163 | snb_enable_pm_irq(dev_priv, ~dev_priv->pm.regsave.gen6_pmimr); | 4173 | snb_enable_pm_irq(dev_priv, ~dev_priv->pm.regsave.gen6_pmimr); |
4164 | I915_WRITE(GTIER, dev_priv->pm.regsave.gtier); | 4174 | I915_WRITE(GTIER, dev_priv->pm.regsave.gtier); |
4165 | 4175 | ||
4166 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 4176 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
4167 | } | 4177 | } |
4168 | 4178 |
drivers/gpu/drm/i915/i915_reg.h
1 | /* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. | 1 | /* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
2 | * All Rights Reserved. | 2 | * All Rights Reserved. |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the | 5 | * copy of this software and associated documentation files (the |
6 | * "Software"), to deal in the Software without restriction, including | 6 | * "Software"), to deal in the Software without restriction, including |
7 | * without limitation the rights to use, copy, modify, merge, publish, | 7 | * without limitation the rights to use, copy, modify, merge, publish, |
8 | * distribute, sub license, and/or sell copies of the Software, and to | 8 | * distribute, sub license, and/or sell copies of the Software, and to |
9 | * permit persons to whom the Software is furnished to do so, subject to | 9 | * permit persons to whom the Software is furnished to do so, subject to |
10 | * the following conditions: | 10 | * the following conditions: |
11 | * | 11 | * |
12 | * The above copyright notice and this permission notice (including the | 12 | * The above copyright notice and this permission notice (including the |
13 | * next paragraph) shall be included in all copies or substantial portions | 13 | * next paragraph) shall be included in all copies or substantial portions |
14 | * of the Software. | 14 | * of the Software. |
15 | * | 15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
17 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 17 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
18 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | 18 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
19 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | 19 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR |
20 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | 20 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
21 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | 21 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
22 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 22 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #ifndef _I915_REG_H_ | 25 | #ifndef _I915_REG_H_ |
26 | #define _I915_REG_H_ | 26 | #define _I915_REG_H_ |
27 | 27 | ||
28 | #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) | 28 | #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) |
29 | #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) | 29 | #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) |
30 | 30 | ||
31 | #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) | 31 | #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) |
32 | 32 | ||
33 | #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) | 33 | #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) |
34 | #define _MASKED_BIT_DISABLE(a) ((a) << 16) | 34 | #define _MASKED_BIT_DISABLE(a) ((a) << 16) |
35 | 35 | ||
36 | /* PCI config space */ | 36 | /* PCI config space */ |
37 | 37 | ||
38 | #define HPLLCC 0xc0 /* 855 only */ | 38 | #define HPLLCC 0xc0 /* 855 only */ |
39 | #define GC_CLOCK_CONTROL_MASK (0xf << 0) | 39 | #define GC_CLOCK_CONTROL_MASK (0xf << 0) |
40 | #define GC_CLOCK_133_200 (0 << 0) | 40 | #define GC_CLOCK_133_200 (0 << 0) |
41 | #define GC_CLOCK_100_200 (1 << 0) | 41 | #define GC_CLOCK_100_200 (1 << 0) |
42 | #define GC_CLOCK_100_133 (2 << 0) | 42 | #define GC_CLOCK_100_133 (2 << 0) |
43 | #define GC_CLOCK_166_250 (3 << 0) | 43 | #define GC_CLOCK_166_250 (3 << 0) |
44 | #define GCFGC2 0xda | 44 | #define GCFGC2 0xda |
45 | #define GCFGC 0xf0 /* 915+ only */ | 45 | #define GCFGC 0xf0 /* 915+ only */ |
46 | #define GC_LOW_FREQUENCY_ENABLE (1 << 7) | 46 | #define GC_LOW_FREQUENCY_ENABLE (1 << 7) |
47 | #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) | 47 | #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) |
48 | #define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) | 48 | #define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) |
49 | #define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4) | 49 | #define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4) |
50 | #define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4) | 50 | #define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4) |
51 | #define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4) | 51 | #define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4) |
52 | #define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4) | 52 | #define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4) |
53 | #define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4) | 53 | #define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4) |
54 | #define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4) | 54 | #define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4) |
55 | #define GC_DISPLAY_CLOCK_MASK (7 << 4) | 55 | #define GC_DISPLAY_CLOCK_MASK (7 << 4) |
56 | #define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) | 56 | #define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) |
57 | #define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) | 57 | #define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) |
58 | #define GM45_GC_RENDER_CLOCK_320_MHZ (9 << 0) | 58 | #define GM45_GC_RENDER_CLOCK_320_MHZ (9 << 0) |
59 | #define GM45_GC_RENDER_CLOCK_400_MHZ (0xb << 0) | 59 | #define GM45_GC_RENDER_CLOCK_400_MHZ (0xb << 0) |
60 | #define GM45_GC_RENDER_CLOCK_533_MHZ (0xc << 0) | 60 | #define GM45_GC_RENDER_CLOCK_533_MHZ (0xc << 0) |
61 | #define I965_GC_RENDER_CLOCK_MASK (0xf << 0) | 61 | #define I965_GC_RENDER_CLOCK_MASK (0xf << 0) |
62 | #define I965_GC_RENDER_CLOCK_267_MHZ (2 << 0) | 62 | #define I965_GC_RENDER_CLOCK_267_MHZ (2 << 0) |
63 | #define I965_GC_RENDER_CLOCK_333_MHZ (3 << 0) | 63 | #define I965_GC_RENDER_CLOCK_333_MHZ (3 << 0) |
64 | #define I965_GC_RENDER_CLOCK_444_MHZ (4 << 0) | 64 | #define I965_GC_RENDER_CLOCK_444_MHZ (4 << 0) |
65 | #define I965_GC_RENDER_CLOCK_533_MHZ (5 << 0) | 65 | #define I965_GC_RENDER_CLOCK_533_MHZ (5 << 0) |
66 | #define I945_GC_RENDER_CLOCK_MASK (7 << 0) | 66 | #define I945_GC_RENDER_CLOCK_MASK (7 << 0) |
67 | #define I945_GC_RENDER_CLOCK_166_MHZ (0 << 0) | 67 | #define I945_GC_RENDER_CLOCK_166_MHZ (0 << 0) |
68 | #define I945_GC_RENDER_CLOCK_200_MHZ (1 << 0) | 68 | #define I945_GC_RENDER_CLOCK_200_MHZ (1 << 0) |
69 | #define I945_GC_RENDER_CLOCK_250_MHZ (3 << 0) | 69 | #define I945_GC_RENDER_CLOCK_250_MHZ (3 << 0) |
70 | #define I945_GC_RENDER_CLOCK_400_MHZ (5 << 0) | 70 | #define I945_GC_RENDER_CLOCK_400_MHZ (5 << 0) |
71 | #define I915_GC_RENDER_CLOCK_MASK (7 << 0) | 71 | #define I915_GC_RENDER_CLOCK_MASK (7 << 0) |
72 | #define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) | 72 | #define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) |
73 | #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) | 73 | #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) |
74 | #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) | 74 | #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) |
75 | #define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */ | 75 | #define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */ |
76 | 76 | ||
77 | 77 | ||
78 | /* Graphics reset regs */ | 78 | /* Graphics reset regs */ |
79 | #define I965_GDRST 0xc0 /* PCI config register */ | 79 | #define I965_GDRST 0xc0 /* PCI config register */ |
80 | #define ILK_GDSR 0x2ca4 /* MCHBAR offset */ | 80 | #define ILK_GDSR 0x2ca4 /* MCHBAR offset */ |
81 | #define GRDOM_FULL (0<<2) | 81 | #define GRDOM_FULL (0<<2) |
82 | #define GRDOM_RENDER (1<<2) | 82 | #define GRDOM_RENDER (1<<2) |
83 | #define GRDOM_MEDIA (3<<2) | 83 | #define GRDOM_MEDIA (3<<2) |
84 | #define GRDOM_MASK (3<<2) | 84 | #define GRDOM_MASK (3<<2) |
85 | #define GRDOM_RESET_ENABLE (1<<0) | 85 | #define GRDOM_RESET_ENABLE (1<<0) |
86 | 86 | ||
87 | #define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ | 87 | #define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ |
88 | #define GEN6_MBC_SNPCR_SHIFT 21 | 88 | #define GEN6_MBC_SNPCR_SHIFT 21 |
89 | #define GEN6_MBC_SNPCR_MASK (3<<21) | 89 | #define GEN6_MBC_SNPCR_MASK (3<<21) |
90 | #define GEN6_MBC_SNPCR_MAX (0<<21) | 90 | #define GEN6_MBC_SNPCR_MAX (0<<21) |
91 | #define GEN6_MBC_SNPCR_MED (1<<21) | 91 | #define GEN6_MBC_SNPCR_MED (1<<21) |
92 | #define GEN6_MBC_SNPCR_LOW (2<<21) | 92 | #define GEN6_MBC_SNPCR_LOW (2<<21) |
93 | #define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ | 93 | #define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ |
94 | 94 | ||
95 | #define GEN6_MBCTL 0x0907c | 95 | #define GEN6_MBCTL 0x0907c |
96 | #define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) | 96 | #define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) |
97 | #define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) | 97 | #define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) |
98 | #define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2) | 98 | #define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2) |
99 | #define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1) | 99 | #define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1) |
100 | #define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0) | 100 | #define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0) |
101 | 101 | ||
102 | #define GEN6_GDRST 0x941c | 102 | #define GEN6_GDRST 0x941c |
103 | #define GEN6_GRDOM_FULL (1 << 0) | 103 | #define GEN6_GRDOM_FULL (1 << 0) |
104 | #define GEN6_GRDOM_RENDER (1 << 1) | 104 | #define GEN6_GRDOM_RENDER (1 << 1) |
105 | #define GEN6_GRDOM_MEDIA (1 << 2) | 105 | #define GEN6_GRDOM_MEDIA (1 << 2) |
106 | #define GEN6_GRDOM_BLT (1 << 3) | 106 | #define GEN6_GRDOM_BLT (1 << 3) |
107 | 107 | ||
108 | #define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) | 108 | #define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) |
109 | #define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) | 109 | #define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) |
110 | #define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) | 110 | #define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) |
111 | #define PP_DIR_DCLV_2G 0xffffffff | 111 | #define PP_DIR_DCLV_2G 0xffffffff |
112 | 112 | ||
113 | #define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4)) | 113 | #define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4)) |
114 | #define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8) | 114 | #define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8) |
115 | 115 | ||
116 | #define GAM_ECOCHK 0x4090 | 116 | #define GAM_ECOCHK 0x4090 |
117 | #define ECOCHK_SNB_BIT (1<<10) | 117 | #define ECOCHK_SNB_BIT (1<<10) |
118 | #define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) | 118 | #define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) |
119 | #define ECOCHK_PPGTT_CACHE64B (0x3<<3) | 119 | #define ECOCHK_PPGTT_CACHE64B (0x3<<3) |
120 | #define ECOCHK_PPGTT_CACHE4B (0x0<<3) | 120 | #define ECOCHK_PPGTT_CACHE4B (0x0<<3) |
121 | #define ECOCHK_PPGTT_GFDT_IVB (0x1<<4) | 121 | #define ECOCHK_PPGTT_GFDT_IVB (0x1<<4) |
122 | #define ECOCHK_PPGTT_LLC_IVB (0x1<<3) | 122 | #define ECOCHK_PPGTT_LLC_IVB (0x1<<3) |
123 | #define ECOCHK_PPGTT_UC_HSW (0x1<<3) | 123 | #define ECOCHK_PPGTT_UC_HSW (0x1<<3) |
124 | #define ECOCHK_PPGTT_WT_HSW (0x2<<3) | 124 | #define ECOCHK_PPGTT_WT_HSW (0x2<<3) |
125 | #define ECOCHK_PPGTT_WB_HSW (0x3<<3) | 125 | #define ECOCHK_PPGTT_WB_HSW (0x3<<3) |
126 | 126 | ||
127 | #define GAC_ECO_BITS 0x14090 | 127 | #define GAC_ECO_BITS 0x14090 |
128 | #define ECOBITS_SNB_BIT (1<<13) | 128 | #define ECOBITS_SNB_BIT (1<<13) |
129 | #define ECOBITS_PPGTT_CACHE64B (3<<8) | 129 | #define ECOBITS_PPGTT_CACHE64B (3<<8) |
130 | #define ECOBITS_PPGTT_CACHE4B (0<<8) | 130 | #define ECOBITS_PPGTT_CACHE4B (0<<8) |
131 | 131 | ||
132 | #define GAB_CTL 0x24000 | 132 | #define GAB_CTL 0x24000 |
133 | #define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8) | 133 | #define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8) |
134 | 134 | ||
135 | /* VGA stuff */ | 135 | /* VGA stuff */ |
136 | 136 | ||
137 | #define VGA_ST01_MDA 0x3ba | 137 | #define VGA_ST01_MDA 0x3ba |
138 | #define VGA_ST01_CGA 0x3da | 138 | #define VGA_ST01_CGA 0x3da |
139 | 139 | ||
140 | #define VGA_MSR_WRITE 0x3c2 | 140 | #define VGA_MSR_WRITE 0x3c2 |
141 | #define VGA_MSR_READ 0x3cc | 141 | #define VGA_MSR_READ 0x3cc |
142 | #define VGA_MSR_MEM_EN (1<<1) | 142 | #define VGA_MSR_MEM_EN (1<<1) |
143 | #define VGA_MSR_CGA_MODE (1<<0) | 143 | #define VGA_MSR_CGA_MODE (1<<0) |
144 | 144 | ||
145 | #define VGA_SR_INDEX 0x3c4 | 145 | #define VGA_SR_INDEX 0x3c4 |
146 | #define SR01 1 | 146 | #define SR01 1 |
147 | #define VGA_SR_DATA 0x3c5 | 147 | #define VGA_SR_DATA 0x3c5 |
148 | 148 | ||
149 | #define VGA_AR_INDEX 0x3c0 | 149 | #define VGA_AR_INDEX 0x3c0 |
150 | #define VGA_AR_VID_EN (1<<5) | 150 | #define VGA_AR_VID_EN (1<<5) |
151 | #define VGA_AR_DATA_WRITE 0x3c0 | 151 | #define VGA_AR_DATA_WRITE 0x3c0 |
152 | #define VGA_AR_DATA_READ 0x3c1 | 152 | #define VGA_AR_DATA_READ 0x3c1 |
153 | 153 | ||
154 | #define VGA_GR_INDEX 0x3ce | 154 | #define VGA_GR_INDEX 0x3ce |
155 | #define VGA_GR_DATA 0x3cf | 155 | #define VGA_GR_DATA 0x3cf |
156 | /* GR05 */ | 156 | /* GR05 */ |
157 | #define VGA_GR_MEM_READ_MODE_SHIFT 3 | 157 | #define VGA_GR_MEM_READ_MODE_SHIFT 3 |
158 | #define VGA_GR_MEM_READ_MODE_PLANE 1 | 158 | #define VGA_GR_MEM_READ_MODE_PLANE 1 |
159 | /* GR06 */ | 159 | /* GR06 */ |
160 | #define VGA_GR_MEM_MODE_MASK 0xc | 160 | #define VGA_GR_MEM_MODE_MASK 0xc |
161 | #define VGA_GR_MEM_MODE_SHIFT 2 | 161 | #define VGA_GR_MEM_MODE_SHIFT 2 |
162 | #define VGA_GR_MEM_A0000_AFFFF 0 | 162 | #define VGA_GR_MEM_A0000_AFFFF 0 |
163 | #define VGA_GR_MEM_A0000_BFFFF 1 | 163 | #define VGA_GR_MEM_A0000_BFFFF 1 |
164 | #define VGA_GR_MEM_B0000_B7FFF 2 | 164 | #define VGA_GR_MEM_B0000_B7FFF 2 |
165 | #define VGA_GR_MEM_B0000_BFFFF 3 | 165 | #define VGA_GR_MEM_B0000_BFFFF 3 |
166 | 166 | ||
167 | #define VGA_DACMASK 0x3c6 | 167 | #define VGA_DACMASK 0x3c6 |
168 | #define VGA_DACRX 0x3c7 | 168 | #define VGA_DACRX 0x3c7 |
169 | #define VGA_DACWX 0x3c8 | 169 | #define VGA_DACWX 0x3c8 |
170 | #define VGA_DACDATA 0x3c9 | 170 | #define VGA_DACDATA 0x3c9 |
171 | 171 | ||
172 | #define VGA_CR_INDEX_MDA 0x3b4 | 172 | #define VGA_CR_INDEX_MDA 0x3b4 |
173 | #define VGA_CR_DATA_MDA 0x3b5 | 173 | #define VGA_CR_DATA_MDA 0x3b5 |
174 | #define VGA_CR_INDEX_CGA 0x3d4 | 174 | #define VGA_CR_INDEX_CGA 0x3d4 |
175 | #define VGA_CR_DATA_CGA 0x3d5 | 175 | #define VGA_CR_DATA_CGA 0x3d5 |
176 | 176 | ||
177 | /* | 177 | /* |
178 | * Instruction field definitions used by the command parser | 178 | * Instruction field definitions used by the command parser |
179 | */ | 179 | */ |
180 | #define INSTR_CLIENT_SHIFT 29 | 180 | #define INSTR_CLIENT_SHIFT 29 |
181 | #define INSTR_CLIENT_MASK 0xE0000000 | 181 | #define INSTR_CLIENT_MASK 0xE0000000 |
182 | #define INSTR_MI_CLIENT 0x0 | 182 | #define INSTR_MI_CLIENT 0x0 |
183 | #define INSTR_BC_CLIENT 0x2 | 183 | #define INSTR_BC_CLIENT 0x2 |
184 | #define INSTR_RC_CLIENT 0x3 | 184 | #define INSTR_RC_CLIENT 0x3 |
185 | #define INSTR_SUBCLIENT_SHIFT 27 | 185 | #define INSTR_SUBCLIENT_SHIFT 27 |
186 | #define INSTR_SUBCLIENT_MASK 0x18000000 | 186 | #define INSTR_SUBCLIENT_MASK 0x18000000 |
187 | #define INSTR_MEDIA_SUBCLIENT 0x2 | 187 | #define INSTR_MEDIA_SUBCLIENT 0x2 |
188 | 188 | ||
189 | /* | 189 | /* |
190 | * Memory interface instructions used by the kernel | 190 | * Memory interface instructions used by the kernel |
191 | */ | 191 | */ |
192 | #define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) | 192 | #define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) |
193 | 193 | ||
194 | #define MI_NOOP MI_INSTR(0, 0) | 194 | #define MI_NOOP MI_INSTR(0, 0) |
195 | #define MI_USER_INTERRUPT MI_INSTR(0x02, 0) | 195 | #define MI_USER_INTERRUPT MI_INSTR(0x02, 0) |
196 | #define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) | 196 | #define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) |
197 | #define MI_WAIT_FOR_OVERLAY_FLIP (1<<16) | 197 | #define MI_WAIT_FOR_OVERLAY_FLIP (1<<16) |
198 | #define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) | 198 | #define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) |
199 | #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) | 199 | #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) |
200 | #define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) | 200 | #define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) |
201 | #define MI_FLUSH MI_INSTR(0x04, 0) | 201 | #define MI_FLUSH MI_INSTR(0x04, 0) |
202 | #define MI_READ_FLUSH (1 << 0) | 202 | #define MI_READ_FLUSH (1 << 0) |
203 | #define MI_EXE_FLUSH (1 << 1) | 203 | #define MI_EXE_FLUSH (1 << 1) |
204 | #define MI_NO_WRITE_FLUSH (1 << 2) | 204 | #define MI_NO_WRITE_FLUSH (1 << 2) |
205 | #define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ | 205 | #define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ |
206 | #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ | 206 | #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ |
207 | #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ | 207 | #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ |
208 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) | 208 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) |
209 | #define MI_ARB_ON_OFF MI_INSTR(0x08, 0) | 209 | #define MI_ARB_ON_OFF MI_INSTR(0x08, 0) |
210 | #define MI_ARB_ENABLE (1<<0) | 210 | #define MI_ARB_ENABLE (1<<0) |
211 | #define MI_ARB_DISABLE (0<<0) | 211 | #define MI_ARB_DISABLE (0<<0) |
212 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) | 212 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) |
213 | #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) | 213 | #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) |
214 | #define MI_SUSPEND_FLUSH_EN (1<<0) | 214 | #define MI_SUSPEND_FLUSH_EN (1<<0) |
215 | #define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) | 215 | #define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) |
216 | #define MI_OVERLAY_CONTINUE (0x0<<21) | 216 | #define MI_OVERLAY_CONTINUE (0x0<<21) |
217 | #define MI_OVERLAY_ON (0x1<<21) | 217 | #define MI_OVERLAY_ON (0x1<<21) |
218 | #define MI_OVERLAY_OFF (0x2<<21) | 218 | #define MI_OVERLAY_OFF (0x2<<21) |
219 | #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) | 219 | #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) |
220 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) | 220 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) |
221 | #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) | 221 | #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) |
222 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) | 222 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) |
223 | /* IVB has funny definitions for which plane to flip. */ | 223 | /* IVB has funny definitions for which plane to flip. */ |
224 | #define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19) | 224 | #define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19) |
225 | #define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19) | 225 | #define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19) |
226 | #define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19) | 226 | #define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19) |
227 | #define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) | 227 | #define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) |
228 | #define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) | 228 | #define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) |
229 | #define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) | 229 | #define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) |
230 | #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ | 230 | #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ |
231 | #define MI_SEMAPHORE_GLOBAL_GTT (1<<22) | 231 | #define MI_SEMAPHORE_GLOBAL_GTT (1<<22) |
232 | #define MI_SEMAPHORE_UPDATE (1<<21) | 232 | #define MI_SEMAPHORE_UPDATE (1<<21) |
233 | #define MI_SEMAPHORE_COMPARE (1<<20) | 233 | #define MI_SEMAPHORE_COMPARE (1<<20) |
234 | #define MI_SEMAPHORE_REGISTER (1<<18) | 234 | #define MI_SEMAPHORE_REGISTER (1<<18) |
235 | #define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */ | 235 | #define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */ |
236 | #define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */ | 236 | #define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */ |
237 | #define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */ | 237 | #define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */ |
238 | #define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */ | 238 | #define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */ |
239 | #define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */ | 239 | #define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */ |
240 | #define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */ | 240 | #define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */ |
241 | #define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */ | 241 | #define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */ |
242 | #define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */ | 242 | #define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */ |
243 | #define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */ | 243 | #define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */ |
244 | #define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */ | 244 | #define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */ |
245 | #define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */ | 245 | #define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */ |
246 | #define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */ | 246 | #define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */ |
247 | #define MI_SEMAPHORE_SYNC_INVALID (3<<16) | 247 | #define MI_SEMAPHORE_SYNC_INVALID (3<<16) |
248 | #define MI_SET_CONTEXT MI_INSTR(0x18, 0) | 248 | #define MI_SET_CONTEXT MI_INSTR(0x18, 0) |
249 | #define MI_MM_SPACE_GTT (1<<8) | 249 | #define MI_MM_SPACE_GTT (1<<8) |
250 | #define MI_MM_SPACE_PHYSICAL (0<<8) | 250 | #define MI_MM_SPACE_PHYSICAL (0<<8) |
251 | #define MI_SAVE_EXT_STATE_EN (1<<3) | 251 | #define MI_SAVE_EXT_STATE_EN (1<<3) |
252 | #define MI_RESTORE_EXT_STATE_EN (1<<2) | 252 | #define MI_RESTORE_EXT_STATE_EN (1<<2) |
253 | #define MI_FORCE_RESTORE (1<<1) | 253 | #define MI_FORCE_RESTORE (1<<1) |
254 | #define MI_RESTORE_INHIBIT (1<<0) | 254 | #define MI_RESTORE_INHIBIT (1<<0) |
255 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) | 255 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) |
256 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ | 256 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ |
257 | #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) | 257 | #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) |
258 | #define MI_STORE_DWORD_INDEX_SHIFT 2 | 258 | #define MI_STORE_DWORD_INDEX_SHIFT 2 |
259 | /* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: | 259 | /* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: |
260 | * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw | 260 | * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw |
261 | * simply ignores the register load under certain conditions. | 261 | * simply ignores the register load under certain conditions. |
262 | * - One can actually load arbitrary many arbitrary registers: Simply issue x | 262 | * - One can actually load arbitrary many arbitrary registers: Simply issue x |
263 | * address/value pairs. Don't overdue it, though, x <= 2^4 must hold! | 263 | * address/value pairs. Don't overdue it, though, x <= 2^4 must hold! |
264 | */ | 264 | */ |
265 | #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) | 265 | #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) |
266 | #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1) | 266 | #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1) |
267 | #define MI_SRM_LRM_GLOBAL_GTT (1<<22) | 267 | #define MI_SRM_LRM_GLOBAL_GTT (1<<22) |
268 | #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ | 268 | #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ |
269 | #define MI_FLUSH_DW_STORE_INDEX (1<<21) | 269 | #define MI_FLUSH_DW_STORE_INDEX (1<<21) |
270 | #define MI_INVALIDATE_TLB (1<<18) | 270 | #define MI_INVALIDATE_TLB (1<<18) |
271 | #define MI_FLUSH_DW_OP_STOREDW (1<<14) | 271 | #define MI_FLUSH_DW_OP_STOREDW (1<<14) |
272 | #define MI_INVALIDATE_BSD (1<<7) | 272 | #define MI_INVALIDATE_BSD (1<<7) |
273 | #define MI_FLUSH_DW_USE_GTT (1<<2) | 273 | #define MI_FLUSH_DW_USE_GTT (1<<2) |
274 | #define MI_FLUSH_DW_USE_PPGTT (0<<2) | 274 | #define MI_FLUSH_DW_USE_PPGTT (0<<2) |
275 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) | 275 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) |
276 | #define MI_BATCH_NON_SECURE (1) | 276 | #define MI_BATCH_NON_SECURE (1) |
277 | /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ | 277 | /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ |
278 | #define MI_BATCH_NON_SECURE_I965 (1<<8) | 278 | #define MI_BATCH_NON_SECURE_I965 (1<<8) |
279 | #define MI_BATCH_PPGTT_HSW (1<<8) | 279 | #define MI_BATCH_PPGTT_HSW (1<<8) |
280 | #define MI_BATCH_NON_SECURE_HSW (1<<13) | 280 | #define MI_BATCH_NON_SECURE_HSW (1<<13) |
281 | #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) | 281 | #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) |
282 | #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ | 282 | #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ |
283 | #define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) | 283 | #define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) |
284 | 284 | ||
285 | 285 | ||
286 | #define MI_PREDICATE_RESULT_2 (0x2214) | 286 | #define MI_PREDICATE_RESULT_2 (0x2214) |
287 | #define LOWER_SLICE_ENABLED (1<<0) | 287 | #define LOWER_SLICE_ENABLED (1<<0) |
288 | #define LOWER_SLICE_DISABLED (0<<0) | 288 | #define LOWER_SLICE_DISABLED (0<<0) |
289 | 289 | ||
290 | /* | 290 | /* |
291 | * 3D instructions used by the kernel | 291 | * 3D instructions used by the kernel |
292 | */ | 292 | */ |
293 | #define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) | 293 | #define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) |
294 | 294 | ||
295 | #define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) | 295 | #define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) |
296 | #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) | 296 | #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) |
297 | #define SC_UPDATE_SCISSOR (0x1<<1) | 297 | #define SC_UPDATE_SCISSOR (0x1<<1) |
298 | #define SC_ENABLE_MASK (0x1<<0) | 298 | #define SC_ENABLE_MASK (0x1<<0) |
299 | #define SC_ENABLE (0x1<<0) | 299 | #define SC_ENABLE (0x1<<0) |
300 | #define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) | 300 | #define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) |
301 | #define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) | 301 | #define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) |
302 | #define SCI_YMIN_MASK (0xffff<<16) | 302 | #define SCI_YMIN_MASK (0xffff<<16) |
303 | #define SCI_XMIN_MASK (0xffff<<0) | 303 | #define SCI_XMIN_MASK (0xffff<<0) |
304 | #define SCI_YMAX_MASK (0xffff<<16) | 304 | #define SCI_YMAX_MASK (0xffff<<16) |
305 | #define SCI_XMAX_MASK (0xffff<<0) | 305 | #define SCI_XMAX_MASK (0xffff<<0) |
306 | #define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) | 306 | #define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) |
307 | #define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) | 307 | #define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) |
308 | #define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) | 308 | #define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) |
309 | #define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) | 309 | #define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) |
310 | #define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) | 310 | #define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) |
311 | #define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) | 311 | #define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) |
312 | #define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) | 312 | #define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) |
313 | #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) | 313 | #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) |
314 | #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) | 314 | #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) |
315 | #define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) | 315 | #define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) |
316 | #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) | 316 | #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) |
317 | #define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) | 317 | #define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) |
318 | #define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) | 318 | #define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) |
319 | #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) | 319 | #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) |
320 | #define BLT_DEPTH_8 (0<<24) | 320 | #define BLT_DEPTH_8 (0<<24) |
321 | #define BLT_DEPTH_16_565 (1<<24) | 321 | #define BLT_DEPTH_16_565 (1<<24) |
322 | #define BLT_DEPTH_16_1555 (2<<24) | 322 | #define BLT_DEPTH_16_1555 (2<<24) |
323 | #define BLT_DEPTH_32 (3<<24) | 323 | #define BLT_DEPTH_32 (3<<24) |
324 | #define BLT_ROP_GXCOPY (0xcc<<16) | 324 | #define BLT_ROP_GXCOPY (0xcc<<16) |
325 | #define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ | 325 | #define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ |
326 | #define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ | 326 | #define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ |
327 | #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) | 327 | #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) |
328 | #define ASYNC_FLIP (1<<22) | 328 | #define ASYNC_FLIP (1<<22) |
329 | #define DISPLAY_PLANE_A (0<<20) | 329 | #define DISPLAY_PLANE_A (0<<20) |
330 | #define DISPLAY_PLANE_B (1<<20) | 330 | #define DISPLAY_PLANE_B (1<<20) |
331 | #define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) | 331 | #define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) |
332 | #define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ | 332 | #define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ |
333 | #define PIPE_CONTROL_CS_STALL (1<<20) | 333 | #define PIPE_CONTROL_CS_STALL (1<<20) |
334 | #define PIPE_CONTROL_TLB_INVALIDATE (1<<18) | 334 | #define PIPE_CONTROL_TLB_INVALIDATE (1<<18) |
335 | #define PIPE_CONTROL_QW_WRITE (1<<14) | 335 | #define PIPE_CONTROL_QW_WRITE (1<<14) |
336 | #define PIPE_CONTROL_DEPTH_STALL (1<<13) | 336 | #define PIPE_CONTROL_DEPTH_STALL (1<<13) |
337 | #define PIPE_CONTROL_WRITE_FLUSH (1<<12) | 337 | #define PIPE_CONTROL_WRITE_FLUSH (1<<12) |
338 | #define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ | 338 | #define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ |
339 | #define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */ | 339 | #define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */ |
340 | #define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ | 340 | #define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ |
341 | #define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) | 341 | #define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) |
342 | #define PIPE_CONTROL_NOTIFY (1<<8) | 342 | #define PIPE_CONTROL_NOTIFY (1<<8) |
343 | #define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) | 343 | #define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) |
344 | #define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) | 344 | #define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) |
345 | #define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) | 345 | #define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) |
346 | #define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1) | 346 | #define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1) |
347 | #define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0) | 347 | #define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0) |
348 | #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ | 348 | #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ |
349 | 349 | ||
350 | 350 | ||
351 | /* | 351 | /* |
352 | * Reset registers | 352 | * Reset registers |
353 | */ | 353 | */ |
354 | #define DEBUG_RESET_I830 0x6070 | 354 | #define DEBUG_RESET_I830 0x6070 |
355 | #define DEBUG_RESET_FULL (1<<7) | 355 | #define DEBUG_RESET_FULL (1<<7) |
356 | #define DEBUG_RESET_RENDER (1<<8) | 356 | #define DEBUG_RESET_RENDER (1<<8) |
357 | #define DEBUG_RESET_DISPLAY (1<<9) | 357 | #define DEBUG_RESET_DISPLAY (1<<9) |
358 | 358 | ||
359 | /* | 359 | /* |
360 | * IOSF sideband | 360 | * IOSF sideband |
361 | */ | 361 | */ |
362 | #define VLV_IOSF_DOORBELL_REQ (VLV_DISPLAY_BASE + 0x2100) | 362 | #define VLV_IOSF_DOORBELL_REQ (VLV_DISPLAY_BASE + 0x2100) |
363 | #define IOSF_DEVFN_SHIFT 24 | 363 | #define IOSF_DEVFN_SHIFT 24 |
364 | #define IOSF_OPCODE_SHIFT 16 | 364 | #define IOSF_OPCODE_SHIFT 16 |
365 | #define IOSF_PORT_SHIFT 8 | 365 | #define IOSF_PORT_SHIFT 8 |
366 | #define IOSF_BYTE_ENABLES_SHIFT 4 | 366 | #define IOSF_BYTE_ENABLES_SHIFT 4 |
367 | #define IOSF_BAR_SHIFT 1 | 367 | #define IOSF_BAR_SHIFT 1 |
368 | #define IOSF_SB_BUSY (1<<0) | 368 | #define IOSF_SB_BUSY (1<<0) |
369 | #define IOSF_PORT_BUNIT 0x3 | 369 | #define IOSF_PORT_BUNIT 0x3 |
370 | #define IOSF_PORT_PUNIT 0x4 | 370 | #define IOSF_PORT_PUNIT 0x4 |
371 | #define IOSF_PORT_NC 0x11 | 371 | #define IOSF_PORT_NC 0x11 |
372 | #define IOSF_PORT_DPIO 0x12 | 372 | #define IOSF_PORT_DPIO 0x12 |
373 | #define IOSF_PORT_GPIO_NC 0x13 | 373 | #define IOSF_PORT_GPIO_NC 0x13 |
374 | #define IOSF_PORT_CCK 0x14 | 374 | #define IOSF_PORT_CCK 0x14 |
375 | #define IOSF_PORT_CCU 0xA9 | 375 | #define IOSF_PORT_CCU 0xA9 |
376 | #define IOSF_PORT_GPS_CORE 0x48 | 376 | #define IOSF_PORT_GPS_CORE 0x48 |
377 | #define IOSF_PORT_FLISDSI 0x1B | 377 | #define IOSF_PORT_FLISDSI 0x1B |
378 | #define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104) | 378 | #define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104) |
379 | #define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108) | 379 | #define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108) |
380 | 380 | ||
381 | /* See configdb bunit SB addr map */ | 381 | /* See configdb bunit SB addr map */ |
382 | #define BUNIT_REG_BISOC 0x11 | 382 | #define BUNIT_REG_BISOC 0x11 |
383 | 383 | ||
384 | #define PUNIT_OPCODE_REG_READ 6 | 384 | #define PUNIT_OPCODE_REG_READ 6 |
385 | #define PUNIT_OPCODE_REG_WRITE 7 | 385 | #define PUNIT_OPCODE_REG_WRITE 7 |
386 | 386 | ||
387 | #define PUNIT_REG_DSPFREQ 0x36 | 387 | #define PUNIT_REG_DSPFREQ 0x36 |
388 | #define DSPFREQSTAT_SHIFT 30 | 388 | #define DSPFREQSTAT_SHIFT 30 |
389 | #define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) | 389 | #define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) |
390 | #define DSPFREQGUAR_SHIFT 14 | 390 | #define DSPFREQGUAR_SHIFT 14 |
391 | #define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) | 391 | #define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) |
392 | 392 | ||
393 | /* See the PUNIT HAS v0.8 for the below bits */ | 393 | /* See the PUNIT HAS v0.8 for the below bits */ |
394 | enum punit_power_well { | 394 | enum punit_power_well { |
395 | PUNIT_POWER_WELL_RENDER = 0, | 395 | PUNIT_POWER_WELL_RENDER = 0, |
396 | PUNIT_POWER_WELL_MEDIA = 1, | 396 | PUNIT_POWER_WELL_MEDIA = 1, |
397 | PUNIT_POWER_WELL_DISP2D = 3, | 397 | PUNIT_POWER_WELL_DISP2D = 3, |
398 | PUNIT_POWER_WELL_DPIO_CMN_BC = 5, | 398 | PUNIT_POWER_WELL_DPIO_CMN_BC = 5, |
399 | PUNIT_POWER_WELL_DPIO_TX_B_LANES_01 = 6, | 399 | PUNIT_POWER_WELL_DPIO_TX_B_LANES_01 = 6, |
400 | PUNIT_POWER_WELL_DPIO_TX_B_LANES_23 = 7, | 400 | PUNIT_POWER_WELL_DPIO_TX_B_LANES_23 = 7, |
401 | PUNIT_POWER_WELL_DPIO_TX_C_LANES_01 = 8, | 401 | PUNIT_POWER_WELL_DPIO_TX_C_LANES_01 = 8, |
402 | PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9, | 402 | PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9, |
403 | PUNIT_POWER_WELL_DPIO_RX0 = 10, | 403 | PUNIT_POWER_WELL_DPIO_RX0 = 10, |
404 | PUNIT_POWER_WELL_DPIO_RX1 = 11, | 404 | PUNIT_POWER_WELL_DPIO_RX1 = 11, |
405 | 405 | ||
406 | PUNIT_POWER_WELL_NUM, | 406 | PUNIT_POWER_WELL_NUM, |
407 | }; | 407 | }; |
408 | 408 | ||
/*
 * Punit power gate control/status.  Each power well from
 * enum punit_power_well occupies two bits; the four possible 2-bit
 * encodings (power on / clock gated / reset / power gated) follow.
 */
#define PUNIT_REG_PWRGT_CTRL			0x60
#define PUNIT_REG_PWRGT_STATUS			0x61
#define   PUNIT_PWRGT_MASK(power_well)		(3 << ((power_well) * 2))
#define   PUNIT_PWRGT_PWR_ON(power_well)	(0 << ((power_well) * 2))
#define   PUNIT_PWRGT_CLK_GATE(power_well)	(1 << ((power_well) * 2))
#define   PUNIT_PWRGT_RESET(power_well)		(2 << ((power_well) * 2))
#define   PUNIT_PWRGT_PWR_GATE(power_well)	(3 << ((power_well) * 2))

/* GPU frequency request/status registers in the Punit */
#define PUNIT_REG_GPU_LFM			0xd3
#define PUNIT_REG_GPU_FREQ_REQ			0xd4
#define PUNIT_REG_GPU_FREQ_STS			0xd8
#define   GENFREQSTATUS				(1 << 0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ		0xdc

/* Fuse readout; the bit ranges refer to the full fuse word */
#define PUNIT_FUSE_BUS2				0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1				0xf5 /* bits 55:48 */

/* Graphics frequency fuse fields (NC unit offsets) */
#define IOSF_NC_FB_GFX_FREQ_FUSE		0x1c
#define   FB_GFX_MAX_FREQ_FUSE_SHIFT		3
#define   FB_GFX_MAX_FREQ_FUSE_MASK		0x000007f8
#define   FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT	11
#define   FB_GFX_FGUARANTEED_FREQ_FUSE_MASK	0x0007f800
#define IOSF_NC_FB_GFX_FMAX_FUSE_HI		0x34
#define   FB_FMAX_VMIN_FREQ_HI_MASK		0x00000007
#define IOSF_NC_FB_GFX_FMAX_FUSE_LO		0x30
#define   FB_FMAX_VMIN_FREQ_LO_SHIFT		27
#define   FB_FMAX_VMIN_FREQ_LO_MASK		0xf8000000
436 | 436 | ||
/* vlv2 north clock (CCK) registers */
#define CCK_FUSE_REG				0x8
#define   CCK_FUSE_HPLL_FREQ_MASK		0x3
#define CCK_REG_DSI_PLL_FUSE			0x44
/* DSI PLL control: enable/gating bits, post divider and mux selection */
#define CCK_REG_DSI_PLL_CONTROL			0x48
#define   DSI_PLL_VCO_EN			(1 << 31)
#define   DSI_PLL_LDO_GATE			(1 << 30)
#define   DSI_PLL_P1_POST_DIV_SHIFT		17
#define   DSI_PLL_P1_POST_DIV_MASK		(0x1ff << 17)
#define   DSI_PLL_P2_MUX_DSI0_DIV2		(1 << 13)
#define   DSI_PLL_P3_MUX_DSI1_DIV2		(1 << 12)
#define   DSI_PLL_MUX_MASK			(3 << 9) /* covers both per-DSI mux bits below */
#define   DSI_PLL_MUX_DSI0_DSIPLL		(0 << 10)
#define   DSI_PLL_MUX_DSI0_CCK			(1 << 10)
#define   DSI_PLL_MUX_DSI1_DSIPLL		(0 << 9)
#define   DSI_PLL_MUX_DSI1_CCK			(1 << 9)
#define   DSI_PLL_CLK_GATE_MASK			(0xf << 5) /* covers the four gate bits below */
#define   DSI_PLL_CLK_GATE_DSI0_DSIPLL		(1 << 8)
#define   DSI_PLL_CLK_GATE_DSI1_DSIPLL		(1 << 7)
#define   DSI_PLL_CLK_GATE_DSI0_CCK		(1 << 6)
#define   DSI_PLL_CLK_GATE_DSI1_CCK		(1 << 5)
#define   DSI_PLL_LOCK				(1 << 0)
/* DSI PLL divider: fractional and integer (N1/M1) divider fields */
#define CCK_REG_DSI_PLL_DIVIDER			0x4c
#define   DSI_PLL_LFSR				(1 << 31)
#define   DSI_PLL_FRACTION_EN			(1 << 30)
#define   DSI_PLL_FRAC_COUNTER_SHIFT		27
#define   DSI_PLL_FRAC_COUNTER_MASK		(7 << 27)
#define   DSI_PLL_USYNC_CNT_SHIFT		18
#define   DSI_PLL_USYNC_CNT_MASK		(0x1ff << 18)
#define   DSI_PLL_N1_DIV_SHIFT			16
#define   DSI_PLL_N1_DIV_MASK			(3 << 16)
#define   DSI_PLL_M1_DIV_SHIFT			0
#define   DSI_PLL_M1_DIV_MASK			(0x1ff << 0)
#define CCK_DISPLAY_CLOCK_CONTROL		0x6b
471 | 471 | ||
/*
 * DPIO - a special bus for various display related registers to hide behind
 *
 * DPIO is VLV only.
 *
 * Note: digital port B is DDI0, digital port C is DDI1
 */
#define DPIO_DEVFN			0
#define DPIO_OPCODE_REG_WRITE		1
#define DPIO_OPCODE_REG_READ		0

/* DPIO control register, relative to the display MMIO base */
#define DPIO_CTL			(VLV_DISPLAY_BASE + 0x2110)
#define   DPIO_MODSEL1			(1 << 3) /* if ref clk b == 27 */
#define   DPIO_MODSEL0			(1 << 2) /* if ref clk a == 27 */
#define   DPIO_SFR_BYPASS		(1 << 1)
#define   DPIO_CMNRST			(1 << 0)

/* Two pipes share each DPIO PHY (pipe >> 1); the IOSF port number for a
 * PHY is looked up from dev_priv, so DPIO_PHY_IOSF_PORT() needs a local
 * "dev_priv" in scope at the use site. */
#define DPIO_PHY(pipe)			((pipe) >> 1)
#define DPIO_PHY_IOSF_PORT(phy)		(dev_priv->dpio_phy_iosf_port[phy])
491 | 491 | ||
/*
 * Per pipe/PLL DPIO regs.  Each register exists once per channel; the
 * VLV_PLL_DWn(ch) wrappers use _PIPE() to pick the CH0/CH1 offset.
 */
#define _VLV_PLL_DW3_CH0		0x800c
#define   DPIO_POST_DIV_SHIFT		(28) /* 3 bits */
#define   DPIO_POST_DIV_DAC		0
#define   DPIO_POST_DIV_HDMIDP		1 /* DAC 225-400M rate */
#define   DPIO_POST_DIV_LVDS1		2
#define   DPIO_POST_DIV_LVDS2		3
#define   DPIO_K_SHIFT			(24) /* 4 bits */
#define   DPIO_P1_SHIFT			(21) /* 3 bits */
#define   DPIO_P2_SHIFT			(16) /* 5 bits */
#define   DPIO_N_SHIFT			(12) /* 4 bits */
#define   DPIO_ENABLE_CALIBRATION	(1 << 11)
#define   DPIO_M1DIV_SHIFT		(8) /* 3 bits */
#define   DPIO_M2DIV_MASK		0xff
#define _VLV_PLL_DW3_CH1		0x802c
#define VLV_PLL_DW3(ch) _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)

#define _VLV_PLL_DW5_CH0		0x8014
#define   DPIO_REFSEL_OVERRIDE		27
#define   DPIO_PLL_MODESEL_SHIFT	24 /* 3 bits */
#define   DPIO_BIAS_CURRENT_CTL_SHIFT	21 /* 3 bits, always 0x7 */
#define   DPIO_PLL_REFCLK_SEL_SHIFT	16 /* 2 bits */
#define   DPIO_PLL_REFCLK_SEL_MASK	3
#define   DPIO_DRIVER_CTL_SHIFT		12 /* always set to 0x8 */
#define   DPIO_CLK_BIAS_CTL_SHIFT	8 /* always set to 0x5 */
#define _VLV_PLL_DW5_CH1		0x8034
#define VLV_PLL_DW5(ch) _PIPE(ch, _VLV_PLL_DW5_CH0, _VLV_PLL_DW5_CH1)

#define _VLV_PLL_DW7_CH0		0x801c
#define _VLV_PLL_DW7_CH1		0x803c
#define VLV_PLL_DW7(ch) _PIPE(ch, _VLV_PLL_DW7_CH0, _VLV_PLL_DW7_CH1)

#define _VLV_PLL_DW8_CH0		0x8040
#define _VLV_PLL_DW8_CH1		0x8060
#define VLV_PLL_DW8(ch) _PIPE(ch, _VLV_PLL_DW8_CH0, _VLV_PLL_DW8_CH1)

/* BCAST: name suggests a broadcast address reaching both channels --
 * NOTE(review): confirm against the DPIO spec */
#define VLV_PLL_DW9_BCAST		0xc044
#define _VLV_PLL_DW9_CH0		0x8044
#define _VLV_PLL_DW9_CH1		0x8064
#define VLV_PLL_DW9(ch) _PIPE(ch, _VLV_PLL_DW9_CH0, _VLV_PLL_DW9_CH1)

#define _VLV_PLL_DW10_CH0		0x8048
#define _VLV_PLL_DW10_CH1		0x8068
#define VLV_PLL_DW10(ch) _PIPE(ch, _VLV_PLL_DW10_CH0, _VLV_PLL_DW10_CH1)

#define _VLV_PLL_DW11_CH0		0x804c
#define _VLV_PLL_DW11_CH1		0x806c
#define VLV_PLL_DW11(ch) _PIPE(ch, _VLV_PLL_DW11_CH0, _VLV_PLL_DW11_CH1)

/* Spec for ref block start counts at DW10 */
#define VLV_REF_DW13			0x80ac

#define VLV_CMN_DW0			0x8100
547 | 547 | ||
/*
 * Per DDI channel DPIO regs.  The VLV_PCS_DWn(ch)/VLV_TX_DWn(ch)
 * wrappers use _PORT() to pick the CH0/CH1 register offset.
 */

#define _VLV_PCS_DW0_CH0		0x8200
#define _VLV_PCS_DW0_CH1		0x8400
#define   DPIO_PCS_TX_LANE2_RESET	(1 << 16)
#define   DPIO_PCS_TX_LANE1_RESET	(1 << 7)
#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)

#define _VLV_PCS_DW1_CH0		0x8204
#define _VLV_PCS_DW1_CH1		0x8404
#define   DPIO_PCS_CLK_CRI_RXEB_EIOS_EN		(1 << 22)
#define   DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN	(1 << 21)
#define   DPIO_PCS_CLK_DATAWIDTH_SHIFT		(6)
#define   DPIO_PCS_CLK_SOFT_RESET		(1 << 5)
#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)

#define _VLV_PCS_DW8_CH0		0x8220
#define _VLV_PCS_DW8_CH1		0x8420
#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)

/* Lane-pair specific variants of PCS DW8 (01 and 23 per the names) */
#define _VLV_PCS01_DW8_CH0		0x0220
#define _VLV_PCS23_DW8_CH0		0x0420
#define _VLV_PCS01_DW8_CH1		0x2620
#define _VLV_PCS23_DW8_CH1		0x2820
#define VLV_PCS01_DW8(port) _PORT(port, _VLV_PCS01_DW8_CH0, _VLV_PCS01_DW8_CH1)
#define VLV_PCS23_DW8(port) _PORT(port, _VLV_PCS23_DW8_CH0, _VLV_PCS23_DW8_CH1)

#define _VLV_PCS_DW9_CH0		0x8224
#define _VLV_PCS_DW9_CH1		0x8424
#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)

#define _VLV_PCS_DW11_CH0		0x822c
#define _VLV_PCS_DW11_CH1		0x842c
#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)

#define _VLV_PCS_DW12_CH0		0x8230
#define _VLV_PCS_DW12_CH1		0x8430
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)

#define _VLV_PCS_DW14_CH0		0x8238
#define _VLV_PCS_DW14_CH1		0x8438
#define VLV_PCS_DW14(ch) _PORT(ch, _VLV_PCS_DW14_CH0, _VLV_PCS_DW14_CH1)

#define _VLV_PCS_DW23_CH0		0x825c
#define _VLV_PCS_DW23_CH1		0x845c
#define VLV_PCS_DW23(ch) _PORT(ch, _VLV_PCS_DW23_CH0, _VLV_PCS_DW23_CH1)

#define _VLV_TX_DW2_CH0			0x8288
#define _VLV_TX_DW2_CH1			0x8488
#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)

#define _VLV_TX_DW3_CH0			0x828c
#define _VLV_TX_DW3_CH1			0x848c
#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)

#define _VLV_TX_DW4_CH0			0x8290
#define _VLV_TX_DW4_CH1			0x8490
#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)

/* TX3: name suggests the third TX lane's DW4 -- NOTE(review): confirm */
#define _VLV_TX3_DW4_CH0		0x690
#define _VLV_TX3_DW4_CH1		0x2a90
#define VLV_TX3_DW4(ch) _PORT(ch, _VLV_TX3_DW4_CH0, _VLV_TX3_DW4_CH1)

#define _VLV_TX_DW5_CH0			0x8294
#define _VLV_TX_DW5_CH1			0x8494
#define   DPIO_TX_OCALINIT_EN		(1 << 31)
#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)

#define _VLV_TX_DW11_CH0		0x82ac
#define _VLV_TX_DW11_CH1		0x84ac
#define VLV_TX_DW11(ch) _PORT(ch, _VLV_TX_DW11_CH0, _VLV_TX_DW11_CH1)

#define _VLV_TX_DW14_CH0		0x82b8
#define _VLV_TX_DW14_CH1		0x84b8
#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
625 | 625 | ||
/*
 * Fence registers.  Layouts differ per generation; the SIZE_BITS()
 * macros encode log2 of the fence size (in 512K chunks on 830, 1M
 * chunks on 915 -- see the >> 19 / >> 20) into bits 8+.
 */
#define FENCE_REG_830_0			0x2000
#define FENCE_REG_945_8			0x3000
#define   I830_FENCE_START_MASK		0x07f80000
#define   I830_FENCE_TILING_Y_SHIFT	12
#define   I830_FENCE_SIZE_BITS(size)	((ffs((size) >> 19) - 1) << 8)
#define   I830_FENCE_PITCH_SHIFT	4
#define   I830_FENCE_REG_VALID		(1 << 0)
#define   I915_FENCE_MAX_PITCH_VAL	4
#define   I830_FENCE_MAX_PITCH_VAL	6
#define   I830_FENCE_MAX_SIZE_VAL	(1 << 8)

#define I915_FENCE_START_MASK		0x0ff00000
#define I915_FENCE_SIZE_BITS(size)	((ffs((size) >> 20) - 1) << 8)

#define FENCE_REG_965_0			0x03000
#define   I965_FENCE_PITCH_SHIFT	2
#define   I965_FENCE_TILING_Y_SHIFT	1
#define   I965_FENCE_REG_VALID		(1 << 0)
#define   I965_FENCE_MAX_PITCH_VAL	0x0400

#define FENCE_REG_SANDYBRIDGE_0		0x100000
/* NOTE: a shift of 32 is only valid on a 64-bit value (UB on u32) --
 * SNB fences are presumably written as 64-bit quantities; confirm at
 * the use site. */
#define   SANDYBRIDGE_FENCE_PITCH_SHIFT	32
#define   GEN7_FENCE_MAX_PITCH_VAL	0x0800
652 | 652 | ||
/* control register for cpu gtt access */
#define TILECTL				0x101000
#define   TILECTL_SWZCTL		(1 << 0)
#define   TILECTL_TLB_PREFETCH_DIS	(1 << 2)
#define   TILECTL_BACKSNOOP_DIS		(1 << 3)
658 | 658 | ||
/*
 * Instruction and interrupt control regs
 */
#define PGTBL_ER		0x02024
/* Per-engine ring MMIO bases; per-ring registers are the RING_*(base)
 * macros below, taking one of these bases. */
#define RENDER_RING_BASE	0x02000
#define BSD_RING_BASE		0x04000
#define GEN6_BSD_RING_BASE	0x12000
#define VEBOX_RING_BASE		0x1a000
#define BLT_RING_BASE		0x22000
#define RING_TAIL(base)		((base)+0x30)
#define RING_HEAD(base)		((base)+0x34)
#define RING_START(base)	((base)+0x38)
#define RING_CTL(base)		((base)+0x3c)
/* gen6 inter-ring semaphore sync registers, three per ring; the named
 * GEN6_*SYNC aliases below pair up source/target engines. */
#define RING_SYNC_0(base)	((base)+0x40)
#define RING_SYNC_1(base)	((base)+0x44)
#define RING_SYNC_2(base)	((base)+0x48)
#define GEN6_RVSYNC	(RING_SYNC_0(RENDER_RING_BASE))
#define GEN6_RBSYNC	(RING_SYNC_1(RENDER_RING_BASE))
#define GEN6_RVESYNC	(RING_SYNC_2(RENDER_RING_BASE))
#define GEN6_VBSYNC	(RING_SYNC_0(GEN6_BSD_RING_BASE))
#define GEN6_VRSYNC	(RING_SYNC_1(GEN6_BSD_RING_BASE))
#define GEN6_VVESYNC	(RING_SYNC_2(GEN6_BSD_RING_BASE))
#define GEN6_BRSYNC	(RING_SYNC_0(BLT_RING_BASE))
#define GEN6_BVSYNC	(RING_SYNC_1(BLT_RING_BASE))
#define GEN6_BVESYNC	(RING_SYNC_2(BLT_RING_BASE))
#define GEN6_VEBSYNC	(RING_SYNC_0(VEBOX_RING_BASE))
#define GEN6_VERSYNC	(RING_SYNC_1(VEBOX_RING_BASE))
#define GEN6_VEVSYNC	(RING_SYNC_2(VEBOX_RING_BASE))
#define GEN6_NOSYNC	0
#define RING_MAX_IDLE(base)	((base)+0x54)
#define RING_HWS_PGA(base)	((base)+0x80)
#define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
#define ARB_MODE		0x04030
#define   ARB_MODE_SWIZZLE_SNB	(1<<4)
#define   ARB_MODE_SWIZZLE_IVB	(1<<5)
#define GAMTARBMODE		0x04a08
#define   ARB_MODE_BWGTLB_DISABLE (1<<9)
#define   ARB_MODE_SWIZZLE_BDW	(1<<1)
#define RENDER_HWS_PGA_GEN7	(0x04080)
#define RING_FAULT_REG(ring)	(0x4094 + 0x100*(ring)->id)
#define   RING_FAULT_GTTSEL_MASK (1<<11)
/* Fault register field extractors.  The argument is parenthesized so
 * that expression arguments (e.g. "a | b") decode correctly; without
 * the parens, | binds looser than >> and the extraction is wrong. */
#define   RING_FAULT_SRCID(x)	(((x) >> 3) & 0xff)
#define   RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
#define   RING_FAULT_VALID	(1<<0)
#define DONE_REG		0x40b0
#define GEN8_PRIVATE_PAT	0x40e0
#define BSD_HWS_PGA_GEN7	(0x04180)
#define BLT_HWS_PGA_GEN7	(0x04280)
#define VEBOX_HWS_PGA_GEN7	(0x04380)
#define RING_ACTHD(base)	((base)+0x74)
#define RING_ACTHD_UDW(base)	((base)+0x5c)
#define RING_NOPID(base)	((base)+0x94)
#define RING_IMR(base)		((base)+0xa8)
#define RING_TIMESTAMP(base)	((base)+0x358)
/* Fields of the ring HEAD/CTL registers (per the PRBx comments below) */
#define   TAIL_ADDR		0x001FFFF8
#define   HEAD_WRAP_COUNT	0xFFE00000
#define   HEAD_WRAP_ONE		0x00200000
#define   HEAD_ADDR		0x001FFFFC
#define   RING_NR_PAGES		0x001FF000
#define   RING_REPORT_MASK	0x00000006
#define   RING_REPORT_64K	0x00000002
#define   RING_REPORT_128K	0x00000004
#define   RING_NO_REPORT	0x00000000
#define   RING_VALID_MASK	0x00000001
#define   RING_VALID		0x00000001
#define   RING_INVALID		0x00000000
#define   RING_WAIT_I8XX	(1<<0) /* gen2, PRBx_HEAD */
#define   RING_WAIT		(1<<11) /* gen3+, PRBx_CTL */
#define   RING_WAIT_SEMAPHORE	(1<<10) /* gen6+ */
#if 0
/* Legacy fixed-offset ring register names; superseded by RING_*(base).
 * Kept (compiled out) for reference. */
#define PRB0_TAIL	0x02030
#define PRB0_HEAD	0x02034
#define PRB0_START	0x02038
#define PRB0_CTL	0x0203c
#define PRB1_TAIL	0x02040 /* 915+ only */
#define PRB1_HEAD	0x02044 /* 915+ only */
#define PRB1_START	0x02048 /* 915+ only */
#define PRB1_CTL	0x0204c /* 915+ only */
#endif
#define IPEIR_I965	0x02064
#define IPEHR_I965	0x02068
#define INSTDONE_I965	0x0206c
#define GEN7_INSTDONE_1		0x0206c
#define GEN7_SC_INSTDONE	0x07100
#define GEN7_SAMPLER_INSTDONE	0x0e160
#define GEN7_ROW_INSTDONE	0x0e164
#define I915_NUM_INSTDONE_REG	4
#define RING_IPEIR(base)	((base)+0x64)
#define RING_IPEHR(base)	((base)+0x68)
#define RING_INSTDONE(base)	((base)+0x6c)
#define RING_INSTPS(base)	((base)+0x70)
#define RING_DMA_FADD(base)	((base)+0x78)
#define RING_INSTPM(base)	((base)+0xc0)
#define RING_MI_MODE(base)	((base)+0x9c)
#define INSTPS		0x02070 /* 965+ only */
#define INSTDONE1	0x0207c /* 965+ only */
#define ACTHD_I965	0x02074
#define HWS_PGA		0x02080
#define HWS_ADDRESS_MASK	0xfffff000
#define HWS_START_ADDRESS_SHIFT	4
#define PWRCTXA		0x2088 /* 965GM+ only */
#define   PWRCTX_EN	(1<<0)
#define IPEIR		0x02088
#define IPEHR		0x0208c
#define INSTDONE	0x02090
#define NOPID		0x02094
#define HWSTAM		0x02098
#define DMA_FADD_I8XX	0x020d0
#define RING_BBSTATE(base)	((base)+0x110)
#define RING_BBADDR(base)	((base)+0x140)
#define RING_BBADDR_UDW(base)	((base)+0x168) /* gen8+ */
770 | 770 | ||
#define ERROR_GEN6	0x040a0
/* gen7 error interrupt register; the per-pipe CRC-done and FIFO-underrun
 * bits step by 3 per pipe, as the parameterized macros encode. */
#define GEN7_ERR_INT	0x44040
#define   ERR_INT_POISON		(1<<31)
#define   ERR_INT_MMIO_UNCLAIMED	(1<<13)
#define   ERR_INT_PIPE_CRC_DONE_C	(1<<8)
#define   ERR_INT_FIFO_UNDERRUN_C	(1<<6)
#define   ERR_INT_PIPE_CRC_DONE_B	(1<<5)
#define   ERR_INT_FIFO_UNDERRUN_B	(1<<3)
#define   ERR_INT_PIPE_CRC_DONE_A	(1<<2)
/* pipe is parenthesized so expression arguments expand correctly */
#define   ERR_INT_PIPE_CRC_DONE(pipe)	(1<<(2 + (pipe)*3))
#define   ERR_INT_FIFO_UNDERRUN_A	(1<<0)
#define   ERR_INT_FIFO_UNDERRUN(pipe)	(1<<((pipe)*3))

#define FPGA_DBG		0x42300
#define   FPGA_DBG_RM_NOCLAIM	(1<<31)

#define DERRMR		0x44050
/* Note that HBLANK events are reserved on bdw+ */
#define   DERRMR_PIPEA_SCANLINE		(1<<0)
#define   DERRMR_PIPEA_PRI_FLIP_DONE	(1<<1)
#define   DERRMR_PIPEA_SPR_FLIP_DONE	(1<<2)
#define   DERRMR_PIPEA_VBLANK		(1<<3)
#define   DERRMR_PIPEA_HBLANK		(1<<5)
#define   DERRMR_PIPEB_SCANLINE		(1<<8)
#define   DERRMR_PIPEB_PRI_FLIP_DONE	(1<<9)
#define   DERRMR_PIPEB_SPR_FLIP_DONE	(1<<10)
#define   DERRMR_PIPEB_VBLANK		(1<<11)
#define   DERRMR_PIPEB_HBLANK		(1<<13)
/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
#define   DERRMR_PIPEC_SCANLINE		(1<<14)
#define   DERRMR_PIPEC_PRI_FLIP_DONE	(1<<15)
#define   DERRMR_PIPEC_SPR_FLIP_DONE	(1<<20)
#define   DERRMR_PIPEC_VBLANK		(1<<21)
#define   DERRMR_PIPEC_HBLANK		(1<<22)
805 | 805 | ||
806 | 806 | ||
807 | /* GM45+ chicken bits -- debug workaround bits that may be required | 807 | /* GM45+ chicken bits -- debug workaround bits that may be required |
808 | * for various sorts of correct behavior. The top 16 bits of each are | 808 | * for various sorts of correct behavior. The top 16 bits of each are |
809 | * the enables for writing to the corresponding low bit. | 809 | * the enables for writing to the corresponding low bit. |
810 | */ | 810 | */ |
811 | #define _3D_CHICKEN 0x02084 | 811 | #define _3D_CHICKEN 0x02084 |
812 | #define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) | 812 | #define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) |
813 | #define _3D_CHICKEN2 0x0208c | 813 | #define _3D_CHICKEN2 0x0208c |
814 | /* Disables pipelining of read flushes past the SF-WIZ interface. | 814 | /* Disables pipelining of read flushes past the SF-WIZ interface. |
815 | * Required on all Ironlake steppings according to the B-Spec, but the | 815 | * Required on all Ironlake steppings according to the B-Spec, but the |
816 | * particular danger of not doing so is not specified. | 816 | * particular danger of not doing so is not specified. |
817 | */ | 817 | */ |
818 | # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) | 818 | # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) |
819 | #define _3D_CHICKEN3 0x02090 | 819 | #define _3D_CHICKEN3 0x02090 |
820 | #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) | 820 | #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) |
821 | #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) | 821 | #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) |
822 | #define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */ | 822 | #define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */ |
823 | #define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */ | 823 | #define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */ |
824 | 824 | ||
825 | #define MI_MODE 0x0209c | 825 | #define MI_MODE 0x0209c |
826 | # define VS_TIMER_DISPATCH (1 << 6) | 826 | # define VS_TIMER_DISPATCH (1 << 6) |
827 | # define MI_FLUSH_ENABLE (1 << 12) | 827 | # define MI_FLUSH_ENABLE (1 << 12) |
828 | # define ASYNC_FLIP_PERF_DISABLE (1 << 14) | 828 | # define ASYNC_FLIP_PERF_DISABLE (1 << 14) |
829 | # define MODE_IDLE (1 << 9) | 829 | # define MODE_IDLE (1 << 9) |
830 | # define STOP_RING (1 << 8) | ||
830 | 831 | ||
831 | #define GEN6_GT_MODE 0x20d0 | 832 | #define GEN6_GT_MODE 0x20d0 |
832 | #define GEN7_GT_MODE 0x7008 | 833 | #define GEN7_GT_MODE 0x7008 |
833 | #define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7)) | 834 | #define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7)) |
834 | #define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0) | 835 | #define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0) |
835 | #define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1) | 836 | #define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1) |
836 | #define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0) | 837 | #define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0) |
837 | #define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16) | 838 | #define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16) |
838 | #define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) | 839 | #define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) |
839 | 840 | ||
840 | #define GFX_MODE 0x02520 | 841 | #define GFX_MODE 0x02520 |
841 | #define GFX_MODE_GEN7 0x0229c | 842 | #define GFX_MODE_GEN7 0x0229c |
842 | #define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) | 843 | #define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) |
843 | #define GFX_RUN_LIST_ENABLE (1<<15) | 844 | #define GFX_RUN_LIST_ENABLE (1<<15) |
844 | #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) | 845 | #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) |
845 | #define GFX_SURFACE_FAULT_ENABLE (1<<12) | 846 | #define GFX_SURFACE_FAULT_ENABLE (1<<12) |
846 | #define GFX_REPLAY_MODE (1<<11) | 847 | #define GFX_REPLAY_MODE (1<<11) |
847 | #define GFX_PSMI_GRANULARITY (1<<10) | 848 | #define GFX_PSMI_GRANULARITY (1<<10) |
848 | #define GFX_PPGTT_ENABLE (1<<9) | 849 | #define GFX_PPGTT_ENABLE (1<<9) |
849 | 850 | ||
850 | #define VLV_DISPLAY_BASE 0x180000 | 851 | #define VLV_DISPLAY_BASE 0x180000 |
851 | 852 | ||
852 | #define SCPD0 0x0209c /* 915+ only */ | 853 | #define SCPD0 0x0209c /* 915+ only */ |
853 | #define IER 0x020a0 | 854 | #define IER 0x020a0 |
854 | #define IIR 0x020a4 | 855 | #define IIR 0x020a4 |
855 | #define IMR 0x020a8 | 856 | #define IMR 0x020a8 |
856 | #define ISR 0x020ac | 857 | #define ISR 0x020ac |
857 | #define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060) | 858 | #define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060) |
858 | #define GCFG_DIS (1<<8) | 859 | #define GCFG_DIS (1<<8) |
859 | #define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084) | 860 | #define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084) |
860 | #define VLV_IER (VLV_DISPLAY_BASE + 0x20a0) | 861 | #define VLV_IER (VLV_DISPLAY_BASE + 0x20a0) |
861 | #define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4) | 862 | #define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4) |
862 | #define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8) | 863 | #define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8) |
863 | #define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac) | 864 | #define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac) |
864 | #define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120) | 865 | #define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120) |
865 | #define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */ | 866 | #define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */ |
866 | #define EIR 0x020b0 | 867 | #define EIR 0x020b0 |
867 | #define EMR 0x020b4 | 868 | #define EMR 0x020b4 |
868 | #define ESR 0x020b8 | 869 | #define ESR 0x020b8 |
869 | #define GM45_ERROR_PAGE_TABLE (1<<5) | 870 | #define GM45_ERROR_PAGE_TABLE (1<<5) |
870 | #define GM45_ERROR_MEM_PRIV (1<<4) | 871 | #define GM45_ERROR_MEM_PRIV (1<<4) |
871 | #define I915_ERROR_PAGE_TABLE (1<<4) | 872 | #define I915_ERROR_PAGE_TABLE (1<<4) |
872 | #define GM45_ERROR_CP_PRIV (1<<3) | 873 | #define GM45_ERROR_CP_PRIV (1<<3) |
873 | #define I915_ERROR_MEMORY_REFRESH (1<<1) | 874 | #define I915_ERROR_MEMORY_REFRESH (1<<1) |
874 | #define I915_ERROR_INSTRUCTION (1<<0) | 875 | #define I915_ERROR_INSTRUCTION (1<<0) |
875 | #define INSTPM 0x020c0 | 876 | #define INSTPM 0x020c0 |
876 | #define INSTPM_SELF_EN (1<<12) /* 915GM only */ | 877 | #define INSTPM_SELF_EN (1<<12) /* 915GM only */ |
877 | #define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts | 878 | #define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts |
878 | will not assert AGPBUSY# and will only | 879 | will not assert AGPBUSY# and will only |
879 | be delivered when out of C3. */ | 880 | be delivered when out of C3. */ |
880 | #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ | 881 | #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ |
881 | #define INSTPM_TLB_INVALIDATE (1<<9) | 882 | #define INSTPM_TLB_INVALIDATE (1<<9) |
882 | #define INSTPM_SYNC_FLUSH (1<<5) | 883 | #define INSTPM_SYNC_FLUSH (1<<5) |
883 | #define ACTHD 0x020c8 | 884 | #define ACTHD 0x020c8 |
884 | #define FW_BLC 0x020d8 | 885 | #define FW_BLC 0x020d8 |
885 | #define FW_BLC2 0x020dc | 886 | #define FW_BLC2 0x020dc |
886 | #define FW_BLC_SELF 0x020e0 /* 915+ only */ | 887 | #define FW_BLC_SELF 0x020e0 /* 915+ only */ |
887 | #define FW_BLC_SELF_EN_MASK (1<<31) | 888 | #define FW_BLC_SELF_EN_MASK (1<<31) |
888 | #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ | 889 | #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ |
889 | #define FW_BLC_SELF_EN (1<<15) /* 945 only */ | 890 | #define FW_BLC_SELF_EN (1<<15) /* 945 only */ |
890 | #define MM_BURST_LENGTH 0x00700000 | 891 | #define MM_BURST_LENGTH 0x00700000 |
891 | #define MM_FIFO_WATERMARK 0x0001F000 | 892 | #define MM_FIFO_WATERMARK 0x0001F000 |
892 | #define LM_BURST_LENGTH 0x00000700 | 893 | #define LM_BURST_LENGTH 0x00000700 |
893 | #define LM_FIFO_WATERMARK 0x0000001F | 894 | #define LM_FIFO_WATERMARK 0x0000001F |
894 | #define MI_ARB_STATE 0x020e4 /* 915+ only */ | 895 | #define MI_ARB_STATE 0x020e4 /* 915+ only */ |
895 | 896 | ||
896 | /* Make render/texture TLB fetches lower priorty than associated data | 897 | /* Make render/texture TLB fetches lower priorty than associated data |
897 | * fetches. This is not turned on by default | 898 | * fetches. This is not turned on by default |
898 | */ | 899 | */ |
899 | #define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15) | 900 | #define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15) |
900 | 901 | ||
901 | /* Isoch request wait on GTT enable (Display A/B/C streams). | 902 | /* Isoch request wait on GTT enable (Display A/B/C streams). |
902 | * Make isoch requests stall on the TLB update. May cause | 903 | * Make isoch requests stall on the TLB update. May cause |
903 | * display underruns (test mode only) | 904 | * display underruns (test mode only) |
904 | */ | 905 | */ |
905 | #define MI_ARB_ISOCH_WAIT_GTT (1 << 14) | 906 | #define MI_ARB_ISOCH_WAIT_GTT (1 << 14) |
906 | 907 | ||
907 | /* Block grant count for isoch requests when block count is | 908 | /* Block grant count for isoch requests when block count is |
908 | * set to a finite value. | 909 | * set to a finite value. |
909 | */ | 910 | */ |
910 | #define MI_ARB_BLOCK_GRANT_MASK (3 << 12) | 911 | #define MI_ARB_BLOCK_GRANT_MASK (3 << 12) |
911 | #define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */ | 912 | #define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */ |
912 | #define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */ | 913 | #define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */ |
913 | #define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */ | 914 | #define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */ |
914 | #define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */ | 915 | #define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */ |
915 | 916 | ||
916 | /* Enable render writes to complete in C2/C3/C4 power states. | 917 | /* Enable render writes to complete in C2/C3/C4 power states. |
917 | * If this isn't enabled, render writes are prevented in low | 918 | * If this isn't enabled, render writes are prevented in low |
918 | * power states. That seems bad to me. | 919 | * power states. That seems bad to me. |
919 | */ | 920 | */ |
920 | #define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11) | 921 | #define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11) |
921 | 922 | ||
922 | /* This acknowledges an async flip immediately instead | 923 | /* This acknowledges an async flip immediately instead |
923 | * of waiting for 2TLB fetches. | 924 | * of waiting for 2TLB fetches. |
924 | */ | 925 | */ |
925 | #define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10) | 926 | #define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10) |
926 | 927 | ||
927 | /* Enables non-sequential data reads through arbiter | 928 | /* Enables non-sequential data reads through arbiter |
928 | */ | 929 | */ |
929 | #define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9) | 930 | #define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9) |
930 | 931 | ||
931 | /* Disable FSB snooping of cacheable write cycles from binner/render | 932 | /* Disable FSB snooping of cacheable write cycles from binner/render |
932 | * command stream | 933 | * command stream |
933 | */ | 934 | */ |
934 | #define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8) | 935 | #define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8) |
935 | 936 | ||
936 | /* Arbiter time slice for non-isoch streams */ | 937 | /* Arbiter time slice for non-isoch streams */ |
937 | #define MI_ARB_TIME_SLICE_MASK (7 << 5) | 938 | #define MI_ARB_TIME_SLICE_MASK (7 << 5) |
938 | #define MI_ARB_TIME_SLICE_1 (0 << 5) | 939 | #define MI_ARB_TIME_SLICE_1 (0 << 5) |
939 | #define MI_ARB_TIME_SLICE_2 (1 << 5) | 940 | #define MI_ARB_TIME_SLICE_2 (1 << 5) |
940 | #define MI_ARB_TIME_SLICE_4 (2 << 5) | 941 | #define MI_ARB_TIME_SLICE_4 (2 << 5) |
941 | #define MI_ARB_TIME_SLICE_6 (3 << 5) | 942 | #define MI_ARB_TIME_SLICE_6 (3 << 5) |
942 | #define MI_ARB_TIME_SLICE_8 (4 << 5) | 943 | #define MI_ARB_TIME_SLICE_8 (4 << 5) |
943 | #define MI_ARB_TIME_SLICE_10 (5 << 5) | 944 | #define MI_ARB_TIME_SLICE_10 (5 << 5) |
944 | #define MI_ARB_TIME_SLICE_14 (6 << 5) | 945 | #define MI_ARB_TIME_SLICE_14 (6 << 5) |
945 | #define MI_ARB_TIME_SLICE_16 (7 << 5) | 946 | #define MI_ARB_TIME_SLICE_16 (7 << 5) |
946 | 947 | ||
947 | /* Low priority grace period page size */ | 948 | /* Low priority grace period page size */ |
948 | #define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */ | 949 | #define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */ |
949 | #define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4) | 950 | #define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4) |
950 | 951 | ||
951 | /* Disable display A/B trickle feed */ | 952 | /* Disable display A/B trickle feed */ |
952 | #define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) | 953 | #define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) |
953 | 954 | ||
954 | /* Set display plane priority */ | 955 | /* Set display plane priority */ |
955 | #define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */ | 956 | #define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */ |
956 | #define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ | 957 | #define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ |
957 | 958 | ||
958 | #define CACHE_MODE_0 0x02120 /* 915+ only */ | 959 | #define CACHE_MODE_0 0x02120 /* 915+ only */ |
959 | #define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8) | 960 | #define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8) |
960 | #define CM0_IZ_OPT_DISABLE (1<<6) | 961 | #define CM0_IZ_OPT_DISABLE (1<<6) |
961 | #define CM0_ZR_OPT_DISABLE (1<<5) | 962 | #define CM0_ZR_OPT_DISABLE (1<<5) |
962 | #define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) | 963 | #define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) |
963 | #define CM0_DEPTH_EVICT_DISABLE (1<<4) | 964 | #define CM0_DEPTH_EVICT_DISABLE (1<<4) |
964 | #define CM0_COLOR_EVICT_DISABLE (1<<3) | 965 | #define CM0_COLOR_EVICT_DISABLE (1<<3) |
965 | #define CM0_DEPTH_WRITE_DISABLE (1<<1) | 966 | #define CM0_DEPTH_WRITE_DISABLE (1<<1) |
966 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) | 967 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) |
967 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ | 968 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ |
968 | #define GFX_FLSH_CNTL_GEN6 0x101008 | 969 | #define GFX_FLSH_CNTL_GEN6 0x101008 |
969 | #define GFX_FLSH_CNTL_EN (1<<0) | 970 | #define GFX_FLSH_CNTL_EN (1<<0) |
970 | #define ECOSKPD 0x021d0 | 971 | #define ECOSKPD 0x021d0 |
971 | #define ECO_GATING_CX_ONLY (1<<3) | 972 | #define ECO_GATING_CX_ONLY (1<<3) |
972 | #define ECO_FLIP_DONE (1<<0) | 973 | #define ECO_FLIP_DONE (1<<0) |
973 | 974 | ||
974 | #define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */ | 975 | #define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */ |
975 | #define HIZ_RAW_STALL_OPT_DISABLE (1<<2) | 976 | #define HIZ_RAW_STALL_OPT_DISABLE (1<<2) |
976 | #define CACHE_MODE_1 0x7004 /* IVB+ */ | 977 | #define CACHE_MODE_1 0x7004 /* IVB+ */ |
977 | #define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) | 978 | #define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) |
978 | #define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6) | 979 | #define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6) |
979 | 980 | ||
980 | #define GEN6_BLITTER_ECOSKPD 0x221d0 | 981 | #define GEN6_BLITTER_ECOSKPD 0x221d0 |
981 | #define GEN6_BLITTER_LOCK_SHIFT 16 | 982 | #define GEN6_BLITTER_LOCK_SHIFT 16 |
982 | #define GEN6_BLITTER_FBC_NOTIFY (1<<3) | 983 | #define GEN6_BLITTER_FBC_NOTIFY (1<<3) |
983 | 984 | ||
984 | #define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050 | 985 | #define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050 |
985 | #define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) | 986 | #define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) |
986 | 987 | ||
987 | #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 | 988 | #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 |
988 | #define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) | 989 | #define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) |
989 | #define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) | 990 | #define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) |
990 | #define GEN6_BSD_SLEEP_INDICATOR (1 << 3) | 991 | #define GEN6_BSD_SLEEP_INDICATOR (1 << 3) |
991 | #define GEN6_BSD_GO_INDICATOR (1 << 4) | 992 | #define GEN6_BSD_GO_INDICATOR (1 << 4) |
992 | 993 | ||
993 | /* On modern GEN architectures interrupt control consists of two sets | 994 | /* On modern GEN architectures interrupt control consists of two sets |
994 | * of registers. The first set pertains to the ring generating the | 995 | * of registers. The first set pertains to the ring generating the |
995 | * interrupt. The second control is for the functional block generating the | 996 | * interrupt. The second control is for the functional block generating the |
996 | * interrupt. These are PM, GT, DE, etc. | 997 | * interrupt. These are PM, GT, DE, etc. |
997 | * | 998 | * |
998 | * Luckily *knocks on wood* all the ring interrupt bits match up with the | 999 | * Luckily *knocks on wood* all the ring interrupt bits match up with the |
999 | * GT interrupt bits, so we don't need to duplicate the defines. | 1000 | * GT interrupt bits, so we don't need to duplicate the defines. |
1000 | * | 1001 | * |
1001 | * These defines should cover us well from SNB->HSW with minor exceptions | 1002 | * These defines should cover us well from SNB->HSW with minor exceptions |
1002 | * it can also work on ILK. | 1003 | * it can also work on ILK. |
1003 | */ | 1004 | */ |
1004 | #define GT_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26) | 1005 | #define GT_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26) |
1005 | #define GT_BLT_CS_ERROR_INTERRUPT (1 << 25) | 1006 | #define GT_BLT_CS_ERROR_INTERRUPT (1 << 25) |
1006 | #define GT_BLT_USER_INTERRUPT (1 << 22) | 1007 | #define GT_BLT_USER_INTERRUPT (1 << 22) |
1007 | #define GT_BSD_CS_ERROR_INTERRUPT (1 << 15) | 1008 | #define GT_BSD_CS_ERROR_INTERRUPT (1 << 15) |
1008 | #define GT_BSD_USER_INTERRUPT (1 << 12) | 1009 | #define GT_BSD_USER_INTERRUPT (1 << 12) |
1009 | #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */ | 1010 | #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */ |
1010 | #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */ | 1011 | #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */ |
1011 | #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4) | 1012 | #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4) |
1012 | #define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3) | 1013 | #define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3) |
1013 | #define GT_RENDER_SYNC_STATUS_INTERRUPT (1 << 2) | 1014 | #define GT_RENDER_SYNC_STATUS_INTERRUPT (1 << 2) |
1014 | #define GT_RENDER_DEBUG_INTERRUPT (1 << 1) | 1015 | #define GT_RENDER_DEBUG_INTERRUPT (1 << 1) |
1015 | #define GT_RENDER_USER_INTERRUPT (1 << 0) | 1016 | #define GT_RENDER_USER_INTERRUPT (1 << 0) |
1016 | 1017 | ||
1017 | #define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */ | 1018 | #define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */ |
1018 | #define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */ | 1019 | #define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */ |
1019 | 1020 | ||
1020 | #define GT_PARITY_ERROR(dev) \ | 1021 | #define GT_PARITY_ERROR(dev) \ |
1021 | (GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \ | 1022 | (GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \ |
1022 | (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0)) | 1023 | (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0)) |
1023 | 1024 | ||
1024 | /* These are all the "old" interrupts */ | 1025 | /* These are all the "old" interrupts */ |
1025 | #define ILK_BSD_USER_INTERRUPT (1<<5) | 1026 | #define ILK_BSD_USER_INTERRUPT (1<<5) |
1026 | #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) | 1027 | #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) |
1027 | #define I915_DISPLAY_PORT_INTERRUPT (1<<17) | 1028 | #define I915_DISPLAY_PORT_INTERRUPT (1<<17) |
1028 | #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) | 1029 | #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) |
1029 | #define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */ | 1030 | #define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */ |
1030 | #define I915_HWB_OOM_INTERRUPT (1<<13) | 1031 | #define I915_HWB_OOM_INTERRUPT (1<<13) |
1031 | #define I915_SYNC_STATUS_INTERRUPT (1<<12) | 1032 | #define I915_SYNC_STATUS_INTERRUPT (1<<12) |
1032 | #define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) | 1033 | #define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) |
1033 | #define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10) | 1034 | #define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10) |
1034 | #define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9) | 1035 | #define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9) |
1035 | #define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8) | 1036 | #define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8) |
1036 | #define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7) | 1037 | #define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7) |
1037 | #define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6) | 1038 | #define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6) |
1038 | #define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5) | 1039 | #define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5) |
1039 | #define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4) | 1040 | #define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4) |
1040 | #define I915_DEBUG_INTERRUPT (1<<2) | 1041 | #define I915_DEBUG_INTERRUPT (1<<2) |
1041 | #define I915_USER_INTERRUPT (1<<1) | 1042 | #define I915_USER_INTERRUPT (1<<1) |
1042 | #define I915_ASLE_INTERRUPT (1<<0) | 1043 | #define I915_ASLE_INTERRUPT (1<<0) |
1043 | #define I915_BSD_USER_INTERRUPT (1 << 25) | 1044 | #define I915_BSD_USER_INTERRUPT (1 << 25) |
1044 | 1045 | ||
1045 | #define GEN6_BSD_RNCID 0x12198 | 1046 | #define GEN6_BSD_RNCID 0x12198 |
1046 | 1047 | ||
1047 | #define GEN7_FF_THREAD_MODE 0x20a0 | 1048 | #define GEN7_FF_THREAD_MODE 0x20a0 |
1048 | #define GEN7_FF_SCHED_MASK 0x0077070 | 1049 | #define GEN7_FF_SCHED_MASK 0x0077070 |
1049 | #define GEN8_FF_DS_REF_CNT_FFME (1 << 19) | 1050 | #define GEN8_FF_DS_REF_CNT_FFME (1 << 19) |
1050 | #define GEN7_FF_TS_SCHED_HS1 (0x5<<16) | 1051 | #define GEN7_FF_TS_SCHED_HS1 (0x5<<16) |
1051 | #define GEN7_FF_TS_SCHED_HS0 (0x3<<16) | 1052 | #define GEN7_FF_TS_SCHED_HS0 (0x3<<16) |
1052 | #define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16) | 1053 | #define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16) |
1053 | #define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */ | 1054 | #define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */ |
1054 | #define GEN7_FF_VS_REF_CNT_FFME (1 << 15) | 1055 | #define GEN7_FF_VS_REF_CNT_FFME (1 << 15) |
1055 | #define GEN7_FF_VS_SCHED_HS1 (0x5<<12) | 1056 | #define GEN7_FF_VS_SCHED_HS1 (0x5<<12) |
1056 | #define GEN7_FF_VS_SCHED_HS0 (0x3<<12) | 1057 | #define GEN7_FF_VS_SCHED_HS0 (0x3<<12) |
1057 | #define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */ | 1058 | #define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */ |
1058 | #define GEN7_FF_VS_SCHED_HW (0x0<<12) | 1059 | #define GEN7_FF_VS_SCHED_HW (0x0<<12) |
1059 | #define GEN7_FF_DS_SCHED_HS1 (0x5<<4) | 1060 | #define GEN7_FF_DS_SCHED_HS1 (0x5<<4) |
1060 | #define GEN7_FF_DS_SCHED_HS0 (0x3<<4) | 1061 | #define GEN7_FF_DS_SCHED_HS0 (0x3<<4) |
1061 | #define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */ | 1062 | #define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */ |
1062 | #define GEN7_FF_DS_SCHED_HW (0x0<<4) | 1063 | #define GEN7_FF_DS_SCHED_HW (0x0<<4) |
1063 | 1064 | ||
1064 | /* | 1065 | /* |
1065 | * Framebuffer compression (915+ only) | 1066 | * Framebuffer compression (915+ only) |
1066 | */ | 1067 | */ |
1067 | 1068 | ||
1068 | #define FBC_CFB_BASE 0x03200 /* 4k page aligned */ | 1069 | #define FBC_CFB_BASE 0x03200 /* 4k page aligned */ |
1069 | #define FBC_LL_BASE 0x03204 /* 4k page aligned */ | 1070 | #define FBC_LL_BASE 0x03204 /* 4k page aligned */ |
1070 | #define FBC_CONTROL 0x03208 | 1071 | #define FBC_CONTROL 0x03208 |
1071 | #define FBC_CTL_EN (1<<31) | 1072 | #define FBC_CTL_EN (1<<31) |
1072 | #define FBC_CTL_PERIODIC (1<<30) | 1073 | #define FBC_CTL_PERIODIC (1<<30) |
1073 | #define FBC_CTL_INTERVAL_SHIFT (16) | 1074 | #define FBC_CTL_INTERVAL_SHIFT (16) |
1074 | #define FBC_CTL_UNCOMPRESSIBLE (1<<14) | 1075 | #define FBC_CTL_UNCOMPRESSIBLE (1<<14) |
1075 | #define FBC_CTL_C3_IDLE (1<<13) | 1076 | #define FBC_CTL_C3_IDLE (1<<13) |
1076 | #define FBC_CTL_STRIDE_SHIFT (5) | 1077 | #define FBC_CTL_STRIDE_SHIFT (5) |
1077 | #define FBC_CTL_FENCENO_SHIFT (0) | 1078 | #define FBC_CTL_FENCENO_SHIFT (0) |
1078 | #define FBC_COMMAND 0x0320c | 1079 | #define FBC_COMMAND 0x0320c |
1079 | #define FBC_CMD_COMPRESS (1<<0) | 1080 | #define FBC_CMD_COMPRESS (1<<0) |
1080 | #define FBC_STATUS 0x03210 | 1081 | #define FBC_STATUS 0x03210 |
1081 | #define FBC_STAT_COMPRESSING (1<<31) | 1082 | #define FBC_STAT_COMPRESSING (1<<31) |
1082 | #define FBC_STAT_COMPRESSED (1<<30) | 1083 | #define FBC_STAT_COMPRESSED (1<<30) |
1083 | #define FBC_STAT_MODIFIED (1<<29) | 1084 | #define FBC_STAT_MODIFIED (1<<29) |
1084 | #define FBC_STAT_CURRENT_LINE_SHIFT (0) | 1085 | #define FBC_STAT_CURRENT_LINE_SHIFT (0) |
1085 | #define FBC_CONTROL2 0x03214 | 1086 | #define FBC_CONTROL2 0x03214 |
1086 | #define FBC_CTL_FENCE_DBL (0<<4) | 1087 | #define FBC_CTL_FENCE_DBL (0<<4) |
1087 | #define FBC_CTL_IDLE_IMM (0<<2) | 1088 | #define FBC_CTL_IDLE_IMM (0<<2) |
1088 | #define FBC_CTL_IDLE_FULL (1<<2) | 1089 | #define FBC_CTL_IDLE_FULL (1<<2) |
1089 | #define FBC_CTL_IDLE_LINE (2<<2) | 1090 | #define FBC_CTL_IDLE_LINE (2<<2) |
1090 | #define FBC_CTL_IDLE_DEBUG (3<<2) | 1091 | #define FBC_CTL_IDLE_DEBUG (3<<2) |
1091 | #define FBC_CTL_CPU_FENCE (1<<1) | 1092 | #define FBC_CTL_CPU_FENCE (1<<1) |
1092 | #define FBC_CTL_PLANE(plane) ((plane)<<0) | 1093 | #define FBC_CTL_PLANE(plane) ((plane)<<0) |
1093 | #define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */ | 1094 | #define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */ |
1094 | #define FBC_TAG 0x03300 | 1095 | #define FBC_TAG 0x03300 |
1095 | 1096 | ||
1096 | #define FBC_LL_SIZE (1536) | 1097 | #define FBC_LL_SIZE (1536) |
1097 | 1098 | ||
1098 | /* Framebuffer compression for GM45+ */ | 1099 | /* Framebuffer compression for GM45+ */ |
1099 | #define DPFC_CB_BASE 0x3200 | 1100 | #define DPFC_CB_BASE 0x3200 |
1100 | #define DPFC_CONTROL 0x3208 | 1101 | #define DPFC_CONTROL 0x3208 |
1101 | #define DPFC_CTL_EN (1<<31) | 1102 | #define DPFC_CTL_EN (1<<31) |
1102 | #define DPFC_CTL_PLANE(plane) ((plane)<<30) | 1103 | #define DPFC_CTL_PLANE(plane) ((plane)<<30) |
1103 | #define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29) | 1104 | #define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29) |
1104 | #define DPFC_CTL_FENCE_EN (1<<29) | 1105 | #define DPFC_CTL_FENCE_EN (1<<29) |
1105 | #define IVB_DPFC_CTL_FENCE_EN (1<<28) | 1106 | #define IVB_DPFC_CTL_FENCE_EN (1<<28) |
1106 | #define DPFC_CTL_PERSISTENT_MODE (1<<25) | 1107 | #define DPFC_CTL_PERSISTENT_MODE (1<<25) |
1107 | #define DPFC_SR_EN (1<<10) | 1108 | #define DPFC_SR_EN (1<<10) |
1108 | #define DPFC_CTL_LIMIT_1X (0<<6) | 1109 | #define DPFC_CTL_LIMIT_1X (0<<6) |
1109 | #define DPFC_CTL_LIMIT_2X (1<<6) | 1110 | #define DPFC_CTL_LIMIT_2X (1<<6) |
1110 | #define DPFC_CTL_LIMIT_4X (2<<6) | 1111 | #define DPFC_CTL_LIMIT_4X (2<<6) |
1111 | #define DPFC_RECOMP_CTL 0x320c | 1112 | #define DPFC_RECOMP_CTL 0x320c |
1112 | #define DPFC_RECOMP_STALL_EN (1<<27) | 1113 | #define DPFC_RECOMP_STALL_EN (1<<27) |
1113 | #define DPFC_RECOMP_STALL_WM_SHIFT (16) | 1114 | #define DPFC_RECOMP_STALL_WM_SHIFT (16) |
1114 | #define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) | 1115 | #define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) |
1115 | #define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) | 1116 | #define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) |
1116 | #define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) | 1117 | #define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) |
1117 | #define DPFC_STATUS 0x3210 | 1118 | #define DPFC_STATUS 0x3210 |
1118 | #define DPFC_INVAL_SEG_SHIFT (16) | 1119 | #define DPFC_INVAL_SEG_SHIFT (16) |
1119 | #define DPFC_INVAL_SEG_MASK (0x07ff0000) | 1120 | #define DPFC_INVAL_SEG_MASK (0x07ff0000) |
1120 | #define DPFC_COMP_SEG_SHIFT (0) | 1121 | #define DPFC_COMP_SEG_SHIFT (0) |
1121 | #define DPFC_COMP_SEG_MASK (0x000003ff) | 1122 | #define DPFC_COMP_SEG_MASK (0x000003ff) |
1122 | #define DPFC_STATUS2 0x3214 | 1123 | #define DPFC_STATUS2 0x3214 |
1123 | #define DPFC_FENCE_YOFF 0x3218 | 1124 | #define DPFC_FENCE_YOFF 0x3218 |
1124 | #define DPFC_CHICKEN 0x3224 | 1125 | #define DPFC_CHICKEN 0x3224 |
1125 | #define DPFC_HT_MODIFY (1<<31) | 1126 | #define DPFC_HT_MODIFY (1<<31) |
1126 | 1127 | ||
1127 | /* Framebuffer compression for Ironlake */ | 1128 | /* Framebuffer compression for Ironlake */ |
1128 | #define ILK_DPFC_CB_BASE 0x43200 | 1129 | #define ILK_DPFC_CB_BASE 0x43200 |
1129 | #define ILK_DPFC_CONTROL 0x43208 | 1130 | #define ILK_DPFC_CONTROL 0x43208 |
1130 | /* The bit 28-8 is reserved */ | 1131 | /* The bit 28-8 is reserved */ |
1131 | #define DPFC_RESERVED (0x1FFFFF00) | 1132 | #define DPFC_RESERVED (0x1FFFFF00) |
1132 | #define ILK_DPFC_RECOMP_CTL 0x4320c | 1133 | #define ILK_DPFC_RECOMP_CTL 0x4320c |
1133 | #define ILK_DPFC_STATUS 0x43210 | 1134 | #define ILK_DPFC_STATUS 0x43210 |
1134 | #define ILK_DPFC_FENCE_YOFF 0x43218 | 1135 | #define ILK_DPFC_FENCE_YOFF 0x43218 |
1135 | #define ILK_DPFC_CHICKEN 0x43224 | 1136 | #define ILK_DPFC_CHICKEN 0x43224 |
1136 | #define ILK_FBC_RT_BASE 0x2128 | 1137 | #define ILK_FBC_RT_BASE 0x2128 |
1137 | #define ILK_FBC_RT_VALID (1<<0) | 1138 | #define ILK_FBC_RT_VALID (1<<0) |
1138 | #define SNB_FBC_FRONT_BUFFER (1<<1) | 1139 | #define SNB_FBC_FRONT_BUFFER (1<<1) |
1139 | 1140 | ||
1140 | #define ILK_DISPLAY_CHICKEN1 0x42000 | 1141 | #define ILK_DISPLAY_CHICKEN1 0x42000 |
1141 | #define ILK_FBCQ_DIS (1<<22) | 1142 | #define ILK_FBCQ_DIS (1<<22) |
1142 | #define ILK_PABSTRETCH_DIS (1<<21) | 1143 | #define ILK_PABSTRETCH_DIS (1<<21) |
1143 | 1144 | ||
1144 | 1145 | ||
1145 | /* | 1146 | /* |
1146 | * Framebuffer compression for Sandybridge | 1147 | * Framebuffer compression for Sandybridge |
1147 | * | 1148 | * |
1148 | * The following two registers are of type GTTMMADR | 1149 | * The following two registers are of type GTTMMADR |
1149 | */ | 1150 | */ |
1150 | #define SNB_DPFC_CTL_SA 0x100100 | 1151 | #define SNB_DPFC_CTL_SA 0x100100 |
1151 | #define SNB_CPU_FENCE_ENABLE (1<<29) | 1152 | #define SNB_CPU_FENCE_ENABLE (1<<29) |
1152 | #define DPFC_CPU_FENCE_OFFSET 0x100104 | 1153 | #define DPFC_CPU_FENCE_OFFSET 0x100104 |
1153 | 1154 | ||
1154 | /* Framebuffer compression for Ivybridge */ | 1155 | /* Framebuffer compression for Ivybridge */ |
1155 | #define IVB_FBC_RT_BASE 0x7020 | 1156 | #define IVB_FBC_RT_BASE 0x7020 |
1156 | 1157 | ||
1157 | #define IPS_CTL 0x43408 | 1158 | #define IPS_CTL 0x43408 |
1158 | #define IPS_ENABLE (1 << 31) | 1159 | #define IPS_ENABLE (1 << 31) |
1159 | 1160 | ||
1160 | #define MSG_FBC_REND_STATE 0x50380 | 1161 | #define MSG_FBC_REND_STATE 0x50380 |
1161 | #define FBC_REND_NUKE (1<<2) | 1162 | #define FBC_REND_NUKE (1<<2) |
1162 | #define FBC_REND_CACHE_CLEAN (1<<1) | 1163 | #define FBC_REND_CACHE_CLEAN (1<<1) |
1163 | 1164 | ||
1164 | /* | 1165 | /* |
1165 | * GPIO regs | 1166 | * GPIO regs |
1166 | */ | 1167 | */ |
1167 | #define GPIOA 0x5010 | 1168 | #define GPIOA 0x5010 |
1168 | #define GPIOB 0x5014 | 1169 | #define GPIOB 0x5014 |
1169 | #define GPIOC 0x5018 | 1170 | #define GPIOC 0x5018 |
1170 | #define GPIOD 0x501c | 1171 | #define GPIOD 0x501c |
1171 | #define GPIOE 0x5020 | 1172 | #define GPIOE 0x5020 |
1172 | #define GPIOF 0x5024 | 1173 | #define GPIOF 0x5024 |
1173 | #define GPIOG 0x5028 | 1174 | #define GPIOG 0x5028 |
1174 | #define GPIOH 0x502c | 1175 | #define GPIOH 0x502c |
1175 | # define GPIO_CLOCK_DIR_MASK (1 << 0) | 1176 | # define GPIO_CLOCK_DIR_MASK (1 << 0) |
1176 | # define GPIO_CLOCK_DIR_IN (0 << 1) | 1177 | # define GPIO_CLOCK_DIR_IN (0 << 1) |
1177 | # define GPIO_CLOCK_DIR_OUT (1 << 1) | 1178 | # define GPIO_CLOCK_DIR_OUT (1 << 1) |
1178 | # define GPIO_CLOCK_VAL_MASK (1 << 2) | 1179 | # define GPIO_CLOCK_VAL_MASK (1 << 2) |
1179 | # define GPIO_CLOCK_VAL_OUT (1 << 3) | 1180 | # define GPIO_CLOCK_VAL_OUT (1 << 3) |
1180 | # define GPIO_CLOCK_VAL_IN (1 << 4) | 1181 | # define GPIO_CLOCK_VAL_IN (1 << 4) |
1181 | # define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) | 1182 | # define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) |
1182 | # define GPIO_DATA_DIR_MASK (1 << 8) | 1183 | # define GPIO_DATA_DIR_MASK (1 << 8) |
1183 | # define GPIO_DATA_DIR_IN (0 << 9) | 1184 | # define GPIO_DATA_DIR_IN (0 << 9) |
1184 | # define GPIO_DATA_DIR_OUT (1 << 9) | 1185 | # define GPIO_DATA_DIR_OUT (1 << 9) |
1185 | # define GPIO_DATA_VAL_MASK (1 << 10) | 1186 | # define GPIO_DATA_VAL_MASK (1 << 10) |
1186 | # define GPIO_DATA_VAL_OUT (1 << 11) | 1187 | # define GPIO_DATA_VAL_OUT (1 << 11) |
1187 | # define GPIO_DATA_VAL_IN (1 << 12) | 1188 | # define GPIO_DATA_VAL_IN (1 << 12) |
1188 | # define GPIO_DATA_PULLUP_DISABLE (1 << 13) | 1189 | # define GPIO_DATA_PULLUP_DISABLE (1 << 13) |
1189 | 1190 | ||
1190 | #define GMBUS0 0x5100 /* clock/port select */ | 1191 | #define GMBUS0 0x5100 /* clock/port select */ |
1191 | #define GMBUS_RATE_100KHZ (0<<8) | 1192 | #define GMBUS_RATE_100KHZ (0<<8) |
1192 | #define GMBUS_RATE_50KHZ (1<<8) | 1193 | #define GMBUS_RATE_50KHZ (1<<8) |
1193 | #define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ | 1194 | #define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ |
1194 | #define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */ | 1195 | #define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */ |
1195 | #define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */ | 1196 | #define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */ |
1196 | #define GMBUS_PORT_DISABLED 0 | 1197 | #define GMBUS_PORT_DISABLED 0 |
1197 | #define GMBUS_PORT_SSC 1 | 1198 | #define GMBUS_PORT_SSC 1 |
1198 | #define GMBUS_PORT_VGADDC 2 | 1199 | #define GMBUS_PORT_VGADDC 2 |
1199 | #define GMBUS_PORT_PANEL 3 | 1200 | #define GMBUS_PORT_PANEL 3 |
1200 | #define GMBUS_PORT_DPC 4 /* HDMIC */ | 1201 | #define GMBUS_PORT_DPC 4 /* HDMIC */ |
1201 | #define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */ | 1202 | #define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */ |
1202 | #define GMBUS_PORT_DPD 6 /* HDMID */ | 1203 | #define GMBUS_PORT_DPD 6 /* HDMID */ |
1203 | #define GMBUS_PORT_RESERVED 7 /* 7 reserved */ | 1204 | #define GMBUS_PORT_RESERVED 7 /* 7 reserved */ |
1204 | #define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1) | 1205 | #define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1) |
1205 | #define GMBUS1 0x5104 /* command/status */ | 1206 | #define GMBUS1 0x5104 /* command/status */ |
1206 | #define GMBUS_SW_CLR_INT (1<<31) | 1207 | #define GMBUS_SW_CLR_INT (1<<31) |
1207 | #define GMBUS_SW_RDY (1<<30) | 1208 | #define GMBUS_SW_RDY (1<<30) |
1208 | #define GMBUS_ENT (1<<29) /* enable timeout */ | 1209 | #define GMBUS_ENT (1<<29) /* enable timeout */ |
1209 | #define GMBUS_CYCLE_NONE (0<<25) | 1210 | #define GMBUS_CYCLE_NONE (0<<25) |
1210 | #define GMBUS_CYCLE_WAIT (1<<25) | 1211 | #define GMBUS_CYCLE_WAIT (1<<25) |
1211 | #define GMBUS_CYCLE_INDEX (2<<25) | 1212 | #define GMBUS_CYCLE_INDEX (2<<25) |
1212 | #define GMBUS_CYCLE_STOP (4<<25) | 1213 | #define GMBUS_CYCLE_STOP (4<<25) |
1213 | #define GMBUS_BYTE_COUNT_SHIFT 16 | 1214 | #define GMBUS_BYTE_COUNT_SHIFT 16 |
1214 | #define GMBUS_SLAVE_INDEX_SHIFT 8 | 1215 | #define GMBUS_SLAVE_INDEX_SHIFT 8 |
1215 | #define GMBUS_SLAVE_ADDR_SHIFT 1 | 1216 | #define GMBUS_SLAVE_ADDR_SHIFT 1 |
1216 | #define GMBUS_SLAVE_READ (1<<0) | 1217 | #define GMBUS_SLAVE_READ (1<<0) |
1217 | #define GMBUS_SLAVE_WRITE (0<<0) | 1218 | #define GMBUS_SLAVE_WRITE (0<<0) |
1218 | #define GMBUS2 0x5108 /* status */ | 1219 | #define GMBUS2 0x5108 /* status */ |
1219 | #define GMBUS_INUSE (1<<15) | 1220 | #define GMBUS_INUSE (1<<15) |
1220 | #define GMBUS_HW_WAIT_PHASE (1<<14) | 1221 | #define GMBUS_HW_WAIT_PHASE (1<<14) |
1221 | #define GMBUS_STALL_TIMEOUT (1<<13) | 1222 | #define GMBUS_STALL_TIMEOUT (1<<13) |
1222 | #define GMBUS_INT (1<<12) | 1223 | #define GMBUS_INT (1<<12) |
1223 | #define GMBUS_HW_RDY (1<<11) | 1224 | #define GMBUS_HW_RDY (1<<11) |
1224 | #define GMBUS_SATOER (1<<10) | 1225 | #define GMBUS_SATOER (1<<10) |
1225 | #define GMBUS_ACTIVE (1<<9) | 1226 | #define GMBUS_ACTIVE (1<<9) |
1226 | #define GMBUS3 0x510c /* data buffer bytes 3-0 */ | 1227 | #define GMBUS3 0x510c /* data buffer bytes 3-0 */ |
1227 | #define GMBUS4 0x5110 /* interrupt mask (Pineview+) */ | 1228 | #define GMBUS4 0x5110 /* interrupt mask (Pineview+) */ |
1228 | #define GMBUS_SLAVE_TIMEOUT_EN (1<<4) | 1229 | #define GMBUS_SLAVE_TIMEOUT_EN (1<<4) |
1229 | #define GMBUS_NAK_EN (1<<3) | 1230 | #define GMBUS_NAK_EN (1<<3) |
1230 | #define GMBUS_IDLE_EN (1<<2) | 1231 | #define GMBUS_IDLE_EN (1<<2) |
1231 | #define GMBUS_HW_WAIT_EN (1<<1) | 1232 | #define GMBUS_HW_WAIT_EN (1<<1) |
1232 | #define GMBUS_HW_RDY_EN (1<<0) | 1233 | #define GMBUS_HW_RDY_EN (1<<0) |
1233 | #define GMBUS5 0x5120 /* byte index */ | 1234 | #define GMBUS5 0x5120 /* byte index */ |
1234 | #define GMBUS_2BYTE_INDEX_EN (1<<31) | 1235 | #define GMBUS_2BYTE_INDEX_EN (1<<31) |
1235 | 1236 | ||
1236 | /* | 1237 | /* |
1237 | * Clock control & power management | 1238 | * Clock control & power management |
1238 | */ | 1239 | */ |
1239 | #define DPLL_A_OFFSET 0x6014 | 1240 | #define DPLL_A_OFFSET 0x6014 |
1240 | #define DPLL_B_OFFSET 0x6018 | 1241 | #define DPLL_B_OFFSET 0x6018 |
1241 | #define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \ | 1242 | #define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \ |
1242 | dev_priv->info.display_mmio_offset) | 1243 | dev_priv->info.display_mmio_offset) |
1243 | 1244 | ||
1244 | #define VGA0 0x6000 | 1245 | #define VGA0 0x6000 |
1245 | #define VGA1 0x6004 | 1246 | #define VGA1 0x6004 |
1246 | #define VGA_PD 0x6010 | 1247 | #define VGA_PD 0x6010 |
1247 | #define VGA0_PD_P2_DIV_4 (1 << 7) | 1248 | #define VGA0_PD_P2_DIV_4 (1 << 7) |
1248 | #define VGA0_PD_P1_DIV_2 (1 << 5) | 1249 | #define VGA0_PD_P1_DIV_2 (1 << 5) |
1249 | #define VGA0_PD_P1_SHIFT 0 | 1250 | #define VGA0_PD_P1_SHIFT 0 |
1250 | #define VGA0_PD_P1_MASK (0x1f << 0) | 1251 | #define VGA0_PD_P1_MASK (0x1f << 0) |
1251 | #define VGA1_PD_P2_DIV_4 (1 << 15) | 1252 | #define VGA1_PD_P2_DIV_4 (1 << 15) |
1252 | #define VGA1_PD_P1_DIV_2 (1 << 13) | 1253 | #define VGA1_PD_P1_DIV_2 (1 << 13) |
1253 | #define VGA1_PD_P1_SHIFT 8 | 1254 | #define VGA1_PD_P1_SHIFT 8 |
1254 | #define VGA1_PD_P1_MASK (0x1f << 8) | 1255 | #define VGA1_PD_P1_MASK (0x1f << 8) |
1255 | #define DPLL_VCO_ENABLE (1 << 31) | 1256 | #define DPLL_VCO_ENABLE (1 << 31) |
1256 | #define DPLL_SDVO_HIGH_SPEED (1 << 30) | 1257 | #define DPLL_SDVO_HIGH_SPEED (1 << 30) |
1257 | #define DPLL_DVO_2X_MODE (1 << 30) | 1258 | #define DPLL_DVO_2X_MODE (1 << 30) |
1258 | #define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30) | 1259 | #define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30) |
1259 | #define DPLL_SYNCLOCK_ENABLE (1 << 29) | 1260 | #define DPLL_SYNCLOCK_ENABLE (1 << 29) |
1260 | #define DPLL_REFA_CLK_ENABLE_VLV (1 << 29) | 1261 | #define DPLL_REFA_CLK_ENABLE_VLV (1 << 29) |
1261 | #define DPLL_VGA_MODE_DIS (1 << 28) | 1262 | #define DPLL_VGA_MODE_DIS (1 << 28) |
1262 | #define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ | 1263 | #define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ |
1263 | #define DPLLB_MODE_LVDS (2 << 26) /* i915 */ | 1264 | #define DPLLB_MODE_LVDS (2 << 26) /* i915 */ |
1264 | #define DPLL_MODE_MASK (3 << 26) | 1265 | #define DPLL_MODE_MASK (3 << 26) |
1265 | #define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */ | 1266 | #define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */ |
1266 | #define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */ | 1267 | #define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */ |
1267 | #define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ | 1268 | #define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ |
1268 | #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ | 1269 | #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ |
1269 | #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ | 1270 | #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ |
1270 | #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ | 1271 | #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ |
1271 | #define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ | 1272 | #define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ |
1272 | #define DPLL_LOCK_VLV (1<<15) | 1273 | #define DPLL_LOCK_VLV (1<<15) |
1273 | #define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14) | 1274 | #define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14) |
1274 | #define DPLL_INTEGRATED_CLOCK_VLV (1<<13) | 1275 | #define DPLL_INTEGRATED_CLOCK_VLV (1<<13) |
1275 | #define DPLL_PORTC_READY_MASK (0xf << 4) | 1276 | #define DPLL_PORTC_READY_MASK (0xf << 4) |
1276 | #define DPLL_PORTB_READY_MASK (0xf) | 1277 | #define DPLL_PORTB_READY_MASK (0xf) |
1277 | 1278 | ||
1278 | #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 | 1279 | #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 |
1279 | /* | 1280 | /* |
1280 | * The i830 generation, in LVDS mode, defines P1 as the bit number set within | 1281 | * The i830 generation, in LVDS mode, defines P1 as the bit number set within |
1281 | * this field (only one bit may be set). | 1282 | * this field (only one bit may be set). |
1282 | */ | 1283 | */ |
1283 | #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 | 1284 | #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 |
1284 | #define DPLL_FPA01_P1_POST_DIV_SHIFT 16 | 1285 | #define DPLL_FPA01_P1_POST_DIV_SHIFT 16 |
1285 | #define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15 | 1286 | #define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15 |
1286 | /* i830, required in DVO non-gang */ | 1287 | /* i830, required in DVO non-gang */ |
1287 | #define PLL_P2_DIVIDE_BY_4 (1 << 23) | 1288 | #define PLL_P2_DIVIDE_BY_4 (1 << 23) |
1288 | #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ | 1289 | #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ |
1289 | #define PLL_REF_INPUT_DREFCLK (0 << 13) | 1290 | #define PLL_REF_INPUT_DREFCLK (0 << 13) |
1290 | #define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ | 1291 | #define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ |
1291 | #define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */ | 1292 | #define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */ |
1292 | #define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) | 1293 | #define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) |
1293 | #define PLL_REF_INPUT_MASK (3 << 13) | 1294 | #define PLL_REF_INPUT_MASK (3 << 13) |
1294 | #define PLL_LOAD_PULSE_PHASE_SHIFT 9 | 1295 | #define PLL_LOAD_PULSE_PHASE_SHIFT 9 |
1295 | /* Ironlake */ | 1296 | /* Ironlake */ |
1296 | # define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9 | 1297 | # define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9 |
1297 | # define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9) | 1298 | # define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9) |
1298 | # define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9) | 1299 | # define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9) |
1299 | # define DPLL_FPA1_P1_POST_DIV_SHIFT 0 | 1300 | # define DPLL_FPA1_P1_POST_DIV_SHIFT 0 |
1300 | # define DPLL_FPA1_P1_POST_DIV_MASK 0xff | 1301 | # define DPLL_FPA1_P1_POST_DIV_MASK 0xff |
1301 | 1302 | ||
1302 | /* | 1303 | /* |
1303 | * Parallel to Serial Load Pulse phase selection. | 1304 | * Parallel to Serial Load Pulse phase selection. |
1304 | * Selects the phase for the 10X DPLL clock for the PCIe | 1305 | * Selects the phase for the 10X DPLL clock for the PCIe |
1305 | * digital display port. The range is 4 to 13; 10 or more | 1306 | * digital display port. The range is 4 to 13; 10 or more |
1306 | * is just a flip delay. The default is 6 | 1307 | * is just a flip delay. The default is 6 |
1307 | */ | 1308 | */ |
1308 | #define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT) | 1309 | #define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT) |
1309 | #define DISPLAY_RATE_SELECT_FPA1 (1 << 8) | 1310 | #define DISPLAY_RATE_SELECT_FPA1 (1 << 8) |
1310 | /* | 1311 | /* |
1311 | * SDVO multiplier for 945G/GM. Not used on 965. | 1312 | * SDVO multiplier for 945G/GM. Not used on 965. |
1312 | */ | 1313 | */ |
1313 | #define SDVO_MULTIPLIER_MASK 0x000000ff | 1314 | #define SDVO_MULTIPLIER_MASK 0x000000ff |
1314 | #define SDVO_MULTIPLIER_SHIFT_HIRES 4 | 1315 | #define SDVO_MULTIPLIER_SHIFT_HIRES 4 |
1315 | #define SDVO_MULTIPLIER_SHIFT_VGA 0 | 1316 | #define SDVO_MULTIPLIER_SHIFT_VGA 0 |
1316 | 1317 | ||
1317 | #define DPLL_A_MD_OFFSET 0x601c /* 965+ only */ | 1318 | #define DPLL_A_MD_OFFSET 0x601c /* 965+ only */ |
1318 | #define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */ | 1319 | #define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */ |
1319 | #define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \ | 1320 | #define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \ |
1320 | dev_priv->info.display_mmio_offset) | 1321 | dev_priv->info.display_mmio_offset) |
1321 | 1322 | ||
1322 | /* | 1323 | /* |
1323 | * UDI pixel divider, controlling how many pixels are stuffed into a packet. | 1324 | * UDI pixel divider, controlling how many pixels are stuffed into a packet. |
1324 | * | 1325 | * |
1325 | * Value is pixels minus 1. Must be set to 1 pixel for SDVO. | 1326 | * Value is pixels minus 1. Must be set to 1 pixel for SDVO. |
1326 | */ | 1327 | */ |
1327 | #define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000 | 1328 | #define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000 |
1328 | #define DPLL_MD_UDI_DIVIDER_SHIFT 24 | 1329 | #define DPLL_MD_UDI_DIVIDER_SHIFT 24 |
1329 | /* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */ | 1330 | /* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */ |
1330 | #define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000 | 1331 | #define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000 |
1331 | #define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16 | 1332 | #define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16 |
1332 | /* | 1333 | /* |
1333 | * SDVO/UDI pixel multiplier. | 1334 | * SDVO/UDI pixel multiplier. |
1334 | * | 1335 | * |
1335 | * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus | 1336 | * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus |
1336 | * clock rate is 10 times the DPLL clock. At low resolution/refresh rate | 1337 | * clock rate is 10 times the DPLL clock. At low resolution/refresh rate |
1337 | * modes, the bus rate would be below the limits, so SDVO allows for stuffing | 1338 | * modes, the bus rate would be below the limits, so SDVO allows for stuffing |
1338 | * dummy bytes in the datastream at an increased clock rate, with both sides of | 1339 | * dummy bytes in the datastream at an increased clock rate, with both sides of |
1339 | * the link knowing how many bytes are fill. | 1340 | * the link knowing how many bytes are fill. |
1340 | * | 1341 | * |
1341 | * So, for a mode with a dotclock of 65Mhz, we would want to double the clock | 1342 | * So, for a mode with a dotclock of 65Mhz, we would want to double the clock |
1342 | * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be | 1343 | * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be |
1343 | * set to 130Mhz, and the SDVO multiplier set to 2x in this register and | 1344 | * set to 130Mhz, and the SDVO multiplier set to 2x in this register and |
1344 | * through an SDVO command. | 1345 | * through an SDVO command. |
1345 | * | 1346 | * |
1346 | * This register field has values of multiplication factor minus 1, with | 1347 | * This register field has values of multiplication factor minus 1, with |
1347 | * a maximum multiplier of 5 for SDVO. | 1348 | * a maximum multiplier of 5 for SDVO. |
1348 | */ | 1349 | */ |
1349 | #define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00 | 1350 | #define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00 |
1350 | #define DPLL_MD_UDI_MULTIPLIER_SHIFT 8 | 1351 | #define DPLL_MD_UDI_MULTIPLIER_SHIFT 8 |
1351 | /* | 1352 | /* |
1352 | * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK. | 1353 | * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK. |
1353 | * This best be set to the default value (3) or the CRT won't work. No, | 1354 | * This best be set to the default value (3) or the CRT won't work. No, |
1354 | * I don't entirely understand what this does... | 1355 | * I don't entirely understand what this does... |
1355 | */ | 1356 | */ |
1356 | #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f | 1357 | #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f |
1357 | #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 | 1358 | #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 |
1358 | 1359 | ||
1359 | #define _FPA0 0x06040 | 1360 | #define _FPA0 0x06040 |
1360 | #define _FPA1 0x06044 | 1361 | #define _FPA1 0x06044 |
1361 | #define _FPB0 0x06048 | 1362 | #define _FPB0 0x06048 |
1362 | #define _FPB1 0x0604c | 1363 | #define _FPB1 0x0604c |
1363 | #define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0) | 1364 | #define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0) |
1364 | #define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1) | 1365 | #define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1) |
1365 | #define FP_N_DIV_MASK 0x003f0000 | 1366 | #define FP_N_DIV_MASK 0x003f0000 |
1366 | #define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 | 1367 | #define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 |
1367 | #define FP_N_DIV_SHIFT 16 | 1368 | #define FP_N_DIV_SHIFT 16 |
1368 | #define FP_M1_DIV_MASK 0x00003f00 | 1369 | #define FP_M1_DIV_MASK 0x00003f00 |
1369 | #define FP_M1_DIV_SHIFT 8 | 1370 | #define FP_M1_DIV_SHIFT 8 |
1370 | #define FP_M2_DIV_MASK 0x0000003f | 1371 | #define FP_M2_DIV_MASK 0x0000003f |
1371 | #define FP_M2_PINEVIEW_DIV_MASK 0x000000ff | 1372 | #define FP_M2_PINEVIEW_DIV_MASK 0x000000ff |
1372 | #define FP_M2_DIV_SHIFT 0 | 1373 | #define FP_M2_DIV_SHIFT 0 |
1373 | #define DPLL_TEST 0x606c | 1374 | #define DPLL_TEST 0x606c |
1374 | #define DPLLB_TEST_SDVO_DIV_1 (0 << 22) | 1375 | #define DPLLB_TEST_SDVO_DIV_1 (0 << 22) |
1375 | #define DPLLB_TEST_SDVO_DIV_2 (1 << 22) | 1376 | #define DPLLB_TEST_SDVO_DIV_2 (1 << 22) |
1376 | #define DPLLB_TEST_SDVO_DIV_4 (2 << 22) | 1377 | #define DPLLB_TEST_SDVO_DIV_4 (2 << 22) |
1377 | #define DPLLB_TEST_SDVO_DIV_MASK (3 << 22) | 1378 | #define DPLLB_TEST_SDVO_DIV_MASK (3 << 22) |
1378 | #define DPLLB_TEST_N_BYPASS (1 << 19) | 1379 | #define DPLLB_TEST_N_BYPASS (1 << 19) |
1379 | #define DPLLB_TEST_M_BYPASS (1 << 18) | 1380 | #define DPLLB_TEST_M_BYPASS (1 << 18) |
1380 | #define DPLLB_INPUT_BUFFER_ENABLE (1 << 16) | 1381 | #define DPLLB_INPUT_BUFFER_ENABLE (1 << 16) |
1381 | #define DPLLA_TEST_N_BYPASS (1 << 3) | 1382 | #define DPLLA_TEST_N_BYPASS (1 << 3) |
1382 | #define DPLLA_TEST_M_BYPASS (1 << 2) | 1383 | #define DPLLA_TEST_M_BYPASS (1 << 2) |
1383 | #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) | 1384 | #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) |
1384 | #define D_STATE 0x6104 | 1385 | #define D_STATE 0x6104 |
1385 | #define DSTATE_GFX_RESET_I830 (1<<6) | 1386 | #define DSTATE_GFX_RESET_I830 (1<<6) |
1386 | #define DSTATE_PLL_D3_OFF (1<<3) | 1387 | #define DSTATE_PLL_D3_OFF (1<<3) |
1387 | #define DSTATE_GFX_CLOCK_GATING (1<<1) | 1388 | #define DSTATE_GFX_CLOCK_GATING (1<<1) |
1388 | #define DSTATE_DOT_CLOCK_GATING (1<<0) | 1389 | #define DSTATE_DOT_CLOCK_GATING (1<<0) |
1389 | #define DSPCLK_GATE_D (dev_priv->info.display_mmio_offset + 0x6200) | 1390 | #define DSPCLK_GATE_D (dev_priv->info.display_mmio_offset + 0x6200) |
1390 | # define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ | 1391 | # define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ |
1391 | # define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ | 1392 | # define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ |
1392 | # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ | 1393 | # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ |
1393 | # define VRDUNIT_CLOCK_GATE_DISABLE (1 << 27) /* 965 */ | 1394 | # define VRDUNIT_CLOCK_GATE_DISABLE (1 << 27) /* 965 */ |
1394 | # define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */ | 1395 | # define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */ |
1395 | # define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */ | 1396 | # define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */ |
1396 | # define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */ | 1397 | # define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */ |
1397 | # define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */ | 1398 | # define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */ |
1398 | # define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */ | 1399 | # define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */ |
1399 | # define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */ | 1400 | # define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */ |
1400 | # define TVEUNIT_CLOCK_GATE_DISABLE (1 << 20) /* 915-945 */ | 1401 | # define TVEUNIT_CLOCK_GATE_DISABLE (1 << 20) /* 915-945 */ |
1401 | # define DVSUNIT_CLOCK_GATE_DISABLE (1 << 19) /* 915-945 */ | 1402 | # define DVSUNIT_CLOCK_GATE_DISABLE (1 << 19) /* 915-945 */ |
1402 | # define DSSUNIT_CLOCK_GATE_DISABLE (1 << 18) /* 915-945 */ | 1403 | # define DSSUNIT_CLOCK_GATE_DISABLE (1 << 18) /* 915-945 */ |
1403 | # define DDBUNIT_CLOCK_GATE_DISABLE (1 << 17) /* 915-945 */ | 1404 | # define DDBUNIT_CLOCK_GATE_DISABLE (1 << 17) /* 915-945 */ |
1404 | # define DPRUNIT_CLOCK_GATE_DISABLE (1 << 16) /* 915-945 */ | 1405 | # define DPRUNIT_CLOCK_GATE_DISABLE (1 << 16) /* 915-945 */ |
1405 | # define DPFUNIT_CLOCK_GATE_DISABLE (1 << 15) /* 915-945 */ | 1406 | # define DPFUNIT_CLOCK_GATE_DISABLE (1 << 15) /* 915-945 */ |
1406 | # define DPBMUNIT_CLOCK_GATE_DISABLE (1 << 14) /* 915-945 */ | 1407 | # define DPBMUNIT_CLOCK_GATE_DISABLE (1 << 14) /* 915-945 */ |
1407 | # define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) /* 915-945 */ | 1408 | # define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) /* 915-945 */ |
1408 | # define DPLUNIT_CLOCK_GATE_DISABLE (1 << 12) /* 915-945 */ | 1409 | # define DPLUNIT_CLOCK_GATE_DISABLE (1 << 12) /* 915-945 */ |
1409 | # define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11) | 1410 | # define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11) |
1410 | # define DPBUNIT_CLOCK_GATE_DISABLE (1 << 10) | 1411 | # define DPBUNIT_CLOCK_GATE_DISABLE (1 << 10) |
1411 | # define DCUNIT_CLOCK_GATE_DISABLE (1 << 9) | 1412 | # define DCUNIT_CLOCK_GATE_DISABLE (1 << 9) |
1412 | # define DPUNIT_CLOCK_GATE_DISABLE (1 << 8) | 1413 | # define DPUNIT_CLOCK_GATE_DISABLE (1 << 8) |
1413 | # define VRUNIT_CLOCK_GATE_DISABLE (1 << 7) /* 915+: reserved */ | 1414 | # define VRUNIT_CLOCK_GATE_DISABLE (1 << 7) /* 915+: reserved */ |
1414 | # define OVHUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 830-865 */ | 1415 | # define OVHUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 830-865 */ |
1415 | # define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */ | 1416 | # define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */ |
1416 | # define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5) | 1417 | # define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5) |
1417 | # define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4) | 1418 | # define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4) |
1418 | /** | 1419 | /** |
1419 | * This bit must be set on the 830 to prevent hangs when turning off the | 1420 | * This bit must be set on the 830 to prevent hangs when turning off the |
1420 | * overlay scaler. | 1421 | * overlay scaler. |
1421 | */ | 1422 | */ |
1422 | # define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3) | 1423 | # define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3) |
1423 | # define OVCUNIT_CLOCK_GATE_DISABLE (1 << 2) | 1424 | # define OVCUNIT_CLOCK_GATE_DISABLE (1 << 2) |
1424 | # define OVUUNIT_CLOCK_GATE_DISABLE (1 << 1) | 1425 | # define OVUUNIT_CLOCK_GATE_DISABLE (1 << 1) |
1425 | # define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */ | 1426 | # define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */ |
1426 | # define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */ | 1427 | # define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */ |
1427 | 1428 | ||
1428 | #define RENCLK_GATE_D1 0x6204 | 1429 | #define RENCLK_GATE_D1 0x6204 |
1429 | # define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */ | 1430 | # define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */ |
1430 | # define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */ | 1431 | # define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */ |
1431 | # define PC_FE_CLOCK_GATE_DISABLE (1 << 11) | 1432 | # define PC_FE_CLOCK_GATE_DISABLE (1 << 11) |
1432 | # define PC_BE_CLOCK_GATE_DISABLE (1 << 10) | 1433 | # define PC_BE_CLOCK_GATE_DISABLE (1 << 10) |
1433 | # define WINDOWER_CLOCK_GATE_DISABLE (1 << 9) | 1434 | # define WINDOWER_CLOCK_GATE_DISABLE (1 << 9) |
1434 | # define INTERPOLATOR_CLOCK_GATE_DISABLE (1 << 8) | 1435 | # define INTERPOLATOR_CLOCK_GATE_DISABLE (1 << 8) |
1435 | # define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7) | 1436 | # define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7) |
1436 | # define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6) | 1437 | # define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6) |
1437 | # define MAG_CLOCK_GATE_DISABLE (1 << 5) | 1438 | # define MAG_CLOCK_GATE_DISABLE (1 << 5) |
1438 | /** This bit must be unset on 855,865 */ | 1439 | /** This bit must be unset on 855,865 */ |
1439 | # define MECI_CLOCK_GATE_DISABLE (1 << 4) | 1440 | # define MECI_CLOCK_GATE_DISABLE (1 << 4) |
1440 | # define DCMP_CLOCK_GATE_DISABLE (1 << 3) | 1441 | # define DCMP_CLOCK_GATE_DISABLE (1 << 3) |
1441 | # define MEC_CLOCK_GATE_DISABLE (1 << 2) | 1442 | # define MEC_CLOCK_GATE_DISABLE (1 << 2) |
1442 | # define MECO_CLOCK_GATE_DISABLE (1 << 1) | 1443 | # define MECO_CLOCK_GATE_DISABLE (1 << 1) |
1443 | /** This bit must be set on 855,865. */ | 1444 | /** This bit must be set on 855,865. */ |
1444 | # define SV_CLOCK_GATE_DISABLE (1 << 0) | 1445 | # define SV_CLOCK_GATE_DISABLE (1 << 0) |
1445 | # define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16) | 1446 | # define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16) |
1446 | # define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15) | 1447 | # define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15) |
1447 | # define I915_MOTION_COMP_CLOCK_GATE_DISABLE (1 << 14) | 1448 | # define I915_MOTION_COMP_CLOCK_GATE_DISABLE (1 << 14) |
1448 | # define I915_BD_BF_CLOCK_GATE_DISABLE (1 << 13) | 1449 | # define I915_BD_BF_CLOCK_GATE_DISABLE (1 << 13) |
1449 | # define I915_SF_SE_CLOCK_GATE_DISABLE (1 << 12) | 1450 | # define I915_SF_SE_CLOCK_GATE_DISABLE (1 << 12) |
1450 | # define I915_WM_CLOCK_GATE_DISABLE (1 << 11) | 1451 | # define I915_WM_CLOCK_GATE_DISABLE (1 << 11) |
1451 | # define I915_IZ_CLOCK_GATE_DISABLE (1 << 10) | 1452 | # define I915_IZ_CLOCK_GATE_DISABLE (1 << 10) |
1452 | # define I915_PI_CLOCK_GATE_DISABLE (1 << 9) | 1453 | # define I915_PI_CLOCK_GATE_DISABLE (1 << 9) |
1453 | # define I915_DI_CLOCK_GATE_DISABLE (1 << 8) | 1454 | # define I915_DI_CLOCK_GATE_DISABLE (1 << 8) |
1454 | # define I915_SH_SV_CLOCK_GATE_DISABLE (1 << 7) | 1455 | # define I915_SH_SV_CLOCK_GATE_DISABLE (1 << 7) |
1455 | # define I915_PL_DG_QC_FT_CLOCK_GATE_DISABLE (1 << 6) | 1456 | # define I915_PL_DG_QC_FT_CLOCK_GATE_DISABLE (1 << 6) |
1456 | # define I915_SC_CLOCK_GATE_DISABLE (1 << 5) | 1457 | # define I915_SC_CLOCK_GATE_DISABLE (1 << 5) |
1457 | # define I915_FL_CLOCK_GATE_DISABLE (1 << 4) | 1458 | # define I915_FL_CLOCK_GATE_DISABLE (1 << 4) |
1458 | # define I915_DM_CLOCK_GATE_DISABLE (1 << 3) | 1459 | # define I915_DM_CLOCK_GATE_DISABLE (1 << 3) |
1459 | # define I915_PS_CLOCK_GATE_DISABLE (1 << 2) | 1460 | # define I915_PS_CLOCK_GATE_DISABLE (1 << 2) |
1460 | # define I915_CC_CLOCK_GATE_DISABLE (1 << 1) | 1461 | # define I915_CC_CLOCK_GATE_DISABLE (1 << 1) |
1461 | # define I915_BY_CLOCK_GATE_DISABLE (1 << 0) | 1462 | # define I915_BY_CLOCK_GATE_DISABLE (1 << 0) |
1462 | 1463 | ||
1463 | # define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30) | 1464 | # define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30) |
1464 | /** This bit must always be set on 965G/965GM */ | 1465 | /** This bit must always be set on 965G/965GM */ |
1465 | # define I965_RCC_CLOCK_GATE_DISABLE (1 << 29) | 1466 | # define I965_RCC_CLOCK_GATE_DISABLE (1 << 29) |
1466 | # define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28) | 1467 | # define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28) |
1467 | # define I965_DAP_CLOCK_GATE_DISABLE (1 << 27) | 1468 | # define I965_DAP_CLOCK_GATE_DISABLE (1 << 27) |
1468 | # define I965_ROC_CLOCK_GATE_DISABLE (1 << 26) | 1469 | # define I965_ROC_CLOCK_GATE_DISABLE (1 << 26) |
1469 | # define I965_GW_CLOCK_GATE_DISABLE (1 << 25) | 1470 | # define I965_GW_CLOCK_GATE_DISABLE (1 << 25) |
1470 | # define I965_TD_CLOCK_GATE_DISABLE (1 << 24) | 1471 | # define I965_TD_CLOCK_GATE_DISABLE (1 << 24) |
1471 | /** This bit must always be set on 965G */ | 1472 | /** This bit must always be set on 965G */ |
1472 | # define I965_ISC_CLOCK_GATE_DISABLE (1 << 23) | 1473 | # define I965_ISC_CLOCK_GATE_DISABLE (1 << 23) |
1473 | # define I965_IC_CLOCK_GATE_DISABLE (1 << 22) | 1474 | # define I965_IC_CLOCK_GATE_DISABLE (1 << 22) |
1474 | # define I965_EU_CLOCK_GATE_DISABLE (1 << 21) | 1475 | # define I965_EU_CLOCK_GATE_DISABLE (1 << 21) |
1475 | # define I965_IF_CLOCK_GATE_DISABLE (1 << 20) | 1476 | # define I965_IF_CLOCK_GATE_DISABLE (1 << 20) |
1476 | # define I965_TC_CLOCK_GATE_DISABLE (1 << 19) | 1477 | # define I965_TC_CLOCK_GATE_DISABLE (1 << 19) |
1477 | # define I965_SO_CLOCK_GATE_DISABLE (1 << 17) | 1478 | # define I965_SO_CLOCK_GATE_DISABLE (1 << 17) |
1478 | # define I965_FBC_CLOCK_GATE_DISABLE (1 << 16) | 1479 | # define I965_FBC_CLOCK_GATE_DISABLE (1 << 16) |
1479 | # define I965_MARI_CLOCK_GATE_DISABLE (1 << 15) | 1480 | # define I965_MARI_CLOCK_GATE_DISABLE (1 << 15) |
1480 | # define I965_MASF_CLOCK_GATE_DISABLE (1 << 14) | 1481 | # define I965_MASF_CLOCK_GATE_DISABLE (1 << 14) |
1481 | # define I965_MAWB_CLOCK_GATE_DISABLE (1 << 13) | 1482 | # define I965_MAWB_CLOCK_GATE_DISABLE (1 << 13) |
1482 | # define I965_EM_CLOCK_GATE_DISABLE (1 << 12) | 1483 | # define I965_EM_CLOCK_GATE_DISABLE (1 << 12) |
1483 | # define I965_UC_CLOCK_GATE_DISABLE (1 << 11) | 1484 | # define I965_UC_CLOCK_GATE_DISABLE (1 << 11) |
1484 | # define I965_SI_CLOCK_GATE_DISABLE (1 << 6) | 1485 | # define I965_SI_CLOCK_GATE_DISABLE (1 << 6) |
1485 | # define I965_MT_CLOCK_GATE_DISABLE (1 << 5) | 1486 | # define I965_MT_CLOCK_GATE_DISABLE (1 << 5) |
1486 | # define I965_PL_CLOCK_GATE_DISABLE (1 << 4) | 1487 | # define I965_PL_CLOCK_GATE_DISABLE (1 << 4) |
1487 | # define I965_DG_CLOCK_GATE_DISABLE (1 << 3) | 1488 | # define I965_DG_CLOCK_GATE_DISABLE (1 << 3) |
1488 | # define I965_QC_CLOCK_GATE_DISABLE (1 << 2) | 1489 | # define I965_QC_CLOCK_GATE_DISABLE (1 << 2) |
1489 | # define I965_FT_CLOCK_GATE_DISABLE (1 << 1) | 1490 | # define I965_FT_CLOCK_GATE_DISABLE (1 << 1) |
1490 | # define I965_DM_CLOCK_GATE_DISABLE (1 << 0) | 1491 | # define I965_DM_CLOCK_GATE_DISABLE (1 << 0) |
1491 | 1492 | ||
1492 | #define RENCLK_GATE_D2 0x6208 | 1493 | #define RENCLK_GATE_D2 0x6208 |
1493 | #define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9) | 1494 | #define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9) |
1494 | #define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7) | 1495 | #define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7) |
1495 | #define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6) | 1496 | #define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6) |
1496 | #define RAMCLK_GATE_D 0x6210 /* CRL only */ | 1497 | #define RAMCLK_GATE_D 0x6210 /* CRL only */ |
1497 | #define DEUC 0x6214 /* CRL only */ | 1498 | #define DEUC 0x6214 /* CRL only */ |
1498 | 1499 | ||
1499 | #define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500) | 1500 | #define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500) |
1500 | #define FW_CSPWRDWNEN (1<<15) | 1501 | #define FW_CSPWRDWNEN (1<<15) |
1501 | 1502 | ||
1502 | #define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504) | 1503 | #define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504) |
1503 | 1504 | ||
1504 | #define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508) | 1505 | #define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508) |
1505 | #define CDCLK_FREQ_SHIFT 4 | 1506 | #define CDCLK_FREQ_SHIFT 4 |
1506 | #define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT) | 1507 | #define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT) |
1507 | #define CZCLK_FREQ_MASK 0xf | 1508 | #define CZCLK_FREQ_MASK 0xf |
1508 | #define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510) | 1509 | #define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510) |
1509 | 1510 | ||
1510 | /* | 1511 | /* |
1511 | * Palette regs | 1512 | * Palette regs |
1512 | */ | 1513 | */ |
1513 | #define PALETTE_A_OFFSET 0xa000 | 1514 | #define PALETTE_A_OFFSET 0xa000 |
1514 | #define PALETTE_B_OFFSET 0xa800 | 1515 | #define PALETTE_B_OFFSET 0xa800 |
1515 | #define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \ | 1516 | #define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \ |
1516 | dev_priv->info.display_mmio_offset) | 1517 | dev_priv->info.display_mmio_offset) |
1517 | 1518 | ||
1518 | /* MCH MMIO space */ | 1519 | /* MCH MMIO space */ |
1519 | 1520 | ||
1520 | /* | 1521 | /* |
1521 | * MCHBAR mirror. | 1522 | * MCHBAR mirror. |
1522 | * | 1523 | * |
1523 | * This mirrors the MCHBAR MMIO space whose location is determined by | 1524 | * This mirrors the MCHBAR MMIO space whose location is determined by |
1524 | * device 0 function 0's pci config register 0x44 or 0x48 and matches it in | 1525 | * device 0 function 0's pci config register 0x44 or 0x48 and matches it in |
1525 | * every way. It is not accessible from the CP register read instructions. | 1526 | * every way. It is not accessible from the CP register read instructions. |
1526 | * | 1527 | * |
1527 | * Starting from Haswell, you can't write registers using the MCHBAR mirror, | 1528 | * Starting from Haswell, you can't write registers using the MCHBAR mirror, |
1528 | * just read. | 1529 | * just read. |
1529 | */ | 1530 | */ |
1530 | #define MCHBAR_MIRROR_BASE 0x10000 | 1531 | #define MCHBAR_MIRROR_BASE 0x10000 |
1531 | 1532 | ||
1532 | #define MCHBAR_MIRROR_BASE_SNB 0x140000 | 1533 | #define MCHBAR_MIRROR_BASE_SNB 0x140000 |
1533 | 1534 | ||
1534 | /* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */ | 1535 | /* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */ |
1535 | #define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04) | 1536 | #define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04) |
1536 | 1537 | ||
1537 | /** 915-945 and GM965 MCH register controlling DRAM channel access */ | 1538 | /** 915-945 and GM965 MCH register controlling DRAM channel access */ |
1538 | #define DCC 0x10200 | 1539 | #define DCC 0x10200 |
1539 | #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) | 1540 | #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) |
1540 | #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) | 1541 | #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) |
1541 | #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) | 1542 | #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) |
1542 | #define DCC_ADDRESSING_MODE_MASK (3 << 0) | 1543 | #define DCC_ADDRESSING_MODE_MASK (3 << 0) |
1543 | #define DCC_CHANNEL_XOR_DISABLE (1 << 10) | 1544 | #define DCC_CHANNEL_XOR_DISABLE (1 << 10) |
1544 | #define DCC_CHANNEL_XOR_BIT_17 (1 << 9) | 1545 | #define DCC_CHANNEL_XOR_BIT_17 (1 << 9) |
1545 | 1546 | ||
1546 | /** Pineview MCH register contains DDR3 setting */ | 1547 | /** Pineview MCH register contains DDR3 setting */ |
1547 | #define CSHRDDR3CTL 0x101a8 | 1548 | #define CSHRDDR3CTL 0x101a8 |
1548 | #define CSHRDDR3CTL_DDR3 (1 << 2) | 1549 | #define CSHRDDR3CTL_DDR3 (1 << 2) |
1549 | 1550 | ||
1550 | /** 965 MCH register controlling DRAM channel configuration */ | 1551 | /** 965 MCH register controlling DRAM channel configuration */ |
1551 | #define C0DRB3 0x10206 | 1552 | #define C0DRB3 0x10206 |
1552 | #define C1DRB3 0x10606 | 1553 | #define C1DRB3 0x10606 |
1553 | 1554 | ||
1554 | /** snb MCH registers for reading the DRAM channel configuration */ | 1555 | /** snb MCH registers for reading the DRAM channel configuration */ |
1555 | #define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004) | 1556 | #define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004) |
1556 | #define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008) | 1557 | #define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008) |
1557 | #define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C) | 1558 | #define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C) |
1558 | #define MAD_DIMM_ECC_MASK (0x3 << 24) | 1559 | #define MAD_DIMM_ECC_MASK (0x3 << 24) |
1559 | #define MAD_DIMM_ECC_OFF (0x0 << 24) | 1560 | #define MAD_DIMM_ECC_OFF (0x0 << 24) |
1560 | #define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24) | 1561 | #define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24) |
1561 | #define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24) | 1562 | #define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24) |
1562 | #define MAD_DIMM_ECC_ON (0x3 << 24) | 1563 | #define MAD_DIMM_ECC_ON (0x3 << 24) |
1563 | #define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22) | 1564 | #define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22) |
1564 | #define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21) | 1565 | #define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21) |
1565 | #define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */ | 1566 | #define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */ |
1566 | #define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */ | 1567 | #define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */ |
1567 | #define MAD_DIMM_B_DUAL_RANK (0x1 << 18) | 1568 | #define MAD_DIMM_B_DUAL_RANK (0x1 << 18) |
1568 | #define MAD_DIMM_A_DUAL_RANK (0x1 << 17) | 1569 | #define MAD_DIMM_A_DUAL_RANK (0x1 << 17) |
1569 | #define MAD_DIMM_A_SELECT (0x1 << 16) | 1570 | #define MAD_DIMM_A_SELECT (0x1 << 16) |
1570 | /* DIMM sizes are in multiples of 256mb. */ | 1571 | /* DIMM sizes are in multiples of 256mb. */ |
1571 | #define MAD_DIMM_B_SIZE_SHIFT 8 | 1572 | #define MAD_DIMM_B_SIZE_SHIFT 8 |
1572 | #define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT) | 1573 | #define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT) |
1573 | #define MAD_DIMM_A_SIZE_SHIFT 0 | 1574 | #define MAD_DIMM_A_SIZE_SHIFT 0 |
1574 | #define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT) | 1575 | #define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT) |
1575 | 1576 | ||
1576 | /** snb MCH registers for priority tuning */ | 1577 | /** snb MCH registers for priority tuning */ |
1577 | #define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10) | 1578 | #define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10) |
1578 | #define MCH_SSKPD_WM0_MASK 0x3f | 1579 | #define MCH_SSKPD_WM0_MASK 0x3f |
1579 | #define MCH_SSKPD_WM0_VAL 0xc | 1580 | #define MCH_SSKPD_WM0_VAL 0xc |
1580 | 1581 | ||
1581 | #define MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c) | 1582 | #define MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c) |
1582 | 1583 | ||
1583 | /* Clocking configuration register */ | 1584 | /* Clocking configuration register */ |
1584 | #define CLKCFG 0x10c00 | 1585 | #define CLKCFG 0x10c00 |
1585 | #define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ | 1586 | #define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ |
1586 | #define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */ | 1587 | #define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */ |
1587 | #define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ | 1588 | #define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ |
1588 | #define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ | 1589 | #define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ |
1589 | #define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ | 1590 | #define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ |
1590 | #define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ | 1591 | #define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ |
1591 | /* Note, below two are guess */ | 1592 | /* Note, below two are guess */ |
1592 | #define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */ | 1593 | #define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */ |
1593 | #define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */ | 1594 | #define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */ |
1594 | #define CLKCFG_FSB_MASK (7 << 0) | 1595 | #define CLKCFG_FSB_MASK (7 << 0) |
1595 | #define CLKCFG_MEM_533 (1 << 4) | 1596 | #define CLKCFG_MEM_533 (1 << 4) |
1596 | #define CLKCFG_MEM_667 (2 << 4) | 1597 | #define CLKCFG_MEM_667 (2 << 4) |
1597 | #define CLKCFG_MEM_800 (3 << 4) | 1598 | #define CLKCFG_MEM_800 (3 << 4) |
1598 | #define CLKCFG_MEM_MASK (7 << 4) | 1599 | #define CLKCFG_MEM_MASK (7 << 4) |
1599 | 1600 | ||
1600 | #define TSC1 0x11001 | 1601 | #define TSC1 0x11001 |
1601 | #define TSE (1<<0) | 1602 | #define TSE (1<<0) |
1602 | #define TR1 0x11006 | 1603 | #define TR1 0x11006 |
1603 | #define TSFS 0x11020 | 1604 | #define TSFS 0x11020 |
1604 | #define TSFS_SLOPE_MASK 0x0000ff00 | 1605 | #define TSFS_SLOPE_MASK 0x0000ff00 |
1605 | #define TSFS_SLOPE_SHIFT 8 | 1606 | #define TSFS_SLOPE_SHIFT 8 |
1606 | #define TSFS_INTR_MASK 0x000000ff | 1607 | #define TSFS_INTR_MASK 0x000000ff |
1607 | 1608 | ||
1608 | #define CRSTANDVID 0x11100 | 1609 | #define CRSTANDVID 0x11100 |
1609 | #define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ | 1610 | #define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ |
1610 | #define PXVFREQ_PX_MASK 0x7f000000 | 1611 | #define PXVFREQ_PX_MASK 0x7f000000 |
1611 | #define PXVFREQ_PX_SHIFT 24 | 1612 | #define PXVFREQ_PX_SHIFT 24 |
1612 | #define VIDFREQ_BASE 0x11110 | 1613 | #define VIDFREQ_BASE 0x11110 |
1613 | #define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */ | 1614 | #define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */ |
1614 | #define VIDFREQ2 0x11114 | 1615 | #define VIDFREQ2 0x11114 |
1615 | #define VIDFREQ3 0x11118 | 1616 | #define VIDFREQ3 0x11118 |
1616 | #define VIDFREQ4 0x1111c | 1617 | #define VIDFREQ4 0x1111c |
1617 | #define VIDFREQ_P0_MASK 0x1f000000 | 1618 | #define VIDFREQ_P0_MASK 0x1f000000 |
1618 | #define VIDFREQ_P0_SHIFT 24 | 1619 | #define VIDFREQ_P0_SHIFT 24 |
1619 | #define VIDFREQ_P0_CSCLK_MASK 0x00f00000 | 1620 | #define VIDFREQ_P0_CSCLK_MASK 0x00f00000 |
1620 | #define VIDFREQ_P0_CSCLK_SHIFT 20 | 1621 | #define VIDFREQ_P0_CSCLK_SHIFT 20 |
1621 | #define VIDFREQ_P0_CRCLK_MASK 0x000f0000 | 1622 | #define VIDFREQ_P0_CRCLK_MASK 0x000f0000 |
1622 | #define VIDFREQ_P0_CRCLK_SHIFT 16 | 1623 | #define VIDFREQ_P0_CRCLK_SHIFT 16 |
1623 | #define VIDFREQ_P1_MASK 0x00001f00 | 1624 | #define VIDFREQ_P1_MASK 0x00001f00 |
1624 | #define VIDFREQ_P1_SHIFT 8 | 1625 | #define VIDFREQ_P1_SHIFT 8 |
1625 | #define VIDFREQ_P1_CSCLK_MASK 0x000000f0 | 1626 | #define VIDFREQ_P1_CSCLK_MASK 0x000000f0 |
1626 | #define VIDFREQ_P1_CSCLK_SHIFT 4 | 1627 | #define VIDFREQ_P1_CSCLK_SHIFT 4 |
1627 | #define VIDFREQ_P1_CRCLK_MASK 0x0000000f | 1628 | #define VIDFREQ_P1_CRCLK_MASK 0x0000000f |
1628 | #define INTTOEXT_BASE_ILK 0x11300 | 1629 | #define INTTOEXT_BASE_ILK 0x11300 |
1629 | #define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */ | 1630 | #define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */ |
1630 | #define INTTOEXT_MAP3_SHIFT 24 | 1631 | #define INTTOEXT_MAP3_SHIFT 24 |
1631 | #define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT) | 1632 | #define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT) |
1632 | #define INTTOEXT_MAP2_SHIFT 16 | 1633 | #define INTTOEXT_MAP2_SHIFT 16 |
1633 | #define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT) | 1634 | #define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT) |
1634 | #define INTTOEXT_MAP1_SHIFT 8 | 1635 | #define INTTOEXT_MAP1_SHIFT 8 |
1635 | #define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT) | 1636 | #define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT) |
1636 | #define INTTOEXT_MAP0_SHIFT 0 | 1637 | #define INTTOEXT_MAP0_SHIFT 0 |
1637 | #define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT) | 1638 | #define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT) |
1638 | #define MEMSWCTL 0x11170 /* Ironlake only */ | 1639 | #define MEMSWCTL 0x11170 /* Ironlake only */ |
1639 | #define MEMCTL_CMD_MASK 0xe000 | 1640 | #define MEMCTL_CMD_MASK 0xe000 |
1640 | #define MEMCTL_CMD_SHIFT 13 | 1641 | #define MEMCTL_CMD_SHIFT 13 |
1641 | #define MEMCTL_CMD_RCLK_OFF 0 | 1642 | #define MEMCTL_CMD_RCLK_OFF 0 |
1642 | #define MEMCTL_CMD_RCLK_ON 1 | 1643 | #define MEMCTL_CMD_RCLK_ON 1 |
1643 | #define MEMCTL_CMD_CHFREQ 2 | 1644 | #define MEMCTL_CMD_CHFREQ 2 |
1644 | #define MEMCTL_CMD_CHVID 3 | 1645 | #define MEMCTL_CMD_CHVID 3 |
1645 | #define MEMCTL_CMD_VMMOFF 4 | 1646 | #define MEMCTL_CMD_VMMOFF 4 |
1646 | #define MEMCTL_CMD_VMMON 5 | 1647 | #define MEMCTL_CMD_VMMON 5 |
1647 | #define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears | 1648 | #define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears |
1648 | when command complete */ | 1649 | when command complete */ |
1649 | #define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */ | 1650 | #define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */ |
1650 | #define MEMCTL_FREQ_SHIFT 8 | 1651 | #define MEMCTL_FREQ_SHIFT 8 |
1651 | #define MEMCTL_SFCAVM (1<<7) | 1652 | #define MEMCTL_SFCAVM (1<<7) |
1652 | #define MEMCTL_TGT_VID_MASK 0x007f | 1653 | #define MEMCTL_TGT_VID_MASK 0x007f |
1653 | #define MEMIHYST 0x1117c | 1654 | #define MEMIHYST 0x1117c |
1654 | #define MEMINTREN 0x11180 /* 16 bits */ | 1655 | #define MEMINTREN 0x11180 /* 16 bits */ |
1655 | #define MEMINT_RSEXIT_EN (1<<8) | 1656 | #define MEMINT_RSEXIT_EN (1<<8) |
1656 | #define MEMINT_CX_SUPR_EN (1<<7) | 1657 | #define MEMINT_CX_SUPR_EN (1<<7) |
1657 | #define MEMINT_CONT_BUSY_EN (1<<6) | 1658 | #define MEMINT_CONT_BUSY_EN (1<<6) |
1658 | #define MEMINT_AVG_BUSY_EN (1<<5) | 1659 | #define MEMINT_AVG_BUSY_EN (1<<5) |
1659 | #define MEMINT_EVAL_CHG_EN (1<<4) | 1660 | #define MEMINT_EVAL_CHG_EN (1<<4) |
1660 | #define MEMINT_MON_IDLE_EN (1<<3) | 1661 | #define MEMINT_MON_IDLE_EN (1<<3) |
1661 | #define MEMINT_UP_EVAL_EN (1<<2) | 1662 | #define MEMINT_UP_EVAL_EN (1<<2) |
1662 | #define MEMINT_DOWN_EVAL_EN (1<<1) | 1663 | #define MEMINT_DOWN_EVAL_EN (1<<1) |
1663 | #define MEMINT_SW_CMD_EN (1<<0) | 1664 | #define MEMINT_SW_CMD_EN (1<<0) |
1664 | #define MEMINTRSTR 0x11182 /* 16 bits */ | 1665 | #define MEMINTRSTR 0x11182 /* 16 bits */ |
1665 | #define MEM_RSEXIT_MASK 0xc000 | 1666 | #define MEM_RSEXIT_MASK 0xc000 |
1666 | #define MEM_RSEXIT_SHIFT 14 | 1667 | #define MEM_RSEXIT_SHIFT 14 |
1667 | #define MEM_CONT_BUSY_MASK 0x3000 | 1668 | #define MEM_CONT_BUSY_MASK 0x3000 |
1668 | #define MEM_CONT_BUSY_SHIFT 12 | 1669 | #define MEM_CONT_BUSY_SHIFT 12 |
1669 | #define MEM_AVG_BUSY_MASK 0x0c00 | 1670 | #define MEM_AVG_BUSY_MASK 0x0c00 |
1670 | #define MEM_AVG_BUSY_SHIFT 10 | 1671 | #define MEM_AVG_BUSY_SHIFT 10 |
1671 | #define MEM_EVAL_CHG_MASK 0x0300 | 1672 | #define MEM_EVAL_CHG_MASK 0x0300 |
1672 | #define MEM_EVAL_BUSY_SHIFT 8 | 1673 | #define MEM_EVAL_BUSY_SHIFT 8 |
1673 | #define MEM_MON_IDLE_MASK 0x00c0 | 1674 | #define MEM_MON_IDLE_MASK 0x00c0 |
1674 | #define MEM_MON_IDLE_SHIFT 6 | 1675 | #define MEM_MON_IDLE_SHIFT 6 |
1675 | #define MEM_UP_EVAL_MASK 0x0030 | 1676 | #define MEM_UP_EVAL_MASK 0x0030 |
1676 | #define MEM_UP_EVAL_SHIFT 4 | 1677 | #define MEM_UP_EVAL_SHIFT 4 |
1677 | #define MEM_DOWN_EVAL_MASK 0x000c | 1678 | #define MEM_DOWN_EVAL_MASK 0x000c |
1678 | #define MEM_DOWN_EVAL_SHIFT 2 | 1679 | #define MEM_DOWN_EVAL_SHIFT 2 |
1679 | #define MEM_SW_CMD_MASK 0x0003 | 1680 | #define MEM_SW_CMD_MASK 0x0003 |
1680 | #define MEM_INT_STEER_GFX 0 | 1681 | #define MEM_INT_STEER_GFX 0 |
1681 | #define MEM_INT_STEER_CMR 1 | 1682 | #define MEM_INT_STEER_CMR 1 |
1682 | #define MEM_INT_STEER_SMI 2 | 1683 | #define MEM_INT_STEER_SMI 2 |
1683 | #define MEM_INT_STEER_SCI 3 | 1684 | #define MEM_INT_STEER_SCI 3 |
1684 | #define MEMINTRSTS 0x11184 | 1685 | #define MEMINTRSTS 0x11184 |
1685 | #define MEMINT_RSEXIT (1<<7) | 1686 | #define MEMINT_RSEXIT (1<<7) |
1686 | #define MEMINT_CONT_BUSY (1<<6) | 1687 | #define MEMINT_CONT_BUSY (1<<6) |
1687 | #define MEMINT_AVG_BUSY (1<<5) | 1688 | #define MEMINT_AVG_BUSY (1<<5) |
1688 | #define MEMINT_EVAL_CHG (1<<4) | 1689 | #define MEMINT_EVAL_CHG (1<<4) |
1689 | #define MEMINT_MON_IDLE (1<<3) | 1690 | #define MEMINT_MON_IDLE (1<<3) |
1690 | #define MEMINT_UP_EVAL (1<<2) | 1691 | #define MEMINT_UP_EVAL (1<<2) |
1691 | #define MEMINT_DOWN_EVAL (1<<1) | 1692 | #define MEMINT_DOWN_EVAL (1<<1) |
1692 | #define MEMINT_SW_CMD (1<<0) | 1693 | #define MEMINT_SW_CMD (1<<0) |
1693 | #define MEMMODECTL 0x11190 | 1694 | #define MEMMODECTL 0x11190 |
1694 | #define MEMMODE_BOOST_EN (1<<31) | 1695 | #define MEMMODE_BOOST_EN (1<<31) |
1695 | #define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */ | 1696 | #define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */ |
1696 | #define MEMMODE_BOOST_FREQ_SHIFT 24 | 1697 | #define MEMMODE_BOOST_FREQ_SHIFT 24 |
1697 | #define MEMMODE_IDLE_MODE_MASK 0x00030000 | 1698 | #define MEMMODE_IDLE_MODE_MASK 0x00030000 |
1698 | #define MEMMODE_IDLE_MODE_SHIFT 16 | 1699 | #define MEMMODE_IDLE_MODE_SHIFT 16 |
1699 | #define MEMMODE_IDLE_MODE_EVAL 0 | 1700 | #define MEMMODE_IDLE_MODE_EVAL 0 |
1700 | #define MEMMODE_IDLE_MODE_CONT 1 | 1701 | #define MEMMODE_IDLE_MODE_CONT 1 |
1701 | #define MEMMODE_HWIDLE_EN (1<<15) | 1702 | #define MEMMODE_HWIDLE_EN (1<<15) |
1702 | #define MEMMODE_SWMODE_EN (1<<14) | 1703 | #define MEMMODE_SWMODE_EN (1<<14) |
1703 | #define MEMMODE_RCLK_GATE (1<<13) | 1704 | #define MEMMODE_RCLK_GATE (1<<13) |
1704 | #define MEMMODE_HW_UPDATE (1<<12) | 1705 | #define MEMMODE_HW_UPDATE (1<<12) |
1705 | #define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */ | 1706 | #define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */ |
1706 | #define MEMMODE_FSTART_SHIFT 8 | 1707 | #define MEMMODE_FSTART_SHIFT 8 |
1707 | #define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */ | 1708 | #define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */ |
1708 | #define MEMMODE_FMAX_SHIFT 4 | 1709 | #define MEMMODE_FMAX_SHIFT 4 |
1709 | #define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */ | 1710 | #define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */ |
1710 | #define RCBMAXAVG 0x1119c | 1711 | #define RCBMAXAVG 0x1119c |
1711 | #define MEMSWCTL2 0x1119e /* Cantiga only */ | 1712 | #define MEMSWCTL2 0x1119e /* Cantiga only */ |
1712 | #define SWMEMCMD_RENDER_OFF (0 << 13) | 1713 | #define SWMEMCMD_RENDER_OFF (0 << 13) |
1713 | #define SWMEMCMD_RENDER_ON (1 << 13) | 1714 | #define SWMEMCMD_RENDER_ON (1 << 13) |
1714 | #define SWMEMCMD_SWFREQ (2 << 13) | 1715 | #define SWMEMCMD_SWFREQ (2 << 13) |
1715 | #define SWMEMCMD_TARVID (3 << 13) | 1716 | #define SWMEMCMD_TARVID (3 << 13) |
1716 | #define SWMEMCMD_VRM_OFF (4 << 13) | 1717 | #define SWMEMCMD_VRM_OFF (4 << 13) |
1717 | #define SWMEMCMD_VRM_ON (5 << 13) | 1718 | #define SWMEMCMD_VRM_ON (5 << 13) |
1718 | #define CMDSTS (1<<12) | 1719 | #define CMDSTS (1<<12) |
1719 | #define SFCAVM (1<<11) | 1720 | #define SFCAVM (1<<11) |
1720 | #define SWFREQ_MASK 0x0380 /* P0-7 */ | 1721 | #define SWFREQ_MASK 0x0380 /* P0-7 */ |
1721 | #define SWFREQ_SHIFT 7 | 1722 | #define SWFREQ_SHIFT 7 |
1722 | #define TARVID_MASK 0x001f | 1723 | #define TARVID_MASK 0x001f |
1723 | #define MEMSTAT_CTG 0x111a0 | 1724 | #define MEMSTAT_CTG 0x111a0 |
1724 | #define RCBMINAVG 0x111a0 | 1725 | #define RCBMINAVG 0x111a0 |
1725 | #define RCUPEI 0x111b0 | 1726 | #define RCUPEI 0x111b0 |
1726 | #define RCDNEI 0x111b4 | 1727 | #define RCDNEI 0x111b4 |
1727 | #define RSTDBYCTL 0x111b8 | 1728 | #define RSTDBYCTL 0x111b8 |
1728 | #define RS1EN (1<<31) | 1729 | #define RS1EN (1<<31) |
1729 | #define RS2EN (1<<30) | 1730 | #define RS2EN (1<<30) |
1730 | #define RS3EN (1<<29) | 1731 | #define RS3EN (1<<29) |
1731 | #define D3RS3EN (1<<28) /* Display D3 imlies RS3 */ | 1732 | #define D3RS3EN (1<<28) /* Display D3 imlies RS3 */ |
1732 | #define SWPROMORSX (1<<27) /* RSx promotion timers ignored */ | 1733 | #define SWPROMORSX (1<<27) /* RSx promotion timers ignored */ |
1733 | #define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */ | 1734 | #define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */ |
1734 | #define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */ | 1735 | #define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */ |
1735 | #define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */ | 1736 | #define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */ |
1736 | #define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */ | 1737 | #define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */ |
1737 | #define RSX_STATUS_MASK (7<<20) | 1738 | #define RSX_STATUS_MASK (7<<20) |
1738 | #define RSX_STATUS_ON (0<<20) | 1739 | #define RSX_STATUS_ON (0<<20) |
1739 | #define RSX_STATUS_RC1 (1<<20) | 1740 | #define RSX_STATUS_RC1 (1<<20) |
1740 | #define RSX_STATUS_RC1E (2<<20) | 1741 | #define RSX_STATUS_RC1E (2<<20) |
1741 | #define RSX_STATUS_RS1 (3<<20) | 1742 | #define RSX_STATUS_RS1 (3<<20) |
1742 | #define RSX_STATUS_RS2 (4<<20) /* aka rc6 */ | 1743 | #define RSX_STATUS_RS2 (4<<20) /* aka rc6 */ |
1743 | #define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */ | 1744 | #define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */ |
1744 | #define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */ | 1745 | #define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */ |
1745 | #define RSX_STATUS_RSVD2 (7<<20) | 1746 | #define RSX_STATUS_RSVD2 (7<<20) |
1746 | #define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */ | 1747 | #define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */ |
1747 | #define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */ | 1748 | #define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */ |
1748 | #define JRSC (1<<17) /* rsx coupled to cpu c-state */ | 1749 | #define JRSC (1<<17) /* rsx coupled to cpu c-state */ |
1749 | #define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */ | 1750 | #define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */ |
1750 | #define RS1CONTSAV_MASK (3<<14) | 1751 | #define RS1CONTSAV_MASK (3<<14) |
1751 | #define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */ | 1752 | #define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */ |
1752 | #define RS1CONTSAV_RSVD (1<<14) | 1753 | #define RS1CONTSAV_RSVD (1<<14) |
1753 | #define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */ | 1754 | #define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */ |
1754 | #define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */ | 1755 | #define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */ |
1755 | #define NORMSLEXLAT_MASK (3<<12) | 1756 | #define NORMSLEXLAT_MASK (3<<12) |
1756 | #define SLOW_RS123 (0<<12) | 1757 | #define SLOW_RS123 (0<<12) |
1757 | #define SLOW_RS23 (1<<12) | 1758 | #define SLOW_RS23 (1<<12) |
1758 | #define SLOW_RS3 (2<<12) | 1759 | #define SLOW_RS3 (2<<12) |
1759 | #define NORMAL_RS123 (3<<12) | 1760 | #define NORMAL_RS123 (3<<12) |
1760 | #define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */ | 1761 | #define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */ |
1761 | #define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */ | 1762 | #define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */ |
1762 | #define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */ | 1763 | #define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */ |
1763 | #define STATELOCK (1<<7) /* locked to rs_cstate if 0 */ | 1764 | #define STATELOCK (1<<7) /* locked to rs_cstate if 0 */ |
1764 | #define RS_CSTATE_MASK (3<<4) | 1765 | #define RS_CSTATE_MASK (3<<4) |
1765 | #define RS_CSTATE_C367_RS1 (0<<4) | 1766 | #define RS_CSTATE_C367_RS1 (0<<4) |
1766 | #define RS_CSTATE_C36_RS1_C7_RS2 (1<<4) | 1767 | #define RS_CSTATE_C36_RS1_C7_RS2 (1<<4) |
1767 | #define RS_CSTATE_RSVD (2<<4) | 1768 | #define RS_CSTATE_RSVD (2<<4) |
1768 | #define RS_CSTATE_C367_RS2 (3<<4) | 1769 | #define RS_CSTATE_C367_RS2 (3<<4) |
1769 | #define REDSAVES (1<<3) /* no context save if was idle during rs0 */ | 1770 | #define REDSAVES (1<<3) /* no context save if was idle during rs0 */ |
1770 | #define REDRESTORES (1<<2) /* no restore if was idle during rs0 */ | 1771 | #define REDRESTORES (1<<2) /* no restore if was idle during rs0 */ |
1771 | #define VIDCTL 0x111c0 | 1772 | #define VIDCTL 0x111c0 |
1772 | #define VIDSTS 0x111c8 | 1773 | #define VIDSTS 0x111c8 |
1773 | #define VIDSTART 0x111cc /* 8 bits */ | 1774 | #define VIDSTART 0x111cc /* 8 bits */ |
1774 | #define MEMSTAT_ILK 0x111f8 | 1775 | #define MEMSTAT_ILK 0x111f8 |
1775 | #define MEMSTAT_VID_MASK 0x7f00 | 1776 | #define MEMSTAT_VID_MASK 0x7f00 |
1776 | #define MEMSTAT_VID_SHIFT 8 | 1777 | #define MEMSTAT_VID_SHIFT 8 |
1777 | #define MEMSTAT_PSTATE_MASK 0x00f8 | 1778 | #define MEMSTAT_PSTATE_MASK 0x00f8 |
1778 | #define MEMSTAT_PSTATE_SHIFT 3 | 1779 | #define MEMSTAT_PSTATE_SHIFT 3 |
1779 | #define MEMSTAT_MON_ACTV (1<<2) | 1780 | #define MEMSTAT_MON_ACTV (1<<2) |
1780 | #define MEMSTAT_SRC_CTL_MASK 0x0003 | 1781 | #define MEMSTAT_SRC_CTL_MASK 0x0003 |
1781 | #define MEMSTAT_SRC_CTL_CORE 0 | 1782 | #define MEMSTAT_SRC_CTL_CORE 0 |
1782 | #define MEMSTAT_SRC_CTL_TRB 1 | 1783 | #define MEMSTAT_SRC_CTL_TRB 1 |
1783 | #define MEMSTAT_SRC_CTL_THM 2 | 1784 | #define MEMSTAT_SRC_CTL_THM 2 |
1784 | #define MEMSTAT_SRC_CTL_STDBY 3 | 1785 | #define MEMSTAT_SRC_CTL_STDBY 3 |
1785 | #define RCPREVBSYTUPAVG 0x113b8 | 1786 | #define RCPREVBSYTUPAVG 0x113b8 |
1786 | #define RCPREVBSYTDNAVG 0x113bc | 1787 | #define RCPREVBSYTDNAVG 0x113bc |
1787 | #define PMMISC 0x11214 | 1788 | #define PMMISC 0x11214 |
1788 | #define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */ | 1789 | #define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */ |
1789 | #define SDEW 0x1124c | 1790 | #define SDEW 0x1124c |
1790 | #define CSIEW0 0x11250 | 1791 | #define CSIEW0 0x11250 |
1791 | #define CSIEW1 0x11254 | 1792 | #define CSIEW1 0x11254 |
1792 | #define CSIEW2 0x11258 | 1793 | #define CSIEW2 0x11258 |
1793 | #define PEW 0x1125c | 1794 | #define PEW 0x1125c |
1794 | #define DEW 0x11270 | 1795 | #define DEW 0x11270 |
1795 | #define MCHAFE 0x112c0 | 1796 | #define MCHAFE 0x112c0 |
1796 | #define CSIEC 0x112e0 | 1797 | #define CSIEC 0x112e0 |
1797 | #define DMIEC 0x112e4 | 1798 | #define DMIEC 0x112e4 |
1798 | #define DDREC 0x112e8 | 1799 | #define DDREC 0x112e8 |
1799 | #define PEG0EC 0x112ec | 1800 | #define PEG0EC 0x112ec |
1800 | #define PEG1EC 0x112f0 | 1801 | #define PEG1EC 0x112f0 |
1801 | #define GFXEC 0x112f4 | 1802 | #define GFXEC 0x112f4 |
1802 | #define RPPREVBSYTUPAVG 0x113b8 | 1803 | #define RPPREVBSYTUPAVG 0x113b8 |
1803 | #define RPPREVBSYTDNAVG 0x113bc | 1804 | #define RPPREVBSYTDNAVG 0x113bc |
1804 | #define ECR 0x11600 | 1805 | #define ECR 0x11600 |
1805 | #define ECR_GPFE (1<<31) | 1806 | #define ECR_GPFE (1<<31) |
1806 | #define ECR_IMONE (1<<30) | 1807 | #define ECR_IMONE (1<<30) |
1807 | #define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ | 1808 | #define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ |
1808 | #define OGW0 0x11608 | 1809 | #define OGW0 0x11608 |
1809 | #define OGW1 0x1160c | 1810 | #define OGW1 0x1160c |
1810 | #define EG0 0x11610 | 1811 | #define EG0 0x11610 |
1811 | #define EG1 0x11614 | 1812 | #define EG1 0x11614 |
1812 | #define EG2 0x11618 | 1813 | #define EG2 0x11618 |
1813 | #define EG3 0x1161c | 1814 | #define EG3 0x1161c |
1814 | #define EG4 0x11620 | 1815 | #define EG4 0x11620 |
1815 | #define EG5 0x11624 | 1816 | #define EG5 0x11624 |
1816 | #define EG6 0x11628 | 1817 | #define EG6 0x11628 |
1817 | #define EG7 0x1162c | 1818 | #define EG7 0x1162c |
1818 | #define PXW 0x11664 | 1819 | #define PXW 0x11664 |
1819 | #define PXWL 0x11680 | 1820 | #define PXWL 0x11680 |
1820 | #define LCFUSE02 0x116c0 | 1821 | #define LCFUSE02 0x116c0 |
1821 | #define LCFUSE_HIV_MASK 0x000000ff | 1822 | #define LCFUSE_HIV_MASK 0x000000ff |
1822 | #define CSIPLL0 0x12c10 | 1823 | #define CSIPLL0 0x12c10 |
1823 | #define DDRMPLL1 0X12c20 | 1824 | #define DDRMPLL1 0X12c20 |
1824 | #define PEG_BAND_GAP_DATA 0x14d68 | 1825 | #define PEG_BAND_GAP_DATA 0x14d68 |
1825 | 1826 | ||
1826 | #define GEN6_GT_THREAD_STATUS_REG 0x13805c | 1827 | #define GEN6_GT_THREAD_STATUS_REG 0x13805c |
1827 | #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 | 1828 | #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 |
1828 | #define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16)) | 1829 | #define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16)) |
1829 | 1830 | ||
1830 | #define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948) | 1831 | #define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948) |
1831 | #define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994) | 1832 | #define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994) |
1832 | #define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998) | 1833 | #define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998) |
1833 | 1834 | ||
1834 | /* | 1835 | /* |
1835 | * Logical Context regs | 1836 | * Logical Context regs |
1836 | */ | 1837 | */ |
1837 | #define CCID 0x2180 | 1838 | #define CCID 0x2180 |
1838 | #define CCID_EN (1<<0) | 1839 | #define CCID_EN (1<<0) |
1839 | /* | 1840 | /* |
1840 | * Notes on SNB/IVB/VLV context size: | 1841 | * Notes on SNB/IVB/VLV context size: |
1841 | * - Power context is saved elsewhere (LLC or stolen) | 1842 | * - Power context is saved elsewhere (LLC or stolen) |
1842 | * - Ring/execlist context is saved on SNB, not on IVB | 1843 | * - Ring/execlist context is saved on SNB, not on IVB |
1843 | * - Extended context size already includes render context size | 1844 | * - Extended context size already includes render context size |
1844 | * - We always need to follow the extended context size. | 1845 | * - We always need to follow the extended context size. |
1845 | * SNB BSpec has comments indicating that we should use the | 1846 | * SNB BSpec has comments indicating that we should use the |
1846 | * render context size instead if execlists are disabled, but | 1847 | * render context size instead if execlists are disabled, but |
1847 | * based on empirical testing that's just nonsense. | 1848 | * based on empirical testing that's just nonsense. |
1848 | * - Pipelined/VF state is saved on SNB/IVB respectively | 1849 | * - Pipelined/VF state is saved on SNB/IVB respectively |
1849 | * - GT1 size just indicates how much of render context | 1850 | * - GT1 size just indicates how much of render context |
1850 | * doesn't need saving on GT1 | 1851 | * doesn't need saving on GT1 |
1851 | */ | 1852 | */ |
1852 | #define CXT_SIZE 0x21a0 | 1853 | #define CXT_SIZE 0x21a0 |
1853 | #define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f) | 1854 | #define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f) |
1854 | #define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f) | 1855 | #define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f) |
1855 | #define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f) | 1856 | #define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f) |
1856 | #define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f) | 1857 | #define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f) |
1857 | #define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f) | 1858 | #define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f) |
1858 | #define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \ | 1859 | #define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \ |
1859 | GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ | 1860 | GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ |
1860 | GEN6_CXT_PIPELINE_SIZE(cxt_reg)) | 1861 | GEN6_CXT_PIPELINE_SIZE(cxt_reg)) |
1861 | #define GEN7_CXT_SIZE 0x21a8 | 1862 | #define GEN7_CXT_SIZE 0x21a8 |
1862 | #define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f) | 1863 | #define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f) |
1863 | #define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7) | 1864 | #define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7) |
1864 | #define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f) | 1865 | #define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f) |
1865 | #define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f) | 1866 | #define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f) |
1866 | #define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7) | 1867 | #define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7) |
1867 | #define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f) | 1868 | #define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f) |
1868 | #define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ | 1869 | #define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ |
1869 | GEN7_CXT_VFSTATE_SIZE(ctx_reg)) | 1870 | GEN7_CXT_VFSTATE_SIZE(ctx_reg)) |
1870 | /* Haswell does have the CXT_SIZE register however it does not appear to be | 1871 | /* Haswell does have the CXT_SIZE register however it does not appear to be |
1871 | * valid. Now, docs explain in dwords what is in the context object. The full | 1872 | * valid. Now, docs explain in dwords what is in the context object. The full |
1872 | * size is 70720 bytes, however, the power context and execlist context will | 1873 | * size is 70720 bytes, however, the power context and execlist context will |
1873 | * never be saved (power context is stored elsewhere, and execlists don't work | 1874 | * never be saved (power context is stored elsewhere, and execlists don't work |
1874 | * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages. | 1875 | * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages. |
1875 | */ | 1876 | */ |
1876 | #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) | 1877 | #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) |
1877 | /* Same as Haswell, but 72064 bytes now. */ | 1878 | /* Same as Haswell, but 72064 bytes now. */ |
1878 | #define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE) | 1879 | #define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE) |
1879 | 1880 | ||
1880 | 1881 | ||
1881 | #define VLV_CLK_CTL2 0x101104 | 1882 | #define VLV_CLK_CTL2 0x101104 |
1882 | #define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 | 1883 | #define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 |
1883 | 1884 | ||
1884 | /* | 1885 | /* |
1885 | * Overlay regs | 1886 | * Overlay regs |
1886 | */ | 1887 | */ |
1887 | 1888 | ||
1888 | #define OVADD 0x30000 | 1889 | #define OVADD 0x30000 |
1889 | #define DOVSTA 0x30008 | 1890 | #define DOVSTA 0x30008 |
1890 | #define OC_BUF (0x3<<20) | 1891 | #define OC_BUF (0x3<<20) |
1891 | #define OGAMC5 0x30010 | 1892 | #define OGAMC5 0x30010 |
1892 | #define OGAMC4 0x30014 | 1893 | #define OGAMC4 0x30014 |
1893 | #define OGAMC3 0x30018 | 1894 | #define OGAMC3 0x30018 |
1894 | #define OGAMC2 0x3001c | 1895 | #define OGAMC2 0x3001c |
1895 | #define OGAMC1 0x30020 | 1896 | #define OGAMC1 0x30020 |
1896 | #define OGAMC0 0x30024 | 1897 | #define OGAMC0 0x30024 |
1897 | 1898 | ||
1898 | /* | 1899 | /* |
1899 | * Display engine regs | 1900 | * Display engine regs |
1900 | */ | 1901 | */ |
1901 | 1902 | ||
1902 | /* Pipe A CRC regs */ | 1903 | /* Pipe A CRC regs */ |
1903 | #define _PIPE_CRC_CTL_A 0x60050 | 1904 | #define _PIPE_CRC_CTL_A 0x60050 |
1904 | #define PIPE_CRC_ENABLE (1 << 31) | 1905 | #define PIPE_CRC_ENABLE (1 << 31) |
1905 | /* ivb+ source selection */ | 1906 | /* ivb+ source selection */ |
1906 | #define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) | 1907 | #define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) |
1907 | #define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29) | 1908 | #define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29) |
1908 | #define PIPE_CRC_SOURCE_PF_IVB (2 << 29) | 1909 | #define PIPE_CRC_SOURCE_PF_IVB (2 << 29) |
1909 | /* ilk+ source selection */ | 1910 | /* ilk+ source selection */ |
1910 | #define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28) | 1911 | #define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28) |
1911 | #define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28) | 1912 | #define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28) |
1912 | #define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28) | 1913 | #define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28) |
1913 | /* embedded DP port on the north display block, reserved on ivb */ | 1914 | /* embedded DP port on the north display block, reserved on ivb */ |
1914 | #define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28) | 1915 | #define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28) |
1915 | #define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */ | 1916 | #define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */ |
1916 | /* vlv source selection */ | 1917 | /* vlv source selection */ |
1917 | #define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27) | 1918 | #define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27) |
1918 | #define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27) | 1919 | #define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27) |
1919 | #define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27) | 1920 | #define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27) |
1920 | /* with DP port the pipe source is invalid */ | 1921 | /* with DP port the pipe source is invalid */ |
1921 | #define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27) | 1922 | #define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27) |
1922 | #define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27) | 1923 | #define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27) |
1923 | #define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27) | 1924 | #define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27) |
1924 | /* gen3+ source selection */ | 1925 | /* gen3+ source selection */ |
1925 | #define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28) | 1926 | #define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28) |
1926 | #define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28) | 1927 | #define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28) |
1927 | #define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28) | 1928 | #define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28) |
1928 | /* with DP/TV port the pipe source is invalid */ | 1929 | /* with DP/TV port the pipe source is invalid */ |
1929 | #define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28) | 1930 | #define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28) |
1930 | #define PIPE_CRC_SOURCE_TV_PRE (4 << 28) | 1931 | #define PIPE_CRC_SOURCE_TV_PRE (4 << 28) |
1931 | #define PIPE_CRC_SOURCE_TV_POST (5 << 28) | 1932 | #define PIPE_CRC_SOURCE_TV_POST (5 << 28) |
1932 | #define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28) | 1933 | #define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28) |
1933 | #define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28) | 1934 | #define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28) |
1934 | /* gen2 doesn't have source selection bits */ | 1935 | /* gen2 doesn't have source selection bits */ |
1935 | #define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30) | 1936 | #define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30) |
1936 | 1937 | ||
1937 | #define _PIPE_CRC_RES_1_A_IVB 0x60064 | 1938 | #define _PIPE_CRC_RES_1_A_IVB 0x60064 |
1938 | #define _PIPE_CRC_RES_2_A_IVB 0x60068 | 1939 | #define _PIPE_CRC_RES_2_A_IVB 0x60068 |
1939 | #define _PIPE_CRC_RES_3_A_IVB 0x6006c | 1940 | #define _PIPE_CRC_RES_3_A_IVB 0x6006c |
1940 | #define _PIPE_CRC_RES_4_A_IVB 0x60070 | 1941 | #define _PIPE_CRC_RES_4_A_IVB 0x60070 |
1941 | #define _PIPE_CRC_RES_5_A_IVB 0x60074 | 1942 | #define _PIPE_CRC_RES_5_A_IVB 0x60074 |
1942 | 1943 | ||
1943 | #define _PIPE_CRC_RES_RED_A 0x60060 | 1944 | #define _PIPE_CRC_RES_RED_A 0x60060 |
1944 | #define _PIPE_CRC_RES_GREEN_A 0x60064 | 1945 | #define _PIPE_CRC_RES_GREEN_A 0x60064 |
1945 | #define _PIPE_CRC_RES_BLUE_A 0x60068 | 1946 | #define _PIPE_CRC_RES_BLUE_A 0x60068 |
1946 | #define _PIPE_CRC_RES_RES1_A_I915 0x6006c | 1947 | #define _PIPE_CRC_RES_RES1_A_I915 0x6006c |
1947 | #define _PIPE_CRC_RES_RES2_A_G4X 0x60080 | 1948 | #define _PIPE_CRC_RES_RES2_A_G4X 0x60080 |
1948 | 1949 | ||
1949 | /* Pipe B CRC regs */ | 1950 | /* Pipe B CRC regs */ |
1950 | #define _PIPE_CRC_RES_1_B_IVB 0x61064 | 1951 | #define _PIPE_CRC_RES_1_B_IVB 0x61064 |
1951 | #define _PIPE_CRC_RES_2_B_IVB 0x61068 | 1952 | #define _PIPE_CRC_RES_2_B_IVB 0x61068 |
1952 | #define _PIPE_CRC_RES_3_B_IVB 0x6106c | 1953 | #define _PIPE_CRC_RES_3_B_IVB 0x6106c |
1953 | #define _PIPE_CRC_RES_4_B_IVB 0x61070 | 1954 | #define _PIPE_CRC_RES_4_B_IVB 0x61070 |
1954 | #define _PIPE_CRC_RES_5_B_IVB 0x61074 | 1955 | #define _PIPE_CRC_RES_5_B_IVB 0x61074 |
1955 | 1956 | ||
1956 | #define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A) | 1957 | #define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A) |
1957 | #define PIPE_CRC_RES_1_IVB(pipe) \ | 1958 | #define PIPE_CRC_RES_1_IVB(pipe) \ |
1958 | _TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB) | 1959 | _TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB) |
1959 | #define PIPE_CRC_RES_2_IVB(pipe) \ | 1960 | #define PIPE_CRC_RES_2_IVB(pipe) \ |
1960 | _TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB) | 1961 | _TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB) |
1961 | #define PIPE_CRC_RES_3_IVB(pipe) \ | 1962 | #define PIPE_CRC_RES_3_IVB(pipe) \ |
1962 | _TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB) | 1963 | _TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB) |
1963 | #define PIPE_CRC_RES_4_IVB(pipe) \ | 1964 | #define PIPE_CRC_RES_4_IVB(pipe) \ |
1964 | _TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB) | 1965 | _TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB) |
1965 | #define PIPE_CRC_RES_5_IVB(pipe) \ | 1966 | #define PIPE_CRC_RES_5_IVB(pipe) \ |
1966 | _TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB) | 1967 | _TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB) |
1967 | 1968 | ||
1968 | #define PIPE_CRC_RES_RED(pipe) \ | 1969 | #define PIPE_CRC_RES_RED(pipe) \ |
1969 | _TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A) | 1970 | _TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A) |
1970 | #define PIPE_CRC_RES_GREEN(pipe) \ | 1971 | #define PIPE_CRC_RES_GREEN(pipe) \ |
1971 | _TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A) | 1972 | _TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A) |
1972 | #define PIPE_CRC_RES_BLUE(pipe) \ | 1973 | #define PIPE_CRC_RES_BLUE(pipe) \ |
1973 | _TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A) | 1974 | _TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A) |
1974 | #define PIPE_CRC_RES_RES1_I915(pipe) \ | 1975 | #define PIPE_CRC_RES_RES1_I915(pipe) \ |
1975 | _TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915) | 1976 | _TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915) |
1976 | #define PIPE_CRC_RES_RES2_G4X(pipe) \ | 1977 | #define PIPE_CRC_RES_RES2_G4X(pipe) \ |
1977 | _TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X) | 1978 | _TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X) |
1978 | 1979 | ||
1979 | /* Pipe A timing regs */ | 1980 | /* Pipe A timing regs */ |
1980 | #define _HTOTAL_A 0x60000 | 1981 | #define _HTOTAL_A 0x60000 |
1981 | #define _HBLANK_A 0x60004 | 1982 | #define _HBLANK_A 0x60004 |
1982 | #define _HSYNC_A 0x60008 | 1983 | #define _HSYNC_A 0x60008 |
1983 | #define _VTOTAL_A 0x6000c | 1984 | #define _VTOTAL_A 0x6000c |
1984 | #define _VBLANK_A 0x60010 | 1985 | #define _VBLANK_A 0x60010 |
1985 | #define _VSYNC_A 0x60014 | 1986 | #define _VSYNC_A 0x60014 |
1986 | #define _PIPEASRC 0x6001c | 1987 | #define _PIPEASRC 0x6001c |
1987 | #define _BCLRPAT_A 0x60020 | 1988 | #define _BCLRPAT_A 0x60020 |
1988 | #define _VSYNCSHIFT_A 0x60028 | 1989 | #define _VSYNCSHIFT_A 0x60028 |
1989 | 1990 | ||
1990 | /* Pipe B timing regs */ | 1991 | /* Pipe B timing regs */ |
1991 | #define _HTOTAL_B 0x61000 | 1992 | #define _HTOTAL_B 0x61000 |
1992 | #define _HBLANK_B 0x61004 | 1993 | #define _HBLANK_B 0x61004 |
1993 | #define _HSYNC_B 0x61008 | 1994 | #define _HSYNC_B 0x61008 |
1994 | #define _VTOTAL_B 0x6100c | 1995 | #define _VTOTAL_B 0x6100c |
1995 | #define _VBLANK_B 0x61010 | 1996 | #define _VBLANK_B 0x61010 |
1996 | #define _VSYNC_B 0x61014 | 1997 | #define _VSYNC_B 0x61014 |
1997 | #define _PIPEBSRC 0x6101c | 1998 | #define _PIPEBSRC 0x6101c |
1998 | #define _BCLRPAT_B 0x61020 | 1999 | #define _BCLRPAT_B 0x61020 |
1999 | #define _VSYNCSHIFT_B 0x61028 | 2000 | #define _VSYNCSHIFT_B 0x61028 |
2000 | 2001 | ||
2001 | #define TRANSCODER_A_OFFSET 0x60000 | 2002 | #define TRANSCODER_A_OFFSET 0x60000 |
2002 | #define TRANSCODER_B_OFFSET 0x61000 | 2003 | #define TRANSCODER_B_OFFSET 0x61000 |
2003 | #define TRANSCODER_C_OFFSET 0x62000 | 2004 | #define TRANSCODER_C_OFFSET 0x62000 |
2004 | #define TRANSCODER_EDP_OFFSET 0x6f000 | 2005 | #define TRANSCODER_EDP_OFFSET 0x6f000 |
2005 | 2006 | ||
2006 | #define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \ | 2007 | #define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \ |
2007 | dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \ | 2008 | dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \ |
2008 | dev_priv->info.display_mmio_offset) | 2009 | dev_priv->info.display_mmio_offset) |
2009 | 2010 | ||
2010 | #define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A) | 2011 | #define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A) |
2011 | #define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A) | 2012 | #define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A) |
2012 | #define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A) | 2013 | #define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A) |
2013 | #define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A) | 2014 | #define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A) |
2014 | #define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A) | 2015 | #define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A) |
2015 | #define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A) | 2016 | #define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A) |
2016 | #define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A) | 2017 | #define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A) |
2017 | #define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A) | 2018 | #define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A) |
2018 | #define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC) | 2019 | #define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC) |
2019 | 2020 | ||
2020 | /* HSW+ eDP PSR registers */ | 2021 | /* HSW+ eDP PSR registers */ |
2021 | #define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) | 2022 | #define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) |
2022 | #define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0) | 2023 | #define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0) |
2023 | #define EDP_PSR_ENABLE (1<<31) | 2024 | #define EDP_PSR_ENABLE (1<<31) |
2024 | #define EDP_PSR_LINK_DISABLE (0<<27) | 2025 | #define EDP_PSR_LINK_DISABLE (0<<27) |
2025 | #define EDP_PSR_LINK_STANDBY (1<<27) | 2026 | #define EDP_PSR_LINK_STANDBY (1<<27) |
2026 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25) | 2027 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25) |
2027 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25) | 2028 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25) |
2028 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25) | 2029 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25) |
2029 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25) | 2030 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25) |
2030 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25) | 2031 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25) |
2031 | #define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20 | 2032 | #define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20 |
2032 | #define EDP_PSR_SKIP_AUX_EXIT (1<<12) | 2033 | #define EDP_PSR_SKIP_AUX_EXIT (1<<12) |
2033 | #define EDP_PSR_TP1_TP2_SEL (0<<11) | 2034 | #define EDP_PSR_TP1_TP2_SEL (0<<11) |
2034 | #define EDP_PSR_TP1_TP3_SEL (1<<11) | 2035 | #define EDP_PSR_TP1_TP3_SEL (1<<11) |
2035 | #define EDP_PSR_TP2_TP3_TIME_500us (0<<8) | 2036 | #define EDP_PSR_TP2_TP3_TIME_500us (0<<8) |
2036 | #define EDP_PSR_TP2_TP3_TIME_100us (1<<8) | 2037 | #define EDP_PSR_TP2_TP3_TIME_100us (1<<8) |
2037 | #define EDP_PSR_TP2_TP3_TIME_2500us (2<<8) | 2038 | #define EDP_PSR_TP2_TP3_TIME_2500us (2<<8) |
2038 | #define EDP_PSR_TP2_TP3_TIME_0us (3<<8) | 2039 | #define EDP_PSR_TP2_TP3_TIME_0us (3<<8) |
2039 | #define EDP_PSR_TP1_TIME_500us (0<<4) | 2040 | #define EDP_PSR_TP1_TIME_500us (0<<4) |
2040 | #define EDP_PSR_TP1_TIME_100us (1<<4) | 2041 | #define EDP_PSR_TP1_TIME_100us (1<<4) |
2041 | #define EDP_PSR_TP1_TIME_2500us (2<<4) | 2042 | #define EDP_PSR_TP1_TIME_2500us (2<<4) |
2042 | #define EDP_PSR_TP1_TIME_0us (3<<4) | 2043 | #define EDP_PSR_TP1_TIME_0us (3<<4) |
2043 | #define EDP_PSR_IDLE_FRAME_SHIFT 0 | 2044 | #define EDP_PSR_IDLE_FRAME_SHIFT 0 |
2044 | 2045 | ||
2045 | #define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10) | 2046 | #define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10) |
2046 | #define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14) | 2047 | #define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14) |
2047 | #define EDP_PSR_DPCD_COMMAND 0x80060000 | 2048 | #define EDP_PSR_DPCD_COMMAND 0x80060000 |
2048 | #define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18) | 2049 | #define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18) |
2049 | #define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24) | 2050 | #define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24) |
2050 | #define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c) | 2051 | #define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c) |
2051 | #define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20) | 2052 | #define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20) |
2052 | #define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24) | 2053 | #define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24) |
2053 | 2054 | ||
2054 | #define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40) | 2055 | #define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40) |
2055 | #define EDP_PSR_STATUS_STATE_MASK (7<<29) | 2056 | #define EDP_PSR_STATUS_STATE_MASK (7<<29) |
2056 | #define EDP_PSR_STATUS_STATE_IDLE (0<<29) | 2057 | #define EDP_PSR_STATUS_STATE_IDLE (0<<29) |
2057 | #define EDP_PSR_STATUS_STATE_SRDONACK (1<<29) | 2058 | #define EDP_PSR_STATUS_STATE_SRDONACK (1<<29) |
2058 | #define EDP_PSR_STATUS_STATE_SRDENT (2<<29) | 2059 | #define EDP_PSR_STATUS_STATE_SRDENT (2<<29) |
2059 | #define EDP_PSR_STATUS_STATE_BUFOFF (3<<29) | 2060 | #define EDP_PSR_STATUS_STATE_BUFOFF (3<<29) |
2060 | #define EDP_PSR_STATUS_STATE_BUFON (4<<29) | 2061 | #define EDP_PSR_STATUS_STATE_BUFON (4<<29) |
2061 | #define EDP_PSR_STATUS_STATE_AUXACK (5<<29) | 2062 | #define EDP_PSR_STATUS_STATE_AUXACK (5<<29) |
2062 | #define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29) | 2063 | #define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29) |
2063 | #define EDP_PSR_STATUS_LINK_MASK (3<<26) | 2064 | #define EDP_PSR_STATUS_LINK_MASK (3<<26) |
2064 | #define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26) | 2065 | #define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26) |
2065 | #define EDP_PSR_STATUS_LINK_FULL_ON (1<<26) | 2066 | #define EDP_PSR_STATUS_LINK_FULL_ON (1<<26) |
2066 | #define EDP_PSR_STATUS_LINK_STANDBY (2<<26) | 2067 | #define EDP_PSR_STATUS_LINK_STANDBY (2<<26) |
2067 | #define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20 | 2068 | #define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20 |
2068 | #define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f | 2069 | #define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f |
2069 | #define EDP_PSR_STATUS_COUNT_SHIFT 16 | 2070 | #define EDP_PSR_STATUS_COUNT_SHIFT 16 |
2070 | #define EDP_PSR_STATUS_COUNT_MASK 0xf | 2071 | #define EDP_PSR_STATUS_COUNT_MASK 0xf |
2071 | #define EDP_PSR_STATUS_AUX_ERROR (1<<15) | 2072 | #define EDP_PSR_STATUS_AUX_ERROR (1<<15) |
2072 | #define EDP_PSR_STATUS_AUX_SENDING (1<<12) | 2073 | #define EDP_PSR_STATUS_AUX_SENDING (1<<12) |
2073 | #define EDP_PSR_STATUS_SENDING_IDLE (1<<9) | 2074 | #define EDP_PSR_STATUS_SENDING_IDLE (1<<9) |
2074 | #define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8) | 2075 | #define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8) |
2075 | #define EDP_PSR_STATUS_SENDING_TP1 (1<<4) | 2076 | #define EDP_PSR_STATUS_SENDING_TP1 (1<<4) |
2076 | #define EDP_PSR_STATUS_IDLE_MASK 0xf | 2077 | #define EDP_PSR_STATUS_IDLE_MASK 0xf |
2077 | 2078 | ||
2078 | #define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44) | 2079 | #define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44) |
2079 | #define EDP_PSR_PERF_CNT_MASK 0xffffff | 2080 | #define EDP_PSR_PERF_CNT_MASK 0xffffff |
2080 | 2081 | ||
2081 | #define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60) | 2082 | #define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60) |
2082 | #define EDP_PSR_DEBUG_MASK_LPSP (1<<27) | 2083 | #define EDP_PSR_DEBUG_MASK_LPSP (1<<27) |
2083 | #define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) | 2084 | #define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) |
2084 | #define EDP_PSR_DEBUG_MASK_HPD (1<<25) | 2085 | #define EDP_PSR_DEBUG_MASK_HPD (1<<25) |
2085 | 2086 | ||
2086 | /* VGA port control */ | 2087 | /* VGA port control */ |
2087 | #define ADPA 0x61100 | 2088 | #define ADPA 0x61100 |
2088 | #define PCH_ADPA 0xe1100 | 2089 | #define PCH_ADPA 0xe1100 |
2089 | #define VLV_ADPA (VLV_DISPLAY_BASE + ADPA) | 2090 | #define VLV_ADPA (VLV_DISPLAY_BASE + ADPA) |
2090 | 2091 | ||
2091 | #define ADPA_DAC_ENABLE (1<<31) | 2092 | #define ADPA_DAC_ENABLE (1<<31) |
2092 | #define ADPA_DAC_DISABLE 0 | 2093 | #define ADPA_DAC_DISABLE 0 |
2093 | #define ADPA_PIPE_SELECT_MASK (1<<30) | 2094 | #define ADPA_PIPE_SELECT_MASK (1<<30) |
2094 | #define ADPA_PIPE_A_SELECT 0 | 2095 | #define ADPA_PIPE_A_SELECT 0 |
2095 | #define ADPA_PIPE_B_SELECT (1<<30) | 2096 | #define ADPA_PIPE_B_SELECT (1<<30) |
2096 | #define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) | 2097 | #define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) |
2097 | /* CPT uses bits 29:30 for pch transcoder select */ | 2098 | /* CPT uses bits 29:30 for pch transcoder select */ |
2098 | #define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */ | 2099 | #define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */ |
2099 | #define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24) | 2100 | #define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24) |
2100 | #define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24) | 2101 | #define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24) |
2101 | #define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24) | 2102 | #define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24) |
2102 | #define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24) | 2103 | #define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24) |
2103 | #define ADPA_CRT_HOTPLUG_ENABLE (1<<23) | 2104 | #define ADPA_CRT_HOTPLUG_ENABLE (1<<23) |
2104 | #define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22) | 2105 | #define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22) |
2105 | #define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22) | 2106 | #define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22) |
2106 | #define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21) | 2107 | #define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21) |
2107 | #define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21) | 2108 | #define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21) |
2108 | #define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20) | 2109 | #define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20) |
2109 | #define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20) | 2110 | #define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20) |
2110 | #define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18) | 2111 | #define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18) |
2111 | #define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18) | 2112 | #define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18) |
2112 | #define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18) | 2113 | #define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18) |
2113 | #define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18) | 2114 | #define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18) |
2114 | #define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17) | 2115 | #define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17) |
2115 | #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) | 2116 | #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) |
2116 | #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) | 2117 | #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) |
2117 | #define ADPA_USE_VGA_HVPOLARITY (1<<15) | 2118 | #define ADPA_USE_VGA_HVPOLARITY (1<<15) |
2118 | #define ADPA_SETS_HVPOLARITY 0 | 2119 | #define ADPA_SETS_HVPOLARITY 0 |
2119 | #define ADPA_VSYNC_CNTL_DISABLE (1<<10) | 2120 | #define ADPA_VSYNC_CNTL_DISABLE (1<<10) |
2120 | #define ADPA_VSYNC_CNTL_ENABLE 0 | 2121 | #define ADPA_VSYNC_CNTL_ENABLE 0 |
2121 | #define ADPA_HSYNC_CNTL_DISABLE (1<<11) | 2122 | #define ADPA_HSYNC_CNTL_DISABLE (1<<11) |
2122 | #define ADPA_HSYNC_CNTL_ENABLE 0 | 2123 | #define ADPA_HSYNC_CNTL_ENABLE 0 |
2123 | #define ADPA_VSYNC_ACTIVE_HIGH (1<<4) | 2124 | #define ADPA_VSYNC_ACTIVE_HIGH (1<<4) |
2124 | #define ADPA_VSYNC_ACTIVE_LOW 0 | 2125 | #define ADPA_VSYNC_ACTIVE_LOW 0 |
2125 | #define ADPA_HSYNC_ACTIVE_HIGH (1<<3) | 2126 | #define ADPA_HSYNC_ACTIVE_HIGH (1<<3) |
2126 | #define ADPA_HSYNC_ACTIVE_LOW 0 | 2127 | #define ADPA_HSYNC_ACTIVE_LOW 0 |
2127 | #define ADPA_DPMS_MASK (~(3<<10)) | 2128 | #define ADPA_DPMS_MASK (~(3<<10)) |
2128 | #define ADPA_DPMS_ON (0<<10) | 2129 | #define ADPA_DPMS_ON (0<<10) |
2129 | #define ADPA_DPMS_SUSPEND (1<<10) | 2130 | #define ADPA_DPMS_SUSPEND (1<<10) |
2130 | #define ADPA_DPMS_STANDBY (2<<10) | 2131 | #define ADPA_DPMS_STANDBY (2<<10) |
2131 | #define ADPA_DPMS_OFF (3<<10) | 2132 | #define ADPA_DPMS_OFF (3<<10) |
2132 | 2133 | ||
2133 | 2134 | ||
2134 | /* Hotplug control (945+ only) */ | 2135 | /* Hotplug control (945+ only) */ |
2135 | #define PORT_HOTPLUG_EN (dev_priv->info.display_mmio_offset + 0x61110) | 2136 | #define PORT_HOTPLUG_EN (dev_priv->info.display_mmio_offset + 0x61110) |
2136 | #define PORTB_HOTPLUG_INT_EN (1 << 29) | 2137 | #define PORTB_HOTPLUG_INT_EN (1 << 29) |
2137 | #define PORTC_HOTPLUG_INT_EN (1 << 28) | 2138 | #define PORTC_HOTPLUG_INT_EN (1 << 28) |
2138 | #define PORTD_HOTPLUG_INT_EN (1 << 27) | 2139 | #define PORTD_HOTPLUG_INT_EN (1 << 27) |
2139 | #define SDVOB_HOTPLUG_INT_EN (1 << 26) | 2140 | #define SDVOB_HOTPLUG_INT_EN (1 << 26) |
2140 | #define SDVOC_HOTPLUG_INT_EN (1 << 25) | 2141 | #define SDVOC_HOTPLUG_INT_EN (1 << 25) |
2141 | #define TV_HOTPLUG_INT_EN (1 << 18) | 2142 | #define TV_HOTPLUG_INT_EN (1 << 18) |
2142 | #define CRT_HOTPLUG_INT_EN (1 << 9) | 2143 | #define CRT_HOTPLUG_INT_EN (1 << 9) |
2143 | #define HOTPLUG_INT_EN_MASK (PORTB_HOTPLUG_INT_EN | \ | 2144 | #define HOTPLUG_INT_EN_MASK (PORTB_HOTPLUG_INT_EN | \ |
2144 | PORTC_HOTPLUG_INT_EN | \ | 2145 | PORTC_HOTPLUG_INT_EN | \ |
2145 | PORTD_HOTPLUG_INT_EN | \ | 2146 | PORTD_HOTPLUG_INT_EN | \ |
2146 | SDVOC_HOTPLUG_INT_EN | \ | 2147 | SDVOC_HOTPLUG_INT_EN | \ |
2147 | SDVOB_HOTPLUG_INT_EN | \ | 2148 | SDVOB_HOTPLUG_INT_EN | \ |
2148 | CRT_HOTPLUG_INT_EN) | 2149 | CRT_HOTPLUG_INT_EN) |
2149 | #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) | 2150 | #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) |
2150 | #define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8) | 2151 | #define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8) |
2151 | /* must use period 64 on GM45 according to docs */ | 2152 | /* must use period 64 on GM45 according to docs */ |
2152 | #define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8) | 2153 | #define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8) |
2153 | #define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7) | 2154 | #define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7) |
2154 | #define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7) | 2155 | #define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7) |
2155 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5) | 2156 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5) |
2156 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5) | 2157 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5) |
2157 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5) | 2158 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5) |
2158 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5) | 2159 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5) |
2159 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5) | 2160 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5) |
2160 | #define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4) | 2161 | #define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4) |
2161 | #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) | 2162 | #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) |
2162 | #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) | 2163 | #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) |
2163 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) | 2164 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) |
2164 | 2165 | ||
2165 | #define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114) | 2166 | #define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114) |
2166 | /* | 2167 | /* |
2167 | * HDMI/DP bits are gen4+ | 2168 | * HDMI/DP bits are gen4+ |
2168 | * | 2169 | * |
2169 | * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. | 2170 | * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. |
2170 | * Please check the detailed lore in the commit message for experimental | 2171 | * Please check the detailed lore in the commit message for experimental |
2171 | * evidence. | 2172 | * evidence. |
2172 | */ | 2173 | */ |
2173 | #define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29) | 2174 | #define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29) |
2174 | #define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28) | 2175 | #define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28) |
2175 | #define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27) | 2176 | #define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27) |
2176 | /* VLV DP/HDMI bits again match Bspec */ | 2177 | /* VLV DP/HDMI bits again match Bspec */ |
2177 | #define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27) | 2178 | #define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27) |
2178 | #define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28) | 2179 | #define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28) |
2179 | #define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29) | 2180 | #define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29) |
2180 | #define PORTD_HOTPLUG_INT_STATUS (3 << 21) | 2181 | #define PORTD_HOTPLUG_INT_STATUS (3 << 21) |
2181 | #define PORTC_HOTPLUG_INT_STATUS (3 << 19) | 2182 | #define PORTC_HOTPLUG_INT_STATUS (3 << 19) |
2182 | #define PORTB_HOTPLUG_INT_STATUS (3 << 17) | 2183 | #define PORTB_HOTPLUG_INT_STATUS (3 << 17) |
2183 | /* CRT/TV common between gen3+ */ | 2184 | /* CRT/TV common between gen3+ */ |
2184 | #define CRT_HOTPLUG_INT_STATUS (1 << 11) | 2185 | #define CRT_HOTPLUG_INT_STATUS (1 << 11) |
2185 | #define TV_HOTPLUG_INT_STATUS (1 << 10) | 2186 | #define TV_HOTPLUG_INT_STATUS (1 << 10) |
2186 | #define CRT_HOTPLUG_MONITOR_MASK (3 << 8) | 2187 | #define CRT_HOTPLUG_MONITOR_MASK (3 << 8) |
2187 | #define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) | 2188 | #define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) |
2188 | #define CRT_HOTPLUG_MONITOR_MONO (2 << 8) | 2189 | #define CRT_HOTPLUG_MONITOR_MONO (2 << 8) |
2189 | #define CRT_HOTPLUG_MONITOR_NONE (0 << 8) | 2190 | #define CRT_HOTPLUG_MONITOR_NONE (0 << 8) |
2190 | #define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6) | 2191 | #define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6) |
2191 | #define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5) | 2192 | #define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5) |
2192 | #define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4) | 2193 | #define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4) |
2193 | #define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4) | 2194 | #define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4) |
2194 | 2195 | ||
2195 | /* SDVO is different across gen3/4 */ | 2196 | /* SDVO is different across gen3/4 */ |
2196 | #define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3) | 2197 | #define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3) |
2197 | #define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2) | 2198 | #define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2) |
2198 | /* | 2199 | /* |
2199 | * Bspec seems to be seriously misled about the SDVO hpd bits on i965g/gm, | 2200 | * Bspec seems to be seriously misled about the SDVO hpd bits on i965g/gm, |
2200 | * since reality corroborates that they're the same as on gen3. But keep these | 2201 | * since reality corroborates that they're the same as on gen3. But keep these |
2201 | * bits here (and the comment!) to help any other lost wanderers back onto the | 2202 | * bits here (and the comment!) to help any other lost wanderers back onto the |
2202 | * right tracks. | 2203 | * right tracks. |
2203 | */ | 2204 | */ |
2204 | #define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4) | 2205 | #define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4) |
2205 | #define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2) | 2206 | #define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2) |
2206 | #define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7) | 2207 | #define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7) |
2207 | #define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6) | 2208 | #define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6) |
2208 | #define HOTPLUG_INT_STATUS_G4X (CRT_HOTPLUG_INT_STATUS | \ | 2209 | #define HOTPLUG_INT_STATUS_G4X (CRT_HOTPLUG_INT_STATUS | \ |
2209 | SDVOB_HOTPLUG_INT_STATUS_G4X | \ | 2210 | SDVOB_HOTPLUG_INT_STATUS_G4X | \ |
2210 | SDVOC_HOTPLUG_INT_STATUS_G4X | \ | 2211 | SDVOC_HOTPLUG_INT_STATUS_G4X | \ |
2211 | PORTB_HOTPLUG_INT_STATUS | \ | 2212 | PORTB_HOTPLUG_INT_STATUS | \ |
2212 | PORTC_HOTPLUG_INT_STATUS | \ | 2213 | PORTC_HOTPLUG_INT_STATUS | \ |
2213 | PORTD_HOTPLUG_INT_STATUS) | 2214 | PORTD_HOTPLUG_INT_STATUS) |
2214 | 2215 | ||
2215 | #define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \ | 2216 | #define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \ |
2216 | SDVOB_HOTPLUG_INT_STATUS_I915 | \ | 2217 | SDVOB_HOTPLUG_INT_STATUS_I915 | \ |
2217 | SDVOC_HOTPLUG_INT_STATUS_I915 | \ | 2218 | SDVOC_HOTPLUG_INT_STATUS_I915 | \ |
2218 | PORTB_HOTPLUG_INT_STATUS | \ | 2219 | PORTB_HOTPLUG_INT_STATUS | \ |
2219 | PORTC_HOTPLUG_INT_STATUS | \ | 2220 | PORTC_HOTPLUG_INT_STATUS | \ |
2220 | PORTD_HOTPLUG_INT_STATUS) | 2221 | PORTD_HOTPLUG_INT_STATUS) |
2221 | 2222 | ||
2222 | /* SDVO and HDMI port control. | 2223 | /* SDVO and HDMI port control. |
2223 | * The same register may be used for SDVO or HDMI */ | 2224 | * The same register may be used for SDVO or HDMI */ |
2224 | #define GEN3_SDVOB 0x61140 | 2225 | #define GEN3_SDVOB 0x61140 |
2225 | #define GEN3_SDVOC 0x61160 | 2226 | #define GEN3_SDVOC 0x61160 |
2226 | #define GEN4_HDMIB GEN3_SDVOB | 2227 | #define GEN4_HDMIB GEN3_SDVOB |
2227 | #define GEN4_HDMIC GEN3_SDVOC | 2228 | #define GEN4_HDMIC GEN3_SDVOC |
2228 | #define PCH_SDVOB 0xe1140 | 2229 | #define PCH_SDVOB 0xe1140 |
2229 | #define PCH_HDMIB PCH_SDVOB | 2230 | #define PCH_HDMIB PCH_SDVOB |
2230 | #define PCH_HDMIC 0xe1150 | 2231 | #define PCH_HDMIC 0xe1150 |
2231 | #define PCH_HDMID 0xe1160 | 2232 | #define PCH_HDMID 0xe1160 |
2232 | 2233 | ||
2233 | #define PORT_DFT_I9XX 0x61150 | 2234 | #define PORT_DFT_I9XX 0x61150 |
2234 | #define DC_BALANCE_RESET (1 << 25) | 2235 | #define DC_BALANCE_RESET (1 << 25) |
2235 | #define PORT_DFT2_G4X 0x61154 | 2236 | #define PORT_DFT2_G4X 0x61154 |
2236 | #define DC_BALANCE_RESET_VLV (1 << 31) | 2237 | #define DC_BALANCE_RESET_VLV (1 << 31) |
2237 | #define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0) | 2238 | #define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0) |
2238 | #define PIPE_B_SCRAMBLE_RESET (1 << 1) | 2239 | #define PIPE_B_SCRAMBLE_RESET (1 << 1) |
2239 | #define PIPE_A_SCRAMBLE_RESET (1 << 0) | 2240 | #define PIPE_A_SCRAMBLE_RESET (1 << 0) |
2240 | 2241 | ||
2241 | /* Gen 3 SDVO bits: */ | 2242 | /* Gen 3 SDVO bits: */ |
2242 | #define SDVO_ENABLE (1 << 31) | 2243 | #define SDVO_ENABLE (1 << 31) |
2243 | #define SDVO_PIPE_SEL(pipe) ((pipe) << 30) | 2244 | #define SDVO_PIPE_SEL(pipe) ((pipe) << 30) |
2244 | #define SDVO_PIPE_SEL_MASK (1 << 30) | 2245 | #define SDVO_PIPE_SEL_MASK (1 << 30) |
2245 | #define SDVO_PIPE_B_SELECT (1 << 30) | 2246 | #define SDVO_PIPE_B_SELECT (1 << 30) |
2246 | #define SDVO_STALL_SELECT (1 << 29) | 2247 | #define SDVO_STALL_SELECT (1 << 29) |
2247 | #define SDVO_INTERRUPT_ENABLE (1 << 26) | 2248 | #define SDVO_INTERRUPT_ENABLE (1 << 26) |
2248 | /** | 2249 | /** |
2249 | * 915G/GM SDVO pixel multiplier. | 2250 | * 915G/GM SDVO pixel multiplier. |
2250 | * Programmed value is multiplier - 1, up to 5x. | 2251 | * Programmed value is multiplier - 1, up to 5x. |
2251 | * \sa DPLL_MD_UDI_MULTIPLIER_MASK | 2252 | * \sa DPLL_MD_UDI_MULTIPLIER_MASK |
2252 | */ | 2253 | */ |
2253 | #define SDVO_PORT_MULTIPLY_MASK (7 << 23) | 2254 | #define SDVO_PORT_MULTIPLY_MASK (7 << 23) |
2254 | #define SDVO_PORT_MULTIPLY_SHIFT 23 | 2255 | #define SDVO_PORT_MULTIPLY_SHIFT 23 |
2255 | #define SDVO_PHASE_SELECT_MASK (15 << 19) | 2256 | #define SDVO_PHASE_SELECT_MASK (15 << 19) |
2256 | #define SDVO_PHASE_SELECT_DEFAULT (6 << 19) | 2257 | #define SDVO_PHASE_SELECT_DEFAULT (6 << 19) |
2257 | #define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) | 2258 | #define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) |
2258 | #define SDVOC_GANG_MODE (1 << 16) /* Port C only */ | 2259 | #define SDVOC_GANG_MODE (1 << 16) /* Port C only */ |
2259 | #define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */ | 2260 | #define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */ |
2260 | #define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */ | 2261 | #define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */ |
2261 | #define SDVO_DETECTED (1 << 2) | 2262 | #define SDVO_DETECTED (1 << 2) |
2262 | /* Bits to be preserved when writing */ | 2263 | /* Bits to be preserved when writing */ |
2263 | #define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \ | 2264 | #define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \ |
2264 | SDVO_INTERRUPT_ENABLE) | 2265 | SDVO_INTERRUPT_ENABLE) |
2265 | #define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE) | 2266 | #define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE) |
2266 | 2267 | ||
2267 | /* Gen 4 SDVO/HDMI bits: */ | 2268 | /* Gen 4 SDVO/HDMI bits: */ |
2268 | #define SDVO_COLOR_FORMAT_8bpc (0 << 26) | 2269 | #define SDVO_COLOR_FORMAT_8bpc (0 << 26) |
2269 | #define SDVO_COLOR_FORMAT_MASK (7 << 26) | 2270 | #define SDVO_COLOR_FORMAT_MASK (7 << 26) |
2270 | #define SDVO_ENCODING_SDVO (0 << 10) | 2271 | #define SDVO_ENCODING_SDVO (0 << 10) |
2271 | #define SDVO_ENCODING_HDMI (2 << 10) | 2272 | #define SDVO_ENCODING_HDMI (2 << 10) |
2272 | #define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */ | 2273 | #define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */ |
2273 | #define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */ | 2274 | #define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */ |
2274 | #define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */ | 2275 | #define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */ |
2275 | #define SDVO_AUDIO_ENABLE (1 << 6) | 2276 | #define SDVO_AUDIO_ENABLE (1 << 6) |
2276 | /* VSYNC/HSYNC bits new with 965, default is to be set */ | 2277 | /* VSYNC/HSYNC bits new with 965, default is to be set */ |
2277 | #define SDVO_VSYNC_ACTIVE_HIGH (1 << 4) | 2278 | #define SDVO_VSYNC_ACTIVE_HIGH (1 << 4) |
2278 | #define SDVO_HSYNC_ACTIVE_HIGH (1 << 3) | 2279 | #define SDVO_HSYNC_ACTIVE_HIGH (1 << 3) |
2279 | 2280 | ||
2280 | /* Gen 5 (IBX) SDVO/HDMI bits: */ | 2281 | /* Gen 5 (IBX) SDVO/HDMI bits: */ |
2281 | #define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */ | 2282 | #define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */ |
2282 | #define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */ | 2283 | #define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */ |
2283 | 2284 | ||
2284 | /* Gen 6 (CPT) SDVO/HDMI bits: */ | 2285 | /* Gen 6 (CPT) SDVO/HDMI bits: */ |
2285 | #define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29) | 2286 | #define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29) |
2286 | #define SDVO_PIPE_SEL_MASK_CPT (3 << 29) | 2287 | #define SDVO_PIPE_SEL_MASK_CPT (3 << 29) |
2287 | 2288 | ||
2288 | 2289 | ||
2289 | /* DVO port control */ | 2290 | /* DVO port control */ |
2290 | #define DVOA 0x61120 | 2291 | #define DVOA 0x61120 |
2291 | #define DVOB 0x61140 | 2292 | #define DVOB 0x61140 |
2292 | #define DVOC 0x61160 | 2293 | #define DVOC 0x61160 |
2293 | #define DVO_ENABLE (1 << 31) | 2294 | #define DVO_ENABLE (1 << 31) |
2294 | #define DVO_PIPE_B_SELECT (1 << 30) | 2295 | #define DVO_PIPE_B_SELECT (1 << 30) |
2295 | #define DVO_PIPE_STALL_UNUSED (0 << 28) | 2296 | #define DVO_PIPE_STALL_UNUSED (0 << 28) |
2296 | #define DVO_PIPE_STALL (1 << 28) | 2297 | #define DVO_PIPE_STALL (1 << 28) |
2297 | #define DVO_PIPE_STALL_TV (2 << 28) | 2298 | #define DVO_PIPE_STALL_TV (2 << 28) |
2298 | #define DVO_PIPE_STALL_MASK (3 << 28) | 2299 | #define DVO_PIPE_STALL_MASK (3 << 28) |
2299 | #define DVO_USE_VGA_SYNC (1 << 15) | 2300 | #define DVO_USE_VGA_SYNC (1 << 15) |
2300 | #define DVO_DATA_ORDER_I740 (0 << 14) | 2301 | #define DVO_DATA_ORDER_I740 (0 << 14) |
2301 | #define DVO_DATA_ORDER_FP (1 << 14) | 2302 | #define DVO_DATA_ORDER_FP (1 << 14) |
2302 | #define DVO_VSYNC_DISABLE (1 << 11) | 2303 | #define DVO_VSYNC_DISABLE (1 << 11) |
2303 | #define DVO_HSYNC_DISABLE (1 << 10) | 2304 | #define DVO_HSYNC_DISABLE (1 << 10) |
2304 | #define DVO_VSYNC_TRISTATE (1 << 9) | 2305 | #define DVO_VSYNC_TRISTATE (1 << 9) |
2305 | #define DVO_HSYNC_TRISTATE (1 << 8) | 2306 | #define DVO_HSYNC_TRISTATE (1 << 8) |
2306 | #define DVO_BORDER_ENABLE (1 << 7) | 2307 | #define DVO_BORDER_ENABLE (1 << 7) |
2307 | #define DVO_DATA_ORDER_GBRG (1 << 6) | 2308 | #define DVO_DATA_ORDER_GBRG (1 << 6) |
2308 | #define DVO_DATA_ORDER_RGGB (0 << 6) | 2309 | #define DVO_DATA_ORDER_RGGB (0 << 6) |
2309 | #define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6) | 2310 | #define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6) |
2310 | #define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6) | 2311 | #define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6) |
2311 | #define DVO_VSYNC_ACTIVE_HIGH (1 << 4) | 2312 | #define DVO_VSYNC_ACTIVE_HIGH (1 << 4) |
2312 | #define DVO_HSYNC_ACTIVE_HIGH (1 << 3) | 2313 | #define DVO_HSYNC_ACTIVE_HIGH (1 << 3) |
2313 | #define DVO_BLANK_ACTIVE_HIGH (1 << 2) | 2314 | #define DVO_BLANK_ACTIVE_HIGH (1 << 2) |
2314 | #define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */ | 2315 | #define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */ |
2315 | #define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */ | 2316 | #define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */ |
2316 | #define DVO_PRESERVE_MASK (0x7<<24) | 2317 | #define DVO_PRESERVE_MASK (0x7<<24) |
2317 | #define DVOA_SRCDIM 0x61124 | 2318 | #define DVOA_SRCDIM 0x61124 |
2318 | #define DVOB_SRCDIM 0x61144 | 2319 | #define DVOB_SRCDIM 0x61144 |
2319 | #define DVOC_SRCDIM 0x61164 | 2320 | #define DVOC_SRCDIM 0x61164 |
2320 | #define DVO_SRCDIM_HORIZONTAL_SHIFT 12 | 2321 | #define DVO_SRCDIM_HORIZONTAL_SHIFT 12 |
2321 | #define DVO_SRCDIM_VERTICAL_SHIFT 0 | 2322 | #define DVO_SRCDIM_VERTICAL_SHIFT 0 |
2322 | 2323 | ||
2323 | /* LVDS port control */ | 2324 | /* LVDS port control */ |
2324 | #define LVDS 0x61180 | 2325 | #define LVDS 0x61180 |
2325 | /* | 2326 | /* |
2326 | * Enables the LVDS port. This bit must be set before DPLLs are enabled, as | 2327 | * Enables the LVDS port. This bit must be set before DPLLs are enabled, as |
2327 | * the DPLL semantics change when the LVDS is assigned to that pipe. | 2328 | * the DPLL semantics change when the LVDS is assigned to that pipe. |
2328 | */ | 2329 | */ |
2329 | #define LVDS_PORT_EN (1 << 31) | 2330 | #define LVDS_PORT_EN (1 << 31) |
2330 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ | 2331 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ |
2331 | #define LVDS_PIPEB_SELECT (1 << 30) | 2332 | #define LVDS_PIPEB_SELECT (1 << 30) |
2332 | #define LVDS_PIPE_MASK (1 << 30) | 2333 | #define LVDS_PIPE_MASK (1 << 30) |
2333 | #define LVDS_PIPE(pipe) ((pipe) << 30) | 2334 | #define LVDS_PIPE(pipe) ((pipe) << 30) |
2334 | /* LVDS dithering flag on 965/g4x platform */ | 2335 | /* LVDS dithering flag on 965/g4x platform */ |
2335 | #define LVDS_ENABLE_DITHER (1 << 25) | 2336 | #define LVDS_ENABLE_DITHER (1 << 25) |
2336 | /* LVDS sync polarity flags. Set to invert (i.e. negative) */ | 2337 | /* LVDS sync polarity flags. Set to invert (i.e. negative) */ |
2337 | #define LVDS_VSYNC_POLARITY (1 << 21) | 2338 | #define LVDS_VSYNC_POLARITY (1 << 21) |
2338 | #define LVDS_HSYNC_POLARITY (1 << 20) | 2339 | #define LVDS_HSYNC_POLARITY (1 << 20) |
2339 | 2340 | ||
2340 | /* Enable border for unscaled (or aspect-scaled) display */ | 2341 | /* Enable border for unscaled (or aspect-scaled) display */ |
2341 | #define LVDS_BORDER_ENABLE (1 << 15) | 2342 | #define LVDS_BORDER_ENABLE (1 << 15) |
2342 | /* | 2343 | /* |
2343 | * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per | 2344 | * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per |
2344 | * pixel. | 2345 | * pixel. |
2345 | */ | 2346 | */ |
2346 | #define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) | 2347 | #define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) |
2347 | #define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) | 2348 | #define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) |
2348 | #define LVDS_A0A2_CLKA_POWER_UP (3 << 8) | 2349 | #define LVDS_A0A2_CLKA_POWER_UP (3 << 8) |
2349 | /* | 2350 | /* |
2350 | * Controls the A3 data pair, which contains the additional LSBs for 24 bit | 2351 | * Controls the A3 data pair, which contains the additional LSBs for 24 bit |
2351 | * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be | 2352 | * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be |
2352 | * on. | 2353 | * on. |
2353 | */ | 2354 | */ |
2354 | #define LVDS_A3_POWER_MASK (3 << 6) | 2355 | #define LVDS_A3_POWER_MASK (3 << 6) |
2355 | #define LVDS_A3_POWER_DOWN (0 << 6) | 2356 | #define LVDS_A3_POWER_DOWN (0 << 6) |
2356 | #define LVDS_A3_POWER_UP (3 << 6) | 2357 | #define LVDS_A3_POWER_UP (3 << 6) |
2357 | /* | 2358 | /* |
2358 | * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP | 2359 | * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP |
2359 | * is set. | 2360 | * is set. |
2360 | */ | 2361 | */ |
2361 | #define LVDS_CLKB_POWER_MASK (3 << 4) | 2362 | #define LVDS_CLKB_POWER_MASK (3 << 4) |
2362 | #define LVDS_CLKB_POWER_DOWN (0 << 4) | 2363 | #define LVDS_CLKB_POWER_DOWN (0 << 4) |
2363 | #define LVDS_CLKB_POWER_UP (3 << 4) | 2364 | #define LVDS_CLKB_POWER_UP (3 << 4) |
2364 | /* | 2365 | /* |
2365 | * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 | 2366 | * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 |
2366 | * setting for whether we are in dual-channel mode. The B3 pair will | 2367 | * setting for whether we are in dual-channel mode. The B3 pair will |
2367 | * additionally only be powered up when LVDS_A3_POWER_UP is set. | 2368 | * additionally only be powered up when LVDS_A3_POWER_UP is set. |
2368 | */ | 2369 | */ |
2369 | #define LVDS_B0B3_POWER_MASK (3 << 2) | 2370 | #define LVDS_B0B3_POWER_MASK (3 << 2) |
2370 | #define LVDS_B0B3_POWER_DOWN (0 << 2) | 2371 | #define LVDS_B0B3_POWER_DOWN (0 << 2) |
2371 | #define LVDS_B0B3_POWER_UP (3 << 2) | 2372 | #define LVDS_B0B3_POWER_UP (3 << 2) |
2372 | 2373 | ||
2373 | /* Video Data Island Packet control */ | 2374 | /* Video Data Island Packet control */ |
2374 | #define VIDEO_DIP_DATA 0x61178 | 2375 | #define VIDEO_DIP_DATA 0x61178 |
2375 | /* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC | 2376 | /* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC |
2376 | * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte | 2377 | * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte |
2377 | * of the infoframe structure specified by CEA-861. */ | 2378 | * of the infoframe structure specified by CEA-861. */ |
2378 | #define VIDEO_DIP_DATA_SIZE 32 | 2379 | #define VIDEO_DIP_DATA_SIZE 32 |
2379 | #define VIDEO_DIP_VSC_DATA_SIZE 36 | 2380 | #define VIDEO_DIP_VSC_DATA_SIZE 36 |
2380 | #define VIDEO_DIP_CTL 0x61170 | 2381 | #define VIDEO_DIP_CTL 0x61170 |
2381 | /* Pre HSW: */ | 2382 | /* Pre HSW: */ |
2382 | #define VIDEO_DIP_ENABLE (1 << 31) | 2383 | #define VIDEO_DIP_ENABLE (1 << 31) |
2383 | #define VIDEO_DIP_PORT(port) ((port) << 29) | 2384 | #define VIDEO_DIP_PORT(port) ((port) << 29) |
2384 | #define VIDEO_DIP_PORT_MASK (3 << 29) | 2385 | #define VIDEO_DIP_PORT_MASK (3 << 29) |
2385 | #define VIDEO_DIP_ENABLE_GCP (1 << 25) | 2386 | #define VIDEO_DIP_ENABLE_GCP (1 << 25) |
2386 | #define VIDEO_DIP_ENABLE_AVI (1 << 21) | 2387 | #define VIDEO_DIP_ENABLE_AVI (1 << 21) |
2387 | #define VIDEO_DIP_ENABLE_VENDOR (2 << 21) | 2388 | #define VIDEO_DIP_ENABLE_VENDOR (2 << 21) |
2388 | #define VIDEO_DIP_ENABLE_GAMUT (4 << 21) | 2389 | #define VIDEO_DIP_ENABLE_GAMUT (4 << 21) |
2389 | #define VIDEO_DIP_ENABLE_SPD (8 << 21) | 2390 | #define VIDEO_DIP_ENABLE_SPD (8 << 21) |
2390 | #define VIDEO_DIP_SELECT_AVI (0 << 19) | 2391 | #define VIDEO_DIP_SELECT_AVI (0 << 19) |
2391 | #define VIDEO_DIP_SELECT_VENDOR (1 << 19) | 2392 | #define VIDEO_DIP_SELECT_VENDOR (1 << 19) |
2392 | #define VIDEO_DIP_SELECT_SPD (3 << 19) | 2393 | #define VIDEO_DIP_SELECT_SPD (3 << 19) |
2393 | #define VIDEO_DIP_SELECT_MASK (3 << 19) | 2394 | #define VIDEO_DIP_SELECT_MASK (3 << 19) |
2394 | #define VIDEO_DIP_FREQ_ONCE (0 << 16) | 2395 | #define VIDEO_DIP_FREQ_ONCE (0 << 16) |
2395 | #define VIDEO_DIP_FREQ_VSYNC (1 << 16) | 2396 | #define VIDEO_DIP_FREQ_VSYNC (1 << 16) |
2396 | #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) | 2397 | #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) |
2397 | #define VIDEO_DIP_FREQ_MASK (3 << 16) | 2398 | #define VIDEO_DIP_FREQ_MASK (3 << 16) |
2398 | /* HSW and later: */ | 2399 | /* HSW and later: */ |
2399 | #define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20) | 2400 | #define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20) |
2400 | #define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16) | 2401 | #define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16) |
2401 | #define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12) | 2402 | #define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12) |
2402 | #define VIDEO_DIP_ENABLE_VS_HSW (1 << 8) | 2403 | #define VIDEO_DIP_ENABLE_VS_HSW (1 << 8) |
2403 | #define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4) | 2404 | #define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4) |
2404 | #define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) | 2405 | #define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) |
2405 | 2406 | ||
2406 | /* Panel power sequencing */ | 2407 | /* Panel power sequencing */ |
2407 | #define PP_STATUS 0x61200 | 2408 | #define PP_STATUS 0x61200 |
2408 | #define PP_ON (1 << 31) | 2409 | #define PP_ON (1 << 31) |
2409 | /* | 2410 | /* |
2410 | * Indicates that all dependencies of the panel are on: | 2411 | * Indicates that all dependencies of the panel are on: |
2411 | * | 2412 | * |
2412 | * - PLL enabled | 2413 | * - PLL enabled |
2413 | * - pipe enabled | 2414 | * - pipe enabled |
2414 | * - LVDS/DVOB/DVOC on | 2415 | * - LVDS/DVOB/DVOC on |
2415 | */ | 2416 | */ |
2416 | #define PP_READY (1 << 30) | 2417 | #define PP_READY (1 << 30) |
2417 | #define PP_SEQUENCE_NONE (0 << 28) | 2418 | #define PP_SEQUENCE_NONE (0 << 28) |
2418 | #define PP_SEQUENCE_POWER_UP (1 << 28) | 2419 | #define PP_SEQUENCE_POWER_UP (1 << 28) |
2419 | #define PP_SEQUENCE_POWER_DOWN (2 << 28) | 2420 | #define PP_SEQUENCE_POWER_DOWN (2 << 28) |
2420 | #define PP_SEQUENCE_MASK (3 << 28) | 2421 | #define PP_SEQUENCE_MASK (3 << 28) |
2421 | #define PP_SEQUENCE_SHIFT 28 | 2422 | #define PP_SEQUENCE_SHIFT 28 |
2422 | #define PP_CYCLE_DELAY_ACTIVE (1 << 27) | 2423 | #define PP_CYCLE_DELAY_ACTIVE (1 << 27) |
2423 | #define PP_SEQUENCE_STATE_MASK 0x0000000f | 2424 | #define PP_SEQUENCE_STATE_MASK 0x0000000f |
2424 | #define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0) | 2425 | #define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0) |
2425 | #define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0) | 2426 | #define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0) |
2426 | #define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0) | 2427 | #define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0) |
2427 | #define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0) | 2428 | #define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0) |
2428 | #define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0) | 2429 | #define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0) |
2429 | #define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0) | 2430 | #define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0) |
2430 | #define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) | 2431 | #define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) |
2431 | #define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) | 2432 | #define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) |
2432 | #define PP_SEQUENCE_STATE_RESET (0xf << 0) | 2433 | #define PP_SEQUENCE_STATE_RESET (0xf << 0) |
2433 | #define PP_CONTROL 0x61204 | 2434 | #define PP_CONTROL 0x61204 |
2434 | #define POWER_TARGET_ON (1 << 0) | 2435 | #define POWER_TARGET_ON (1 << 0) |
2435 | #define PP_ON_DELAYS 0x61208 | 2436 | #define PP_ON_DELAYS 0x61208 |
2436 | #define PP_OFF_DELAYS 0x6120c | 2437 | #define PP_OFF_DELAYS 0x6120c |
2437 | #define PP_DIVISOR 0x61210 | 2438 | #define PP_DIVISOR 0x61210 |
2438 | 2439 | ||
2439 | /* Panel fitting */ | 2440 | /* Panel fitting */ |
2440 | #define PFIT_CONTROL (dev_priv->info.display_mmio_offset + 0x61230) | 2441 | #define PFIT_CONTROL (dev_priv->info.display_mmio_offset + 0x61230) |
2441 | #define PFIT_ENABLE (1 << 31) | 2442 | #define PFIT_ENABLE (1 << 31) |
2442 | #define PFIT_PIPE_MASK (3 << 29) | 2443 | #define PFIT_PIPE_MASK (3 << 29) |
2443 | #define PFIT_PIPE_SHIFT 29 | 2444 | #define PFIT_PIPE_SHIFT 29 |
2444 | #define VERT_INTERP_DISABLE (0 << 10) | 2445 | #define VERT_INTERP_DISABLE (0 << 10) |
2445 | #define VERT_INTERP_BILINEAR (1 << 10) | 2446 | #define VERT_INTERP_BILINEAR (1 << 10) |
2446 | #define VERT_INTERP_MASK (3 << 10) | 2447 | #define VERT_INTERP_MASK (3 << 10) |
2447 | #define VERT_AUTO_SCALE (1 << 9) | 2448 | #define VERT_AUTO_SCALE (1 << 9) |
2448 | #define HORIZ_INTERP_DISABLE (0 << 6) | 2449 | #define HORIZ_INTERP_DISABLE (0 << 6) |
2449 | #define HORIZ_INTERP_BILINEAR (1 << 6) | 2450 | #define HORIZ_INTERP_BILINEAR (1 << 6) |
2450 | #define HORIZ_INTERP_MASK (3 << 6) | 2451 | #define HORIZ_INTERP_MASK (3 << 6) |
2451 | #define HORIZ_AUTO_SCALE (1 << 5) | 2452 | #define HORIZ_AUTO_SCALE (1 << 5) |
2452 | #define PANEL_8TO6_DITHER_ENABLE (1 << 3) | 2453 | #define PANEL_8TO6_DITHER_ENABLE (1 << 3) |
2453 | #define PFIT_FILTER_FUZZY (0 << 24) | 2454 | #define PFIT_FILTER_FUZZY (0 << 24) |
2454 | #define PFIT_SCALING_AUTO (0 << 26) | 2455 | #define PFIT_SCALING_AUTO (0 << 26) |
2455 | #define PFIT_SCALING_PROGRAMMED (1 << 26) | 2456 | #define PFIT_SCALING_PROGRAMMED (1 << 26) |
2456 | #define PFIT_SCALING_PILLAR (2 << 26) | 2457 | #define PFIT_SCALING_PILLAR (2 << 26) |
2457 | #define PFIT_SCALING_LETTER (3 << 26) | 2458 | #define PFIT_SCALING_LETTER (3 << 26) |
2458 | #define PFIT_PGM_RATIOS (dev_priv->info.display_mmio_offset + 0x61234) | 2459 | #define PFIT_PGM_RATIOS (dev_priv->info.display_mmio_offset + 0x61234) |
2459 | /* Pre-965 */ | 2460 | /* Pre-965 */ |
2460 | #define PFIT_VERT_SCALE_SHIFT 20 | 2461 | #define PFIT_VERT_SCALE_SHIFT 20 |
2461 | #define PFIT_VERT_SCALE_MASK 0xfff00000 | 2462 | #define PFIT_VERT_SCALE_MASK 0xfff00000 |
2462 | #define PFIT_HORIZ_SCALE_SHIFT 4 | 2463 | #define PFIT_HORIZ_SCALE_SHIFT 4 |
2463 | #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 | 2464 | #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 |
2464 | /* 965+ */ | 2465 | /* 965+ */ |
2465 | #define PFIT_VERT_SCALE_SHIFT_965 16 | 2466 | #define PFIT_VERT_SCALE_SHIFT_965 16 |
2466 | #define PFIT_VERT_SCALE_MASK_965 0x1fff0000 | 2467 | #define PFIT_VERT_SCALE_MASK_965 0x1fff0000 |
2467 | #define PFIT_HORIZ_SCALE_SHIFT_965 0 | 2468 | #define PFIT_HORIZ_SCALE_SHIFT_965 0 |
2468 | #define PFIT_HORIZ_SCALE_MASK_965 0x00001fff | 2469 | #define PFIT_HORIZ_SCALE_MASK_965 0x00001fff |
2469 | 2470 | ||
2470 | #define PFIT_AUTO_RATIOS (dev_priv->info.display_mmio_offset + 0x61238) | 2471 | #define PFIT_AUTO_RATIOS (dev_priv->info.display_mmio_offset + 0x61238) |
2471 | 2472 | ||
2472 | #define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250) | 2473 | #define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250) |
2473 | #define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350) | 2474 | #define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350) |
2474 | #define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ | 2475 | #define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ |
2475 | _VLV_BLC_PWM_CTL2_B) | 2476 | _VLV_BLC_PWM_CTL2_B) |
2476 | 2477 | ||
2477 | #define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254) | 2478 | #define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254) |
2478 | #define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354) | 2479 | #define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354) |
2479 | #define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ | 2480 | #define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ |
2480 | _VLV_BLC_PWM_CTL_B) | 2481 | _VLV_BLC_PWM_CTL_B) |
2481 | 2482 | ||
2482 | #define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260) | 2483 | #define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260) |
2483 | #define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360) | 2484 | #define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360) |
2484 | #define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ | 2485 | #define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ |
2485 | _VLV_BLC_HIST_CTL_B) | 2486 | _VLV_BLC_HIST_CTL_B) |
2486 | 2487 | ||
2487 | /* Backlight control */ | 2488 | /* Backlight control */ |
2488 | #define BLC_PWM_CTL2 (dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */ | 2489 | #define BLC_PWM_CTL2 (dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */ |
2489 | #define BLM_PWM_ENABLE (1 << 31) | 2490 | #define BLM_PWM_ENABLE (1 << 31) |
2490 | #define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ | 2491 | #define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ |
2491 | #define BLM_PIPE_SELECT (1 << 29) | 2492 | #define BLM_PIPE_SELECT (1 << 29) |
2492 | #define BLM_PIPE_SELECT_IVB (3 << 29) | 2493 | #define BLM_PIPE_SELECT_IVB (3 << 29) |
2493 | #define BLM_PIPE_A (0 << 29) | 2494 | #define BLM_PIPE_A (0 << 29) |
2494 | #define BLM_PIPE_B (1 << 29) | 2495 | #define BLM_PIPE_B (1 << 29) |
2495 | #define BLM_PIPE_C (2 << 29) /* ivb + */ | 2496 | #define BLM_PIPE_C (2 << 29) /* ivb + */ |
2496 | #define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */ | 2497 | #define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */ |
2497 | #define BLM_TRANSCODER_B BLM_PIPE_B | 2498 | #define BLM_TRANSCODER_B BLM_PIPE_B |
2498 | #define BLM_TRANSCODER_C BLM_PIPE_C | 2499 | #define BLM_TRANSCODER_C BLM_PIPE_C |
2499 | #define BLM_TRANSCODER_EDP (3 << 29) | 2500 | #define BLM_TRANSCODER_EDP (3 << 29) |
2500 | #define BLM_PIPE(pipe) ((pipe) << 29) | 2501 | #define BLM_PIPE(pipe) ((pipe) << 29) |
2501 | #define BLM_POLARITY_I965 (1 << 28) /* gen4 only */ | 2502 | #define BLM_POLARITY_I965 (1 << 28) /* gen4 only */ |
2502 | #define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26) | 2503 | #define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26) |
2503 | #define BLM_PHASE_IN_ENABLE (1 << 25) | 2504 | #define BLM_PHASE_IN_ENABLE (1 << 25) |
2504 | #define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24) | 2505 | #define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24) |
2505 | #define BLM_PHASE_IN_TIME_BASE_SHIFT (16) | 2506 | #define BLM_PHASE_IN_TIME_BASE_SHIFT (16) |
2506 | #define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16) | 2507 | #define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16) |
2507 | #define BLM_PHASE_IN_COUNT_SHIFT (8) | 2508 | #define BLM_PHASE_IN_COUNT_SHIFT (8) |
2508 | #define BLM_PHASE_IN_COUNT_MASK (0xff << 8) | 2509 | #define BLM_PHASE_IN_COUNT_MASK (0xff << 8) |
2509 | #define BLM_PHASE_IN_INCR_SHIFT (0) | 2510 | #define BLM_PHASE_IN_INCR_SHIFT (0) |
2510 | #define BLM_PHASE_IN_INCR_MASK (0xff << 0) | 2511 | #define BLM_PHASE_IN_INCR_MASK (0xff << 0) |
2511 | #define BLC_PWM_CTL (dev_priv->info.display_mmio_offset + 0x61254) | 2512 | #define BLC_PWM_CTL (dev_priv->info.display_mmio_offset + 0x61254) |
2512 | /* | 2513 | /* |
2513 | * This is the most significant 15 bits of the number of backlight cycles in a | 2514 | * This is the most significant 15 bits of the number of backlight cycles in a |
2514 | * complete cycle of the modulated backlight control. | 2515 | * complete cycle of the modulated backlight control. |
2515 | * | 2516 | * |
2516 | * The actual value is this field multiplied by two. | 2517 | * The actual value is this field multiplied by two. |
2517 | */ | 2518 | */ |
2518 | #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) | 2519 | #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) |
2519 | #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) | 2520 | #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) |
2520 | #define BLM_LEGACY_MODE (1 << 16) /* gen2 only */ | 2521 | #define BLM_LEGACY_MODE (1 << 16) /* gen2 only */ |
2521 | /* | 2522 | /* |
2522 | * This is the number of cycles out of the backlight modulation cycle for which | 2523 | * This is the number of cycles out of the backlight modulation cycle for which |
2523 | * the backlight is on. | 2524 | * the backlight is on. |
2524 | * | 2525 | * |
2525 | * This field must be no greater than the number of cycles in the complete | 2526 | * This field must be no greater than the number of cycles in the complete |
2526 | * backlight modulation cycle. | 2527 | * backlight modulation cycle. |
2527 | */ | 2528 | */ |
2528 | #define BACKLIGHT_DUTY_CYCLE_SHIFT (0) | 2529 | #define BACKLIGHT_DUTY_CYCLE_SHIFT (0) |
2529 | #define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) | 2530 | #define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) |
2530 | #define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) | 2531 | #define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) |
2531 | #define BLM_POLARITY_PNV (1 << 0) /* pnv only */ | 2532 | #define BLM_POLARITY_PNV (1 << 0) /* pnv only */ |
2532 | 2533 | ||
2533 | #define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260) | 2534 | #define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260) |
2534 | 2535 | ||
2535 | /* New registers for PCH-split platforms. Safe where new bits show up, the | 2536 | /* New registers for PCH-split platforms. Safe where new bits show up, the |
2536 | * register layout matches with gen4 BLC_PWM_CTL[12]. */ | 2537 | * register layout matches with gen4 BLC_PWM_CTL[12]. */ |
2537 | #define BLC_PWM_CPU_CTL2 0x48250 | 2538 | #define BLC_PWM_CPU_CTL2 0x48250 |
2538 | #define BLC_PWM_CPU_CTL 0x48254 | 2539 | #define BLC_PWM_CPU_CTL 0x48254 |
2539 | 2540 | ||
2540 | #define HSW_BLC_PWM2_CTL 0x48350 | 2541 | #define HSW_BLC_PWM2_CTL 0x48350 |
2541 | 2542 | ||
2542 | /* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is | 2543 | /* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is |
2543 | * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ | 2544 | * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ |
2544 | #define BLC_PWM_PCH_CTL1 0xc8250 | 2545 | #define BLC_PWM_PCH_CTL1 0xc8250 |
2545 | #define BLM_PCH_PWM_ENABLE (1 << 31) | 2546 | #define BLM_PCH_PWM_ENABLE (1 << 31) |
2546 | #define BLM_PCH_OVERRIDE_ENABLE (1 << 30) | 2547 | #define BLM_PCH_OVERRIDE_ENABLE (1 << 30) |
2547 | #define BLM_PCH_POLARITY (1 << 29) | 2548 | #define BLM_PCH_POLARITY (1 << 29) |
2548 | #define BLC_PWM_PCH_CTL2 0xc8254 | 2549 | #define BLC_PWM_PCH_CTL2 0xc8254 |
2549 | 2550 | ||
2550 | #define UTIL_PIN_CTL 0x48400 | 2551 | #define UTIL_PIN_CTL 0x48400 |
2551 | #define UTIL_PIN_ENABLE (1 << 31) | 2552 | #define UTIL_PIN_ENABLE (1 << 31) |
2552 | 2553 | ||
2553 | #define PCH_GTC_CTL 0xe7000 | 2554 | #define PCH_GTC_CTL 0xe7000 |
2554 | #define PCH_GTC_ENABLE (1 << 31) | 2555 | #define PCH_GTC_ENABLE (1 << 31) |
2555 | 2556 | ||
2556 | /* TV port control */ | 2557 | /* TV port control */ |
2557 | #define TV_CTL 0x68000 | 2558 | #define TV_CTL 0x68000 |
2558 | /** Enables the TV encoder */ | 2559 | /** Enables the TV encoder */ |
2559 | # define TV_ENC_ENABLE (1 << 31) | 2560 | # define TV_ENC_ENABLE (1 << 31) |
2560 | /** Sources the TV encoder input from pipe B instead of A. */ | 2561 | /** Sources the TV encoder input from pipe B instead of A. */ |
2561 | # define TV_ENC_PIPEB_SELECT (1 << 30) | 2562 | # define TV_ENC_PIPEB_SELECT (1 << 30) |
2562 | /** Outputs composite video (DAC A only) */ | 2563 | /** Outputs composite video (DAC A only) */ |
2563 | # define TV_ENC_OUTPUT_COMPOSITE (0 << 28) | 2564 | # define TV_ENC_OUTPUT_COMPOSITE (0 << 28) |
2564 | /** Outputs SVideo video (DAC B/C) */ | 2565 | /** Outputs SVideo video (DAC B/C) */ |
2565 | # define TV_ENC_OUTPUT_SVIDEO (1 << 28) | 2566 | # define TV_ENC_OUTPUT_SVIDEO (1 << 28) |
2566 | /** Outputs Component video (DAC A/B/C) */ | 2567 | /** Outputs Component video (DAC A/B/C) */ |
2567 | # define TV_ENC_OUTPUT_COMPONENT (2 << 28) | 2568 | # define TV_ENC_OUTPUT_COMPONENT (2 << 28) |
2568 | /** Outputs Composite and SVideo (DAC A/B/C) */ | 2569 | /** Outputs Composite and SVideo (DAC A/B/C) */ |
2569 | # define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28) | 2570 | # define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28) |
2570 | # define TV_TRILEVEL_SYNC (1 << 21) | 2571 | # define TV_TRILEVEL_SYNC (1 << 21) |
2571 | /** Enables slow sync generation (945GM only) */ | 2572 | /** Enables slow sync generation (945GM only) */ |
2572 | # define TV_SLOW_SYNC (1 << 20) | 2573 | # define TV_SLOW_SYNC (1 << 20) |
2573 | /** Selects 4x oversampling for 480i and 576p */ | 2574 | /** Selects 4x oversampling for 480i and 576p */ |
2574 | # define TV_OVERSAMPLE_4X (0 << 18) | 2575 | # define TV_OVERSAMPLE_4X (0 << 18) |
2575 | /** Selects 2x oversampling for 720p and 1080i */ | 2576 | /** Selects 2x oversampling for 720p and 1080i */ |
2576 | # define TV_OVERSAMPLE_2X (1 << 18) | 2577 | # define TV_OVERSAMPLE_2X (1 << 18) |
2577 | /** Selects no oversampling for 1080p */ | 2578 | /** Selects no oversampling for 1080p */ |
2578 | # define TV_OVERSAMPLE_NONE (2 << 18) | 2579 | # define TV_OVERSAMPLE_NONE (2 << 18) |
2579 | /** Selects 8x oversampling */ | 2580 | /** Selects 8x oversampling */ |
2580 | # define TV_OVERSAMPLE_8X (3 << 18) | 2581 | # define TV_OVERSAMPLE_8X (3 << 18) |
2581 | /** Selects progressive mode rather than interlaced */ | 2582 | /** Selects progressive mode rather than interlaced */ |
2582 | # define TV_PROGRESSIVE (1 << 17) | 2583 | # define TV_PROGRESSIVE (1 << 17) |
2583 | /** Sets the colorburst to PAL mode. Required for non-M PAL modes. */ | 2584 | /** Sets the colorburst to PAL mode. Required for non-M PAL modes. */ |
2584 | # define TV_PAL_BURST (1 << 16) | 2585 | # define TV_PAL_BURST (1 << 16) |
2585 | /** Field for setting delay of Y compared to C */ | 2586 | /** Field for setting delay of Y compared to C */ |
2586 | # define TV_YC_SKEW_MASK (7 << 12) | 2587 | # define TV_YC_SKEW_MASK (7 << 12) |
2587 | /** Enables a fix for 480p/576p standard definition modes on the 915GM only */ | 2588 | /** Enables a fix for 480p/576p standard definition modes on the 915GM only */ |
2588 | # define TV_ENC_SDP_FIX (1 << 11) | 2589 | # define TV_ENC_SDP_FIX (1 << 11) |
2589 | /** | 2590 | /** |
2590 | * Enables a fix for the 915GM only. | 2591 | * Enables a fix for the 915GM only. |
2591 | * | 2592 | * |
2592 | * Not sure what it does. | 2593 | * Not sure what it does. |
2593 | */ | 2594 | */ |
2594 | # define TV_ENC_C0_FIX (1 << 10) | 2595 | # define TV_ENC_C0_FIX (1 << 10) |
2595 | /** Bits that must be preserved by software */ | 2596 | /** Bits that must be preserved by software */ |
2596 | # define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf) | 2597 | # define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf) |
2597 | # define TV_FUSE_STATE_MASK (3 << 4) | 2598 | # define TV_FUSE_STATE_MASK (3 << 4) |
2598 | /** Read-only state that reports all features enabled */ | 2599 | /** Read-only state that reports all features enabled */ |
2599 | # define TV_FUSE_STATE_ENABLED (0 << 4) | 2600 | # define TV_FUSE_STATE_ENABLED (0 << 4) |
2600 | /** Read-only state that reports that Macrovision is disabled in hardware*/ | 2601 | /** Read-only state that reports that Macrovision is disabled in hardware*/ |
2601 | # define TV_FUSE_STATE_NO_MACROVISION (1 << 4) | 2602 | # define TV_FUSE_STATE_NO_MACROVISION (1 << 4) |
2602 | /** Read-only state that reports that TV-out is disabled in hardware. */ | 2603 | /** Read-only state that reports that TV-out is disabled in hardware. */ |
2603 | # define TV_FUSE_STATE_DISABLED (2 << 4) | 2604 | # define TV_FUSE_STATE_DISABLED (2 << 4) |
2604 | /** Normal operation */ | 2605 | /** Normal operation */ |
2605 | # define TV_TEST_MODE_NORMAL (0 << 0) | 2606 | # define TV_TEST_MODE_NORMAL (0 << 0) |
2606 | /** Encoder test pattern 1 - combo pattern */ | 2607 | /** Encoder test pattern 1 - combo pattern */ |
2607 | # define TV_TEST_MODE_PATTERN_1 (1 << 0) | 2608 | # define TV_TEST_MODE_PATTERN_1 (1 << 0) |
2608 | /** Encoder test pattern 2 - full screen vertical 75% color bars */ | 2609 | /** Encoder test pattern 2 - full screen vertical 75% color bars */ |
2609 | # define TV_TEST_MODE_PATTERN_2 (2 << 0) | 2610 | # define TV_TEST_MODE_PATTERN_2 (2 << 0) |
2610 | /** Encoder test pattern 3 - full screen horizontal 75% color bars */ | 2611 | /** Encoder test pattern 3 - full screen horizontal 75% color bars */ |
2611 | # define TV_TEST_MODE_PATTERN_3 (3 << 0) | 2612 | # define TV_TEST_MODE_PATTERN_3 (3 << 0) |
2612 | /** Encoder test pattern 4 - random noise */ | 2613 | /** Encoder test pattern 4 - random noise */ |
2613 | # define TV_TEST_MODE_PATTERN_4 (4 << 0) | 2614 | # define TV_TEST_MODE_PATTERN_4 (4 << 0) |
2614 | /** Encoder test pattern 5 - linear color ramps */ | 2615 | /** Encoder test pattern 5 - linear color ramps */ |
2615 | # define TV_TEST_MODE_PATTERN_5 (5 << 0) | 2616 | # define TV_TEST_MODE_PATTERN_5 (5 << 0) |
2616 | /** | 2617 | /** |
2617 | * This test mode forces the DACs to 50% of full output. | 2618 | * This test mode forces the DACs to 50% of full output. |
2618 | * | 2619 | * |
2619 | * This is used for load detection in combination with TVDAC_SENSE_MASK | 2620 | * This is used for load detection in combination with TVDAC_SENSE_MASK |
2620 | */ | 2621 | */ |
2621 | # define TV_TEST_MODE_MONITOR_DETECT (7 << 0) | 2622 | # define TV_TEST_MODE_MONITOR_DETECT (7 << 0) |
2622 | # define TV_TEST_MODE_MASK (7 << 0) | 2623 | # define TV_TEST_MODE_MASK (7 << 0) |
2623 | 2624 | ||
2624 | #define TV_DAC 0x68004 | 2625 | #define TV_DAC 0x68004 |
2625 | # define TV_DAC_SAVE 0x00ffff00 | 2626 | # define TV_DAC_SAVE 0x00ffff00 |
2626 | /** | 2627 | /** |
2627 | * Reports that DAC state change logic has reported change (RO). | 2628 | * Reports that DAC state change logic has reported change (RO). |
2628 | * | 2629 | * |
2629 | * This gets cleared when TV_DAC_STATE_EN is cleared | 2630 | * This gets cleared when TV_DAC_STATE_EN is cleared |
2630 | */ | 2631 | */ |
2631 | # define TVDAC_STATE_CHG (1 << 31) | 2632 | # define TVDAC_STATE_CHG (1 << 31) |
2632 | # define TVDAC_SENSE_MASK (7 << 28) | 2633 | # define TVDAC_SENSE_MASK (7 << 28) |
2633 | /** Reports that DAC A voltage is above the detect threshold */ | 2634 | /** Reports that DAC A voltage is above the detect threshold */ |
2634 | # define TVDAC_A_SENSE (1 << 30) | 2635 | # define TVDAC_A_SENSE (1 << 30) |
2635 | /** Reports that DAC B voltage is above the detect threshold */ | 2636 | /** Reports that DAC B voltage is above the detect threshold */ |
2636 | # define TVDAC_B_SENSE (1 << 29) | 2637 | # define TVDAC_B_SENSE (1 << 29) |
2637 | /** Reports that DAC C voltage is above the detect threshold */ | 2638 | /** Reports that DAC C voltage is above the detect threshold */ |
2638 | # define TVDAC_C_SENSE (1 << 28) | 2639 | # define TVDAC_C_SENSE (1 << 28) |
2639 | /** | 2640 | /** |
2640 | * Enables DAC state detection logic, for load-based TV detection. | 2641 | * Enables DAC state detection logic, for load-based TV detection. |
2641 | * | 2642 | * |
2642 | * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set | 2643 | * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set |
2643 | * to off, for load detection to work. | 2644 | * to off, for load detection to work. |
2644 | */ | 2645 | */ |
2645 | # define TVDAC_STATE_CHG_EN (1 << 27) | 2646 | # define TVDAC_STATE_CHG_EN (1 << 27) |
2646 | /** Sets the DAC A sense value to high */ | 2647 | /** Sets the DAC A sense value to high */ |
2647 | # define TVDAC_A_SENSE_CTL (1 << 26) | 2648 | # define TVDAC_A_SENSE_CTL (1 << 26) |
2648 | /** Sets the DAC B sense value to high */ | 2649 | /** Sets the DAC B sense value to high */ |
2649 | # define TVDAC_B_SENSE_CTL (1 << 25) | 2650 | # define TVDAC_B_SENSE_CTL (1 << 25) |
2650 | /** Sets the DAC C sense value to high */ | 2651 | /** Sets the DAC C sense value to high */ |
2651 | # define TVDAC_C_SENSE_CTL (1 << 24) | 2652 | # define TVDAC_C_SENSE_CTL (1 << 24) |
2652 | /** Overrides the ENC_ENABLE and DAC voltage levels */ | 2653 | /** Overrides the ENC_ENABLE and DAC voltage levels */ |
2653 | # define DAC_CTL_OVERRIDE (1 << 7) | 2654 | # define DAC_CTL_OVERRIDE (1 << 7) |
2654 | /** Sets the slew rate. Must be preserved in software */ | 2655 | /** Sets the slew rate. Must be preserved in software */ |
2655 | # define ENC_TVDAC_SLEW_FAST (1 << 6) | 2656 | # define ENC_TVDAC_SLEW_FAST (1 << 6) |
2656 | # define DAC_A_1_3_V (0 << 4) | 2657 | # define DAC_A_1_3_V (0 << 4) |
2657 | # define DAC_A_1_1_V (1 << 4) | 2658 | # define DAC_A_1_1_V (1 << 4) |
2658 | # define DAC_A_0_7_V (2 << 4) | 2659 | # define DAC_A_0_7_V (2 << 4) |
2659 | # define DAC_A_MASK (3 << 4) | 2660 | # define DAC_A_MASK (3 << 4) |
2660 | # define DAC_B_1_3_V (0 << 2) | 2661 | # define DAC_B_1_3_V (0 << 2) |
2661 | # define DAC_B_1_1_V (1 << 2) | 2662 | # define DAC_B_1_1_V (1 << 2) |
2662 | # define DAC_B_0_7_V (2 << 2) | 2663 | # define DAC_B_0_7_V (2 << 2) |
2663 | # define DAC_B_MASK (3 << 2) | 2664 | # define DAC_B_MASK (3 << 2) |
2664 | # define DAC_C_1_3_V (0 << 0) | 2665 | # define DAC_C_1_3_V (0 << 0) |
2665 | # define DAC_C_1_1_V (1 << 0) | 2666 | # define DAC_C_1_1_V (1 << 0) |
2666 | # define DAC_C_0_7_V (2 << 0) | 2667 | # define DAC_C_0_7_V (2 << 0) |
2667 | # define DAC_C_MASK (3 << 0) | 2668 | # define DAC_C_MASK (3 << 0) |
2668 | 2669 | ||
2669 | /** | 2670 | /** |
2670 | * CSC coefficients are stored in a floating point format with 9 bits of | 2671 | * CSC coefficients are stored in a floating point format with 9 bits of |
2671 | * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n, | 2672 | * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n, |
2672 | * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with | 2673 | * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with |
2673 | * -1 (0x3) being the only legal negative value. | 2674 | * -1 (0x3) being the only legal negative value. |
2674 | */ | 2675 | */ |
2675 | #define TV_CSC_Y 0x68010 | 2676 | #define TV_CSC_Y 0x68010 |
2676 | # define TV_RY_MASK 0x07ff0000 | 2677 | # define TV_RY_MASK 0x07ff0000 |
2677 | # define TV_RY_SHIFT 16 | 2678 | # define TV_RY_SHIFT 16 |
2678 | # define TV_GY_MASK 0x00000fff | 2679 | # define TV_GY_MASK 0x00000fff |
2679 | # define TV_GY_SHIFT 0 | 2680 | # define TV_GY_SHIFT 0 |
2680 | 2681 | ||
2681 | #define TV_CSC_Y2 0x68014 | 2682 | #define TV_CSC_Y2 0x68014 |
2682 | # define TV_BY_MASK 0x07ff0000 | 2683 | # define TV_BY_MASK 0x07ff0000 |
2683 | # define TV_BY_SHIFT 16 | 2684 | # define TV_BY_SHIFT 16 |
2684 | /** | 2685 | /** |
2685 | * Y attenuation for component video. | 2686 | * Y attenuation for component video. |
2686 | * | 2687 | * |
2687 | * Stored in 1.9 fixed point. | 2688 | * Stored in 1.9 fixed point. |
2688 | */ | 2689 | */ |
2689 | # define TV_AY_MASK 0x000003ff | 2690 | # define TV_AY_MASK 0x000003ff |
2690 | # define TV_AY_SHIFT 0 | 2691 | # define TV_AY_SHIFT 0 |
2691 | 2692 | ||
2692 | #define TV_CSC_U 0x68018 | 2693 | #define TV_CSC_U 0x68018 |
2693 | # define TV_RU_MASK 0x07ff0000 | 2694 | # define TV_RU_MASK 0x07ff0000 |
2694 | # define TV_RU_SHIFT 16 | 2695 | # define TV_RU_SHIFT 16 |
2695 | # define TV_GU_MASK 0x000007ff | 2696 | # define TV_GU_MASK 0x000007ff |
2696 | # define TV_GU_SHIFT 0 | 2697 | # define TV_GU_SHIFT 0 |
2697 | 2698 | ||
2698 | #define TV_CSC_U2 0x6801c | 2699 | #define TV_CSC_U2 0x6801c |
2699 | # define TV_BU_MASK 0x07ff0000 | 2700 | # define TV_BU_MASK 0x07ff0000 |
2700 | # define TV_BU_SHIFT 16 | 2701 | # define TV_BU_SHIFT 16 |
2701 | /** | 2702 | /** |
2702 | * U attenuation for component video. | 2703 | * U attenuation for component video. |
2703 | * | 2704 | * |
2704 | * Stored in 1.9 fixed point. | 2705 | * Stored in 1.9 fixed point. |
2705 | */ | 2706 | */ |
2706 | # define TV_AU_MASK 0x000003ff | 2707 | # define TV_AU_MASK 0x000003ff |
2707 | # define TV_AU_SHIFT 0 | 2708 | # define TV_AU_SHIFT 0 |
2708 | 2709 | ||
2709 | #define TV_CSC_V 0x68020 | 2710 | #define TV_CSC_V 0x68020 |
2710 | # define TV_RV_MASK 0x0fff0000 | 2711 | # define TV_RV_MASK 0x0fff0000 |
2711 | # define TV_RV_SHIFT 16 | 2712 | # define TV_RV_SHIFT 16 |
2712 | # define TV_GV_MASK 0x000007ff | 2713 | # define TV_GV_MASK 0x000007ff |
2713 | # define TV_GV_SHIFT 0 | 2714 | # define TV_GV_SHIFT 0 |
2714 | 2715 | ||
2715 | #define TV_CSC_V2 0x68024 | 2716 | #define TV_CSC_V2 0x68024 |
2716 | # define TV_BV_MASK 0x07ff0000 | 2717 | # define TV_BV_MASK 0x07ff0000 |
2717 | # define TV_BV_SHIFT 16 | 2718 | # define TV_BV_SHIFT 16 |
2718 | /** | 2719 | /** |
2719 | * V attenuation for component video. | 2720 | * V attenuation for component video. |
2720 | * | 2721 | * |
2721 | * Stored in 1.9 fixed point. | 2722 | * Stored in 1.9 fixed point. |
2722 | */ | 2723 | */ |
2723 | # define TV_AV_MASK 0x000007ff | 2724 | # define TV_AV_MASK 0x000007ff |
2724 | # define TV_AV_SHIFT 0 | 2725 | # define TV_AV_SHIFT 0 |
2725 | 2726 | ||
2726 | #define TV_CLR_KNOBS 0x68028 | 2727 | #define TV_CLR_KNOBS 0x68028 |
2727 | /** 2s-complement brightness adjustment */ | 2728 | /** 2s-complement brightness adjustment */ |
2728 | # define TV_BRIGHTNESS_MASK 0xff000000 | 2729 | # define TV_BRIGHTNESS_MASK 0xff000000 |
2729 | # define TV_BRIGHTNESS_SHIFT 24 | 2730 | # define TV_BRIGHTNESS_SHIFT 24 |
2730 | /** Contrast adjustment, as a 2.6 unsigned floating point number */ | 2731 | /** Contrast adjustment, as a 2.6 unsigned floating point number */ |
2731 | # define TV_CONTRAST_MASK 0x00ff0000 | 2732 | # define TV_CONTRAST_MASK 0x00ff0000 |
2732 | # define TV_CONTRAST_SHIFT 16 | 2733 | # define TV_CONTRAST_SHIFT 16 |
2733 | /** Saturation adjustment, as a 2.6 unsigned floating point number */ | 2734 | /** Saturation adjustment, as a 2.6 unsigned floating point number */ |
2734 | # define TV_SATURATION_MASK 0x0000ff00 | 2735 | # define TV_SATURATION_MASK 0x0000ff00 |
2735 | # define TV_SATURATION_SHIFT 8 | 2736 | # define TV_SATURATION_SHIFT 8 |
2736 | /** Hue adjustment, as an integer phase angle in degrees */ | 2737 | /** Hue adjustment, as an integer phase angle in degrees */ |
2737 | # define TV_HUE_MASK 0x000000ff | 2738 | # define TV_HUE_MASK 0x000000ff |
2738 | # define TV_HUE_SHIFT 0 | 2739 | # define TV_HUE_SHIFT 0 |
2739 | 2740 | ||
2740 | #define TV_CLR_LEVEL 0x6802c | 2741 | #define TV_CLR_LEVEL 0x6802c |
2741 | /** Controls the DAC level for black */ | 2742 | /** Controls the DAC level for black */ |
2742 | # define TV_BLACK_LEVEL_MASK 0x01ff0000 | 2743 | # define TV_BLACK_LEVEL_MASK 0x01ff0000 |
2743 | # define TV_BLACK_LEVEL_SHIFT 16 | 2744 | # define TV_BLACK_LEVEL_SHIFT 16 |
2744 | /** Controls the DAC level for blanking */ | 2745 | /** Controls the DAC level for blanking */ |
2745 | # define TV_BLANK_LEVEL_MASK 0x000001ff | 2746 | # define TV_BLANK_LEVEL_MASK 0x000001ff |
2746 | # define TV_BLANK_LEVEL_SHIFT 0 | 2747 | # define TV_BLANK_LEVEL_SHIFT 0 |
2747 | 2748 | ||
2748 | #define TV_H_CTL_1 0x68030 | 2749 | #define TV_H_CTL_1 0x68030 |
2749 | /** Number of pixels in the hsync. */ | 2750 | /** Number of pixels in the hsync. */ |
2750 | # define TV_HSYNC_END_MASK 0x1fff0000 | 2751 | # define TV_HSYNC_END_MASK 0x1fff0000 |
2751 | # define TV_HSYNC_END_SHIFT 16 | 2752 | # define TV_HSYNC_END_SHIFT 16 |
2752 | /** Total number of pixels minus one in the line (display and blanking). */ | 2753 | /** Total number of pixels minus one in the line (display and blanking). */ |
2753 | # define TV_HTOTAL_MASK 0x00001fff | 2754 | # define TV_HTOTAL_MASK 0x00001fff |
2754 | # define TV_HTOTAL_SHIFT 0 | 2755 | # define TV_HTOTAL_SHIFT 0 |
2755 | 2756 | ||
2756 | #define TV_H_CTL_2 0x68034 | 2757 | #define TV_H_CTL_2 0x68034 |
2757 | /** Enables the colorburst (needed for non-component color) */ | 2758 | /** Enables the colorburst (needed for non-component color) */ |
2758 | # define TV_BURST_ENA (1 << 31) | 2759 | # define TV_BURST_ENA (1 << 31) |
2759 | /** Offset of the colorburst from the start of hsync, in pixels minus one. */ | 2760 | /** Offset of the colorburst from the start of hsync, in pixels minus one. */ |
2760 | # define TV_HBURST_START_SHIFT 16 | 2761 | # define TV_HBURST_START_SHIFT 16 |
2761 | # define TV_HBURST_START_MASK 0x1fff0000 | 2762 | # define TV_HBURST_START_MASK 0x1fff0000 |
2762 | /** Length of the colorburst */ | 2763 | /** Length of the colorburst */ |
2763 | # define TV_HBURST_LEN_SHIFT 0 | 2764 | # define TV_HBURST_LEN_SHIFT 0 |
2764 | # define TV_HBURST_LEN_MASK 0x0001fff | 2765 | # define TV_HBURST_LEN_MASK 0x0001fff |
2765 | 2766 | ||
2766 | #define TV_H_CTL_3 0x68038 | 2767 | #define TV_H_CTL_3 0x68038 |
2767 | /** End of hblank, measured in pixels minus one from start of hsync */ | 2768 | /** End of hblank, measured in pixels minus one from start of hsync */ |
2768 | # define TV_HBLANK_END_SHIFT 16 | 2769 | # define TV_HBLANK_END_SHIFT 16 |
2769 | # define TV_HBLANK_END_MASK 0x1fff0000 | 2770 | # define TV_HBLANK_END_MASK 0x1fff0000 |
2770 | /** Start of hblank, measured in pixels minus one from start of hsync */ | 2771 | /** Start of hblank, measured in pixels minus one from start of hsync */ |
2771 | # define TV_HBLANK_START_SHIFT 0 | 2772 | # define TV_HBLANK_START_SHIFT 0 |
2772 | # define TV_HBLANK_START_MASK 0x0001fff | 2773 | # define TV_HBLANK_START_MASK 0x0001fff |
2773 | 2774 | ||
2774 | #define TV_V_CTL_1 0x6803c | 2775 | #define TV_V_CTL_1 0x6803c |
2775 | /** XXX */ | 2776 | /** XXX */ |
2776 | # define TV_NBR_END_SHIFT 16 | 2777 | # define TV_NBR_END_SHIFT 16 |
2777 | # define TV_NBR_END_MASK 0x07ff0000 | 2778 | # define TV_NBR_END_MASK 0x07ff0000 |
2778 | /** XXX */ | 2779 | /** XXX */ |
2779 | # define TV_VI_END_F1_SHIFT 8 | 2780 | # define TV_VI_END_F1_SHIFT 8 |
2780 | # define TV_VI_END_F1_MASK 0x00003f00 | 2781 | # define TV_VI_END_F1_MASK 0x00003f00 |
2781 | /** XXX */ | 2782 | /** XXX */ |
2782 | # define TV_VI_END_F2_SHIFT 0 | 2783 | # define TV_VI_END_F2_SHIFT 0 |
2783 | # define TV_VI_END_F2_MASK 0x0000003f | 2784 | # define TV_VI_END_F2_MASK 0x0000003f |
2784 | 2785 | ||
2785 | #define TV_V_CTL_2 0x68040 | 2786 | #define TV_V_CTL_2 0x68040 |
2786 | /** Length of vsync, in half lines */ | 2787 | /** Length of vsync, in half lines */ |
2787 | # define TV_VSYNC_LEN_MASK 0x07ff0000 | 2788 | # define TV_VSYNC_LEN_MASK 0x07ff0000 |
2788 | # define TV_VSYNC_LEN_SHIFT 16 | 2789 | # define TV_VSYNC_LEN_SHIFT 16 |
2789 | /** Offset of the start of vsync in field 1, measured in one less than the | 2790 | /** Offset of the start of vsync in field 1, measured in one less than the |
2790 | * number of half lines. | 2791 | * number of half lines. |
2791 | */ | 2792 | */ |
2792 | # define TV_VSYNC_START_F1_MASK 0x00007f00 | 2793 | # define TV_VSYNC_START_F1_MASK 0x00007f00 |
2793 | # define TV_VSYNC_START_F1_SHIFT 8 | 2794 | # define TV_VSYNC_START_F1_SHIFT 8 |
2794 | /** | 2795 | /** |
2795 | * Offset of the start of vsync in field 2, measured in one less than the | 2796 | * Offset of the start of vsync in field 2, measured in one less than the |
2796 | * number of half lines. | 2797 | * number of half lines. |
2797 | */ | 2798 | */ |
2798 | # define TV_VSYNC_START_F2_MASK 0x0000007f | 2799 | # define TV_VSYNC_START_F2_MASK 0x0000007f |
2799 | # define TV_VSYNC_START_F2_SHIFT 0 | 2800 | # define TV_VSYNC_START_F2_SHIFT 0 |
2800 | 2801 | ||
2801 | #define TV_V_CTL_3 0x68044 | 2802 | #define TV_V_CTL_3 0x68044 |
2802 | /** Enables generation of the equalization signal */ | 2803 | /** Enables generation of the equalization signal */ |
2803 | # define TV_EQUAL_ENA (1 << 31) | 2804 | # define TV_EQUAL_ENA (1 << 31) |
2804 | /** Length of vsync, in half lines */ | 2805 | /** Length of vsync, in half lines */ |
2805 | # define TV_VEQ_LEN_MASK 0x007f0000 | 2806 | # define TV_VEQ_LEN_MASK 0x007f0000 |
2806 | # define TV_VEQ_LEN_SHIFT 16 | 2807 | # define TV_VEQ_LEN_SHIFT 16 |
2807 | /** Offset of the start of equalization in field 1, measured in one less than | 2808 | /** Offset of the start of equalization in field 1, measured in one less than |
2808 | * the number of half lines. | 2809 | * the number of half lines. |
2809 | */ | 2810 | */ |
2810 | # define TV_VEQ_START_F1_MASK 0x0007f00 | 2811 | # define TV_VEQ_START_F1_MASK 0x0007f00 |
2811 | # define TV_VEQ_START_F1_SHIFT 8 | 2812 | # define TV_VEQ_START_F1_SHIFT 8 |
2812 | /** | 2813 | /** |
2813 | * Offset of the start of equalization in field 2, measured in one less than | 2814 | * Offset of the start of equalization in field 2, measured in one less than |
2814 | * the number of half lines. | 2815 | * the number of half lines. |
2815 | */ | 2816 | */ |
2816 | # define TV_VEQ_START_F2_MASK 0x000007f | 2817 | # define TV_VEQ_START_F2_MASK 0x000007f |
2817 | # define TV_VEQ_START_F2_SHIFT 0 | 2818 | # define TV_VEQ_START_F2_SHIFT 0 |
2818 | 2819 | ||
2819 | #define TV_V_CTL_4 0x68048 | 2820 | #define TV_V_CTL_4 0x68048 |
2820 | /** | 2821 | /** |
2821 | * Offset to start of vertical colorburst, measured in one less than the | 2822 | * Offset to start of vertical colorburst, measured in one less than the |
2822 | * number of lines from vertical start. | 2823 | * number of lines from vertical start. |
2823 | */ | 2824 | */ |
2824 | # define TV_VBURST_START_F1_MASK 0x003f0000 | 2825 | # define TV_VBURST_START_F1_MASK 0x003f0000 |
2825 | # define TV_VBURST_START_F1_SHIFT 16 | 2826 | # define TV_VBURST_START_F1_SHIFT 16 |
2826 | /** | 2827 | /** |
2827 | * Offset to the end of vertical colorburst, measured in one less than the | 2828 | * Offset to the end of vertical colorburst, measured in one less than the |
2828 | * number of lines from the start of NBR. | 2829 | * number of lines from the start of NBR. |
2829 | */ | 2830 | */ |
2830 | # define TV_VBURST_END_F1_MASK 0x000000ff | 2831 | # define TV_VBURST_END_F1_MASK 0x000000ff |
2831 | # define TV_VBURST_END_F1_SHIFT 0 | 2832 | # define TV_VBURST_END_F1_SHIFT 0 |
2832 | 2833 | ||
2833 | #define TV_V_CTL_5 0x6804c | 2834 | #define TV_V_CTL_5 0x6804c |
2834 | /** | 2835 | /** |
2835 | * Offset to start of vertical colorburst, measured in one less than the | 2836 | * Offset to start of vertical colorburst, measured in one less than the |
2836 | * number of lines from vertical start. | 2837 | * number of lines from vertical start. |
2837 | */ | 2838 | */ |
2838 | # define TV_VBURST_START_F2_MASK 0x003f0000 | 2839 | # define TV_VBURST_START_F2_MASK 0x003f0000 |
2839 | # define TV_VBURST_START_F2_SHIFT 16 | 2840 | # define TV_VBURST_START_F2_SHIFT 16 |
2840 | /** | 2841 | /** |
2841 | * Offset to the end of vertical colorburst, measured in one less than the | 2842 | * Offset to the end of vertical colorburst, measured in one less than the |
2842 | * number of lines from the start of NBR. | 2843 | * number of lines from the start of NBR. |
2843 | */ | 2844 | */ |
2844 | # define TV_VBURST_END_F2_MASK 0x000000ff | 2845 | # define TV_VBURST_END_F2_MASK 0x000000ff |
2845 | # define TV_VBURST_END_F2_SHIFT 0 | 2846 | # define TV_VBURST_END_F2_SHIFT 0 |
2846 | 2847 | ||
2847 | #define TV_V_CTL_6 0x68050 | 2848 | #define TV_V_CTL_6 0x68050 |
2848 | /** | 2849 | /** |
2849 | * Offset to start of vertical colorburst, measured in one less than the | 2850 | * Offset to start of vertical colorburst, measured in one less than the |
2850 | * number of lines from vertical start. | 2851 | * number of lines from vertical start. |
2851 | */ | 2852 | */ |
2852 | # define TV_VBURST_START_F3_MASK 0x003f0000 | 2853 | # define TV_VBURST_START_F3_MASK 0x003f0000 |
2853 | # define TV_VBURST_START_F3_SHIFT 16 | 2854 | # define TV_VBURST_START_F3_SHIFT 16 |
2854 | /** | 2855 | /** |
2855 | * Offset to the end of vertical colorburst, measured in one less than the | 2856 | * Offset to the end of vertical colorburst, measured in one less than the |
2856 | * number of lines from the start of NBR. | 2857 | * number of lines from the start of NBR. |
2857 | */ | 2858 | */ |
2858 | # define TV_VBURST_END_F3_MASK 0x000000ff | 2859 | # define TV_VBURST_END_F3_MASK 0x000000ff |
2859 | # define TV_VBURST_END_F3_SHIFT 0 | 2860 | # define TV_VBURST_END_F3_SHIFT 0 |
2860 | 2861 | ||
2861 | #define TV_V_CTL_7 0x68054 | 2862 | #define TV_V_CTL_7 0x68054 |
2862 | /** | 2863 | /** |
2863 | * Offset to start of vertical colorburst, measured in one less than the | 2864 | * Offset to start of vertical colorburst, measured in one less than the |
2864 | * number of lines from vertical start. | 2865 | * number of lines from vertical start. |
2865 | */ | 2866 | */ |
2866 | # define TV_VBURST_START_F4_MASK 0x003f0000 | 2867 | # define TV_VBURST_START_F4_MASK 0x003f0000 |
2867 | # define TV_VBURST_START_F4_SHIFT 16 | 2868 | # define TV_VBURST_START_F4_SHIFT 16 |
2868 | /** | 2869 | /** |
2869 | * Offset to the end of vertical colorburst, measured in one less than the | 2870 | * Offset to the end of vertical colorburst, measured in one less than the |
2870 | * number of lines from the start of NBR. | 2871 | * number of lines from the start of NBR. |
2871 | */ | 2872 | */ |
2872 | # define TV_VBURST_END_F4_MASK 0x000000ff | 2873 | # define TV_VBURST_END_F4_MASK 0x000000ff |
2873 | # define TV_VBURST_END_F4_SHIFT 0 | 2874 | # define TV_VBURST_END_F4_SHIFT 0 |
2874 | 2875 | ||
2875 | #define TV_SC_CTL_1 0x68060 | 2876 | #define TV_SC_CTL_1 0x68060 |
2876 | /** Turns on the first subcarrier phase generation DDA */ | 2877 | /** Turns on the first subcarrier phase generation DDA */ |
2877 | # define TV_SC_DDA1_EN (1 << 31) | 2878 | # define TV_SC_DDA1_EN (1 << 31) |
2878 | /** Turns on the second subcarrier phase generation DDA */ | 2879 | /** Turns on the second subcarrier phase generation DDA */ |
2879 | # define TV_SC_DDA2_EN (1 << 30) | 2880 | # define TV_SC_DDA2_EN (1 << 30) |
2880 | /** Turns on the third subcarrier phase generation DDA */ | 2881 | /** Turns on the third subcarrier phase generation DDA */ |
2881 | # define TV_SC_DDA3_EN (1 << 29) | 2882 | # define TV_SC_DDA3_EN (1 << 29) |
2882 | /** Sets the subcarrier DDA to reset frequency every other field */ | 2883 | /** Sets the subcarrier DDA to reset frequency every other field */ |
2883 | # define TV_SC_RESET_EVERY_2 (0 << 24) | 2884 | # define TV_SC_RESET_EVERY_2 (0 << 24) |
2884 | /** Sets the subcarrier DDA to reset frequency every fourth field */ | 2885 | /** Sets the subcarrier DDA to reset frequency every fourth field */ |
2885 | # define TV_SC_RESET_EVERY_4 (1 << 24) | 2886 | # define TV_SC_RESET_EVERY_4 (1 << 24) |
2886 | /** Sets the subcarrier DDA to reset frequency every eighth field */ | 2887 | /** Sets the subcarrier DDA to reset frequency every eighth field */ |
2887 | # define TV_SC_RESET_EVERY_8 (2 << 24) | 2888 | # define TV_SC_RESET_EVERY_8 (2 << 24) |
2888 | /** Sets the subcarrier DDA to never reset the frequency */ | 2889 | /** Sets the subcarrier DDA to never reset the frequency */ |
2889 | # define TV_SC_RESET_NEVER (3 << 24) | 2890 | # define TV_SC_RESET_NEVER (3 << 24) |
2890 | /** Sets the peak amplitude of the colorburst. */ | 2891 | /** Sets the peak amplitude of the colorburst. */ |
2891 | # define TV_BURST_LEVEL_MASK 0x00ff0000 | 2892 | # define TV_BURST_LEVEL_MASK 0x00ff0000 |
2892 | # define TV_BURST_LEVEL_SHIFT 16 | 2893 | # define TV_BURST_LEVEL_SHIFT 16 |
2893 | /** Sets the increment of the first subcarrier phase generation DDA */ | 2894 | /** Sets the increment of the first subcarrier phase generation DDA */ |
2894 | # define TV_SCDDA1_INC_MASK 0x00000fff | 2895 | # define TV_SCDDA1_INC_MASK 0x00000fff |
2895 | # define TV_SCDDA1_INC_SHIFT 0 | 2896 | # define TV_SCDDA1_INC_SHIFT 0 |
2896 | 2897 | ||
2897 | #define TV_SC_CTL_2 0x68064 | 2898 | #define TV_SC_CTL_2 0x68064 |
2898 | /** Sets the rollover for the second subcarrier phase generation DDA */ | 2899 | /** Sets the rollover for the second subcarrier phase generation DDA */ |
2899 | # define TV_SCDDA2_SIZE_MASK 0x7fff0000 | 2900 | # define TV_SCDDA2_SIZE_MASK 0x7fff0000 |
2900 | # define TV_SCDDA2_SIZE_SHIFT 16 | 2901 | # define TV_SCDDA2_SIZE_SHIFT 16 |
2901 | /** Sets the increment of the second subcarrier phase generation DDA */ | 2902 | /** Sets the increment of the second subcarrier phase generation DDA */ |
2902 | # define TV_SCDDA2_INC_MASK 0x00007fff | 2903 | # define TV_SCDDA2_INC_MASK 0x00007fff |
2903 | # define TV_SCDDA2_INC_SHIFT 0 | 2904 | # define TV_SCDDA2_INC_SHIFT 0 |
2904 | 2905 | ||
2905 | #define TV_SC_CTL_3 0x68068 | 2906 | #define TV_SC_CTL_3 0x68068 |
2906 | /** Sets the rollover for the third subcarrier phase generation DDA */ | 2907 | /** Sets the rollover for the third subcarrier phase generation DDA */ |
2907 | # define TV_SCDDA3_SIZE_MASK 0x7fff0000 | 2908 | # define TV_SCDDA3_SIZE_MASK 0x7fff0000 |
2908 | # define TV_SCDDA3_SIZE_SHIFT 16 | 2909 | # define TV_SCDDA3_SIZE_SHIFT 16 |
2909 | /** Sets the increment of the third subcarrier phase generation DDA */ | 2910 | /** Sets the increment of the third subcarrier phase generation DDA */ |
2910 | # define TV_SCDDA3_INC_MASK 0x00007fff | 2911 | # define TV_SCDDA3_INC_MASK 0x00007fff |
2911 | # define TV_SCDDA3_INC_SHIFT 0 | 2912 | # define TV_SCDDA3_INC_SHIFT 0 |
2912 | 2913 | ||
2913 | #define TV_WIN_POS 0x68070 | 2914 | #define TV_WIN_POS 0x68070 |
2914 | /** X coordinate of the display from the start of horizontal active */ | 2915 | /** X coordinate of the display from the start of horizontal active */ |
2915 | # define TV_XPOS_MASK 0x1fff0000 | 2916 | # define TV_XPOS_MASK 0x1fff0000 |
2916 | # define TV_XPOS_SHIFT 16 | 2917 | # define TV_XPOS_SHIFT 16 |
2917 | /** Y coordinate of the display from the start of vertical active (NBR) */ | 2918 | /** Y coordinate of the display from the start of vertical active (NBR) */ |
2918 | # define TV_YPOS_MASK 0x00000fff | 2919 | # define TV_YPOS_MASK 0x00000fff |
2919 | # define TV_YPOS_SHIFT 0 | 2920 | # define TV_YPOS_SHIFT 0 |
2920 | 2921 | ||
2921 | #define TV_WIN_SIZE 0x68074 | 2922 | #define TV_WIN_SIZE 0x68074 |
2922 | /** Horizontal size of the display window, measured in pixels*/ | 2923 | /** Horizontal size of the display window, measured in pixels*/ |
2923 | # define TV_XSIZE_MASK 0x1fff0000 | 2924 | # define TV_XSIZE_MASK 0x1fff0000 |
2924 | # define TV_XSIZE_SHIFT 16 | 2925 | # define TV_XSIZE_SHIFT 16 |
2925 | /** | 2926 | /** |
2926 | * Vertical size of the display window, measured in pixels. | 2927 | * Vertical size of the display window, measured in pixels. |
2927 | * | 2928 | * |
2928 | * Must be even for interlaced modes. | 2929 | * Must be even for interlaced modes. |
2929 | */ | 2930 | */ |
2930 | # define TV_YSIZE_MASK 0x00000fff | 2931 | # define TV_YSIZE_MASK 0x00000fff |
2931 | # define TV_YSIZE_SHIFT 0 | 2932 | # define TV_YSIZE_SHIFT 0 |
2932 | 2933 | ||
2933 | #define TV_FILTER_CTL_1 0x68080 | 2934 | #define TV_FILTER_CTL_1 0x68080 |
2934 | /** | 2935 | /** |
2935 | * Enables automatic scaling calculation. | 2936 | * Enables automatic scaling calculation. |
2936 | * | 2937 | * |
2937 | * If set, the rest of the registers are ignored, and the calculated values can | 2938 | * If set, the rest of the registers are ignored, and the calculated values can |
2938 | * be read back from the register. | 2939 | * be read back from the register. |
2939 | */ | 2940 | */ |
2940 | # define TV_AUTO_SCALE (1 << 31) | 2941 | # define TV_AUTO_SCALE (1 << 31) |
2941 | /** | 2942 | /** |
2942 | * Disables the vertical filter. | 2943 | * Disables the vertical filter. |
2943 | * | 2944 | * |
2944 | * This is required on modes more than 1024 pixels wide */ | 2945 | * This is required on modes more than 1024 pixels wide */ |
2945 | # define TV_V_FILTER_BYPASS (1 << 29) | 2946 | # define TV_V_FILTER_BYPASS (1 << 29) |
2946 | /** Enables adaptive vertical filtering */ | 2947 | /** Enables adaptive vertical filtering */ |
2947 | # define TV_VADAPT (1 << 28) | 2948 | # define TV_VADAPT (1 << 28) |
2948 | # define TV_VADAPT_MODE_MASK (3 << 26) | 2949 | # define TV_VADAPT_MODE_MASK (3 << 26) |
2949 | /** Selects the least adaptive vertical filtering mode */ | 2950 | /** Selects the least adaptive vertical filtering mode */ |
2950 | # define TV_VADAPT_MODE_LEAST (0 << 26) | 2951 | # define TV_VADAPT_MODE_LEAST (0 << 26) |
2951 | /** Selects the moderately adaptive vertical filtering mode */ | 2952 | /** Selects the moderately adaptive vertical filtering mode */ |
2952 | # define TV_VADAPT_MODE_MODERATE (1 << 26) | 2953 | # define TV_VADAPT_MODE_MODERATE (1 << 26) |
2953 | /** Selects the most adaptive vertical filtering mode */ | 2954 | /** Selects the most adaptive vertical filtering mode */ |
2954 | # define TV_VADAPT_MODE_MOST (3 << 26) | 2955 | # define TV_VADAPT_MODE_MOST (3 << 26) |
2955 | /** | 2956 | /** |
2956 | * Sets the horizontal scaling factor. | 2957 | * Sets the horizontal scaling factor. |
2957 | * | 2958 | * |
2958 | * This should be the fractional part of the horizontal scaling factor divided | 2959 | * This should be the fractional part of the horizontal scaling factor divided |
2959 | * by the oversampling rate. TV_HSCALE should be less than 1, and set to: | 2960 | * by the oversampling rate. TV_HSCALE should be less than 1, and set to: |
2960 | * | 2961 | * |
2961 | * (src width - 1) / ((oversample * dest width) - 1) | 2962 | * (src width - 1) / ((oversample * dest width) - 1) |
2962 | */ | 2963 | */ |
2963 | # define TV_HSCALE_FRAC_MASK 0x00003fff | 2964 | # define TV_HSCALE_FRAC_MASK 0x00003fff |
2964 | # define TV_HSCALE_FRAC_SHIFT 0 | 2965 | # define TV_HSCALE_FRAC_SHIFT 0 |
2965 | 2966 | ||
2966 | #define TV_FILTER_CTL_2 0x68084 | 2967 | #define TV_FILTER_CTL_2 0x68084 |
2967 | /** | 2968 | /** |
2968 | * Sets the integer part of the 3.15 fixed-point vertical scaling factor. | 2969 | * Sets the integer part of the 3.15 fixed-point vertical scaling factor. |
2969 | * | 2970 | * |
2970 | * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1) | 2971 | * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1) |
2971 | */ | 2972 | */ |
2972 | # define TV_VSCALE_INT_MASK 0x00038000 | 2973 | # define TV_VSCALE_INT_MASK 0x00038000 |
2973 | # define TV_VSCALE_INT_SHIFT 15 | 2974 | # define TV_VSCALE_INT_SHIFT 15 |
2974 | /** | 2975 | /** |
2975 | * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. | 2976 | * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. |
2976 | * | 2977 | * |
2977 | * \sa TV_VSCALE_INT_MASK | 2978 | * \sa TV_VSCALE_INT_MASK |
2978 | */ | 2979 | */ |
2979 | # define TV_VSCALE_FRAC_MASK 0x00007fff | 2980 | # define TV_VSCALE_FRAC_MASK 0x00007fff |
2980 | # define TV_VSCALE_FRAC_SHIFT 0 | 2981 | # define TV_VSCALE_FRAC_SHIFT 0 |
2981 | 2982 | ||
2982 | #define TV_FILTER_CTL_3 0x68088 | 2983 | #define TV_FILTER_CTL_3 0x68088 |
2983 | /** | 2984 | /** |
2984 | * Sets the integer part of the 3.15 fixed-point vertical scaling factor. | 2985 | * Sets the integer part of the 3.15 fixed-point vertical scaling factor. |
2985 | * | 2986 | * |
2986 | * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1)) | 2987 | * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1)) |
2987 | * | 2988 | * |
2988 | * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. | 2989 | * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. |
2989 | */ | 2990 | */ |
2990 | # define TV_VSCALE_IP_INT_MASK 0x00038000 | 2991 | # define TV_VSCALE_IP_INT_MASK 0x00038000 |
2991 | # define TV_VSCALE_IP_INT_SHIFT 15 | 2992 | # define TV_VSCALE_IP_INT_SHIFT 15 |
2992 | /** | 2993 | /** |
2993 | * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. | 2994 | * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. |
2994 | * | 2995 | * |
2995 | * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. | 2996 | * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. |
2996 | * | 2997 | * |
2997 | * \sa TV_VSCALE_IP_INT_MASK | 2998 | * \sa TV_VSCALE_IP_INT_MASK |
2998 | */ | 2999 | */ |
2999 | # define TV_VSCALE_IP_FRAC_MASK 0x00007fff | 3000 | # define TV_VSCALE_IP_FRAC_MASK 0x00007fff |
3000 | # define TV_VSCALE_IP_FRAC_SHIFT 0 | 3001 | # define TV_VSCALE_IP_FRAC_SHIFT 0 |
3001 | 3002 | ||
3002 | #define TV_CC_CONTROL 0x68090 | 3003 | #define TV_CC_CONTROL 0x68090 |
3003 | # define TV_CC_ENABLE (1 << 31) | 3004 | # define TV_CC_ENABLE (1 << 31) |
3004 | /** | 3005 | /** |
3005 | * Specifies which field to send the CC data in. | 3006 | * Specifies which field to send the CC data in. |
3006 | * | 3007 | * |
3007 | * CC data is usually sent in field 0. | 3008 | * CC data is usually sent in field 0. |
3008 | */ | 3009 | */ |
3009 | # define TV_CC_FID_MASK (1 << 27) | 3010 | # define TV_CC_FID_MASK (1 << 27) |
3010 | # define TV_CC_FID_SHIFT 27 | 3011 | # define TV_CC_FID_SHIFT 27 |
3011 | /** Sets the horizontal position of the CC data. Usually 135. */ | 3012 | /** Sets the horizontal position of the CC data. Usually 135. */ |
3012 | # define TV_CC_HOFF_MASK 0x03ff0000 | 3013 | # define TV_CC_HOFF_MASK 0x03ff0000 |
3013 | # define TV_CC_HOFF_SHIFT 16 | 3014 | # define TV_CC_HOFF_SHIFT 16 |
3014 | /** Sets the vertical position of the CC data. Usually 21 */ | 3015 | /** Sets the vertical position of the CC data. Usually 21 */ |
3015 | # define TV_CC_LINE_MASK 0x0000003f | 3016 | # define TV_CC_LINE_MASK 0x0000003f |
3016 | # define TV_CC_LINE_SHIFT 0 | 3017 | # define TV_CC_LINE_SHIFT 0 |
3017 | 3018 | ||
3018 | #define TV_CC_DATA 0x68094 | 3019 | #define TV_CC_DATA 0x68094 |
3019 | # define TV_CC_RDY (1 << 31) | 3020 | # define TV_CC_RDY (1 << 31) |
3020 | /** Second word of CC data to be transmitted. */ | 3021 | /** Second word of CC data to be transmitted. */ |
3021 | # define TV_CC_DATA_2_MASK 0x007f0000 | 3022 | # define TV_CC_DATA_2_MASK 0x007f0000 |
3022 | # define TV_CC_DATA_2_SHIFT 16 | 3023 | # define TV_CC_DATA_2_SHIFT 16 |
3023 | /** First word of CC data to be transmitted. */ | 3024 | /** First word of CC data to be transmitted. */ |
3024 | # define TV_CC_DATA_1_MASK 0x0000007f | 3025 | # define TV_CC_DATA_1_MASK 0x0000007f |
3025 | # define TV_CC_DATA_1_SHIFT 0 | 3026 | # define TV_CC_DATA_1_SHIFT 0 |
3026 | 3027 | ||
3027 | #define TV_H_LUMA_0 0x68100 | 3028 | #define TV_H_LUMA_0 0x68100 |
3028 | #define TV_H_LUMA_59 0x681ec | 3029 | #define TV_H_LUMA_59 0x681ec |
3029 | #define TV_H_CHROMA_0 0x68200 | 3030 | #define TV_H_CHROMA_0 0x68200 |
3030 | #define TV_H_CHROMA_59 0x682ec | 3031 | #define TV_H_CHROMA_59 0x682ec |
3031 | #define TV_V_LUMA_0 0x68300 | 3032 | #define TV_V_LUMA_0 0x68300 |
3032 | #define TV_V_LUMA_42 0x683a8 | 3033 | #define TV_V_LUMA_42 0x683a8 |
3033 | #define TV_V_CHROMA_0 0x68400 | 3034 | #define TV_V_CHROMA_0 0x68400 |
3034 | #define TV_V_CHROMA_42 0x684a8 | 3035 | #define TV_V_CHROMA_42 0x684a8 |
3035 | 3036 | ||
3036 | /* Display Port */ | 3037 | /* Display Port */ |
3037 | #define DP_A 0x64000 /* eDP */ | 3038 | #define DP_A 0x64000 /* eDP */ |
3038 | #define DP_B 0x64100 | 3039 | #define DP_B 0x64100 |
3039 | #define DP_C 0x64200 | 3040 | #define DP_C 0x64200 |
3040 | #define DP_D 0x64300 | 3041 | #define DP_D 0x64300 |
3041 | 3042 | ||
3042 | #define DP_PORT_EN (1 << 31) | 3043 | #define DP_PORT_EN (1 << 31) |
3043 | #define DP_PIPEB_SELECT (1 << 30) | 3044 | #define DP_PIPEB_SELECT (1 << 30) |
3044 | #define DP_PIPE_MASK (1 << 30) | 3045 | #define DP_PIPE_MASK (1 << 30) |
3045 | 3046 | ||
3046 | /* Link training mode - select a suitable mode for each stage */ | 3047 | /* Link training mode - select a suitable mode for each stage */ |
3047 | #define DP_LINK_TRAIN_PAT_1 (0 << 28) | 3048 | #define DP_LINK_TRAIN_PAT_1 (0 << 28) |
3048 | #define DP_LINK_TRAIN_PAT_2 (1 << 28) | 3049 | #define DP_LINK_TRAIN_PAT_2 (1 << 28) |
3049 | #define DP_LINK_TRAIN_PAT_IDLE (2 << 28) | 3050 | #define DP_LINK_TRAIN_PAT_IDLE (2 << 28) |
3050 | #define DP_LINK_TRAIN_OFF (3 << 28) | 3051 | #define DP_LINK_TRAIN_OFF (3 << 28) |
3051 | #define DP_LINK_TRAIN_MASK (3 << 28) | 3052 | #define DP_LINK_TRAIN_MASK (3 << 28) |
3052 | #define DP_LINK_TRAIN_SHIFT 28 | 3053 | #define DP_LINK_TRAIN_SHIFT 28 |
3053 | 3054 | ||
3054 | /* CPT Link training mode */ | 3055 | /* CPT Link training mode */ |
3055 | #define DP_LINK_TRAIN_PAT_1_CPT (0 << 8) | 3056 | #define DP_LINK_TRAIN_PAT_1_CPT (0 << 8) |
3056 | #define DP_LINK_TRAIN_PAT_2_CPT (1 << 8) | 3057 | #define DP_LINK_TRAIN_PAT_2_CPT (1 << 8) |
3057 | #define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8) | 3058 | #define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8) |
3058 | #define DP_LINK_TRAIN_OFF_CPT (3 << 8) | 3059 | #define DP_LINK_TRAIN_OFF_CPT (3 << 8) |
3059 | #define DP_LINK_TRAIN_MASK_CPT (7 << 8) | 3060 | #define DP_LINK_TRAIN_MASK_CPT (7 << 8) |
3060 | #define DP_LINK_TRAIN_SHIFT_CPT 8 | 3061 | #define DP_LINK_TRAIN_SHIFT_CPT 8 |
3061 | 3062 | ||
3062 | /* Signal voltages. These are mostly controlled by the other end */ | 3063 | /* Signal voltages. These are mostly controlled by the other end */ |
3063 | #define DP_VOLTAGE_0_4 (0 << 25) | 3064 | #define DP_VOLTAGE_0_4 (0 << 25) |
3064 | #define DP_VOLTAGE_0_6 (1 << 25) | 3065 | #define DP_VOLTAGE_0_6 (1 << 25) |
3065 | #define DP_VOLTAGE_0_8 (2 << 25) | 3066 | #define DP_VOLTAGE_0_8 (2 << 25) |
3066 | #define DP_VOLTAGE_1_2 (3 << 25) | 3067 | #define DP_VOLTAGE_1_2 (3 << 25) |
3067 | #define DP_VOLTAGE_MASK (7 << 25) | 3068 | #define DP_VOLTAGE_MASK (7 << 25) |
3068 | #define DP_VOLTAGE_SHIFT 25 | 3069 | #define DP_VOLTAGE_SHIFT 25 |
3069 | 3070 | ||
3070 | /* Signal pre-emphasis levels, like voltages, the other end tells us what | 3071 | /* Signal pre-emphasis levels, like voltages, the other end tells us what |
3071 | * they want | 3072 | * they want |
3072 | */ | 3073 | */ |
3073 | #define DP_PRE_EMPHASIS_0 (0 << 22) | 3074 | #define DP_PRE_EMPHASIS_0 (0 << 22) |
3074 | #define DP_PRE_EMPHASIS_3_5 (1 << 22) | 3075 | #define DP_PRE_EMPHASIS_3_5 (1 << 22) |
3075 | #define DP_PRE_EMPHASIS_6 (2 << 22) | 3076 | #define DP_PRE_EMPHASIS_6 (2 << 22) |
3076 | #define DP_PRE_EMPHASIS_9_5 (3 << 22) | 3077 | #define DP_PRE_EMPHASIS_9_5 (3 << 22) |
3077 | #define DP_PRE_EMPHASIS_MASK (7 << 22) | 3078 | #define DP_PRE_EMPHASIS_MASK (7 << 22) |
3078 | #define DP_PRE_EMPHASIS_SHIFT 22 | 3079 | #define DP_PRE_EMPHASIS_SHIFT 22 |
3079 | 3080 | ||
3080 | /* How many wires to use. I guess 3 was too hard */ | 3081 | /* How many wires to use. I guess 3 was too hard */ |
3081 | #define DP_PORT_WIDTH(width) (((width) - 1) << 19) | 3082 | #define DP_PORT_WIDTH(width) (((width) - 1) << 19) |
3082 | #define DP_PORT_WIDTH_MASK (7 << 19) | 3083 | #define DP_PORT_WIDTH_MASK (7 << 19) |
3083 | 3084 | ||
3084 | /* Mystic DPCD version 1.1 special mode */ | 3085 | /* Mystic DPCD version 1.1 special mode */ |
3085 | #define DP_ENHANCED_FRAMING (1 << 18) | 3086 | #define DP_ENHANCED_FRAMING (1 << 18) |
3086 | 3087 | ||
3087 | /* eDP */ | 3088 | /* eDP */ |
3088 | #define DP_PLL_FREQ_270MHZ (0 << 16) | 3089 | #define DP_PLL_FREQ_270MHZ (0 << 16) |
3089 | #define DP_PLL_FREQ_160MHZ (1 << 16) | 3090 | #define DP_PLL_FREQ_160MHZ (1 << 16) |
3090 | #define DP_PLL_FREQ_MASK (3 << 16) | 3091 | #define DP_PLL_FREQ_MASK (3 << 16) |
3091 | 3092 | ||
3092 | /** locked once port is enabled */ | 3093 | /** locked once port is enabled */ |
3093 | #define DP_PORT_REVERSAL (1 << 15) | 3094 | #define DP_PORT_REVERSAL (1 << 15) |
3094 | 3095 | ||
3095 | /* eDP */ | 3096 | /* eDP */ |
3096 | #define DP_PLL_ENABLE (1 << 14) | 3097 | #define DP_PLL_ENABLE (1 << 14) |
3097 | 3098 | ||
3098 | /** sends the clock on lane 15 of the PEG for debug */ | 3099 | /** sends the clock on lane 15 of the PEG for debug */ |
3099 | #define DP_CLOCK_OUTPUT_ENABLE (1 << 13) | 3100 | #define DP_CLOCK_OUTPUT_ENABLE (1 << 13) |
3100 | 3101 | ||
3101 | #define DP_SCRAMBLING_DISABLE (1 << 12) | 3102 | #define DP_SCRAMBLING_DISABLE (1 << 12) |
3102 | #define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7) | 3103 | #define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7) |
3103 | 3104 | ||
3104 | /** limit RGB values to avoid confusing TVs */ | 3105 | /** limit RGB values to avoid confusing TVs */ |
3105 | #define DP_COLOR_RANGE_16_235 (1 << 8) | 3106 | #define DP_COLOR_RANGE_16_235 (1 << 8) |
3106 | 3107 | ||
3107 | /** Turn on the audio link */ | 3108 | /** Turn on the audio link */ |
3108 | #define DP_AUDIO_OUTPUT_ENABLE (1 << 6) | 3109 | #define DP_AUDIO_OUTPUT_ENABLE (1 << 6) |
3109 | 3110 | ||
3110 | /** vs and hs sync polarity */ | 3111 | /** vs and hs sync polarity */ |
3111 | #define DP_SYNC_VS_HIGH (1 << 4) | 3112 | #define DP_SYNC_VS_HIGH (1 << 4) |
3112 | #define DP_SYNC_HS_HIGH (1 << 3) | 3113 | #define DP_SYNC_HS_HIGH (1 << 3) |
3113 | 3114 | ||
3114 | /** A fantasy */ | 3115 | /** A fantasy */ |
3115 | #define DP_DETECTED (1 << 2) | 3116 | #define DP_DETECTED (1 << 2) |
3116 | 3117 | ||
3117 | /** The aux channel provides a way to talk to the | 3118 | /** The aux channel provides a way to talk to the |
3118 | * signal sink for DDC etc. Max packet size supported | 3119 | * signal sink for DDC etc. Max packet size supported |
3119 | * is 20 bytes in each direction, hence the 5 fixed | 3120 | * is 20 bytes in each direction, hence the 5 fixed |
3120 | * data registers | 3121 | * data registers |
3121 | */ | 3122 | */ |
3122 | #define DPA_AUX_CH_CTL 0x64010 | 3123 | #define DPA_AUX_CH_CTL 0x64010 |
3123 | #define DPA_AUX_CH_DATA1 0x64014 | 3124 | #define DPA_AUX_CH_DATA1 0x64014 |
3124 | #define DPA_AUX_CH_DATA2 0x64018 | 3125 | #define DPA_AUX_CH_DATA2 0x64018 |
3125 | #define DPA_AUX_CH_DATA3 0x6401c | 3126 | #define DPA_AUX_CH_DATA3 0x6401c |
3126 | #define DPA_AUX_CH_DATA4 0x64020 | 3127 | #define DPA_AUX_CH_DATA4 0x64020 |
3127 | #define DPA_AUX_CH_DATA5 0x64024 | 3128 | #define DPA_AUX_CH_DATA5 0x64024 |
3128 | 3129 | ||
3129 | #define DPB_AUX_CH_CTL 0x64110 | 3130 | #define DPB_AUX_CH_CTL 0x64110 |
3130 | #define DPB_AUX_CH_DATA1 0x64114 | 3131 | #define DPB_AUX_CH_DATA1 0x64114 |
3131 | #define DPB_AUX_CH_DATA2 0x64118 | 3132 | #define DPB_AUX_CH_DATA2 0x64118 |
3132 | #define DPB_AUX_CH_DATA3 0x6411c | 3133 | #define DPB_AUX_CH_DATA3 0x6411c |
3133 | #define DPB_AUX_CH_DATA4 0x64120 | 3134 | #define DPB_AUX_CH_DATA4 0x64120 |
3134 | #define DPB_AUX_CH_DATA5 0x64124 | 3135 | #define DPB_AUX_CH_DATA5 0x64124 |
3135 | 3136 | ||
3136 | #define DPC_AUX_CH_CTL 0x64210 | 3137 | #define DPC_AUX_CH_CTL 0x64210 |
3137 | #define DPC_AUX_CH_DATA1 0x64214 | 3138 | #define DPC_AUX_CH_DATA1 0x64214 |
3138 | #define DPC_AUX_CH_DATA2 0x64218 | 3139 | #define DPC_AUX_CH_DATA2 0x64218 |
3139 | #define DPC_AUX_CH_DATA3 0x6421c | 3140 | #define DPC_AUX_CH_DATA3 0x6421c |
3140 | #define DPC_AUX_CH_DATA4 0x64220 | 3141 | #define DPC_AUX_CH_DATA4 0x64220 |
3141 | #define DPC_AUX_CH_DATA5 0x64224 | 3142 | #define DPC_AUX_CH_DATA5 0x64224 |
3142 | 3143 | ||
3143 | #define DPD_AUX_CH_CTL 0x64310 | 3144 | #define DPD_AUX_CH_CTL 0x64310 |
3144 | #define DPD_AUX_CH_DATA1 0x64314 | 3145 | #define DPD_AUX_CH_DATA1 0x64314 |
3145 | #define DPD_AUX_CH_DATA2 0x64318 | 3146 | #define DPD_AUX_CH_DATA2 0x64318 |
3146 | #define DPD_AUX_CH_DATA3 0x6431c | 3147 | #define DPD_AUX_CH_DATA3 0x6431c |
3147 | #define DPD_AUX_CH_DATA4 0x64320 | 3148 | #define DPD_AUX_CH_DATA4 0x64320 |
3148 | #define DPD_AUX_CH_DATA5 0x64324 | 3149 | #define DPD_AUX_CH_DATA5 0x64324 |
3149 | 3150 | ||
3150 | #define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) | 3151 | #define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) |
3151 | #define DP_AUX_CH_CTL_DONE (1 << 30) | 3152 | #define DP_AUX_CH_CTL_DONE (1 << 30) |
3152 | #define DP_AUX_CH_CTL_INTERRUPT (1 << 29) | 3153 | #define DP_AUX_CH_CTL_INTERRUPT (1 << 29) |
3153 | #define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28) | 3154 | #define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28) |
3154 | #define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26) | 3155 | #define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26) |
3155 | #define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26) | 3156 | #define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26) |
3156 | #define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26) | 3157 | #define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26) |
3157 | #define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26) | 3158 | #define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26) |
3158 | #define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26) | 3159 | #define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26) |
3159 | #define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25) | 3160 | #define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25) |
3160 | #define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20) | 3161 | #define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20) |
3161 | #define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20 | 3162 | #define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20 |
3162 | #define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16) | 3163 | #define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16) |
3163 | #define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16 | 3164 | #define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16 |
3164 | #define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15) | 3165 | #define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15) |
3165 | #define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14) | 3166 | #define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14) |
3166 | #define DP_AUX_CH_CTL_SYNC_TEST (1 << 13) | 3167 | #define DP_AUX_CH_CTL_SYNC_TEST (1 << 13) |
3167 | #define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12) | 3168 | #define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12) |
3168 | #define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) | 3169 | #define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) |
3169 | #define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) | 3170 | #define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) |
3170 | #define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 | 3171 | #define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 |
3171 | 3172 | ||
3172 | /* | 3173 | /* |
3173 | * Computing GMCH M and N values for the Display Port link | 3174 | * Computing GMCH M and N values for the Display Port link |
3174 | * | 3175 | * |
3175 | * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes | 3176 | * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes |
3176 | * | 3177 | * |
3177 | * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz) | 3178 | * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz) |
3178 | * | 3179 | * |
3179 | * The GMCH value is used internally | 3180 | * The GMCH value is used internally |
3180 | * | 3181 | * |
3181 | * bytes_per_pixel is the number of bytes coming out of the plane, | 3182 | * bytes_per_pixel is the number of bytes coming out of the plane, |
3182 | * which is after the LUTs, so we want the bytes for our color format. | 3183 | * which is after the LUTs, so we want the bytes for our color format. |
3183 | * For our current usage, this is always 3, one byte for R, G and B. | 3184 | * For our current usage, this is always 3, one byte for R, G and B. |
3184 | */ | 3185 | */ |
3185 | #define _PIPEA_DATA_M_G4X 0x70050 | 3186 | #define _PIPEA_DATA_M_G4X 0x70050 |
3186 | #define _PIPEB_DATA_M_G4X 0x71050 | 3187 | #define _PIPEB_DATA_M_G4X 0x71050 |
3187 | 3188 | ||
3188 | /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ | 3189 | /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ |
3189 | #define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ | 3190 | #define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ |
3190 | #define TU_SIZE_SHIFT 25 | 3191 | #define TU_SIZE_SHIFT 25 |
3191 | #define TU_SIZE_MASK (0x3f << 25) | 3192 | #define TU_SIZE_MASK (0x3f << 25) |
3192 | 3193 | ||
3193 | #define DATA_LINK_M_N_MASK (0xffffff) | 3194 | #define DATA_LINK_M_N_MASK (0xffffff) |
3194 | #define DATA_LINK_N_MAX (0x800000) | 3195 | #define DATA_LINK_N_MAX (0x800000) |
3195 | 3196 | ||
3196 | #define _PIPEA_DATA_N_G4X 0x70054 | 3197 | #define _PIPEA_DATA_N_G4X 0x70054 |
3197 | #define _PIPEB_DATA_N_G4X 0x71054 | 3198 | #define _PIPEB_DATA_N_G4X 0x71054 |
3198 | #define PIPE_GMCH_DATA_N_MASK (0xffffff) | 3199 | #define PIPE_GMCH_DATA_N_MASK (0xffffff) |
3199 | 3200 | ||
3200 | /* | 3201 | /* |
3201 | * Computing Link M and N values for the Display Port link | 3202 | * Computing Link M and N values for the Display Port link |
3202 | * | 3203 | * |
3203 | * Link M / N = pixel_clock / ls_clk | 3204 | * Link M / N = pixel_clock / ls_clk |
3204 | * | 3205 | * |
3205 | * (the DP spec calls pixel_clock the 'strm_clk') | 3206 | * (the DP spec calls pixel_clock the 'strm_clk') |
3206 | * | 3207 | * |
3207 | * The Link value is transmitted in the Main Stream | 3208 | * The Link value is transmitted in the Main Stream |
3208 | * Attributes and VB-ID. | 3209 | * Attributes and VB-ID. |
3209 | */ | 3210 | */ |
3210 | 3211 | ||
3211 | #define _PIPEA_LINK_M_G4X 0x70060 | 3212 | #define _PIPEA_LINK_M_G4X 0x70060 |
3212 | #define _PIPEB_LINK_M_G4X 0x71060 | 3213 | #define _PIPEB_LINK_M_G4X 0x71060 |
3213 | #define PIPEA_DP_LINK_M_MASK (0xffffff) | 3214 | #define PIPEA_DP_LINK_M_MASK (0xffffff) |
3214 | 3215 | ||
3215 | #define _PIPEA_LINK_N_G4X 0x70064 | 3216 | #define _PIPEA_LINK_N_G4X 0x70064 |
3216 | #define _PIPEB_LINK_N_G4X 0x71064 | 3217 | #define _PIPEB_LINK_N_G4X 0x71064 |
3217 | #define PIPEA_DP_LINK_N_MASK (0xffffff) | 3218 | #define PIPEA_DP_LINK_N_MASK (0xffffff) |
3218 | 3219 | ||
3219 | #define PIPE_DATA_M_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X) | 3220 | #define PIPE_DATA_M_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X) |
3220 | #define PIPE_DATA_N_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X) | 3221 | #define PIPE_DATA_N_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X) |
3221 | #define PIPE_LINK_M_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X) | 3222 | #define PIPE_LINK_M_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X) |
3222 | #define PIPE_LINK_N_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X) | 3223 | #define PIPE_LINK_N_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X) |
3223 | 3224 | ||
3224 | /* Display & cursor control */ | 3225 | /* Display & cursor control */ |
3225 | 3226 | ||
3226 | /* Pipe A */ | 3227 | /* Pipe A */ |
3227 | #define _PIPEADSL 0x70000 | 3228 | #define _PIPEADSL 0x70000 |
3228 | #define DSL_LINEMASK_GEN2 0x00000fff | 3229 | #define DSL_LINEMASK_GEN2 0x00000fff |
3229 | #define DSL_LINEMASK_GEN3 0x00001fff | 3230 | #define DSL_LINEMASK_GEN3 0x00001fff |
3230 | #define _PIPEACONF 0x70008 | 3231 | #define _PIPEACONF 0x70008 |
3231 | #define PIPECONF_ENABLE (1<<31) | 3232 | #define PIPECONF_ENABLE (1<<31) |
3232 | #define PIPECONF_DISABLE 0 | 3233 | #define PIPECONF_DISABLE 0 |
3233 | #define PIPECONF_DOUBLE_WIDE (1<<30) | 3234 | #define PIPECONF_DOUBLE_WIDE (1<<30) |
3234 | #define I965_PIPECONF_ACTIVE (1<<30) | 3235 | #define I965_PIPECONF_ACTIVE (1<<30) |
3235 | #define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */ | 3236 | #define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */ |
3236 | #define PIPECONF_FRAME_START_DELAY_MASK (3<<27) | 3237 | #define PIPECONF_FRAME_START_DELAY_MASK (3<<27) |
3237 | #define PIPECONF_SINGLE_WIDE 0 | 3238 | #define PIPECONF_SINGLE_WIDE 0 |
3238 | #define PIPECONF_PIPE_UNLOCKED 0 | 3239 | #define PIPECONF_PIPE_UNLOCKED 0 |
3239 | #define PIPECONF_PIPE_LOCKED (1<<25) | 3240 | #define PIPECONF_PIPE_LOCKED (1<<25) |
3240 | #define PIPECONF_PALETTE 0 | 3241 | #define PIPECONF_PALETTE 0 |
3241 | #define PIPECONF_GAMMA (1<<24) | 3242 | #define PIPECONF_GAMMA (1<<24) |
3242 | #define PIPECONF_FORCE_BORDER (1<<25) | 3243 | #define PIPECONF_FORCE_BORDER (1<<25) |
3243 | #define PIPECONF_INTERLACE_MASK (7 << 21) | 3244 | #define PIPECONF_INTERLACE_MASK (7 << 21) |
3244 | #define PIPECONF_INTERLACE_MASK_HSW (3 << 21) | 3245 | #define PIPECONF_INTERLACE_MASK_HSW (3 << 21) |
3245 | /* Note that pre-gen3 does not support interlaced display directly. Panel | 3246 | /* Note that pre-gen3 does not support interlaced display directly. Panel |
3246 | * fitting must be disabled on pre-ilk for interlaced. */ | 3247 | * fitting must be disabled on pre-ilk for interlaced. */ |
3247 | #define PIPECONF_PROGRESSIVE (0 << 21) | 3248 | #define PIPECONF_PROGRESSIVE (0 << 21) |
3248 | #define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */ | 3249 | #define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */ |
3249 | #define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */ | 3250 | #define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */ |
3250 | #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) | 3251 | #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) |
3251 | #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */ | 3252 | #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */ |
3252 | /* Ironlake and later have a complete new set of values for interlaced. PFIT | 3253 | /* Ironlake and later have a complete new set of values for interlaced. PFIT |
3253 | * means panel fitter required, PF means progressive fetch, DBL means power | 3254 | * means panel fitter required, PF means progressive fetch, DBL means power |
3254 | * saving pixel doubling. */ | 3255 | * saving pixel doubling. */ |
3255 | #define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21) | 3256 | #define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21) |
3256 | #define PIPECONF_INTERLACED_ILK (3 << 21) | 3257 | #define PIPECONF_INTERLACED_ILK (3 << 21) |
3257 | #define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ | 3258 | #define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ |
3258 | #define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ | 3259 | #define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ |
3259 | #define PIPECONF_INTERLACE_MODE_MASK (7 << 21) | 3260 | #define PIPECONF_INTERLACE_MODE_MASK (7 << 21) |
3260 | #define PIPECONF_CXSR_DOWNCLOCK (1<<16) | 3261 | #define PIPECONF_CXSR_DOWNCLOCK (1<<16) |
3261 | #define PIPECONF_COLOR_RANGE_SELECT (1 << 13) | 3262 | #define PIPECONF_COLOR_RANGE_SELECT (1 << 13) |
3262 | #define PIPECONF_BPC_MASK (0x7 << 5) | 3263 | #define PIPECONF_BPC_MASK (0x7 << 5) |
3263 | #define PIPECONF_8BPC (0<<5) | 3264 | #define PIPECONF_8BPC (0<<5) |
3264 | #define PIPECONF_10BPC (1<<5) | 3265 | #define PIPECONF_10BPC (1<<5) |
3265 | #define PIPECONF_6BPC (2<<5) | 3266 | #define PIPECONF_6BPC (2<<5) |
3266 | #define PIPECONF_12BPC (3<<5) | 3267 | #define PIPECONF_12BPC (3<<5) |
3267 | #define PIPECONF_DITHER_EN (1<<4) | 3268 | #define PIPECONF_DITHER_EN (1<<4) |
3268 | #define PIPECONF_DITHER_TYPE_MASK (0x0000000c) | 3269 | #define PIPECONF_DITHER_TYPE_MASK (0x0000000c) |
3269 | #define PIPECONF_DITHER_TYPE_SP (0<<2) | 3270 | #define PIPECONF_DITHER_TYPE_SP (0<<2) |
3270 | #define PIPECONF_DITHER_TYPE_ST1 (1<<2) | 3271 | #define PIPECONF_DITHER_TYPE_ST1 (1<<2) |
3271 | #define PIPECONF_DITHER_TYPE_ST2 (2<<2) | 3272 | #define PIPECONF_DITHER_TYPE_ST2 (2<<2) |
3272 | #define PIPECONF_DITHER_TYPE_TEMP (3<<2) | 3273 | #define PIPECONF_DITHER_TYPE_TEMP (3<<2) |
3273 | #define _PIPEASTAT 0x70024 | 3274 | #define _PIPEASTAT 0x70024 |
3274 | #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) | 3275 | #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) |
3275 | #define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30) | 3276 | #define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30) |
3276 | #define PIPE_CRC_ERROR_ENABLE (1UL<<29) | 3277 | #define PIPE_CRC_ERROR_ENABLE (1UL<<29) |
3277 | #define PIPE_CRC_DONE_ENABLE (1UL<<28) | 3278 | #define PIPE_CRC_DONE_ENABLE (1UL<<28) |
3278 | #define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) | 3279 | #define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) |
3279 | #define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26) | 3280 | #define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26) |
3280 | #define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) | 3281 | #define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) |
3281 | #define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) | 3282 | #define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) |
3282 | #define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) | 3283 | #define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) |
3283 | #define PIPE_DPST_EVENT_ENABLE (1UL<<23) | 3284 | #define PIPE_DPST_EVENT_ENABLE (1UL<<23) |
3284 | #define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22) | 3285 | #define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22) |
3285 | #define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) | 3286 | #define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) |
3286 | #define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) | 3287 | #define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) |
3287 | #define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) | 3288 | #define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) |
3288 | #define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19) | 3289 | #define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19) |
3289 | #define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ | 3290 | #define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ |
3290 | #define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ | 3291 | #define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ |
3291 | #define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) | 3292 | #define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) |
3292 | #define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) | 3293 | #define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) |
3293 | #define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) | 3294 | #define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) |
3294 | #define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15) | 3295 | #define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15) |
3295 | #define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14) | 3296 | #define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14) |
3296 | #define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) | 3297 | #define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) |
3297 | #define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) | 3298 | #define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) |
3298 | #define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) | 3299 | #define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) |
3299 | #define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10) | 3300 | #define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10) |
3300 | #define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) | 3301 | #define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) |
3301 | #define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) | 3302 | #define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) |
3302 | #define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) | 3303 | #define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) |
3303 | #define PIPE_DPST_EVENT_STATUS (1UL<<7) | 3304 | #define PIPE_DPST_EVENT_STATUS (1UL<<7) |
3304 | #define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) | 3305 | #define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) |
3305 | #define PIPE_A_PSR_STATUS_VLV (1UL<<6) | 3306 | #define PIPE_A_PSR_STATUS_VLV (1UL<<6) |
3306 | #define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) | 3307 | #define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) |
3307 | #define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) | 3308 | #define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) |
3308 | #define PIPE_B_PSR_STATUS_VLV (1UL<<3) | 3309 | #define PIPE_B_PSR_STATUS_VLV (1UL<<3) |
3309 | #define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ | 3310 | #define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ |
3310 | #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ | 3311 | #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ |
3311 | #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) | 3312 | #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) |
3312 | #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) | 3313 | #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) |
3313 | 3314 | ||
3314 | #define PIPESTAT_INT_ENABLE_MASK 0x7fff0000 | 3315 | #define PIPESTAT_INT_ENABLE_MASK 0x7fff0000 |
3315 | #define PIPESTAT_INT_STATUS_MASK 0x0000ffff | 3316 | #define PIPESTAT_INT_STATUS_MASK 0x0000ffff |
3316 | 3317 | ||
3317 | #define PIPE_A_OFFSET 0x70000 | 3318 | #define PIPE_A_OFFSET 0x70000 |
3318 | #define PIPE_B_OFFSET 0x71000 | 3319 | #define PIPE_B_OFFSET 0x71000 |
3319 | #define PIPE_C_OFFSET 0x72000 | 3320 | #define PIPE_C_OFFSET 0x72000 |
3320 | /* | 3321 | /* |
3321 | * There's actually no pipe EDP. Some pipe registers have | 3322 | * There's actually no pipe EDP. Some pipe registers have |
3322 | * simply shifted from the pipe to the transcoder, while | 3323 | * simply shifted from the pipe to the transcoder, while |
3323 | * keeping their original offset. Thus we need PIPE_EDP_OFFSET | 3324 | * keeping their original offset. Thus we need PIPE_EDP_OFFSET |
3324 | * to access such registers in transcoder EDP. | 3325 | * to access such registers in transcoder EDP. |
3325 | */ | 3326 | */ |
3326 | #define PIPE_EDP_OFFSET 0x7f000 | 3327 | #define PIPE_EDP_OFFSET 0x7f000 |
3327 | 3328 | ||
3328 | #define _PIPE2(pipe, reg) (dev_priv->info.pipe_offsets[pipe] - \ | 3329 | #define _PIPE2(pipe, reg) (dev_priv->info.pipe_offsets[pipe] - \ |
3329 | dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \ | 3330 | dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \ |
3330 | dev_priv->info.display_mmio_offset) | 3331 | dev_priv->info.display_mmio_offset) |
3331 | 3332 | ||
3332 | #define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF) | 3333 | #define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF) |
3333 | #define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL) | 3334 | #define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL) |
3334 | #define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH) | 3335 | #define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH) |
3335 | #define PIPEFRAMEPIXEL(pipe) _PIPE2(pipe, _PIPEAFRAMEPIXEL) | 3336 | #define PIPEFRAMEPIXEL(pipe) _PIPE2(pipe, _PIPEAFRAMEPIXEL) |
3336 | #define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT) | 3337 | #define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT) |
3337 | 3338 | ||
3338 | #define _PIPE_MISC_A 0x70030 | 3339 | #define _PIPE_MISC_A 0x70030 |
3339 | #define _PIPE_MISC_B 0x71030 | 3340 | #define _PIPE_MISC_B 0x71030 |
3340 | #define PIPEMISC_DITHER_BPC_MASK (7<<5) | 3341 | #define PIPEMISC_DITHER_BPC_MASK (7<<5) |
3341 | #define PIPEMISC_DITHER_8_BPC (0<<5) | 3342 | #define PIPEMISC_DITHER_8_BPC (0<<5) |
3342 | #define PIPEMISC_DITHER_10_BPC (1<<5) | 3343 | #define PIPEMISC_DITHER_10_BPC (1<<5) |
3343 | #define PIPEMISC_DITHER_6_BPC (2<<5) | 3344 | #define PIPEMISC_DITHER_6_BPC (2<<5) |
3344 | #define PIPEMISC_DITHER_12_BPC (3<<5) | 3345 | #define PIPEMISC_DITHER_12_BPC (3<<5) |
3345 | #define PIPEMISC_DITHER_ENABLE (1<<4) | 3346 | #define PIPEMISC_DITHER_ENABLE (1<<4) |
3346 | #define PIPEMISC_DITHER_TYPE_MASK (3<<2) | 3347 | #define PIPEMISC_DITHER_TYPE_MASK (3<<2) |
3347 | #define PIPEMISC_DITHER_TYPE_SP (0<<2) | 3348 | #define PIPEMISC_DITHER_TYPE_SP (0<<2) |
3348 | #define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A) | 3349 | #define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A) |
3349 | 3350 | ||
3350 | #define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) | 3351 | #define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) |
3351 | #define PIPEB_LINE_COMPARE_INT_EN (1<<29) | 3352 | #define PIPEB_LINE_COMPARE_INT_EN (1<<29) |
3352 | #define PIPEB_HLINE_INT_EN (1<<28) | 3353 | #define PIPEB_HLINE_INT_EN (1<<28) |
3353 | #define PIPEB_VBLANK_INT_EN (1<<27) | 3354 | #define PIPEB_VBLANK_INT_EN (1<<27) |
3354 | #define SPRITED_FLIP_DONE_INT_EN (1<<26) | 3355 | #define SPRITED_FLIP_DONE_INT_EN (1<<26) |
3355 | #define SPRITEC_FLIP_DONE_INT_EN (1<<25) | 3356 | #define SPRITEC_FLIP_DONE_INT_EN (1<<25) |
3356 | #define PLANEB_FLIP_DONE_INT_EN (1<<24) | 3357 | #define PLANEB_FLIP_DONE_INT_EN (1<<24) |
3357 | #define PIPEA_LINE_COMPARE_INT_EN (1<<21) | 3358 | #define PIPEA_LINE_COMPARE_INT_EN (1<<21) |
3358 | #define PIPEA_HLINE_INT_EN (1<<20) | 3359 | #define PIPEA_HLINE_INT_EN (1<<20) |
3359 | #define PIPEA_VBLANK_INT_EN (1<<19) | 3360 | #define PIPEA_VBLANK_INT_EN (1<<19) |
3360 | #define SPRITEB_FLIP_DONE_INT_EN (1<<18) | 3361 | #define SPRITEB_FLIP_DONE_INT_EN (1<<18) |
3361 | #define SPRITEA_FLIP_DONE_INT_EN (1<<17) | 3362 | #define SPRITEA_FLIP_DONE_INT_EN (1<<17) |
3362 | #define PLANEA_FLIPDONE_INT_EN (1<<16) | 3363 | #define PLANEA_FLIPDONE_INT_EN (1<<16) |
3363 | 3364 | ||
3364 | #define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */ | 3365 | #define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */ |
3365 | #define CURSORB_INVALID_GTT_INT_EN (1<<23) | 3366 | #define CURSORB_INVALID_GTT_INT_EN (1<<23) |
3366 | #define CURSORA_INVALID_GTT_INT_EN (1<<22) | 3367 | #define CURSORA_INVALID_GTT_INT_EN (1<<22) |
3367 | #define SPRITED_INVALID_GTT_INT_EN (1<<21) | 3368 | #define SPRITED_INVALID_GTT_INT_EN (1<<21) |
3368 | #define SPRITEC_INVALID_GTT_INT_EN (1<<20) | 3369 | #define SPRITEC_INVALID_GTT_INT_EN (1<<20) |
3369 | #define PLANEB_INVALID_GTT_INT_EN (1<<19) | 3370 | #define PLANEB_INVALID_GTT_INT_EN (1<<19) |
3370 | #define SPRITEB_INVALID_GTT_INT_EN (1<<18) | 3371 | #define SPRITEB_INVALID_GTT_INT_EN (1<<18) |
3371 | #define SPRITEA_INVALID_GTT_INT_EN (1<<17) | 3372 | #define SPRITEA_INVALID_GTT_INT_EN (1<<17) |
3372 | #define PLANEA_INVALID_GTT_INT_EN (1<<16) | 3373 | #define PLANEA_INVALID_GTT_INT_EN (1<<16) |
3373 | #define DPINVGTT_EN_MASK 0xff0000 | 3374 | #define DPINVGTT_EN_MASK 0xff0000 |
3374 | #define CURSORB_INVALID_GTT_STATUS (1<<7) | 3375 | #define CURSORB_INVALID_GTT_STATUS (1<<7) |
3375 | #define CURSORA_INVALID_GTT_STATUS (1<<6) | 3376 | #define CURSORA_INVALID_GTT_STATUS (1<<6) |
3376 | #define SPRITED_INVALID_GTT_STATUS (1<<5) | 3377 | #define SPRITED_INVALID_GTT_STATUS (1<<5) |
3377 | #define SPRITEC_INVALID_GTT_STATUS (1<<4) | 3378 | #define SPRITEC_INVALID_GTT_STATUS (1<<4) |
3378 | #define PLANEB_INVALID_GTT_STATUS (1<<3) | 3379 | #define PLANEB_INVALID_GTT_STATUS (1<<3) |
3379 | #define SPRITEB_INVALID_GTT_STATUS (1<<2) | 3380 | #define SPRITEB_INVALID_GTT_STATUS (1<<2) |
3380 | #define SPRITEA_INVALID_GTT_STATUS (1<<1) | 3381 | #define SPRITEA_INVALID_GTT_STATUS (1<<1) |
3381 | #define PLANEA_INVALID_GTT_STATUS (1<<0) | 3382 | #define PLANEA_INVALID_GTT_STATUS (1<<0) |
3382 | #define DPINVGTT_STATUS_MASK 0xff | 3383 | #define DPINVGTT_STATUS_MASK 0xff |
3383 | 3384 | ||
3384 | #define DSPARB 0x70030 | 3385 | #define DSPARB 0x70030 |
3385 | #define DSPARB_CSTART_MASK (0x7f << 7) | 3386 | #define DSPARB_CSTART_MASK (0x7f << 7) |
3386 | #define DSPARB_CSTART_SHIFT 7 | 3387 | #define DSPARB_CSTART_SHIFT 7 |
3387 | #define DSPARB_BSTART_MASK (0x7f) | 3388 | #define DSPARB_BSTART_MASK (0x7f) |
3388 | #define DSPARB_BSTART_SHIFT 0 | 3389 | #define DSPARB_BSTART_SHIFT 0 |
3389 | #define DSPARB_BEND_SHIFT 9 /* on 855 */ | 3390 | #define DSPARB_BEND_SHIFT 9 /* on 855 */ |
3390 | #define DSPARB_AEND_SHIFT 0 | 3391 | #define DSPARB_AEND_SHIFT 0 |
3391 | 3392 | ||
3392 | #define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) | 3393 | #define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) |
3393 | #define DSPFW_SR_SHIFT 23 | 3394 | #define DSPFW_SR_SHIFT 23 |
3394 | #define DSPFW_SR_MASK (0x1ff<<23) | 3395 | #define DSPFW_SR_MASK (0x1ff<<23) |
3395 | #define DSPFW_CURSORB_SHIFT 16 | 3396 | #define DSPFW_CURSORB_SHIFT 16 |
3396 | #define DSPFW_CURSORB_MASK (0x3f<<16) | 3397 | #define DSPFW_CURSORB_MASK (0x3f<<16) |
3397 | #define DSPFW_PLANEB_SHIFT 8 | 3398 | #define DSPFW_PLANEB_SHIFT 8 |
3398 | #define DSPFW_PLANEB_MASK (0x7f<<8) | 3399 | #define DSPFW_PLANEB_MASK (0x7f<<8) |
3399 | #define DSPFW_PLANEA_MASK (0x7f) | 3400 | #define DSPFW_PLANEA_MASK (0x7f) |
3400 | #define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038) | 3401 | #define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038) |
3401 | #define DSPFW_CURSORA_MASK 0x00003f00 | 3402 | #define DSPFW_CURSORA_MASK 0x00003f00 |
3402 | #define DSPFW_CURSORA_SHIFT 8 | 3403 | #define DSPFW_CURSORA_SHIFT 8 |
3403 | #define DSPFW_PLANEC_MASK (0x7f) | 3404 | #define DSPFW_PLANEC_MASK (0x7f) |
3404 | #define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c) | 3405 | #define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c) |
3405 | #define DSPFW_HPLL_SR_EN (1<<31) | 3406 | #define DSPFW_HPLL_SR_EN (1<<31) |
3406 | #define DSPFW_CURSOR_SR_SHIFT 24 | 3407 | #define DSPFW_CURSOR_SR_SHIFT 24 |
3407 | #define PINEVIEW_SELF_REFRESH_EN (1<<30) | 3408 | #define PINEVIEW_SELF_REFRESH_EN (1<<30) |
3408 | #define DSPFW_CURSOR_SR_MASK (0x3f<<24) | 3409 | #define DSPFW_CURSOR_SR_MASK (0x3f<<24) |
3409 | #define DSPFW_HPLL_CURSOR_SHIFT 16 | 3410 | #define DSPFW_HPLL_CURSOR_SHIFT 16 |
3410 | #define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) | 3411 | #define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) |
3411 | #define DSPFW_HPLL_SR_MASK (0x1ff) | 3412 | #define DSPFW_HPLL_SR_MASK (0x1ff) |
3412 | #define DSPFW4 (dev_priv->info.display_mmio_offset + 0x70070) | 3413 | #define DSPFW4 (dev_priv->info.display_mmio_offset + 0x70070) |
3413 | #define DSPFW7 (dev_priv->info.display_mmio_offset + 0x7007c) | 3414 | #define DSPFW7 (dev_priv->info.display_mmio_offset + 0x7007c) |
3414 | 3415 | ||
3415 | /* drain latency register values*/ | 3416 | /* drain latency register values*/ |
3416 | #define DRAIN_LATENCY_PRECISION_32 32 | 3417 | #define DRAIN_LATENCY_PRECISION_32 32 |
3417 | #define DRAIN_LATENCY_PRECISION_16 16 | 3418 | #define DRAIN_LATENCY_PRECISION_16 16 |
3418 | #define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050) | 3419 | #define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050) |
3419 | #define DDL_CURSORA_PRECISION_32 (1<<31) | 3420 | #define DDL_CURSORA_PRECISION_32 (1<<31) |
3420 | #define DDL_CURSORA_PRECISION_16 (0<<31) | 3421 | #define DDL_CURSORA_PRECISION_16 (0<<31) |
3421 | #define DDL_CURSORA_SHIFT 24 | 3422 | #define DDL_CURSORA_SHIFT 24 |
3422 | #define DDL_PLANEA_PRECISION_32 (1<<7) | 3423 | #define DDL_PLANEA_PRECISION_32 (1<<7) |
3423 | #define DDL_PLANEA_PRECISION_16 (0<<7) | 3424 | #define DDL_PLANEA_PRECISION_16 (0<<7) |
3424 | #define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054) | 3425 | #define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054) |
3425 | #define DDL_CURSORB_PRECISION_32 (1<<31) | 3426 | #define DDL_CURSORB_PRECISION_32 (1<<31) |
3426 | #define DDL_CURSORB_PRECISION_16 (0<<31) | 3427 | #define DDL_CURSORB_PRECISION_16 (0<<31) |
3427 | #define DDL_CURSORB_SHIFT 24 | 3428 | #define DDL_CURSORB_SHIFT 24 |
3428 | #define DDL_PLANEB_PRECISION_32 (1<<7) | 3429 | #define DDL_PLANEB_PRECISION_32 (1<<7) |
3429 | #define DDL_PLANEB_PRECISION_16 (0<<7) | 3430 | #define DDL_PLANEB_PRECISION_16 (0<<7) |
3430 | 3431 | ||
3431 | /* FIFO watermark sizes etc */ | 3432 | /* FIFO watermark sizes etc */ |
3432 | #define G4X_FIFO_LINE_SIZE 64 | 3433 | #define G4X_FIFO_LINE_SIZE 64 |
3433 | #define I915_FIFO_LINE_SIZE 64 | 3434 | #define I915_FIFO_LINE_SIZE 64 |
3434 | #define I830_FIFO_LINE_SIZE 32 | 3435 | #define I830_FIFO_LINE_SIZE 32 |
3435 | 3436 | ||
3436 | #define VALLEYVIEW_FIFO_SIZE 255 | 3437 | #define VALLEYVIEW_FIFO_SIZE 255 |
3437 | #define G4X_FIFO_SIZE 127 | 3438 | #define G4X_FIFO_SIZE 127 |
3438 | #define I965_FIFO_SIZE 512 | 3439 | #define I965_FIFO_SIZE 512 |
3439 | #define I945_FIFO_SIZE 127 | 3440 | #define I945_FIFO_SIZE 127 |
3440 | #define I915_FIFO_SIZE 95 | 3441 | #define I915_FIFO_SIZE 95 |
3441 | #define I855GM_FIFO_SIZE 127 /* In cachelines */ | 3442 | #define I855GM_FIFO_SIZE 127 /* In cachelines */ |
3442 | #define I830_FIFO_SIZE 95 | 3443 | #define I830_FIFO_SIZE 95 |
3443 | 3444 | ||
3444 | #define VALLEYVIEW_MAX_WM 0xff | 3445 | #define VALLEYVIEW_MAX_WM 0xff |
3445 | #define G4X_MAX_WM 0x3f | 3446 | #define G4X_MAX_WM 0x3f |
3446 | #define I915_MAX_WM 0x3f | 3447 | #define I915_MAX_WM 0x3f |
3447 | 3448 | ||
3448 | #define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */ | 3449 | #define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */ |
3449 | #define PINEVIEW_FIFO_LINE_SIZE 64 | 3450 | #define PINEVIEW_FIFO_LINE_SIZE 64 |
3450 | #define PINEVIEW_MAX_WM 0x1ff | 3451 | #define PINEVIEW_MAX_WM 0x1ff |
3451 | #define PINEVIEW_DFT_WM 0x3f | 3452 | #define PINEVIEW_DFT_WM 0x3f |
3452 | #define PINEVIEW_DFT_HPLLOFF_WM 0 | 3453 | #define PINEVIEW_DFT_HPLLOFF_WM 0 |
3453 | #define PINEVIEW_GUARD_WM 10 | 3454 | #define PINEVIEW_GUARD_WM 10 |
3454 | #define PINEVIEW_CURSOR_FIFO 64 | 3455 | #define PINEVIEW_CURSOR_FIFO 64 |
3455 | #define PINEVIEW_CURSOR_MAX_WM 0x3f | 3456 | #define PINEVIEW_CURSOR_MAX_WM 0x3f |
3456 | #define PINEVIEW_CURSOR_DFT_WM 0 | 3457 | #define PINEVIEW_CURSOR_DFT_WM 0 |
3457 | #define PINEVIEW_CURSOR_GUARD_WM 5 | 3458 | #define PINEVIEW_CURSOR_GUARD_WM 5 |
3458 | 3459 | ||
3459 | #define VALLEYVIEW_CURSOR_MAX_WM 64 | 3460 | #define VALLEYVIEW_CURSOR_MAX_WM 64 |
3460 | #define I965_CURSOR_FIFO 64 | 3461 | #define I965_CURSOR_FIFO 64 |
3461 | #define I965_CURSOR_MAX_WM 32 | 3462 | #define I965_CURSOR_MAX_WM 32 |
3462 | #define I965_CURSOR_DFT_WM 8 | 3463 | #define I965_CURSOR_DFT_WM 8 |
3463 | 3464 | ||
3464 | /* define the Watermark register on Ironlake */ | 3465 | /* define the Watermark register on Ironlake */ |
3465 | #define WM0_PIPEA_ILK 0x45100 | 3466 | #define WM0_PIPEA_ILK 0x45100 |
3466 | #define WM0_PIPE_PLANE_MASK (0xffff<<16) | 3467 | #define WM0_PIPE_PLANE_MASK (0xffff<<16) |
3467 | #define WM0_PIPE_PLANE_SHIFT 16 | 3468 | #define WM0_PIPE_PLANE_SHIFT 16 |
3468 | #define WM0_PIPE_SPRITE_MASK (0xff<<8) | 3469 | #define WM0_PIPE_SPRITE_MASK (0xff<<8) |
3469 | #define WM0_PIPE_SPRITE_SHIFT 8 | 3470 | #define WM0_PIPE_SPRITE_SHIFT 8 |
3470 | #define WM0_PIPE_CURSOR_MASK (0xff) | 3471 | #define WM0_PIPE_CURSOR_MASK (0xff) |
3471 | 3472 | ||
3472 | #define WM0_PIPEB_ILK 0x45104 | 3473 | #define WM0_PIPEB_ILK 0x45104 |
3473 | #define WM0_PIPEC_IVB 0x45200 | 3474 | #define WM0_PIPEC_IVB 0x45200 |
3474 | #define WM1_LP_ILK 0x45108 | 3475 | #define WM1_LP_ILK 0x45108 |
3475 | #define WM1_LP_SR_EN (1<<31) | 3476 | #define WM1_LP_SR_EN (1<<31) |
3476 | #define WM1_LP_LATENCY_SHIFT 24 | 3477 | #define WM1_LP_LATENCY_SHIFT 24 |
3477 | #define WM1_LP_LATENCY_MASK (0x7f<<24) | 3478 | #define WM1_LP_LATENCY_MASK (0x7f<<24) |
3478 | #define WM1_LP_FBC_MASK (0xf<<20) | 3479 | #define WM1_LP_FBC_MASK (0xf<<20) |
3479 | #define WM1_LP_FBC_SHIFT 20 | 3480 | #define WM1_LP_FBC_SHIFT 20 |
3480 | #define WM1_LP_FBC_SHIFT_BDW 19 | 3481 | #define WM1_LP_FBC_SHIFT_BDW 19 |
3481 | #define WM1_LP_SR_MASK (0x7ff<<8) | 3482 | #define WM1_LP_SR_MASK (0x7ff<<8) |
3482 | #define WM1_LP_SR_SHIFT 8 | 3483 | #define WM1_LP_SR_SHIFT 8 |
3483 | #define WM1_LP_CURSOR_MASK (0xff) | 3484 | #define WM1_LP_CURSOR_MASK (0xff) |
3484 | #define WM2_LP_ILK 0x4510c | 3485 | #define WM2_LP_ILK 0x4510c |
3485 | #define WM2_LP_EN (1<<31) | 3486 | #define WM2_LP_EN (1<<31) |
3486 | #define WM3_LP_ILK 0x45110 | 3487 | #define WM3_LP_ILK 0x45110 |
3487 | #define WM3_LP_EN (1<<31) | 3488 | #define WM3_LP_EN (1<<31) |
3488 | #define WM1S_LP_ILK 0x45120 | 3489 | #define WM1S_LP_ILK 0x45120 |
3489 | #define WM2S_LP_IVB 0x45124 | 3490 | #define WM2S_LP_IVB 0x45124 |
3490 | #define WM3S_LP_IVB 0x45128 | 3491 | #define WM3S_LP_IVB 0x45128 |
3491 | #define WM1S_LP_EN (1<<31) | 3492 | #define WM1S_LP_EN (1<<31) |
3492 | 3493 | ||
3493 | #define HSW_WM_LP_VAL(lat, fbc, pri, cur) \ | 3494 | #define HSW_WM_LP_VAL(lat, fbc, pri, cur) \ |
3494 | (WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \ | 3495 | (WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \ |
3495 | ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur)) | 3496 | ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur)) |
3496 | 3497 | ||
3497 | /* Memory latency timer register */ | 3498 | /* Memory latency timer register */ |
3498 | #define MLTR_ILK 0x11222 | 3499 | #define MLTR_ILK 0x11222 |
3499 | #define MLTR_WM1_SHIFT 0 | 3500 | #define MLTR_WM1_SHIFT 0 |
3500 | #define MLTR_WM2_SHIFT 8 | 3501 | #define MLTR_WM2_SHIFT 8 |
3501 | /* the unit of memory self-refresh latency time is 0.5us */ | 3502 | /* the unit of memory self-refresh latency time is 0.5us */ |
3502 | #define ILK_SRLT_MASK 0x3f | 3503 | #define ILK_SRLT_MASK 0x3f |
3503 | 3504 | ||
3504 | 3505 | ||
3505 | /* the address where we get all kinds of latency value */ | 3506 | /* the address where we get all kinds of latency value */ |
3506 | #define SSKPD 0x5d10 | 3507 | #define SSKPD 0x5d10 |
3507 | #define SSKPD_WM_MASK 0x3f | 3508 | #define SSKPD_WM_MASK 0x3f |
3508 | #define SSKPD_WM0_SHIFT 0 | 3509 | #define SSKPD_WM0_SHIFT 0 |
3509 | #define SSKPD_WM1_SHIFT 8 | 3510 | #define SSKPD_WM1_SHIFT 8 |
3510 | #define SSKPD_WM2_SHIFT 16 | 3511 | #define SSKPD_WM2_SHIFT 16 |
3511 | #define SSKPD_WM3_SHIFT 24 | 3512 | #define SSKPD_WM3_SHIFT 24 |
3512 | 3513 | ||
3513 | /* | 3514 | /* |
3514 | * The two pipe frame counter registers are not synchronized, so | 3515 | * The two pipe frame counter registers are not synchronized, so |
3515 | * reading a stable value is somewhat tricky. The following code | 3516 | * reading a stable value is somewhat tricky. The following code |
3516 | * should work: | 3517 | * should work: |
3517 | * | 3518 | * |
3518 | * do { | 3519 | * do { |
3519 | * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> | 3520 | * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> |
3520 | * PIPE_FRAME_HIGH_SHIFT; | 3521 | * PIPE_FRAME_HIGH_SHIFT; |
3521 | * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >> | 3522 | * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >> |
3522 | * PIPE_FRAME_LOW_SHIFT); | 3523 | * PIPE_FRAME_LOW_SHIFT); |
3523 | * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> | 3524 | * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> |
3524 | * PIPE_FRAME_HIGH_SHIFT); | 3525 | * PIPE_FRAME_HIGH_SHIFT); |
3525 | * } while (high1 != high2); | 3526 | * } while (high1 != high2); |
3526 | * frame = (high1 << 8) | low1; | 3527 | * frame = (high1 << 8) | low1; |
3527 | */ | 3528 | */ |
3528 | #define _PIPEAFRAMEHIGH 0x70040 | 3529 | #define _PIPEAFRAMEHIGH 0x70040 |
3529 | #define PIPE_FRAME_HIGH_MASK 0x0000ffff | 3530 | #define PIPE_FRAME_HIGH_MASK 0x0000ffff |
3530 | #define PIPE_FRAME_HIGH_SHIFT 0 | 3531 | #define PIPE_FRAME_HIGH_SHIFT 0 |
3531 | #define _PIPEAFRAMEPIXEL 0x70044 | 3532 | #define _PIPEAFRAMEPIXEL 0x70044 |
3532 | #define PIPE_FRAME_LOW_MASK 0xff000000 | 3533 | #define PIPE_FRAME_LOW_MASK 0xff000000 |
3533 | #define PIPE_FRAME_LOW_SHIFT 24 | 3534 | #define PIPE_FRAME_LOW_SHIFT 24 |
3534 | #define PIPE_PIXEL_MASK 0x00ffffff | 3535 | #define PIPE_PIXEL_MASK 0x00ffffff |
3535 | #define PIPE_PIXEL_SHIFT 0 | 3536 | #define PIPE_PIXEL_SHIFT 0 |
3536 | /* GM45+ just has to be different */ | 3537 | /* GM45+ just has to be different */ |
3537 | #define _PIPEA_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70040) | 3538 | #define _PIPEA_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70040) |
3538 | #define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70044) | 3539 | #define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70044) |
3539 | #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) | 3540 | #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) |
3540 | 3541 | ||
3541 | /* Cursor A & B regs */ | 3542 | /* Cursor A & B regs */ |
3542 | #define _CURACNTR (dev_priv->info.display_mmio_offset + 0x70080) | 3543 | #define _CURACNTR (dev_priv->info.display_mmio_offset + 0x70080) |
3543 | /* Old style CUR*CNTR flags (desktop 8xx) */ | 3544 | /* Old style CUR*CNTR flags (desktop 8xx) */ |
3544 | #define CURSOR_ENABLE 0x80000000 | 3545 | #define CURSOR_ENABLE 0x80000000 |
3545 | #define CURSOR_GAMMA_ENABLE 0x40000000 | 3546 | #define CURSOR_GAMMA_ENABLE 0x40000000 |
3546 | #define CURSOR_STRIDE_MASK 0x30000000 | 3547 | #define CURSOR_STRIDE_MASK 0x30000000 |
3547 | #define CURSOR_PIPE_CSC_ENABLE (1<<24) | 3548 | #define CURSOR_PIPE_CSC_ENABLE (1<<24) |
3548 | #define CURSOR_FORMAT_SHIFT 24 | 3549 | #define CURSOR_FORMAT_SHIFT 24 |
3549 | #define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT) | 3550 | #define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT) |
3550 | #define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT) | 3551 | #define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT) |
3551 | #define CURSOR_FORMAT_3C (0x01 << CURSOR_FORMAT_SHIFT) | 3552 | #define CURSOR_FORMAT_3C (0x01 << CURSOR_FORMAT_SHIFT) |
3552 | #define CURSOR_FORMAT_4C (0x02 << CURSOR_FORMAT_SHIFT) | 3553 | #define CURSOR_FORMAT_4C (0x02 << CURSOR_FORMAT_SHIFT) |
3553 | #define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT) | 3554 | #define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT) |
3554 | #define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT) | 3555 | #define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT) |
3555 | /* New style CUR*CNTR flags */ | 3556 | /* New style CUR*CNTR flags */ |
3556 | #define CURSOR_MODE 0x27 | 3557 | #define CURSOR_MODE 0x27 |
3557 | #define CURSOR_MODE_DISABLE 0x00 | 3558 | #define CURSOR_MODE_DISABLE 0x00 |
3558 | #define CURSOR_MODE_128_32B_AX 0x02 | 3559 | #define CURSOR_MODE_128_32B_AX 0x02 |
3559 | #define CURSOR_MODE_256_32B_AX 0x03 | 3560 | #define CURSOR_MODE_256_32B_AX 0x03 |
3560 | #define CURSOR_MODE_64_32B_AX 0x07 | 3561 | #define CURSOR_MODE_64_32B_AX 0x07 |
3561 | #define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX) | 3562 | #define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX) |
3562 | #define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX) | 3563 | #define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX) |
3563 | #define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) | 3564 | #define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) |
3564 | #define MCURSOR_PIPE_SELECT (1 << 28) | 3565 | #define MCURSOR_PIPE_SELECT (1 << 28) |
3565 | #define MCURSOR_PIPE_A 0x00 | 3566 | #define MCURSOR_PIPE_A 0x00 |
3566 | #define MCURSOR_PIPE_B (1 << 28) | 3567 | #define MCURSOR_PIPE_B (1 << 28) |
3567 | #define MCURSOR_GAMMA_ENABLE (1 << 26) | 3568 | #define MCURSOR_GAMMA_ENABLE (1 << 26) |
3568 | #define CURSOR_TRICKLE_FEED_DISABLE (1 << 14) | 3569 | #define CURSOR_TRICKLE_FEED_DISABLE (1 << 14) |
3569 | #define _CURABASE (dev_priv->info.display_mmio_offset + 0x70084) | 3570 | #define _CURABASE (dev_priv->info.display_mmio_offset + 0x70084) |
3570 | #define _CURAPOS (dev_priv->info.display_mmio_offset + 0x70088) | 3571 | #define _CURAPOS (dev_priv->info.display_mmio_offset + 0x70088) |
3571 | #define CURSOR_POS_MASK 0x007FF | 3572 | #define CURSOR_POS_MASK 0x007FF |
3572 | #define CURSOR_POS_SIGN 0x8000 | 3573 | #define CURSOR_POS_SIGN 0x8000 |
3573 | #define CURSOR_X_SHIFT 0 | 3574 | #define CURSOR_X_SHIFT 0 |
3574 | #define CURSOR_Y_SHIFT 16 | 3575 | #define CURSOR_Y_SHIFT 16 |
3575 | #define CURSIZE 0x700a0 | 3576 | #define CURSIZE 0x700a0 |
3576 | #define _CURBCNTR (dev_priv->info.display_mmio_offset + 0x700c0) | 3577 | #define _CURBCNTR (dev_priv->info.display_mmio_offset + 0x700c0) |
3577 | #define _CURBBASE (dev_priv->info.display_mmio_offset + 0x700c4) | 3578 | #define _CURBBASE (dev_priv->info.display_mmio_offset + 0x700c4) |
3578 | #define _CURBPOS (dev_priv->info.display_mmio_offset + 0x700c8) | 3579 | #define _CURBPOS (dev_priv->info.display_mmio_offset + 0x700c8) |
3579 | 3580 | ||
3580 | #define _CURBCNTR_IVB 0x71080 | 3581 | #define _CURBCNTR_IVB 0x71080 |
3581 | #define _CURBBASE_IVB 0x71084 | 3582 | #define _CURBBASE_IVB 0x71084 |
3582 | #define _CURBPOS_IVB 0x71088 | 3583 | #define _CURBPOS_IVB 0x71088 |
3583 | 3584 | ||
3584 | #define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) | 3585 | #define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) |
3585 | #define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) | 3586 | #define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) |
3586 | #define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) | 3587 | #define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) |
3587 | 3588 | ||
3588 | #define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB) | 3589 | #define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB) |
3589 | #define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB) | 3590 | #define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB) |
3590 | #define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) | 3591 | #define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) |
3591 | 3592 | ||
3592 | /* Display A control */ | 3593 | /* Display A control */ |
3593 | #define _DSPACNTR 0x70180 | 3594 | #define _DSPACNTR 0x70180 |
3594 | #define DISPLAY_PLANE_ENABLE (1<<31) | 3595 | #define DISPLAY_PLANE_ENABLE (1<<31) |
3595 | #define DISPLAY_PLANE_DISABLE 0 | 3596 | #define DISPLAY_PLANE_DISABLE 0 |
3596 | #define DISPPLANE_GAMMA_ENABLE (1<<30) | 3597 | #define DISPPLANE_GAMMA_ENABLE (1<<30) |
3597 | #define DISPPLANE_GAMMA_DISABLE 0 | 3598 | #define DISPPLANE_GAMMA_DISABLE 0 |
3598 | #define DISPPLANE_PIXFORMAT_MASK (0xf<<26) | 3599 | #define DISPPLANE_PIXFORMAT_MASK (0xf<<26) |
3599 | #define DISPPLANE_YUV422 (0x0<<26) | 3600 | #define DISPPLANE_YUV422 (0x0<<26) |
3600 | #define DISPPLANE_8BPP (0x2<<26) | 3601 | #define DISPPLANE_8BPP (0x2<<26) |
3601 | #define DISPPLANE_BGRA555 (0x3<<26) | 3602 | #define DISPPLANE_BGRA555 (0x3<<26) |
3602 | #define DISPPLANE_BGRX555 (0x4<<26) | 3603 | #define DISPPLANE_BGRX555 (0x4<<26) |
3603 | #define DISPPLANE_BGRX565 (0x5<<26) | 3604 | #define DISPPLANE_BGRX565 (0x5<<26) |
3604 | #define DISPPLANE_BGRX888 (0x6<<26) | 3605 | #define DISPPLANE_BGRX888 (0x6<<26) |
3605 | #define DISPPLANE_BGRA888 (0x7<<26) | 3606 | #define DISPPLANE_BGRA888 (0x7<<26) |
3606 | #define DISPPLANE_RGBX101010 (0x8<<26) | 3607 | #define DISPPLANE_RGBX101010 (0x8<<26) |
3607 | #define DISPPLANE_RGBA101010 (0x9<<26) | 3608 | #define DISPPLANE_RGBA101010 (0x9<<26) |
3608 | #define DISPPLANE_BGRX101010 (0xa<<26) | 3609 | #define DISPPLANE_BGRX101010 (0xa<<26) |
3609 | #define DISPPLANE_RGBX161616 (0xc<<26) | 3610 | #define DISPPLANE_RGBX161616 (0xc<<26) |
3610 | #define DISPPLANE_RGBX888 (0xe<<26) | 3611 | #define DISPPLANE_RGBX888 (0xe<<26) |
3611 | #define DISPPLANE_RGBA888 (0xf<<26) | 3612 | #define DISPPLANE_RGBA888 (0xf<<26) |
3612 | #define DISPPLANE_STEREO_ENABLE (1<<25) | 3613 | #define DISPPLANE_STEREO_ENABLE (1<<25) |
3613 | #define DISPPLANE_STEREO_DISABLE 0 | 3614 | #define DISPPLANE_STEREO_DISABLE 0 |
3614 | #define DISPPLANE_PIPE_CSC_ENABLE (1<<24) | 3615 | #define DISPPLANE_PIPE_CSC_ENABLE (1<<24) |
3615 | #define DISPPLANE_SEL_PIPE_SHIFT 24 | 3616 | #define DISPPLANE_SEL_PIPE_SHIFT 24 |
3616 | #define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT) | 3617 | #define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT) |
3617 | #define DISPPLANE_SEL_PIPE_A 0 | 3618 | #define DISPPLANE_SEL_PIPE_A 0 |
3618 | #define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT) | 3619 | #define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT) |
3619 | #define DISPPLANE_SRC_KEY_ENABLE (1<<22) | 3620 | #define DISPPLANE_SRC_KEY_ENABLE (1<<22) |
3620 | #define DISPPLANE_SRC_KEY_DISABLE 0 | 3621 | #define DISPPLANE_SRC_KEY_DISABLE 0 |
3621 | #define DISPPLANE_LINE_DOUBLE (1<<20) | 3622 | #define DISPPLANE_LINE_DOUBLE (1<<20) |
3622 | #define DISPPLANE_NO_LINE_DOUBLE 0 | 3623 | #define DISPPLANE_NO_LINE_DOUBLE 0 |
3623 | #define DISPPLANE_STEREO_POLARITY_FIRST 0 | 3624 | #define DISPPLANE_STEREO_POLARITY_FIRST 0 |
3624 | #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) | 3625 | #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) |
3625 | #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ | 3626 | #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ |
3626 | #define DISPPLANE_TILED (1<<10) | 3627 | #define DISPPLANE_TILED (1<<10) |
3627 | #define _DSPAADDR 0x70184 | 3628 | #define _DSPAADDR 0x70184 |
3628 | #define _DSPASTRIDE 0x70188 | 3629 | #define _DSPASTRIDE 0x70188 |
3629 | #define _DSPAPOS 0x7018C /* reserved */ | 3630 | #define _DSPAPOS 0x7018C /* reserved */ |
3630 | #define _DSPASIZE 0x70190 | 3631 | #define _DSPASIZE 0x70190 |
3631 | #define _DSPASURF 0x7019C /* 965+ only */ | 3632 | #define _DSPASURF 0x7019C /* 965+ only */ |
3632 | #define _DSPATILEOFF 0x701A4 /* 965+ only */ | 3633 | #define _DSPATILEOFF 0x701A4 /* 965+ only */ |
3633 | #define _DSPAOFFSET 0x701A4 /* HSW */ | 3634 | #define _DSPAOFFSET 0x701A4 /* HSW */ |
3634 | #define _DSPASURFLIVE 0x701AC | 3635 | #define _DSPASURFLIVE 0x701AC |
3635 | 3636 | ||
3636 | #define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR) | 3637 | #define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR) |
3637 | #define DSPADDR(plane) _PIPE2(plane, _DSPAADDR) | 3638 | #define DSPADDR(plane) _PIPE2(plane, _DSPAADDR) |
3638 | #define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE) | 3639 | #define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE) |
3639 | #define DSPPOS(plane) _PIPE2(plane, _DSPAPOS) | 3640 | #define DSPPOS(plane) _PIPE2(plane, _DSPAPOS) |
3640 | #define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE) | 3641 | #define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE) |
3641 | #define DSPSURF(plane) _PIPE2(plane, _DSPASURF) | 3642 | #define DSPSURF(plane) _PIPE2(plane, _DSPASURF) |
3642 | #define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF) | 3643 | #define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF) |
3643 | #define DSPLINOFF(plane) DSPADDR(plane) | 3644 | #define DSPLINOFF(plane) DSPADDR(plane) |
3644 | #define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET) | 3645 | #define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET) |
3645 | #define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE) | 3646 | #define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE) |
3646 | 3647 | ||
3647 | /* Display/Sprite base address macros */ | 3648 | /* Display/Sprite base address macros */ |
3648 | #define DISP_BASEADDR_MASK (0xfffff000) | 3649 | #define DISP_BASEADDR_MASK (0xfffff000) |
3649 | #define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK) | 3650 | #define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK) |
3650 | #define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK) | 3651 | #define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK) |
3651 | 3652 | ||
3652 | /* VBIOS flags */ | 3653 | /* VBIOS flags */ |
3653 | #define SWF00 (dev_priv->info.display_mmio_offset + 0x71410) | 3654 | #define SWF00 (dev_priv->info.display_mmio_offset + 0x71410) |
3654 | #define SWF01 (dev_priv->info.display_mmio_offset + 0x71414) | 3655 | #define SWF01 (dev_priv->info.display_mmio_offset + 0x71414) |
3655 | #define SWF02 (dev_priv->info.display_mmio_offset + 0x71418) | 3656 | #define SWF02 (dev_priv->info.display_mmio_offset + 0x71418) |
3656 | #define SWF03 (dev_priv->info.display_mmio_offset + 0x7141c) | 3657 | #define SWF03 (dev_priv->info.display_mmio_offset + 0x7141c) |
3657 | #define SWF04 (dev_priv->info.display_mmio_offset + 0x71420) | 3658 | #define SWF04 (dev_priv->info.display_mmio_offset + 0x71420) |
3658 | #define SWF05 (dev_priv->info.display_mmio_offset + 0x71424) | 3659 | #define SWF05 (dev_priv->info.display_mmio_offset + 0x71424) |
3659 | #define SWF06 (dev_priv->info.display_mmio_offset + 0x71428) | 3660 | #define SWF06 (dev_priv->info.display_mmio_offset + 0x71428) |
3660 | #define SWF10 (dev_priv->info.display_mmio_offset + 0x70410) | 3661 | #define SWF10 (dev_priv->info.display_mmio_offset + 0x70410) |
3661 | #define SWF11 (dev_priv->info.display_mmio_offset + 0x70414) | 3662 | #define SWF11 (dev_priv->info.display_mmio_offset + 0x70414) |
3662 | #define SWF14 (dev_priv->info.display_mmio_offset + 0x71420) | 3663 | #define SWF14 (dev_priv->info.display_mmio_offset + 0x71420) |
3663 | #define SWF30 (dev_priv->info.display_mmio_offset + 0x72414) | 3664 | #define SWF30 (dev_priv->info.display_mmio_offset + 0x72414) |
3664 | #define SWF31 (dev_priv->info.display_mmio_offset + 0x72418) | 3665 | #define SWF31 (dev_priv->info.display_mmio_offset + 0x72418) |
3665 | #define SWF32 (dev_priv->info.display_mmio_offset + 0x7241c) | 3666 | #define SWF32 (dev_priv->info.display_mmio_offset + 0x7241c) |
3666 | 3667 | ||
3667 | /* Pipe B */ | 3668 | /* Pipe B */ |
3668 | #define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000) | 3669 | #define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000) |
3669 | #define _PIPEBCONF (dev_priv->info.display_mmio_offset + 0x71008) | 3670 | #define _PIPEBCONF (dev_priv->info.display_mmio_offset + 0x71008) |
3670 | #define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024) | 3671 | #define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024) |
3671 | #define _PIPEBFRAMEHIGH 0x71040 | 3672 | #define _PIPEBFRAMEHIGH 0x71040 |
3672 | #define _PIPEBFRAMEPIXEL 0x71044 | 3673 | #define _PIPEBFRAMEPIXEL 0x71044 |
3673 | #define _PIPEB_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71040) | 3674 | #define _PIPEB_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71040) |
3674 | #define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71044) | 3675 | #define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71044) |
3675 | 3676 | ||
3676 | 3677 | ||
3677 | /* Display B control */ | 3678 | /* Display B control */ |
3678 | #define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180) | 3679 | #define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180) |
3679 | #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) | 3680 | #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) |
3680 | #define DISPPLANE_ALPHA_TRANS_DISABLE 0 | 3681 | #define DISPPLANE_ALPHA_TRANS_DISABLE 0 |
3681 | #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 | 3682 | #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 |
3682 | #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) | 3683 | #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) |
3683 | #define _DSPBADDR (dev_priv->info.display_mmio_offset + 0x71184) | 3684 | #define _DSPBADDR (dev_priv->info.display_mmio_offset + 0x71184) |
3684 | #define _DSPBSTRIDE (dev_priv->info.display_mmio_offset + 0x71188) | 3685 | #define _DSPBSTRIDE (dev_priv->info.display_mmio_offset + 0x71188) |
3685 | #define _DSPBPOS (dev_priv->info.display_mmio_offset + 0x7118C) | 3686 | #define _DSPBPOS (dev_priv->info.display_mmio_offset + 0x7118C) |
3686 | #define _DSPBSIZE (dev_priv->info.display_mmio_offset + 0x71190) | 3687 | #define _DSPBSIZE (dev_priv->info.display_mmio_offset + 0x71190) |
3687 | #define _DSPBSURF (dev_priv->info.display_mmio_offset + 0x7119C) | 3688 | #define _DSPBSURF (dev_priv->info.display_mmio_offset + 0x7119C) |
3688 | #define _DSPBTILEOFF (dev_priv->info.display_mmio_offset + 0x711A4) | 3689 | #define _DSPBTILEOFF (dev_priv->info.display_mmio_offset + 0x711A4) |
3689 | #define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4) | 3690 | #define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4) |
3690 | #define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC) | 3691 | #define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC) |
3691 | 3692 | ||
3692 | /* Sprite A control */ | 3693 | /* Sprite A control */ |
3693 | #define _DVSACNTR 0x72180 | 3694 | #define _DVSACNTR 0x72180 |
3694 | #define DVS_ENABLE (1<<31) | 3695 | #define DVS_ENABLE (1<<31) |
3695 | #define DVS_GAMMA_ENABLE (1<<30) | 3696 | #define DVS_GAMMA_ENABLE (1<<30) |
3696 | #define DVS_PIXFORMAT_MASK (3<<25) | 3697 | #define DVS_PIXFORMAT_MASK (3<<25) |
3697 | #define DVS_FORMAT_YUV422 (0<<25) | 3698 | #define DVS_FORMAT_YUV422 (0<<25) |
3698 | #define DVS_FORMAT_RGBX101010 (1<<25) | 3699 | #define DVS_FORMAT_RGBX101010 (1<<25) |
3699 | #define DVS_FORMAT_RGBX888 (2<<25) | 3700 | #define DVS_FORMAT_RGBX888 (2<<25) |
3700 | #define DVS_FORMAT_RGBX161616 (3<<25) | 3701 | #define DVS_FORMAT_RGBX161616 (3<<25) |
3701 | #define DVS_PIPE_CSC_ENABLE (1<<24) | 3702 | #define DVS_PIPE_CSC_ENABLE (1<<24) |
3702 | #define DVS_SOURCE_KEY (1<<22) | 3703 | #define DVS_SOURCE_KEY (1<<22) |
3703 | #define DVS_RGB_ORDER_XBGR (1<<20) | 3704 | #define DVS_RGB_ORDER_XBGR (1<<20) |
3704 | #define DVS_YUV_BYTE_ORDER_MASK (3<<16) | 3705 | #define DVS_YUV_BYTE_ORDER_MASK (3<<16) |
3705 | #define DVS_YUV_ORDER_YUYV (0<<16) | 3706 | #define DVS_YUV_ORDER_YUYV (0<<16) |
3706 | #define DVS_YUV_ORDER_UYVY (1<<16) | 3707 | #define DVS_YUV_ORDER_UYVY (1<<16) |
3707 | #define DVS_YUV_ORDER_YVYU (2<<16) | 3708 | #define DVS_YUV_ORDER_YVYU (2<<16) |
3708 | #define DVS_YUV_ORDER_VYUY (3<<16) | 3709 | #define DVS_YUV_ORDER_VYUY (3<<16) |
3709 | #define DVS_DEST_KEY (1<<2) | 3710 | #define DVS_DEST_KEY (1<<2) |
3710 | #define DVS_TRICKLE_FEED_DISABLE (1<<14) | 3711 | #define DVS_TRICKLE_FEED_DISABLE (1<<14) |
3711 | #define DVS_TILED (1<<10) | 3712 | #define DVS_TILED (1<<10) |
3712 | #define _DVSALINOFF 0x72184 | 3713 | #define _DVSALINOFF 0x72184 |
3713 | #define _DVSASTRIDE 0x72188 | 3714 | #define _DVSASTRIDE 0x72188 |
3714 | #define _DVSAPOS 0x7218c | 3715 | #define _DVSAPOS 0x7218c |
3715 | #define _DVSASIZE 0x72190 | 3716 | #define _DVSASIZE 0x72190 |
3716 | #define _DVSAKEYVAL 0x72194 | 3717 | #define _DVSAKEYVAL 0x72194 |
3717 | #define _DVSAKEYMSK 0x72198 | 3718 | #define _DVSAKEYMSK 0x72198 |
3718 | #define _DVSASURF 0x7219c | 3719 | #define _DVSASURF 0x7219c |
3719 | #define _DVSAKEYMAXVAL 0x721a0 | 3720 | #define _DVSAKEYMAXVAL 0x721a0 |
3720 | #define _DVSATILEOFF 0x721a4 | 3721 | #define _DVSATILEOFF 0x721a4 |
3721 | #define _DVSASURFLIVE 0x721ac | 3722 | #define _DVSASURFLIVE 0x721ac |
3722 | #define _DVSASCALE 0x72204 | 3723 | #define _DVSASCALE 0x72204 |
3723 | #define DVS_SCALE_ENABLE (1<<31) | 3724 | #define DVS_SCALE_ENABLE (1<<31) |
3724 | #define DVS_FILTER_MASK (3<<29) | 3725 | #define DVS_FILTER_MASK (3<<29) |
3725 | #define DVS_FILTER_MEDIUM (0<<29) | 3726 | #define DVS_FILTER_MEDIUM (0<<29) |
3726 | #define DVS_FILTER_ENHANCING (1<<29) | 3727 | #define DVS_FILTER_ENHANCING (1<<29) |
3727 | #define DVS_FILTER_SOFTENING (2<<29) | 3728 | #define DVS_FILTER_SOFTENING (2<<29) |
3728 | #define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */ | 3729 | #define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */ |
3729 | #define DVS_VERTICAL_OFFSET_ENABLE (1<<27) | 3730 | #define DVS_VERTICAL_OFFSET_ENABLE (1<<27) |
3730 | #define _DVSAGAMC 0x72300 | 3731 | #define _DVSAGAMC 0x72300 |
3731 | 3732 | ||
3732 | #define _DVSBCNTR 0x73180 | 3733 | #define _DVSBCNTR 0x73180 |
3733 | #define _DVSBLINOFF 0x73184 | 3734 | #define _DVSBLINOFF 0x73184 |
3734 | #define _DVSBSTRIDE 0x73188 | 3735 | #define _DVSBSTRIDE 0x73188 |
3735 | #define _DVSBPOS 0x7318c | 3736 | #define _DVSBPOS 0x7318c |
3736 | #define _DVSBSIZE 0x73190 | 3737 | #define _DVSBSIZE 0x73190 |
3737 | #define _DVSBKEYVAL 0x73194 | 3738 | #define _DVSBKEYVAL 0x73194 |
3738 | #define _DVSBKEYMSK 0x73198 | 3739 | #define _DVSBKEYMSK 0x73198 |
3739 | #define _DVSBSURF 0x7319c | 3740 | #define _DVSBSURF 0x7319c |
3740 | #define _DVSBKEYMAXVAL 0x731a0 | 3741 | #define _DVSBKEYMAXVAL 0x731a0 |
3741 | #define _DVSBTILEOFF 0x731a4 | 3742 | #define _DVSBTILEOFF 0x731a4 |
3742 | #define _DVSBSURFLIVE 0x731ac | 3743 | #define _DVSBSURFLIVE 0x731ac |
3743 | #define _DVSBSCALE 0x73204 | 3744 | #define _DVSBSCALE 0x73204 |
3744 | #define _DVSBGAMC 0x73300 | 3745 | #define _DVSBGAMC 0x73300 |
3745 | 3746 | ||
3746 | #define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR) | 3747 | #define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR) |
3747 | #define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF) | 3748 | #define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF) |
3748 | #define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE) | 3749 | #define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE) |
3749 | #define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS) | 3750 | #define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS) |
3750 | #define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF) | 3751 | #define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF) |
3751 | #define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL) | 3752 | #define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL) |
3752 | #define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE) | 3753 | #define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE) |
3753 | #define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE) | 3754 | #define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE) |
3754 | #define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) | 3755 | #define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) |
3755 | #define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) | 3756 | #define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) |
3756 | #define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) | 3757 | #define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) |
3757 | #define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) | 3758 | #define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) |
3758 | 3759 | ||
3759 | #define _SPRA_CTL 0x70280 | 3760 | #define _SPRA_CTL 0x70280 |
3760 | #define SPRITE_ENABLE (1<<31) | 3761 | #define SPRITE_ENABLE (1<<31) |
3761 | #define SPRITE_GAMMA_ENABLE (1<<30) | 3762 | #define SPRITE_GAMMA_ENABLE (1<<30) |
3762 | #define SPRITE_PIXFORMAT_MASK (7<<25) | 3763 | #define SPRITE_PIXFORMAT_MASK (7<<25) |
3763 | #define SPRITE_FORMAT_YUV422 (0<<25) | 3764 | #define SPRITE_FORMAT_YUV422 (0<<25) |
3764 | #define SPRITE_FORMAT_RGBX101010 (1<<25) | 3765 | #define SPRITE_FORMAT_RGBX101010 (1<<25) |
3765 | #define SPRITE_FORMAT_RGBX888 (2<<25) | 3766 | #define SPRITE_FORMAT_RGBX888 (2<<25) |
3766 | #define SPRITE_FORMAT_RGBX161616 (3<<25) | 3767 | #define SPRITE_FORMAT_RGBX161616 (3<<25) |
3767 | #define SPRITE_FORMAT_YUV444 (4<<25) | 3768 | #define SPRITE_FORMAT_YUV444 (4<<25) |
3768 | #define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */ | 3769 | #define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */ |
3769 | #define SPRITE_PIPE_CSC_ENABLE (1<<24) | 3770 | #define SPRITE_PIPE_CSC_ENABLE (1<<24) |
3770 | #define SPRITE_SOURCE_KEY (1<<22) | 3771 | #define SPRITE_SOURCE_KEY (1<<22) |
3771 | #define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */ | 3772 | #define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */ |
3772 | #define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19) | 3773 | #define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19) |
3773 | #define SPRITE_YUV_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */ | 3774 | #define SPRITE_YUV_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */ |
3774 | #define SPRITE_YUV_BYTE_ORDER_MASK (3<<16) | 3775 | #define SPRITE_YUV_BYTE_ORDER_MASK (3<<16) |
3775 | #define SPRITE_YUV_ORDER_YUYV (0<<16) | 3776 | #define SPRITE_YUV_ORDER_YUYV (0<<16) |
3776 | #define SPRITE_YUV_ORDER_UYVY (1<<16) | 3777 | #define SPRITE_YUV_ORDER_UYVY (1<<16) |
3777 | #define SPRITE_YUV_ORDER_YVYU (2<<16) | 3778 | #define SPRITE_YUV_ORDER_YVYU (2<<16) |
3778 | #define SPRITE_YUV_ORDER_VYUY (3<<16) | 3779 | #define SPRITE_YUV_ORDER_VYUY (3<<16) |
3779 | #define SPRITE_TRICKLE_FEED_DISABLE (1<<14) | 3780 | #define SPRITE_TRICKLE_FEED_DISABLE (1<<14) |
3780 | #define SPRITE_INT_GAMMA_ENABLE (1<<13) | 3781 | #define SPRITE_INT_GAMMA_ENABLE (1<<13) |
3781 | #define SPRITE_TILED (1<<10) | 3782 | #define SPRITE_TILED (1<<10) |
3782 | #define SPRITE_DEST_KEY (1<<2) | 3783 | #define SPRITE_DEST_KEY (1<<2) |
3783 | #define _SPRA_LINOFF 0x70284 | 3784 | #define _SPRA_LINOFF 0x70284 |
3784 | #define _SPRA_STRIDE 0x70288 | 3785 | #define _SPRA_STRIDE 0x70288 |
3785 | #define _SPRA_POS 0x7028c | 3786 | #define _SPRA_POS 0x7028c |
3786 | #define _SPRA_SIZE 0x70290 | 3787 | #define _SPRA_SIZE 0x70290 |
3787 | #define _SPRA_KEYVAL 0x70294 | 3788 | #define _SPRA_KEYVAL 0x70294 |
3788 | #define _SPRA_KEYMSK 0x70298 | 3789 | #define _SPRA_KEYMSK 0x70298 |
3789 | #define _SPRA_SURF 0x7029c | 3790 | #define _SPRA_SURF 0x7029c |
3790 | #define _SPRA_KEYMAX 0x702a0 | 3791 | #define _SPRA_KEYMAX 0x702a0 |
3791 | #define _SPRA_TILEOFF 0x702a4 | 3792 | #define _SPRA_TILEOFF 0x702a4 |
3792 | #define _SPRA_OFFSET 0x702a4 | 3793 | #define _SPRA_OFFSET 0x702a4 |
3793 | #define _SPRA_SURFLIVE 0x702ac | 3794 | #define _SPRA_SURFLIVE 0x702ac |
3794 | #define _SPRA_SCALE 0x70304 | 3795 | #define _SPRA_SCALE 0x70304 |
3795 | #define SPRITE_SCALE_ENABLE (1<<31) | 3796 | #define SPRITE_SCALE_ENABLE (1<<31) |
3796 | #define SPRITE_FILTER_MASK (3<<29) | 3797 | #define SPRITE_FILTER_MASK (3<<29) |
3797 | #define SPRITE_FILTER_MEDIUM (0<<29) | 3798 | #define SPRITE_FILTER_MEDIUM (0<<29) |
3798 | #define SPRITE_FILTER_ENHANCING (1<<29) | 3799 | #define SPRITE_FILTER_ENHANCING (1<<29) |
3799 | #define SPRITE_FILTER_SOFTENING (2<<29) | 3800 | #define SPRITE_FILTER_SOFTENING (2<<29) |
3800 | #define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */ | 3801 | #define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */ |
3801 | #define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27) | 3802 | #define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27) |
3802 | #define _SPRA_GAMC 0x70400 | 3803 | #define _SPRA_GAMC 0x70400 |
3803 | 3804 | ||
3804 | #define _SPRB_CTL 0x71280 | 3805 | #define _SPRB_CTL 0x71280 |
3805 | #define _SPRB_LINOFF 0x71284 | 3806 | #define _SPRB_LINOFF 0x71284 |
3806 | #define _SPRB_STRIDE 0x71288 | 3807 | #define _SPRB_STRIDE 0x71288 |
3807 | #define _SPRB_POS 0x7128c | 3808 | #define _SPRB_POS 0x7128c |
3808 | #define _SPRB_SIZE 0x71290 | 3809 | #define _SPRB_SIZE 0x71290 |
3809 | #define _SPRB_KEYVAL 0x71294 | 3810 | #define _SPRB_KEYVAL 0x71294 |
3810 | #define _SPRB_KEYMSK 0x71298 | 3811 | #define _SPRB_KEYMSK 0x71298 |
3811 | #define _SPRB_SURF 0x7129c | 3812 | #define _SPRB_SURF 0x7129c |
3812 | #define _SPRB_KEYMAX 0x712a0 | 3813 | #define _SPRB_KEYMAX 0x712a0 |
3813 | #define _SPRB_TILEOFF 0x712a4 | 3814 | #define _SPRB_TILEOFF 0x712a4 |
3814 | #define _SPRB_OFFSET 0x712a4 | 3815 | #define _SPRB_OFFSET 0x712a4 |
3815 | #define _SPRB_SURFLIVE 0x712ac | 3816 | #define _SPRB_SURFLIVE 0x712ac |
3816 | #define _SPRB_SCALE 0x71304 | 3817 | #define _SPRB_SCALE 0x71304 |
3817 | #define _SPRB_GAMC 0x71400 | 3818 | #define _SPRB_GAMC 0x71400 |
3818 | 3819 | ||
3819 | #define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL) | 3820 | #define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL) |
3820 | #define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF) | 3821 | #define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF) |
3821 | #define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE) | 3822 | #define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE) |
3822 | #define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS) | 3823 | #define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS) |
3823 | #define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE) | 3824 | #define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE) |
3824 | #define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL) | 3825 | #define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL) |
3825 | #define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK) | 3826 | #define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK) |
3826 | #define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF) | 3827 | #define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF) |
3827 | #define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) | 3828 | #define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) |
3828 | #define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) | 3829 | #define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) |
3829 | #define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET) | 3830 | #define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET) |
3830 | #define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) | 3831 | #define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) |
3831 | #define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) | 3832 | #define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) |
3832 | #define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) | 3833 | #define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) |
3833 | 3834 | ||
3834 | #define _SPACNTR (VLV_DISPLAY_BASE + 0x72180) | 3835 | #define _SPACNTR (VLV_DISPLAY_BASE + 0x72180) |
3835 | #define SP_ENABLE (1<<31) | 3836 | #define SP_ENABLE (1<<31) |
3836 | #define SP_GAMMA_ENABLE (1<<30) | 3837 | #define SP_GAMMA_ENABLE (1<<30) |
3837 | #define SP_PIXFORMAT_MASK (0xf<<26) | 3838 | #define SP_PIXFORMAT_MASK (0xf<<26) |
3838 | #define SP_FORMAT_YUV422 (0<<26) | 3839 | #define SP_FORMAT_YUV422 (0<<26) |
3839 | #define SP_FORMAT_BGR565 (5<<26) | 3840 | #define SP_FORMAT_BGR565 (5<<26) |
3840 | #define SP_FORMAT_BGRX8888 (6<<26) | 3841 | #define SP_FORMAT_BGRX8888 (6<<26) |
3841 | #define SP_FORMAT_BGRA8888 (7<<26) | 3842 | #define SP_FORMAT_BGRA8888 (7<<26) |
3842 | #define SP_FORMAT_RGBX1010102 (8<<26) | 3843 | #define SP_FORMAT_RGBX1010102 (8<<26) |
3843 | #define SP_FORMAT_RGBA1010102 (9<<26) | 3844 | #define SP_FORMAT_RGBA1010102 (9<<26) |
3844 | #define SP_FORMAT_RGBX8888 (0xe<<26) | 3845 | #define SP_FORMAT_RGBX8888 (0xe<<26) |
3845 | #define SP_FORMAT_RGBA8888 (0xf<<26) | 3846 | #define SP_FORMAT_RGBA8888 (0xf<<26) |
3846 | #define SP_SOURCE_KEY (1<<22) | 3847 | #define SP_SOURCE_KEY (1<<22) |
3847 | #define SP_YUV_BYTE_ORDER_MASK (3<<16) | 3848 | #define SP_YUV_BYTE_ORDER_MASK (3<<16) |
3848 | #define SP_YUV_ORDER_YUYV (0<<16) | 3849 | #define SP_YUV_ORDER_YUYV (0<<16) |
3849 | #define SP_YUV_ORDER_UYVY (1<<16) | 3850 | #define SP_YUV_ORDER_UYVY (1<<16) |
3850 | #define SP_YUV_ORDER_YVYU (2<<16) | 3851 | #define SP_YUV_ORDER_YVYU (2<<16) |
3851 | #define SP_YUV_ORDER_VYUY (3<<16) | 3852 | #define SP_YUV_ORDER_VYUY (3<<16) |
3852 | #define SP_TILED (1<<10) | 3853 | #define SP_TILED (1<<10) |
3853 | #define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184) | 3854 | #define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184) |
3854 | #define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188) | 3855 | #define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188) |
3855 | #define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c) | 3856 | #define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c) |
3856 | #define _SPASIZE (VLV_DISPLAY_BASE + 0x72190) | 3857 | #define _SPASIZE (VLV_DISPLAY_BASE + 0x72190) |
3857 | #define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194) | 3858 | #define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194) |
3858 | #define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198) | 3859 | #define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198) |
3859 | #define _SPASURF (VLV_DISPLAY_BASE + 0x7219c) | 3860 | #define _SPASURF (VLV_DISPLAY_BASE + 0x7219c) |
3860 | #define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0) | 3861 | #define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0) |
3861 | #define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4) | 3862 | #define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4) |
3862 | #define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8) | 3863 | #define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8) |
3863 | #define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4) | 3864 | #define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4) |
3864 | 3865 | ||
3865 | #define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280) | 3866 | #define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280) |
3866 | #define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284) | 3867 | #define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284) |
3867 | #define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288) | 3868 | #define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288) |
3868 | #define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c) | 3869 | #define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c) |
3869 | #define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290) | 3870 | #define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290) |
3870 | #define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294) | 3871 | #define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294) |
3871 | #define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298) | 3872 | #define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298) |
3872 | #define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c) | 3873 | #define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c) |
3873 | #define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0) | 3874 | #define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0) |
3874 | #define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4) | 3875 | #define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4) |
3875 | #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) | 3876 | #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) |
3876 | #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4) | 3877 | #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4) |
3877 | 3878 | ||
3878 | #define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR) | 3879 | #define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR) |
3879 | #define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF) | 3880 | #define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF) |
3880 | #define SPSTRIDE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASTRIDE, _SPBSTRIDE) | 3881 | #define SPSTRIDE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASTRIDE, _SPBSTRIDE) |
3881 | #define SPPOS(pipe, plane) _PIPE(pipe * 2 + plane, _SPAPOS, _SPBPOS) | 3882 | #define SPPOS(pipe, plane) _PIPE(pipe * 2 + plane, _SPAPOS, _SPBPOS) |
3882 | #define SPSIZE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASIZE, _SPBSIZE) | 3883 | #define SPSIZE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASIZE, _SPBSIZE) |
3883 | #define SPKEYMINVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMINVAL, _SPBKEYMINVAL) | 3884 | #define SPKEYMINVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMINVAL, _SPBKEYMINVAL) |
3884 | #define SPKEYMSK(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMSK, _SPBKEYMSK) | 3885 | #define SPKEYMSK(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMSK, _SPBKEYMSK) |
3885 | #define SPSURF(pipe, plane) _PIPE(pipe * 2 + plane, _SPASURF, _SPBSURF) | 3886 | #define SPSURF(pipe, plane) _PIPE(pipe * 2 + plane, _SPASURF, _SPBSURF) |
3886 | #define SPKEYMAXVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMAXVAL, _SPBKEYMAXVAL) | 3887 | #define SPKEYMAXVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMAXVAL, _SPBKEYMAXVAL) |
3887 | #define SPTILEOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPATILEOFF, _SPBTILEOFF) | 3888 | #define SPTILEOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPATILEOFF, _SPBTILEOFF) |
3888 | #define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA) | 3889 | #define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA) |
3889 | #define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC) | 3890 | #define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC) |
3890 | 3891 | ||
3891 | /* VBIOS regs */ | 3892 | /* VBIOS regs */ |
3892 | #define VGACNTRL 0x71400 | 3893 | #define VGACNTRL 0x71400 |
3893 | # define VGA_DISP_DISABLE (1 << 31) | 3894 | # define VGA_DISP_DISABLE (1 << 31) |
3894 | # define VGA_2X_MODE (1 << 30) | 3895 | # define VGA_2X_MODE (1 << 30) |
3895 | # define VGA_PIPE_B_SELECT (1 << 29) | 3896 | # define VGA_PIPE_B_SELECT (1 << 29) |
3896 | 3897 | ||
3897 | #define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400) | 3898 | #define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400) |
3898 | 3899 | ||
3899 | /* Ironlake */ | 3900 | /* Ironlake */ |
3900 | 3901 | ||
3901 | #define CPU_VGACNTRL 0x41000 | 3902 | #define CPU_VGACNTRL 0x41000 |
3902 | 3903 | ||
3903 | #define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030 | 3904 | #define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030 |
3904 | #define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4) | 3905 | #define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4) |
3905 | #define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2) | 3906 | #define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2) |
3906 | #define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2) | 3907 | #define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2) |
3907 | #define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2) | 3908 | #define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2) |
3908 | #define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2) | 3909 | #define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2) |
3909 | #define DIGITAL_PORTA_NO_DETECT (0 << 0) | 3910 | #define DIGITAL_PORTA_NO_DETECT (0 << 0) |
3910 | #define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1) | 3911 | #define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1) |
3911 | #define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0) | 3912 | #define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0) |
3912 | 3913 | ||
3913 | /* refresh rate hardware control */ | 3914 | /* refresh rate hardware control */ |
3914 | #define RR_HW_CTL 0x45300 | 3915 | #define RR_HW_CTL 0x45300 |
3915 | #define RR_HW_LOW_POWER_FRAMES_MASK 0xff | 3916 | #define RR_HW_LOW_POWER_FRAMES_MASK 0xff |
3916 | #define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00 | 3917 | #define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00 |
3917 | 3918 | ||
3918 | #define FDI_PLL_BIOS_0 0x46000 | 3919 | #define FDI_PLL_BIOS_0 0x46000 |
3919 | #define FDI_PLL_FB_CLOCK_MASK 0xff | 3920 | #define FDI_PLL_FB_CLOCK_MASK 0xff |
3920 | #define FDI_PLL_BIOS_1 0x46004 | 3921 | #define FDI_PLL_BIOS_1 0x46004 |
3921 | #define FDI_PLL_BIOS_2 0x46008 | 3922 | #define FDI_PLL_BIOS_2 0x46008 |
3922 | #define DISPLAY_PORT_PLL_BIOS_0 0x4600c | 3923 | #define DISPLAY_PORT_PLL_BIOS_0 0x4600c |
3923 | #define DISPLAY_PORT_PLL_BIOS_1 0x46010 | 3924 | #define DISPLAY_PORT_PLL_BIOS_1 0x46010 |
3924 | #define DISPLAY_PORT_PLL_BIOS_2 0x46014 | 3925 | #define DISPLAY_PORT_PLL_BIOS_2 0x46014 |
3925 | 3926 | ||
3926 | #define PCH_3DCGDIS0 0x46020 | 3927 | #define PCH_3DCGDIS0 0x46020 |
3927 | # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) | 3928 | # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) |
3928 | # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) | 3929 | # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) |
3929 | 3930 | ||
3930 | #define PCH_3DCGDIS1 0x46024 | 3931 | #define PCH_3DCGDIS1 0x46024 |
3931 | # define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11) | 3932 | # define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11) |
3932 | 3933 | ||
3933 | #define FDI_PLL_FREQ_CTL 0x46030 | 3934 | #define FDI_PLL_FREQ_CTL 0x46030 |
3934 | #define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) | 3935 | #define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) |
3935 | #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 | 3936 | #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 |
3936 | #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff | 3937 | #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff |
3937 | 3938 | ||
3938 | 3939 | ||
3939 | #define _PIPEA_DATA_M1 0x60030 | 3940 | #define _PIPEA_DATA_M1 0x60030 |
3940 | #define PIPE_DATA_M1_OFFSET 0 | 3941 | #define PIPE_DATA_M1_OFFSET 0 |
3941 | #define _PIPEA_DATA_N1 0x60034 | 3942 | #define _PIPEA_DATA_N1 0x60034 |
3942 | #define PIPE_DATA_N1_OFFSET 0 | 3943 | #define PIPE_DATA_N1_OFFSET 0 |
3943 | 3944 | ||
3944 | #define _PIPEA_DATA_M2 0x60038 | 3945 | #define _PIPEA_DATA_M2 0x60038 |
3945 | #define PIPE_DATA_M2_OFFSET 0 | 3946 | #define PIPE_DATA_M2_OFFSET 0 |
3946 | #define _PIPEA_DATA_N2 0x6003c | 3947 | #define _PIPEA_DATA_N2 0x6003c |
3947 | #define PIPE_DATA_N2_OFFSET 0 | 3948 | #define PIPE_DATA_N2_OFFSET 0 |
3948 | 3949 | ||
3949 | #define _PIPEA_LINK_M1 0x60040 | 3950 | #define _PIPEA_LINK_M1 0x60040 |
3950 | #define PIPE_LINK_M1_OFFSET 0 | 3951 | #define PIPE_LINK_M1_OFFSET 0 |
3951 | #define _PIPEA_LINK_N1 0x60044 | 3952 | #define _PIPEA_LINK_N1 0x60044 |
3952 | #define PIPE_LINK_N1_OFFSET 0 | 3953 | #define PIPE_LINK_N1_OFFSET 0 |
3953 | 3954 | ||
3954 | #define _PIPEA_LINK_M2 0x60048 | 3955 | #define _PIPEA_LINK_M2 0x60048 |
3955 | #define PIPE_LINK_M2_OFFSET 0 | 3956 | #define PIPE_LINK_M2_OFFSET 0 |
3956 | #define _PIPEA_LINK_N2 0x6004c | 3957 | #define _PIPEA_LINK_N2 0x6004c |
3957 | #define PIPE_LINK_N2_OFFSET 0 | 3958 | #define PIPE_LINK_N2_OFFSET 0 |
3958 | 3959 | ||
3959 | /* PIPEB timing regs are same start from 0x61000 */ | 3960 | /* PIPEB timing regs are same start from 0x61000 */ |
3960 | 3961 | ||
3961 | #define _PIPEB_DATA_M1 0x61030 | 3962 | #define _PIPEB_DATA_M1 0x61030 |
3962 | #define _PIPEB_DATA_N1 0x61034 | 3963 | #define _PIPEB_DATA_N1 0x61034 |
3963 | #define _PIPEB_DATA_M2 0x61038 | 3964 | #define _PIPEB_DATA_M2 0x61038 |
3964 | #define _PIPEB_DATA_N2 0x6103c | 3965 | #define _PIPEB_DATA_N2 0x6103c |
3965 | #define _PIPEB_LINK_M1 0x61040 | 3966 | #define _PIPEB_LINK_M1 0x61040 |
3966 | #define _PIPEB_LINK_N1 0x61044 | 3967 | #define _PIPEB_LINK_N1 0x61044 |
3967 | #define _PIPEB_LINK_M2 0x61048 | 3968 | #define _PIPEB_LINK_M2 0x61048 |
3968 | #define _PIPEB_LINK_N2 0x6104c | 3969 | #define _PIPEB_LINK_N2 0x6104c |
3969 | 3970 | ||
3970 | #define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1) | 3971 | #define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1) |
3971 | #define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1) | 3972 | #define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1) |
3972 | #define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2) | 3973 | #define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2) |
3973 | #define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2) | 3974 | #define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2) |
3974 | #define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1) | 3975 | #define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1) |
3975 | #define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1) | 3976 | #define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1) |
3976 | #define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2) | 3977 | #define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2) |
3977 | #define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2) | 3978 | #define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2) |
3978 | 3979 | ||
3979 | /* CPU panel fitter */ | 3980 | /* CPU panel fitter */ |
3980 | /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ | 3981 | /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ |
3981 | #define _PFA_CTL_1 0x68080 | 3982 | #define _PFA_CTL_1 0x68080 |
3982 | #define _PFB_CTL_1 0x68880 | 3983 | #define _PFB_CTL_1 0x68880 |
3983 | #define PF_ENABLE (1<<31) | 3984 | #define PF_ENABLE (1<<31) |
3984 | #define PF_PIPE_SEL_MASK_IVB (3<<29) | 3985 | #define PF_PIPE_SEL_MASK_IVB (3<<29) |
3985 | #define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29) | 3986 | #define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29) |
3986 | #define PF_FILTER_MASK (3<<23) | 3987 | #define PF_FILTER_MASK (3<<23) |
3987 | #define PF_FILTER_PROGRAMMED (0<<23) | 3988 | #define PF_FILTER_PROGRAMMED (0<<23) |
3988 | #define PF_FILTER_MED_3x3 (1<<23) | 3989 | #define PF_FILTER_MED_3x3 (1<<23) |
3989 | #define PF_FILTER_EDGE_ENHANCE (2<<23) | 3990 | #define PF_FILTER_EDGE_ENHANCE (2<<23) |
3990 | #define PF_FILTER_EDGE_SOFTEN (3<<23) | 3991 | #define PF_FILTER_EDGE_SOFTEN (3<<23) |
3991 | #define _PFA_WIN_SZ 0x68074 | 3992 | #define _PFA_WIN_SZ 0x68074 |
3992 | #define _PFB_WIN_SZ 0x68874 | 3993 | #define _PFB_WIN_SZ 0x68874 |
3993 | #define _PFA_WIN_POS 0x68070 | 3994 | #define _PFA_WIN_POS 0x68070 |
3994 | #define _PFB_WIN_POS 0x68870 | 3995 | #define _PFB_WIN_POS 0x68870 |
3995 | #define _PFA_VSCALE 0x68084 | 3996 | #define _PFA_VSCALE 0x68084 |
3996 | #define _PFB_VSCALE 0x68884 | 3997 | #define _PFB_VSCALE 0x68884 |
3997 | #define _PFA_HSCALE 0x68090 | 3998 | #define _PFA_HSCALE 0x68090 |
3998 | #define _PFB_HSCALE 0x68890 | 3999 | #define _PFB_HSCALE 0x68890 |
3999 | 4000 | ||
4000 | #define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1) | 4001 | #define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1) |
4001 | #define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ) | 4002 | #define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ) |
4002 | #define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS) | 4003 | #define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS) |
4003 | #define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) | 4004 | #define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) |
4004 | #define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE) | 4005 | #define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE) |
4005 | 4006 | ||
4006 | /* legacy palette */ | 4007 | /* legacy palette */ |
4007 | #define _LGC_PALETTE_A 0x4a000 | 4008 | #define _LGC_PALETTE_A 0x4a000 |
4008 | #define _LGC_PALETTE_B 0x4a800 | 4009 | #define _LGC_PALETTE_B 0x4a800 |
4009 | #define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) | 4010 | #define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) |
4010 | 4011 | ||
4011 | #define _GAMMA_MODE_A 0x4a480 | 4012 | #define _GAMMA_MODE_A 0x4a480 |
4012 | #define _GAMMA_MODE_B 0x4ac80 | 4013 | #define _GAMMA_MODE_B 0x4ac80 |
4013 | #define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B) | 4014 | #define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B) |
4014 | #define GAMMA_MODE_MODE_MASK (3 << 0) | 4015 | #define GAMMA_MODE_MODE_MASK (3 << 0) |
4015 | #define GAMMA_MODE_MODE_8BIT (0 << 0) | 4016 | #define GAMMA_MODE_MODE_8BIT (0 << 0) |
4016 | #define GAMMA_MODE_MODE_10BIT (1 << 0) | 4017 | #define GAMMA_MODE_MODE_10BIT (1 << 0) |
4017 | #define GAMMA_MODE_MODE_12BIT (2 << 0) | 4018 | #define GAMMA_MODE_MODE_12BIT (2 << 0) |
4018 | #define GAMMA_MODE_MODE_SPLIT (3 << 0) | 4019 | #define GAMMA_MODE_MODE_SPLIT (3 << 0) |
4019 | 4020 | ||
4020 | /* interrupts */ | 4021 | /* interrupts */ |
4021 | #define DE_MASTER_IRQ_CONTROL (1 << 31) | 4022 | #define DE_MASTER_IRQ_CONTROL (1 << 31) |
4022 | #define DE_SPRITEB_FLIP_DONE (1 << 29) | 4023 | #define DE_SPRITEB_FLIP_DONE (1 << 29) |
4023 | #define DE_SPRITEA_FLIP_DONE (1 << 28) | 4024 | #define DE_SPRITEA_FLIP_DONE (1 << 28) |
4024 | #define DE_PLANEB_FLIP_DONE (1 << 27) | 4025 | #define DE_PLANEB_FLIP_DONE (1 << 27) |
4025 | #define DE_PLANEA_FLIP_DONE (1 << 26) | 4026 | #define DE_PLANEA_FLIP_DONE (1 << 26) |
4026 | #define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane))) | 4027 | #define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane))) |
4027 | #define DE_PCU_EVENT (1 << 25) | 4028 | #define DE_PCU_EVENT (1 << 25) |
4028 | #define DE_GTT_FAULT (1 << 24) | 4029 | #define DE_GTT_FAULT (1 << 24) |
4029 | #define DE_POISON (1 << 23) | 4030 | #define DE_POISON (1 << 23) |
4030 | #define DE_PERFORM_COUNTER (1 << 22) | 4031 | #define DE_PERFORM_COUNTER (1 << 22) |
4031 | #define DE_PCH_EVENT (1 << 21) | 4032 | #define DE_PCH_EVENT (1 << 21) |
4032 | #define DE_AUX_CHANNEL_A (1 << 20) | 4033 | #define DE_AUX_CHANNEL_A (1 << 20) |
4033 | #define DE_DP_A_HOTPLUG (1 << 19) | 4034 | #define DE_DP_A_HOTPLUG (1 << 19) |
4034 | #define DE_GSE (1 << 18) | 4035 | #define DE_GSE (1 << 18) |
4035 | #define DE_PIPEB_VBLANK (1 << 15) | 4036 | #define DE_PIPEB_VBLANK (1 << 15) |
4036 | #define DE_PIPEB_EVEN_FIELD (1 << 14) | 4037 | #define DE_PIPEB_EVEN_FIELD (1 << 14) |
4037 | #define DE_PIPEB_ODD_FIELD (1 << 13) | 4038 | #define DE_PIPEB_ODD_FIELD (1 << 13) |
4038 | #define DE_PIPEB_LINE_COMPARE (1 << 12) | 4039 | #define DE_PIPEB_LINE_COMPARE (1 << 12) |
4039 | #define DE_PIPEB_VSYNC (1 << 11) | 4040 | #define DE_PIPEB_VSYNC (1 << 11) |
4040 | #define DE_PIPEB_CRC_DONE (1 << 10) | 4041 | #define DE_PIPEB_CRC_DONE (1 << 10) |
4041 | #define DE_PIPEB_FIFO_UNDERRUN (1 << 8) | 4042 | #define DE_PIPEB_FIFO_UNDERRUN (1 << 8) |
4042 | #define DE_PIPEA_VBLANK (1 << 7) | 4043 | #define DE_PIPEA_VBLANK (1 << 7) |
4043 | #define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe))) | 4044 | #define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe))) |
4044 | #define DE_PIPEA_EVEN_FIELD (1 << 6) | 4045 | #define DE_PIPEA_EVEN_FIELD (1 << 6) |
4045 | #define DE_PIPEA_ODD_FIELD (1 << 5) | 4046 | #define DE_PIPEA_ODD_FIELD (1 << 5) |
4046 | #define DE_PIPEA_LINE_COMPARE (1 << 4) | 4047 | #define DE_PIPEA_LINE_COMPARE (1 << 4) |
4047 | #define DE_PIPEA_VSYNC (1 << 3) | 4048 | #define DE_PIPEA_VSYNC (1 << 3) |
4048 | #define DE_PIPEA_CRC_DONE (1 << 2) | 4049 | #define DE_PIPEA_CRC_DONE (1 << 2) |
4049 | #define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe))) | 4050 | #define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe))) |
4050 | #define DE_PIPEA_FIFO_UNDERRUN (1 << 0) | 4051 | #define DE_PIPEA_FIFO_UNDERRUN (1 << 0) |
4051 | #define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe))) | 4052 | #define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe))) |
4052 | 4053 | ||
4053 | /* More Ivybridge lolz */ | 4054 | /* More Ivybridge lolz */ |
4054 | #define DE_ERR_INT_IVB (1<<30) | 4055 | #define DE_ERR_INT_IVB (1<<30) |
4055 | #define DE_GSE_IVB (1<<29) | 4056 | #define DE_GSE_IVB (1<<29) |
4056 | #define DE_PCH_EVENT_IVB (1<<28) | 4057 | #define DE_PCH_EVENT_IVB (1<<28) |
4057 | #define DE_DP_A_HOTPLUG_IVB (1<<27) | 4058 | #define DE_DP_A_HOTPLUG_IVB (1<<27) |
4058 | #define DE_AUX_CHANNEL_A_IVB (1<<26) | 4059 | #define DE_AUX_CHANNEL_A_IVB (1<<26) |
4059 | #define DE_SPRITEC_FLIP_DONE_IVB (1<<14) | 4060 | #define DE_SPRITEC_FLIP_DONE_IVB (1<<14) |
4060 | #define DE_PLANEC_FLIP_DONE_IVB (1<<13) | 4061 | #define DE_PLANEC_FLIP_DONE_IVB (1<<13) |
4061 | #define DE_PIPEC_VBLANK_IVB (1<<10) | 4062 | #define DE_PIPEC_VBLANK_IVB (1<<10) |
4062 | #define DE_SPRITEB_FLIP_DONE_IVB (1<<9) | 4063 | #define DE_SPRITEB_FLIP_DONE_IVB (1<<9) |
4063 | #define DE_PLANEB_FLIP_DONE_IVB (1<<8) | 4064 | #define DE_PLANEB_FLIP_DONE_IVB (1<<8) |
4064 | #define DE_PIPEB_VBLANK_IVB (1<<5) | 4065 | #define DE_PIPEB_VBLANK_IVB (1<<5) |
4065 | #define DE_SPRITEA_FLIP_DONE_IVB (1<<4) | 4066 | #define DE_SPRITEA_FLIP_DONE_IVB (1<<4) |
4066 | #define DE_PLANEA_FLIP_DONE_IVB (1<<3) | 4067 | #define DE_PLANEA_FLIP_DONE_IVB (1<<3) |
4067 | #define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane))) | 4068 | #define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane))) |
4068 | #define DE_PIPEA_VBLANK_IVB (1<<0) | 4069 | #define DE_PIPEA_VBLANK_IVB (1<<0) |
4069 | #define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5)) | 4070 | #define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5)) |
4070 | 4071 | ||
4071 | #define VLV_MASTER_IER 0x4400c /* Gunit master IER */ | 4072 | #define VLV_MASTER_IER 0x4400c /* Gunit master IER */ |
4072 | #define MASTER_INTERRUPT_ENABLE (1<<31) | 4073 | #define MASTER_INTERRUPT_ENABLE (1<<31) |
4073 | 4074 | ||
4074 | #define DEISR 0x44000 | 4075 | #define DEISR 0x44000 |
4075 | #define DEIMR 0x44004 | 4076 | #define DEIMR 0x44004 |
4076 | #define DEIIR 0x44008 | 4077 | #define DEIIR 0x44008 |
4077 | #define DEIER 0x4400c | 4078 | #define DEIER 0x4400c |
4078 | 4079 | ||
4079 | #define GTISR 0x44010 | 4080 | #define GTISR 0x44010 |
4080 | #define GTIMR 0x44014 | 4081 | #define GTIMR 0x44014 |
4081 | #define GTIIR 0x44018 | 4082 | #define GTIIR 0x44018 |
4082 | #define GTIER 0x4401c | 4083 | #define GTIER 0x4401c |
4083 | 4084 | ||
4084 | #define GEN8_MASTER_IRQ 0x44200 | 4085 | #define GEN8_MASTER_IRQ 0x44200 |
4085 | #define GEN8_MASTER_IRQ_CONTROL (1<<31) | 4086 | #define GEN8_MASTER_IRQ_CONTROL (1<<31) |
4086 | #define GEN8_PCU_IRQ (1<<30) | 4087 | #define GEN8_PCU_IRQ (1<<30) |
4087 | #define GEN8_DE_PCH_IRQ (1<<23) | 4088 | #define GEN8_DE_PCH_IRQ (1<<23) |
4088 | #define GEN8_DE_MISC_IRQ (1<<22) | 4089 | #define GEN8_DE_MISC_IRQ (1<<22) |
4089 | #define GEN8_DE_PORT_IRQ (1<<20) | 4090 | #define GEN8_DE_PORT_IRQ (1<<20) |
4090 | #define GEN8_DE_PIPE_C_IRQ (1<<18) | 4091 | #define GEN8_DE_PIPE_C_IRQ (1<<18) |
4091 | #define GEN8_DE_PIPE_B_IRQ (1<<17) | 4092 | #define GEN8_DE_PIPE_B_IRQ (1<<17) |
4092 | #define GEN8_DE_PIPE_A_IRQ (1<<16) | 4093 | #define GEN8_DE_PIPE_A_IRQ (1<<16) |
4093 | #define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe)) | 4094 | #define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe)) |
4094 | #define GEN8_GT_VECS_IRQ (1<<6) | 4095 | #define GEN8_GT_VECS_IRQ (1<<6) |
4095 | #define GEN8_GT_VCS2_IRQ (1<<3) | 4096 | #define GEN8_GT_VCS2_IRQ (1<<3) |
4096 | #define GEN8_GT_VCS1_IRQ (1<<2) | 4097 | #define GEN8_GT_VCS1_IRQ (1<<2) |
4097 | #define GEN8_GT_BCS_IRQ (1<<1) | 4098 | #define GEN8_GT_BCS_IRQ (1<<1) |
4098 | #define GEN8_GT_RCS_IRQ (1<<0) | 4099 | #define GEN8_GT_RCS_IRQ (1<<0) |
4099 | 4100 | ||
4100 | #define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which))) | 4101 | #define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which))) |
4101 | #define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which))) | 4102 | #define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which))) |
4102 | #define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which))) | 4103 | #define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which))) |
4103 | #define GEN8_GT_IER(which) (0x4430c + (0x10 * (which))) | 4104 | #define GEN8_GT_IER(which) (0x4430c + (0x10 * (which))) |
4104 | 4105 | ||
4105 | #define GEN8_BCS_IRQ_SHIFT 16 | 4106 | #define GEN8_BCS_IRQ_SHIFT 16 |
4106 | #define GEN8_RCS_IRQ_SHIFT 0 | 4107 | #define GEN8_RCS_IRQ_SHIFT 0 |
4107 | #define GEN8_VCS2_IRQ_SHIFT 16 | 4108 | #define GEN8_VCS2_IRQ_SHIFT 16 |
4108 | #define GEN8_VCS1_IRQ_SHIFT 0 | 4109 | #define GEN8_VCS1_IRQ_SHIFT 0 |
4109 | #define GEN8_VECS_IRQ_SHIFT 0 | 4110 | #define GEN8_VECS_IRQ_SHIFT 0 |
4110 | 4111 | ||
4111 | #define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe))) | 4112 | #define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe))) |
4112 | #define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe))) | 4113 | #define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe))) |
4113 | #define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe))) | 4114 | #define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe))) |
4114 | #define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe))) | 4115 | #define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe))) |
4115 | #define GEN8_PIPE_FIFO_UNDERRUN (1 << 31) | 4116 | #define GEN8_PIPE_FIFO_UNDERRUN (1 << 31) |
4116 | #define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29) | 4117 | #define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29) |
4117 | #define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28) | 4118 | #define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28) |
4118 | #define GEN8_PIPE_CURSOR_FAULT (1 << 10) | 4119 | #define GEN8_PIPE_CURSOR_FAULT (1 << 10) |
4119 | #define GEN8_PIPE_SPRITE_FAULT (1 << 9) | 4120 | #define GEN8_PIPE_SPRITE_FAULT (1 << 9) |
4120 | #define GEN8_PIPE_PRIMARY_FAULT (1 << 8) | 4121 | #define GEN8_PIPE_PRIMARY_FAULT (1 << 8) |
4121 | #define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5) | 4122 | #define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5) |
4122 | #define GEN8_PIPE_FLIP_DONE (1 << 4) | 4123 | #define GEN8_PIPE_FLIP_DONE (1 << 4) |
4123 | #define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2) | 4124 | #define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2) |
4124 | #define GEN8_PIPE_VSYNC (1 << 1) | 4125 | #define GEN8_PIPE_VSYNC (1 << 1) |
4125 | #define GEN8_PIPE_VBLANK (1 << 0) | 4126 | #define GEN8_PIPE_VBLANK (1 << 0) |
4126 | #define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \ | 4127 | #define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \ |
4127 | (GEN8_PIPE_CURSOR_FAULT | \ | 4128 | (GEN8_PIPE_CURSOR_FAULT | \ |
4128 | GEN8_PIPE_SPRITE_FAULT | \ | 4129 | GEN8_PIPE_SPRITE_FAULT | \ |
4129 | GEN8_PIPE_PRIMARY_FAULT) | 4130 | GEN8_PIPE_PRIMARY_FAULT) |
4130 | 4131 | ||
4131 | #define GEN8_DE_PORT_ISR 0x44440 | 4132 | #define GEN8_DE_PORT_ISR 0x44440 |
4132 | #define GEN8_DE_PORT_IMR 0x44444 | 4133 | #define GEN8_DE_PORT_IMR 0x44444 |
4133 | #define GEN8_DE_PORT_IIR 0x44448 | 4134 | #define GEN8_DE_PORT_IIR 0x44448 |
4134 | #define GEN8_DE_PORT_IER 0x4444c | 4135 | #define GEN8_DE_PORT_IER 0x4444c |
4135 | #define GEN8_PORT_DP_A_HOTPLUG (1 << 3) | 4136 | #define GEN8_PORT_DP_A_HOTPLUG (1 << 3) |
4136 | #define GEN8_AUX_CHANNEL_A (1 << 0) | 4137 | #define GEN8_AUX_CHANNEL_A (1 << 0) |
4137 | 4138 | ||
4138 | #define GEN8_DE_MISC_ISR 0x44460 | 4139 | #define GEN8_DE_MISC_ISR 0x44460 |
4139 | #define GEN8_DE_MISC_IMR 0x44464 | 4140 | #define GEN8_DE_MISC_IMR 0x44464 |
4140 | #define GEN8_DE_MISC_IIR 0x44468 | 4141 | #define GEN8_DE_MISC_IIR 0x44468 |
4141 | #define GEN8_DE_MISC_IER 0x4446c | 4142 | #define GEN8_DE_MISC_IER 0x4446c |
4142 | #define GEN8_DE_MISC_GSE (1 << 27) | 4143 | #define GEN8_DE_MISC_GSE (1 << 27) |
4143 | 4144 | ||
4144 | #define GEN8_PCU_ISR 0x444e0 | 4145 | #define GEN8_PCU_ISR 0x444e0 |
4145 | #define GEN8_PCU_IMR 0x444e4 | 4146 | #define GEN8_PCU_IMR 0x444e4 |
4146 | #define GEN8_PCU_IIR 0x444e8 | 4147 | #define GEN8_PCU_IIR 0x444e8 |
4147 | #define GEN8_PCU_IER 0x444ec | 4148 | #define GEN8_PCU_IER 0x444ec |
4148 | 4149 | ||
4149 | #define ILK_DISPLAY_CHICKEN2 0x42004 | 4150 | #define ILK_DISPLAY_CHICKEN2 0x42004 |
4150 | /* Required on all Ironlake and Sandybridge according to the B-Spec. */ | 4151 | /* Required on all Ironlake and Sandybridge according to the B-Spec. */ |
4151 | #define ILK_ELPIN_409_SELECT (1 << 25) | 4152 | #define ILK_ELPIN_409_SELECT (1 << 25) |
4152 | #define ILK_DPARB_GATE (1<<22) | 4153 | #define ILK_DPARB_GATE (1<<22) |
4153 | #define ILK_VSDPFD_FULL (1<<21) | 4154 | #define ILK_VSDPFD_FULL (1<<21) |
4154 | #define FUSE_STRAP 0x42014 | 4155 | #define FUSE_STRAP 0x42014 |
4155 | #define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31) | 4156 | #define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31) |
4156 | #define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30) | 4157 | #define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30) |
4157 | #define ILK_DISPLAY_DEBUG_DISABLE (1 << 29) | 4158 | #define ILK_DISPLAY_DEBUG_DISABLE (1 << 29) |
4158 | #define ILK_HDCP_DISABLE (1 << 25) | 4159 | #define ILK_HDCP_DISABLE (1 << 25) |
4159 | #define ILK_eDP_A_DISABLE (1 << 24) | 4160 | #define ILK_eDP_A_DISABLE (1 << 24) |
4160 | #define HSW_CDCLK_LIMIT (1 << 24) | 4161 | #define HSW_CDCLK_LIMIT (1 << 24) |
4161 | #define ILK_DESKTOP (1 << 23) | 4162 | #define ILK_DESKTOP (1 << 23) |
4162 | 4163 | ||
4163 | #define ILK_DSPCLK_GATE_D 0x42020 | 4164 | #define ILK_DSPCLK_GATE_D 0x42020 |
4164 | #define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) | 4165 | #define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) |
4165 | #define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) | 4166 | #define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) |
4166 | #define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) | 4167 | #define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) |
4167 | #define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7) | 4168 | #define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7) |
4168 | #define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5) | 4169 | #define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5) |
4169 | 4170 | ||
4170 | #define IVB_CHICKEN3 0x4200c | 4171 | #define IVB_CHICKEN3 0x4200c |
4171 | # define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) | 4172 | # define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) |
4172 | # define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) | 4173 | # define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) |
4173 | 4174 | ||
4174 | #define CHICKEN_PAR1_1 0x42080 | 4175 | #define CHICKEN_PAR1_1 0x42080 |
4175 | #define DPA_MASK_VBLANK_SRD (1 << 15) | 4176 | #define DPA_MASK_VBLANK_SRD (1 << 15) |
4176 | #define FORCE_ARB_IDLE_PLANES (1 << 14) | 4177 | #define FORCE_ARB_IDLE_PLANES (1 << 14) |
4177 | 4178 | ||
4178 | #define _CHICKEN_PIPESL_1_A 0x420b0 | 4179 | #define _CHICKEN_PIPESL_1_A 0x420b0 |
4179 | #define _CHICKEN_PIPESL_1_B 0x420b4 | 4180 | #define _CHICKEN_PIPESL_1_B 0x420b4 |
4180 | #define HSW_FBCQ_DIS (1 << 22) | 4181 | #define HSW_FBCQ_DIS (1 << 22) |
4181 | #define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) | 4182 | #define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) |
4182 | #define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) | 4183 | #define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) |
4183 | 4184 | ||
4184 | #define DISP_ARB_CTL 0x45000 | 4185 | #define DISP_ARB_CTL 0x45000 |
4185 | #define DISP_TILE_SURFACE_SWIZZLING (1<<13) | 4186 | #define DISP_TILE_SURFACE_SWIZZLING (1<<13) |
4186 | #define DISP_FBC_WM_DIS (1<<15) | 4187 | #define DISP_FBC_WM_DIS (1<<15) |
4187 | #define DISP_ARB_CTL2 0x45004 | 4188 | #define DISP_ARB_CTL2 0x45004 |
4188 | #define DISP_DATA_PARTITION_5_6 (1<<6) | 4189 | #define DISP_DATA_PARTITION_5_6 (1<<6) |
4189 | #define GEN7_MSG_CTL 0x45010 | 4190 | #define GEN7_MSG_CTL 0x45010 |
4190 | #define WAIT_FOR_PCH_RESET_ACK (1<<1) | 4191 | #define WAIT_FOR_PCH_RESET_ACK (1<<1) |
4191 | #define WAIT_FOR_PCH_FLR_ACK (1<<0) | 4192 | #define WAIT_FOR_PCH_FLR_ACK (1<<0) |
4192 | #define HSW_NDE_RSTWRN_OPT 0x46408 | 4193 | #define HSW_NDE_RSTWRN_OPT 0x46408 |
4193 | #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) | 4194 | #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) |
4194 | 4195 | ||
4195 | /* GEN7 chicken */ | 4196 | /* GEN7 chicken */ |
4196 | #define GEN7_COMMON_SLICE_CHICKEN1 0x7010 | 4197 | #define GEN7_COMMON_SLICE_CHICKEN1 0x7010 |
4197 | # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) | 4198 | # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) |
4198 | #define COMMON_SLICE_CHICKEN2 0x7014 | 4199 | #define COMMON_SLICE_CHICKEN2 0x7014 |
4199 | # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) | 4200 | # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) |
4200 | 4201 | ||
4201 | #define GEN7_L3SQCREG1 0xB010 | 4202 | #define GEN7_L3SQCREG1 0xB010 |
4202 | #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 | 4203 | #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 |
4203 | 4204 | ||
4204 | #define GEN7_L3CNTLREG1 0xB01C | 4205 | #define GEN7_L3CNTLREG1 0xB01C |
4205 | #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C | 4206 | #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C |
4206 | #define GEN7_L3AGDIS (1<<19) | 4207 | #define GEN7_L3AGDIS (1<<19) |
4207 | 4208 | ||
4208 | #define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 | 4209 | #define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 |
4209 | #define GEN7_WA_L3_CHICKEN_MODE 0x20000000 | 4210 | #define GEN7_WA_L3_CHICKEN_MODE 0x20000000 |
4210 | 4211 | ||
4211 | #define GEN7_L3SQCREG4 0xb034 | 4212 | #define GEN7_L3SQCREG4 0xb034 |
4212 | #define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27) | 4213 | #define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27) |
4213 | 4214 | ||
4214 | /* GEN8 chicken */ | 4215 | /* GEN8 chicken */ |
4215 | #define HDC_CHICKEN0 0x7300 | 4216 | #define HDC_CHICKEN0 0x7300 |
4216 | #define HDC_FORCE_NON_COHERENT (1<<4) | 4217 | #define HDC_FORCE_NON_COHERENT (1<<4) |
4217 | 4218 | ||
4218 | /* WaCatErrorRejectionIssue */ | 4219 | /* WaCatErrorRejectionIssue */ |
4219 | #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 | 4220 | #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 |
4220 | #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) | 4221 | #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) |
4221 | 4222 | ||
4222 | #define HSW_SCRATCH1 0xb038 | 4223 | #define HSW_SCRATCH1 0xb038 |
4223 | #define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27) | 4224 | #define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27) |
4224 | 4225 | ||
4225 | /* PCH */ | 4226 | /* PCH */ |
4226 | 4227 | ||
4227 | /* south display engine interrupt: IBX */ | 4228 | /* south display engine interrupt: IBX */ |
4228 | #define SDE_AUDIO_POWER_D (1 << 27) | 4229 | #define SDE_AUDIO_POWER_D (1 << 27) |
4229 | #define SDE_AUDIO_POWER_C (1 << 26) | 4230 | #define SDE_AUDIO_POWER_C (1 << 26) |
4230 | #define SDE_AUDIO_POWER_B (1 << 25) | 4231 | #define SDE_AUDIO_POWER_B (1 << 25) |
4231 | #define SDE_AUDIO_POWER_SHIFT (25) | 4232 | #define SDE_AUDIO_POWER_SHIFT (25) |
4232 | #define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT) | 4233 | #define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT) |
4233 | #define SDE_GMBUS (1 << 24) | 4234 | #define SDE_GMBUS (1 << 24) |
4234 | #define SDE_AUDIO_HDCP_TRANSB (1 << 23) | 4235 | #define SDE_AUDIO_HDCP_TRANSB (1 << 23) |
4235 | #define SDE_AUDIO_HDCP_TRANSA (1 << 22) | 4236 | #define SDE_AUDIO_HDCP_TRANSA (1 << 22) |
4236 | #define SDE_AUDIO_HDCP_MASK (3 << 22) | 4237 | #define SDE_AUDIO_HDCP_MASK (3 << 22) |
4237 | #define SDE_AUDIO_TRANSB (1 << 21) | 4238 | #define SDE_AUDIO_TRANSB (1 << 21) |
4238 | #define SDE_AUDIO_TRANSA (1 << 20) | 4239 | #define SDE_AUDIO_TRANSA (1 << 20) |
4239 | #define SDE_AUDIO_TRANS_MASK (3 << 20) | 4240 | #define SDE_AUDIO_TRANS_MASK (3 << 20) |
4240 | #define SDE_POISON (1 << 19) | 4241 | #define SDE_POISON (1 << 19) |
4241 | /* 18 reserved */ | 4242 | /* 18 reserved */ |
4242 | #define SDE_FDI_RXB (1 << 17) | 4243 | #define SDE_FDI_RXB (1 << 17) |
4243 | #define SDE_FDI_RXA (1 << 16) | 4244 | #define SDE_FDI_RXA (1 << 16) |
4244 | #define SDE_FDI_MASK (3 << 16) | 4245 | #define SDE_FDI_MASK (3 << 16) |
4245 | #define SDE_AUXD (1 << 15) | 4246 | #define SDE_AUXD (1 << 15) |
4246 | #define SDE_AUXC (1 << 14) | 4247 | #define SDE_AUXC (1 << 14) |
4247 | #define SDE_AUXB (1 << 13) | 4248 | #define SDE_AUXB (1 << 13) |
4248 | #define SDE_AUX_MASK (7 << 13) | 4249 | #define SDE_AUX_MASK (7 << 13) |
4249 | /* 12 reserved */ | 4250 | /* 12 reserved */ |
4250 | #define SDE_CRT_HOTPLUG (1 << 11) | 4251 | #define SDE_CRT_HOTPLUG (1 << 11) |
4251 | #define SDE_PORTD_HOTPLUG (1 << 10) | 4252 | #define SDE_PORTD_HOTPLUG (1 << 10) |
4252 | #define SDE_PORTC_HOTPLUG (1 << 9) | 4253 | #define SDE_PORTC_HOTPLUG (1 << 9) |
4253 | #define SDE_PORTB_HOTPLUG (1 << 8) | 4254 | #define SDE_PORTB_HOTPLUG (1 << 8) |
4254 | #define SDE_SDVOB_HOTPLUG (1 << 6) | 4255 | #define SDE_SDVOB_HOTPLUG (1 << 6) |
4255 | #define SDE_HOTPLUG_MASK (SDE_CRT_HOTPLUG | \ | 4256 | #define SDE_HOTPLUG_MASK (SDE_CRT_HOTPLUG | \ |
4256 | SDE_SDVOB_HOTPLUG | \ | 4257 | SDE_SDVOB_HOTPLUG | \ |
4257 | SDE_PORTB_HOTPLUG | \ | 4258 | SDE_PORTB_HOTPLUG | \ |
4258 | SDE_PORTC_HOTPLUG | \ | 4259 | SDE_PORTC_HOTPLUG | \ |
4259 | SDE_PORTD_HOTPLUG) | 4260 | SDE_PORTD_HOTPLUG) |
4260 | #define SDE_TRANSB_CRC_DONE (1 << 5) | 4261 | #define SDE_TRANSB_CRC_DONE (1 << 5) |
4261 | #define SDE_TRANSB_CRC_ERR (1 << 4) | 4262 | #define SDE_TRANSB_CRC_ERR (1 << 4) |
4262 | #define SDE_TRANSB_FIFO_UNDER (1 << 3) | 4263 | #define SDE_TRANSB_FIFO_UNDER (1 << 3) |
4263 | #define SDE_TRANSA_CRC_DONE (1 << 2) | 4264 | #define SDE_TRANSA_CRC_DONE (1 << 2) |
4264 | #define SDE_TRANSA_CRC_ERR (1 << 1) | 4265 | #define SDE_TRANSA_CRC_ERR (1 << 1) |
4265 | #define SDE_TRANSA_FIFO_UNDER (1 << 0) | 4266 | #define SDE_TRANSA_FIFO_UNDER (1 << 0) |
4266 | #define SDE_TRANS_MASK (0x3f) | 4267 | #define SDE_TRANS_MASK (0x3f) |
4267 | 4268 | ||
4268 | /* south display engine interrupt: CPT/PPT */ | 4269 | /* south display engine interrupt: CPT/PPT */ |
4269 | #define SDE_AUDIO_POWER_D_CPT (1 << 31) | 4270 | #define SDE_AUDIO_POWER_D_CPT (1 << 31) |
4270 | #define SDE_AUDIO_POWER_C_CPT (1 << 30) | 4271 | #define SDE_AUDIO_POWER_C_CPT (1 << 30) |
4271 | #define SDE_AUDIO_POWER_B_CPT (1 << 29) | 4272 | #define SDE_AUDIO_POWER_B_CPT (1 << 29) |
4272 | #define SDE_AUDIO_POWER_SHIFT_CPT 29 | 4273 | #define SDE_AUDIO_POWER_SHIFT_CPT 29 |
4273 | #define SDE_AUDIO_POWER_MASK_CPT (7 << 29) | 4274 | #define SDE_AUDIO_POWER_MASK_CPT (7 << 29) |
4274 | #define SDE_AUXD_CPT (1 << 27) | 4275 | #define SDE_AUXD_CPT (1 << 27) |
4275 | #define SDE_AUXC_CPT (1 << 26) | 4276 | #define SDE_AUXC_CPT (1 << 26) |
4276 | #define SDE_AUXB_CPT (1 << 25) | 4277 | #define SDE_AUXB_CPT (1 << 25) |
4277 | #define SDE_AUX_MASK_CPT (7 << 25) | 4278 | #define SDE_AUX_MASK_CPT (7 << 25) |
4278 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) | 4279 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) |
4279 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) | 4280 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) |
4280 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) | 4281 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) |
4281 | #define SDE_CRT_HOTPLUG_CPT (1 << 19) | 4282 | #define SDE_CRT_HOTPLUG_CPT (1 << 19) |
4282 | #define SDE_SDVOB_HOTPLUG_CPT (1 << 18) | 4283 | #define SDE_SDVOB_HOTPLUG_CPT (1 << 18) |
4283 | #define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ | 4284 | #define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ |
4284 | SDE_SDVOB_HOTPLUG_CPT | \ | 4285 | SDE_SDVOB_HOTPLUG_CPT | \ |
4285 | SDE_PORTD_HOTPLUG_CPT | \ | 4286 | SDE_PORTD_HOTPLUG_CPT | \ |
4286 | SDE_PORTC_HOTPLUG_CPT | \ | 4287 | SDE_PORTC_HOTPLUG_CPT | \ |
4287 | SDE_PORTB_HOTPLUG_CPT) | 4288 | SDE_PORTB_HOTPLUG_CPT) |
4288 | #define SDE_GMBUS_CPT (1 << 17) | 4289 | #define SDE_GMBUS_CPT (1 << 17) |
4289 | #define SDE_ERROR_CPT (1 << 16) | 4290 | #define SDE_ERROR_CPT (1 << 16) |
4290 | #define SDE_AUDIO_CP_REQ_C_CPT (1 << 10) | 4291 | #define SDE_AUDIO_CP_REQ_C_CPT (1 << 10) |
4291 | #define SDE_AUDIO_CP_CHG_C_CPT (1 << 9) | 4292 | #define SDE_AUDIO_CP_CHG_C_CPT (1 << 9) |
4292 | #define SDE_FDI_RXC_CPT (1 << 8) | 4293 | #define SDE_FDI_RXC_CPT (1 << 8) |
4293 | #define SDE_AUDIO_CP_REQ_B_CPT (1 << 6) | 4294 | #define SDE_AUDIO_CP_REQ_B_CPT (1 << 6) |
4294 | #define SDE_AUDIO_CP_CHG_B_CPT (1 << 5) | 4295 | #define SDE_AUDIO_CP_CHG_B_CPT (1 << 5) |
4295 | #define SDE_FDI_RXB_CPT (1 << 4) | 4296 | #define SDE_FDI_RXB_CPT (1 << 4) |
4296 | #define SDE_AUDIO_CP_REQ_A_CPT (1 << 2) | 4297 | #define SDE_AUDIO_CP_REQ_A_CPT (1 << 2) |
4297 | #define SDE_AUDIO_CP_CHG_A_CPT (1 << 1) | 4298 | #define SDE_AUDIO_CP_CHG_A_CPT (1 << 1) |
4298 | #define SDE_FDI_RXA_CPT (1 << 0) | 4299 | #define SDE_FDI_RXA_CPT (1 << 0) |
4299 | #define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \ | 4300 | #define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \ |
4300 | SDE_AUDIO_CP_REQ_B_CPT | \ | 4301 | SDE_AUDIO_CP_REQ_B_CPT | \ |
4301 | SDE_AUDIO_CP_REQ_A_CPT) | 4302 | SDE_AUDIO_CP_REQ_A_CPT) |
4302 | #define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \ | 4303 | #define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \ |
4303 | SDE_AUDIO_CP_CHG_B_CPT | \ | 4304 | SDE_AUDIO_CP_CHG_B_CPT | \ |
4304 | SDE_AUDIO_CP_CHG_A_CPT) | 4305 | SDE_AUDIO_CP_CHG_A_CPT) |
4305 | #define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \ | 4306 | #define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \ |
4306 | SDE_FDI_RXB_CPT | \ | 4307 | SDE_FDI_RXB_CPT | \ |
4307 | SDE_FDI_RXA_CPT) | 4308 | SDE_FDI_RXA_CPT) |
4308 | 4309 | ||
4309 | #define SDEISR 0xc4000 | 4310 | #define SDEISR 0xc4000 |
4310 | #define SDEIMR 0xc4004 | 4311 | #define SDEIMR 0xc4004 |
4311 | #define SDEIIR 0xc4008 | 4312 | #define SDEIIR 0xc4008 |
4312 | #define SDEIER 0xc400c | 4313 | #define SDEIER 0xc400c |
4313 | 4314 | ||
4314 | #define SERR_INT 0xc4040 | 4315 | #define SERR_INT 0xc4040 |
4315 | #define SERR_INT_POISON (1<<31) | 4316 | #define SERR_INT_POISON (1<<31) |
4316 | #define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6) | 4317 | #define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6) |
4317 | #define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3) | 4318 | #define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3) |
4318 | #define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0) | 4319 | #define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0) |
4319 | #define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) | 4320 | #define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) |
4320 | 4321 | ||
4321 | /* digital port hotplug */ | 4322 | /* digital port hotplug */ |
4322 | #define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ | 4323 | #define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ |
4323 | #define PORTD_HOTPLUG_ENABLE (1 << 20) | 4324 | #define PORTD_HOTPLUG_ENABLE (1 << 20) |
4324 | #define PORTD_PULSE_DURATION_2ms (0) | 4325 | #define PORTD_PULSE_DURATION_2ms (0) |
4325 | #define PORTD_PULSE_DURATION_4_5ms (1 << 18) | 4326 | #define PORTD_PULSE_DURATION_4_5ms (1 << 18) |
4326 | #define PORTD_PULSE_DURATION_6ms (2 << 18) | 4327 | #define PORTD_PULSE_DURATION_6ms (2 << 18) |
4327 | #define PORTD_PULSE_DURATION_100ms (3 << 18) | 4328 | #define PORTD_PULSE_DURATION_100ms (3 << 18) |
4328 | #define PORTD_PULSE_DURATION_MASK (3 << 18) | 4329 | #define PORTD_PULSE_DURATION_MASK (3 << 18) |
4329 | #define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16) | 4330 | #define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16) |
4330 | #define PORTD_HOTPLUG_NO_DETECT (0 << 16) | 4331 | #define PORTD_HOTPLUG_NO_DETECT (0 << 16) |
4331 | #define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) | 4332 | #define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) |
4332 | #define PORTD_HOTPLUG_LONG_DETECT (2 << 16) | 4333 | #define PORTD_HOTPLUG_LONG_DETECT (2 << 16) |
4333 | #define PORTC_HOTPLUG_ENABLE (1 << 12) | 4334 | #define PORTC_HOTPLUG_ENABLE (1 << 12) |
4334 | #define PORTC_PULSE_DURATION_2ms (0) | 4335 | #define PORTC_PULSE_DURATION_2ms (0) |
4335 | #define PORTC_PULSE_DURATION_4_5ms (1 << 10) | 4336 | #define PORTC_PULSE_DURATION_4_5ms (1 << 10) |
4336 | #define PORTC_PULSE_DURATION_6ms (2 << 10) | 4337 | #define PORTC_PULSE_DURATION_6ms (2 << 10) |
4337 | #define PORTC_PULSE_DURATION_100ms (3 << 10) | 4338 | #define PORTC_PULSE_DURATION_100ms (3 << 10) |
4338 | #define PORTC_PULSE_DURATION_MASK (3 << 10) | 4339 | #define PORTC_PULSE_DURATION_MASK (3 << 10) |
4339 | #define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8) | 4340 | #define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8) |
4340 | #define PORTC_HOTPLUG_NO_DETECT (0 << 8) | 4341 | #define PORTC_HOTPLUG_NO_DETECT (0 << 8) |
4341 | #define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) | 4342 | #define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) |
4342 | #define PORTC_HOTPLUG_LONG_DETECT (2 << 8) | 4343 | #define PORTC_HOTPLUG_LONG_DETECT (2 << 8) |
4343 | #define PORTB_HOTPLUG_ENABLE (1 << 4) | 4344 | #define PORTB_HOTPLUG_ENABLE (1 << 4) |
4344 | #define PORTB_PULSE_DURATION_2ms (0) | 4345 | #define PORTB_PULSE_DURATION_2ms (0) |
4345 | #define PORTB_PULSE_DURATION_4_5ms (1 << 2) | 4346 | #define PORTB_PULSE_DURATION_4_5ms (1 << 2) |
4346 | #define PORTB_PULSE_DURATION_6ms (2 << 2) | 4347 | #define PORTB_PULSE_DURATION_6ms (2 << 2) |
4347 | #define PORTB_PULSE_DURATION_100ms (3 << 2) | 4348 | #define PORTB_PULSE_DURATION_100ms (3 << 2) |
4348 | #define PORTB_PULSE_DURATION_MASK (3 << 2) | 4349 | #define PORTB_PULSE_DURATION_MASK (3 << 2) |
4349 | #define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0) | 4350 | #define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0) |
4350 | #define PORTB_HOTPLUG_NO_DETECT (0 << 0) | 4351 | #define PORTB_HOTPLUG_NO_DETECT (0 << 0) |
4351 | #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) | 4352 | #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) |
4352 | #define PORTB_HOTPLUG_LONG_DETECT (2 << 0) | 4353 | #define PORTB_HOTPLUG_LONG_DETECT (2 << 0) |
4353 | 4354 | ||
4354 | #define PCH_GPIOA 0xc5010 | 4355 | #define PCH_GPIOA 0xc5010 |
4355 | #define PCH_GPIOB 0xc5014 | 4356 | #define PCH_GPIOB 0xc5014 |
4356 | #define PCH_GPIOC 0xc5018 | 4357 | #define PCH_GPIOC 0xc5018 |
4357 | #define PCH_GPIOD 0xc501c | 4358 | #define PCH_GPIOD 0xc501c |
4358 | #define PCH_GPIOE 0xc5020 | 4359 | #define PCH_GPIOE 0xc5020 |
4359 | #define PCH_GPIOF 0xc5024 | 4360 | #define PCH_GPIOF 0xc5024 |
4360 | 4361 | ||
4361 | #define PCH_GMBUS0 0xc5100 | 4362 | #define PCH_GMBUS0 0xc5100 |
4362 | #define PCH_GMBUS1 0xc5104 | 4363 | #define PCH_GMBUS1 0xc5104 |
4363 | #define PCH_GMBUS2 0xc5108 | 4364 | #define PCH_GMBUS2 0xc5108 |
4364 | #define PCH_GMBUS3 0xc510c | 4365 | #define PCH_GMBUS3 0xc510c |
4365 | #define PCH_GMBUS4 0xc5110 | 4366 | #define PCH_GMBUS4 0xc5110 |
4366 | #define PCH_GMBUS5 0xc5120 | 4367 | #define PCH_GMBUS5 0xc5120 |
4367 | 4368 | ||
/* PCH shared DPLLs; PCH_DPLL() selects A or B by pll index (0 or 1). */
#define _PCH_DPLL_A	0xc6014
#define _PCH_DPLL_B	0xc6018
#define PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)

/* PCH DPLL divisor (FP0/FP1) registers, selected by the same pll index. */
#define _PCH_FPA0	0xc6040
#define  FP_CB_TUNE	(0x3<<22)
#define _PCH_FPA1	0xc6044
#define _PCH_FPB0	0xc6048
#define _PCH_FPB1	0xc604c
#define PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)

#define PCH_DPLL_TEST	0xc606c

/* Display reference clock control: CPU source, SSC (spread spectrum),
 * non-spread and superspread source enables plus spread-mode selects. */
#define PCH_DREF_CONTROL	0xC6200
#define  DREF_CONTROL_MASK	0x7fc3
#define  DREF_CPU_SOURCE_OUTPUT_DISABLE		(0<<13)
#define  DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD	(2<<13)
#define  DREF_CPU_SOURCE_OUTPUT_NONSPREAD	(3<<13)
#define  DREF_CPU_SOURCE_OUTPUT_MASK		(3<<13)
#define  DREF_SSC_SOURCE_DISABLE		(0<<11)
#define  DREF_SSC_SOURCE_ENABLE			(2<<11)
#define  DREF_SSC_SOURCE_MASK			(3<<11)
#define  DREF_NONSPREAD_SOURCE_DISABLE		(0<<9)
#define  DREF_NONSPREAD_CK505_ENABLE		(1<<9)
#define  DREF_NONSPREAD_SOURCE_ENABLE		(2<<9)
#define  DREF_NONSPREAD_SOURCE_MASK		(3<<9)
#define  DREF_SUPERSPREAD_SOURCE_DISABLE	(0<<7)
#define  DREF_SUPERSPREAD_SOURCE_ENABLE		(2<<7)
#define  DREF_SUPERSPREAD_SOURCE_MASK		(3<<7)
#define  DREF_SSC4_DOWNSPREAD			(0<<6)
#define  DREF_SSC4_CENTERSPREAD			(1<<6)
#define  DREF_SSC1_DISABLE			(0<<1)
#define  DREF_SSC1_ENABLE			(1<<1)
#define  DREF_SSC4_DISABLE			(0)
#define  DREF_SSC4_ENABLE			(1)

/* Raw clock frequency register plus FDI link training timer fields. */
#define PCH_RAWCLK_FREQ		0xc6204
#define  FDL_TP1_TIMER_SHIFT	12
#define  FDL_TP1_TIMER_MASK	(3<<12)
#define  FDL_TP2_TIMER_SHIFT	10
#define  FDL_TP2_TIMER_MASK	(3<<10)
#define  RAWCLK_FREQ_MASK	0x3ff

#define PCH_DPLL_TMR_CFG	0xc6208

#define PCH_SSC4_PARMS		0xc6210
#define PCH_SSC4_AUX_PARMS	0xc6214

/* Per-transcoder DPLL routing: 4 bits per pipe (select + enable). */
#define PCH_DPLL_SEL		0xc7000
#define  TRANS_DPLLB_SEL(pipe)		(1 << (pipe * 4))
#define  TRANS_DPLLA_SEL(pipe)		0
#define  TRANS_DPLL_ENABLE(pipe)	(1 << (pipe * 4 + 3))
/* transcoder */

/* PCH transcoder A timing registers; fields pack end/total in the high
 * halfword and start/active in the low halfword (see *_SHIFT values). */
#define _PCH_TRANS_HTOTAL_A		0xe0000
#define  TRANS_HTOTAL_SHIFT		16
#define  TRANS_HACTIVE_SHIFT		0
#define _PCH_TRANS_HBLANK_A		0xe0004
#define  TRANS_HBLANK_END_SHIFT		16
#define  TRANS_HBLANK_START_SHIFT	0
#define _PCH_TRANS_HSYNC_A		0xe0008
#define  TRANS_HSYNC_END_SHIFT		16
#define  TRANS_HSYNC_START_SHIFT	0
#define _PCH_TRANS_VTOTAL_A		0xe000c
#define  TRANS_VTOTAL_SHIFT		16
#define  TRANS_VACTIVE_SHIFT		0
#define _PCH_TRANS_VBLANK_A		0xe0010
#define  TRANS_VBLANK_END_SHIFT		16
#define  TRANS_VBLANK_START_SHIFT	0
#define _PCH_TRANS_VSYNC_A		0xe0014
#define  TRANS_VSYNC_END_SHIFT		16
#define  TRANS_VSYNC_START_SHIFT	0
#define _PCH_TRANS_VSYNCSHIFT_A		0xe0028

/* PCH transcoder A data/link M/N value registers. */
#define _PCH_TRANSA_DATA_M1	0xe0030
#define _PCH_TRANSA_DATA_N1	0xe0034
#define _PCH_TRANSA_DATA_M2	0xe0038
#define _PCH_TRANSA_DATA_N2	0xe003c
#define _PCH_TRANSA_LINK_M1	0xe0040
#define _PCH_TRANSA_LINK_N1	0xe0044
#define _PCH_TRANSA_LINK_M2	0xe0048
#define _PCH_TRANSA_LINK_N2	0xe004c

/* Per-transcoder DIP controls */

#define _VIDEO_DIP_CTL_A	0xe0200
#define _VIDEO_DIP_DATA_A	0xe0208
#define _VIDEO_DIP_GCP_A	0xe0210

#define _VIDEO_DIP_CTL_B	0xe1200
#define _VIDEO_DIP_DATA_B	0xe1208
#define _VIDEO_DIP_GCP_B	0xe1210

#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)

/* Valleyview DIP registers, offset from VLV_DISPLAY_BASE (defined above
 * this chunk). */
#define VLV_VIDEO_DIP_CTL_A		(VLV_DISPLAY_BASE + 0x60200)
#define VLV_VIDEO_DIP_DATA_A		(VLV_DISPLAY_BASE + 0x60208)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A	(VLV_DISPLAY_BASE + 0x60210)

#define VLV_VIDEO_DIP_CTL_B		(VLV_DISPLAY_BASE + 0x61170)
#define VLV_VIDEO_DIP_DATA_B		(VLV_DISPLAY_BASE + 0x61174)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B	(VLV_DISPLAY_BASE + 0x61178)

#define VLV_TVIDEO_DIP_CTL(pipe) \
	_PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
#define VLV_TVIDEO_DIP_DATA(pipe) \
	_PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
#define VLV_TVIDEO_DIP_GCP(pipe) \
	_PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
4481 | 4482 | ||
/* Haswell DIP controls */
#define HSW_VIDEO_DIP_CTL_A		0x60200
#define HSW_VIDEO_DIP_AVI_DATA_A	0x60220
#define HSW_VIDEO_DIP_VS_DATA_A		0x60260
#define HSW_VIDEO_DIP_SPD_DATA_A	0x602A0
#define HSW_VIDEO_DIP_GMP_DATA_A	0x602E0
#define HSW_VIDEO_DIP_VSC_DATA_A	0x60320
#define HSW_VIDEO_DIP_AVI_ECC_A		0x60240
#define HSW_VIDEO_DIP_VS_ECC_A		0x60280
#define HSW_VIDEO_DIP_SPD_ECC_A		0x602C0
#define HSW_VIDEO_DIP_GMP_ECC_A		0x60300
#define HSW_VIDEO_DIP_VSC_ECC_A		0x60344
#define HSW_VIDEO_DIP_GCP_A		0x60210

#define HSW_VIDEO_DIP_CTL_B		0x61200
#define HSW_VIDEO_DIP_AVI_DATA_B	0x61220
#define HSW_VIDEO_DIP_VS_DATA_B		0x61260
#define HSW_VIDEO_DIP_SPD_DATA_B	0x612A0
#define HSW_VIDEO_DIP_GMP_DATA_B	0x612E0
#define HSW_VIDEO_DIP_VSC_DATA_B	0x61320
/* NOTE(review): "BVI" below looks like a typo for "AVI" (compare the A-side
 * HSW_VIDEO_DIP_AVI_ECC_A); kept unchanged as it is the historical name. */
#define HSW_VIDEO_DIP_BVI_ECC_B		0x61240
#define HSW_VIDEO_DIP_VS_ECC_B		0x61280
#define HSW_VIDEO_DIP_SPD_ECC_B		0x612C0
#define HSW_VIDEO_DIP_GMP_ECC_B		0x61300
#define HSW_VIDEO_DIP_VSC_ECC_B		0x61344
#define HSW_VIDEO_DIP_GCP_B		0x61210

/* Per-transcoder lookup helpers built on _TRANSCODER2 (defined above this
 * chunk), starting from the transcoder-A register as the base. */
#define HSW_TVIDEO_DIP_CTL(trans) \
	_TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
	_TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A)
#define HSW_TVIDEO_DIP_VS_DATA(trans) \
	_TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A)
#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
	_TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A)
#define HSW_TVIDEO_DIP_GCP(trans) \
	_TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
	_TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A)

/* Haswell stereo 3D control, one register per pipe. */
#define HSW_STEREO_3D_CTL_A	0x70020
#define  S3D_ENABLE		(1<<31)
#define HSW_STEREO_3D_CTL_B	0x71020

#define HSW_STEREO_3D_CTL(trans) \
	_PIPE2(trans, HSW_STEREO_3D_CTL_A)
/* PCH transcoder B timing registers; field layout matches the A-side
 * TRANS_*_SHIFT definitions above. */
#define _PCH_TRANS_HTOTAL_B	0xe1000
#define _PCH_TRANS_HBLANK_B	0xe1004
#define _PCH_TRANS_HSYNC_B	0xe1008
#define _PCH_TRANS_VTOTAL_B	0xe100c
#define _PCH_TRANS_VBLANK_B	0xe1010
#define _PCH_TRANS_VSYNC_B	0xe1014
#define _PCH_TRANS_VSYNCSHIFT_B	0xe1028

#define PCH_TRANS_HTOTAL(pipe)	_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
#define PCH_TRANS_HBLANK(pipe)	_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
#define PCH_TRANS_HSYNC(pipe)	_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
#define PCH_TRANS_VTOTAL(pipe)	_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
#define PCH_TRANS_VBLANK(pipe)	_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
#define PCH_TRANS_VSYNC(pipe)	_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
#define PCH_TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, \
					 _PCH_TRANS_VSYNCSHIFT_B)

/* PCH transcoder B data/link M/N value registers. */
#define _PCH_TRANSB_DATA_M1	0xe1030
#define _PCH_TRANSB_DATA_N1	0xe1034
#define _PCH_TRANSB_DATA_M2	0xe1038
#define _PCH_TRANSB_DATA_N2	0xe103c
#define _PCH_TRANSB_LINK_M1	0xe1040
#define _PCH_TRANSB_LINK_N1	0xe1044
#define _PCH_TRANSB_LINK_M2	0xe1048
#define _PCH_TRANSB_LINK_N2	0xe104c

#define PCH_TRANS_DATA_M1(pipe)	_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
#define PCH_TRANS_DATA_N1(pipe)	_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
#define PCH_TRANS_DATA_M2(pipe)	_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
#define PCH_TRANS_DATA_N2(pipe)	_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
#define PCH_TRANS_LINK_M1(pipe)	_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
#define PCH_TRANS_LINK_N1(pipe)	_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
#define PCH_TRANS_LINK_M2(pipe)	_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
#define PCH_TRANS_LINK_N2(pipe)	_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)

/* PCH transcoder configuration: enable/state, field sync delay, interlace
 * mode, and bits-per-color selection. */
#define _PCH_TRANSACONF		0xf0008
#define _PCH_TRANSBCONF		0xf1008
#define PCH_TRANSCONF(pipe)	_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
#define LPT_TRANSCONF		_PCH_TRANSACONF /* lpt has only one transcoder */
#define  TRANS_DISABLE		(0<<31)
#define  TRANS_ENABLE		(1<<31)
#define  TRANS_STATE_MASK	(1<<30)
#define  TRANS_STATE_DISABLE	(0<<30)
#define  TRANS_STATE_ENABLE	(1<<30)
#define  TRANS_FSYNC_DELAY_HB1	(0<<27)
#define  TRANS_FSYNC_DELAY_HB2	(1<<27)
#define  TRANS_FSYNC_DELAY_HB3	(2<<27)
#define  TRANS_FSYNC_DELAY_HB4	(3<<27)
#define  TRANS_INTERLACE_MASK	(7<<21)
#define  TRANS_PROGRESSIVE	(0<<21)
#define  TRANS_INTERLACED	(3<<21)
#define  TRANS_LEGACY_INTERLACED_ILK (2<<21)
#define  TRANS_8BPC		(0<<5)
#define  TRANS_10BPC		(1<<5)
#define  TRANS_6BPC		(2<<5)
#define  TRANS_12BPC		(3<<5)

/* Transcoder workaround ("chicken") registers. */
#define _TRANSA_CHICKEN1	0xf0060
#define _TRANSB_CHICKEN1	0xf1060
#define TRANS_CHICKEN1(pipe)	_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
#define  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE	(1<<4)
#define _TRANSA_CHICKEN2	0xf0064
#define _TRANSB_CHICKEN2	0xf1064
#define TRANS_CHICKEN2(pipe)	_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
#define  TRANS_CHICKEN2_TIMING_OVERRIDE			(1<<31)
#define  TRANS_CHICKEN2_FDI_POLARITY_REVERSED		(1<<29)
#define  TRANS_CHICKEN2_FRAME_START_DELAY_MASK		(3<<27)
#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER	(1<<26)
#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH	(1<<25)
/* South (PCH) workaround registers; FDI_PHASE_SYNC_* compute per-pipe bit
 * positions by stepping down 2 bits per pipe from the pipe-A fields. */
#define SOUTH_CHICKEN1		0xc2000
#define  FDIA_PHASE_SYNC_SHIFT_OVR	19
#define  FDIA_PHASE_SYNC_SHIFT_EN	18
#define  FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
#define  FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define  FDI_BC_BIFURCATION_SELECT	(1 << 12)
#define SOUTH_CHICKEN2		0xc2004
#define  FDI_MPHY_IOSFSB_RESET_STATUS	(1<<13)
#define  FDI_MPHY_IOSFSB_RESET_CTL	(1<<12)
#define  DPLS_EDP_PPS_FIX_DIS		(1<<0)

#define _FDI_RXA_CHICKEN	0xc200c
#define _FDI_RXB_CHICKEN	0xc2010
#define  FDI_RX_PHASE_SYNC_POINTER_OVR	(1<<1)
#define  FDI_RX_PHASE_SYNC_POINTER_EN	(1<<0)
#define FDI_RX_CHICKEN(pipe)	_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)

/* South display clock gating disable bits. */
#define SOUTH_DSPCLK_GATE_D	0xc2020
#define  PCH_DPLUNIT_CLOCK_GATE_DISABLE		(1<<30)
#define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE	(1<<29)
#define  PCH_CPUNIT_CLOCK_GATE_DISABLE		(1<<14)
#define  PCH_LP_PARTITION_LEVEL_DISABLE		(1<<12)

/* CPU: FDI_TX */
#define _FDI_TXA_CTL		0x60100
#define _FDI_TXB_CTL		0x61100
#define FDI_TX_CTL(pipe)	_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
#define  FDI_TX_DISABLE			(0<<31)
#define  FDI_TX_ENABLE			(1<<31)
#define  FDI_LINK_TRAIN_PATTERN_1	(0<<28)
#define  FDI_LINK_TRAIN_PATTERN_2	(1<<28)
#define  FDI_LINK_TRAIN_PATTERN_IDLE	(2<<28)
#define  FDI_LINK_TRAIN_NONE		(3<<28)
#define  FDI_LINK_TRAIN_VOLTAGE_0_4V	(0<<25)
#define  FDI_LINK_TRAIN_VOLTAGE_0_6V	(1<<25)
#define  FDI_LINK_TRAIN_VOLTAGE_0_8V	(2<<25)
#define  FDI_LINK_TRAIN_VOLTAGE_1_2V	(3<<25)
#define  FDI_LINK_TRAIN_PRE_EMPHASIS_NONE	(0<<22)
#define  FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X	(1<<22)
#define  FDI_LINK_TRAIN_PRE_EMPHASIS_2X		(2<<22)
#define  FDI_LINK_TRAIN_PRE_EMPHASIS_3X		(3<<22)
/* ILK always use 400mV 0dB for voltage swing and pre-emphasis level.
   SNB has different settings. */
/* SNB A-stepping */
#define  FDI_LINK_TRAIN_400MV_0DB_SNB_A		(0x38<<22)
#define  FDI_LINK_TRAIN_400MV_6DB_SNB_A		(0x02<<22)
#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_A	(0x01<<22)
#define  FDI_LINK_TRAIN_800MV_0DB_SNB_A		(0x0<<22)
/* SNB B-stepping */
#define  FDI_LINK_TRAIN_400MV_0DB_SNB_B		(0x0<<22)
#define  FDI_LINK_TRAIN_400MV_6DB_SNB_B		(0x3a<<22)
#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_B	(0x39<<22)
#define  FDI_LINK_TRAIN_800MV_0DB_SNB_B		(0x38<<22)
#define  FDI_LINK_TRAIN_VOL_EMP_MASK		(0x3f<<22)
/* Lane count field encodes (width - 1). */
#define  FDI_DP_PORT_WIDTH_SHIFT	19
#define  FDI_DP_PORT_WIDTH_MASK		(7 << FDI_DP_PORT_WIDTH_SHIFT)
#define  FDI_DP_PORT_WIDTH(width)	(((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
#define  FDI_TX_ENHANCE_FRAME_ENABLE	(1<<18)
/* Ironlake: hardwired to 1 */
#define  FDI_TX_PLL_ENABLE		(1<<14)

/* Ivybridge has different bits for lolz */
#define  FDI_LINK_TRAIN_PATTERN_1_IVB		(0<<8)
#define  FDI_LINK_TRAIN_PATTERN_2_IVB		(1<<8)
#define  FDI_LINK_TRAIN_PATTERN_IDLE_IVB	(2<<8)
#define  FDI_LINK_TRAIN_NONE_IVB		(3<<8)

/* both Tx and Rx */
#define  FDI_COMPOSITE_SYNC		(1<<11)
#define  FDI_LINK_TRAIN_AUTO		(1<<10)
#define  FDI_SCRAMBLING_ENABLE		(0<<7)
#define  FDI_SCRAMBLING_DISABLE		(1<<7)
/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
#define _FDI_RXA_CTL		0xf000c
#define _FDI_RXB_CTL		0xf100c
#define FDI_RX_CTL(pipe)	_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
#define  FDI_RX_ENABLE		(1<<31)
/* train, dp width same as FDI_TX */
#define  FDI_FS_ERRC_ENABLE		(1<<27)
#define  FDI_FE_ERRC_ENABLE		(1<<26)
#define  FDI_RX_POLARITY_REVERSED_LPT	(1<<16)
#define  FDI_8BPC			(0<<16)
#define  FDI_10BPC			(1<<16)
#define  FDI_6BPC			(2<<16)
#define  FDI_12BPC			(3<<16)
#define  FDI_RX_LINK_REVERSAL_OVERRIDE	(1<<15)
#define  FDI_DMI_LINK_REVERSE_MASK	(1<<14)
#define  FDI_RX_PLL_ENABLE		(1<<13)
#define  FDI_FS_ERR_CORRECT_ENABLE	(1<<11)
#define  FDI_FE_ERR_CORRECT_ENABLE	(1<<10)
#define  FDI_FS_ERR_REPORT_ENABLE	(1<<9)
#define  FDI_FE_ERR_REPORT_ENABLE	(1<<8)
#define  FDI_RX_ENHANCE_FRAME_ENABLE	(1<<6)
#define  FDI_PCDCLK			(1<<4)
/* CPT */
#define  FDI_AUTO_TRAINING		(1<<10)
#define  FDI_LINK_TRAIN_PATTERN_1_CPT	(0<<8)
#define  FDI_LINK_TRAIN_PATTERN_2_CPT	(1<<8)
#define  FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
#define  FDI_LINK_TRAIN_NORMAL_CPT	(3<<8)
#define  FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)

/* FDI RX misc: lane power-down fields and TP1->TP2 / FDI delay values. */
#define _FDI_RXA_MISC		0xf0010
#define _FDI_RXB_MISC		0xf1010
#define  FDI_RX_PWRDN_LANE1_MASK	(3<<26)
#define  FDI_RX_PWRDN_LANE1_VAL(x)	((x)<<26)
#define  FDI_RX_PWRDN_LANE0_MASK	(3<<24)
#define  FDI_RX_PWRDN_LANE0_VAL(x)	((x)<<24)
#define  FDI_RX_TP1_TO_TP2_48		(2<<20)
#define  FDI_RX_TP1_TO_TP2_64		(3<<20)
#define  FDI_RX_FDI_DELAY_90		(0x90<<0)
#define FDI_RX_MISC(pipe)	_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)

/* FDI RX transfer-unit size registers. */
#define _FDI_RXA_TUSIZE1	0xf0030
#define _FDI_RXA_TUSIZE2	0xf0038
#define _FDI_RXB_TUSIZE1	0xf1030
#define _FDI_RXB_TUSIZE2	0xf1038
#define FDI_RX_TUSIZE1(pipe)	_PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe)	_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)

/* FDI_RX interrupt register format */
#define FDI_RX_INTER_LANE_ALIGN		(1<<10)
#define FDI_RX_SYMBOL_LOCK		(1<<9) /* train 2 */
#define FDI_RX_BIT_LOCK			(1<<8) /* train 1 */
#define FDI_RX_TRAIN_PATTERN_2_FAIL	(1<<7)
#define FDI_RX_FS_CODE_ERR		(1<<6)
#define FDI_RX_FE_CODE_ERR		(1<<5)
#define FDI_RX_SYMBOL_ERR_RATE_ABOVE	(1<<4)
#define FDI_RX_HDCP_LINK_FAIL		(1<<3)
#define FDI_RX_PIXEL_FIFO_OVERFLOW	(1<<2)
#define FDI_RX_CROSS_CLOCK_OVERFLOW	(1<<1)
#define FDI_RX_SYMBOL_QUEUE_OVERFLOW	(1<<0)

/* FDI RX interrupt identity / mask registers (bit layout above). */
#define _FDI_RXA_IIR		0xf0014
#define _FDI_RXA_IMR		0xf0018
#define _FDI_RXB_IIR		0xf1014
#define _FDI_RXB_IMR		0xf1018
#define FDI_RX_IIR(pipe)	_PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
#define FDI_RX_IMR(pipe)	_PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)

#define FDI_PLL_CTL_1		0xfe000
#define FDI_PLL_CTL_2		0xfe004
4742 | 4743 | ||
4743 | #define PCH_LVDS 0xe1180 | 4744 | #define PCH_LVDS 0xe1180 |
4744 | #define LVDS_DETECTED (1 << 1) | 4745 | #define LVDS_DETECTED (1 << 1) |
4745 | 4746 | ||
4746 | /* vlv has 2 sets of panel control regs. */ | 4747 | /* vlv has 2 sets of panel control regs. */ |
4747 | #define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) | 4748 | #define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) |
4748 | #define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) | 4749 | #define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) |
4749 | #define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) | 4750 | #define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) |
4750 | #define PANEL_PORT_SELECT_DPB_VLV (1 << 30) | 4751 | #define PANEL_PORT_SELECT_DPB_VLV (1 << 30) |
4751 | #define PANEL_PORT_SELECT_DPC_VLV (2 << 30) | 4752 | #define PANEL_PORT_SELECT_DPC_VLV (2 << 30) |
4752 | #define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) | 4753 | #define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) |
4753 | #define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) | 4754 | #define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) |
4754 | 4755 | ||
4755 | #define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300) | 4756 | #define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300) |
4756 | #define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304) | 4757 | #define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304) |
4757 | #define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308) | 4758 | #define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308) |
4758 | #define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c) | 4759 | #define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c) |
4759 | #define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310) | 4760 | #define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310) |
4760 | 4761 | ||
4761 | #define VLV_PIPE_PP_STATUS(pipe) _PIPE(pipe, PIPEA_PP_STATUS, PIPEB_PP_STATUS) | 4762 | #define VLV_PIPE_PP_STATUS(pipe) _PIPE(pipe, PIPEA_PP_STATUS, PIPEB_PP_STATUS) |
4762 | #define VLV_PIPE_PP_CONTROL(pipe) _PIPE(pipe, PIPEA_PP_CONTROL, PIPEB_PP_CONTROL) | 4763 | #define VLV_PIPE_PP_CONTROL(pipe) _PIPE(pipe, PIPEA_PP_CONTROL, PIPEB_PP_CONTROL) |
4763 | #define VLV_PIPE_PP_ON_DELAYS(pipe) \ | 4764 | #define VLV_PIPE_PP_ON_DELAYS(pipe) \ |
4764 | _PIPE(pipe, PIPEA_PP_ON_DELAYS, PIPEB_PP_ON_DELAYS) | 4765 | _PIPE(pipe, PIPEA_PP_ON_DELAYS, PIPEB_PP_ON_DELAYS) |
4765 | #define VLV_PIPE_PP_OFF_DELAYS(pipe) \ | 4766 | #define VLV_PIPE_PP_OFF_DELAYS(pipe) \ |
4766 | _PIPE(pipe, PIPEA_PP_OFF_DELAYS, PIPEB_PP_OFF_DELAYS) | 4767 | _PIPE(pipe, PIPEA_PP_OFF_DELAYS, PIPEB_PP_OFF_DELAYS) |
4767 | #define VLV_PIPE_PP_DIVISOR(pipe) \ | 4768 | #define VLV_PIPE_PP_DIVISOR(pipe) \ |
4768 | _PIPE(pipe, PIPEA_PP_DIVISOR, PIPEB_PP_DIVISOR) | 4769 | _PIPE(pipe, PIPEA_PP_DIVISOR, PIPEB_PP_DIVISOR) |
4769 | 4770 | ||
4770 | #define PCH_PP_STATUS 0xc7200 | 4771 | #define PCH_PP_STATUS 0xc7200 |
4771 | #define PCH_PP_CONTROL 0xc7204 | 4772 | #define PCH_PP_CONTROL 0xc7204 |
4772 | #define PANEL_UNLOCK_REGS (0xabcd << 16) | 4773 | #define PANEL_UNLOCK_REGS (0xabcd << 16) |
4773 | #define PANEL_UNLOCK_MASK (0xffff << 16) | 4774 | #define PANEL_UNLOCK_MASK (0xffff << 16) |
4774 | #define EDP_FORCE_VDD (1 << 3) | 4775 | #define EDP_FORCE_VDD (1 << 3) |
4775 | #define EDP_BLC_ENABLE (1 << 2) | 4776 | #define EDP_BLC_ENABLE (1 << 2) |
4776 | #define PANEL_POWER_RESET (1 << 1) | 4777 | #define PANEL_POWER_RESET (1 << 1) |
4777 | #define PANEL_POWER_OFF (0 << 0) | 4778 | #define PANEL_POWER_OFF (0 << 0) |
4778 | #define PANEL_POWER_ON (1 << 0) | 4779 | #define PANEL_POWER_ON (1 << 0) |
4779 | #define PCH_PP_ON_DELAYS 0xc7208 | 4780 | #define PCH_PP_ON_DELAYS 0xc7208 |
4780 | #define PANEL_PORT_SELECT_MASK (3 << 30) | 4781 | #define PANEL_PORT_SELECT_MASK (3 << 30) |
4781 | #define PANEL_PORT_SELECT_LVDS (0 << 30) | 4782 | #define PANEL_PORT_SELECT_LVDS (0 << 30) |
4782 | #define PANEL_PORT_SELECT_DPA (1 << 30) | 4783 | #define PANEL_PORT_SELECT_DPA (1 << 30) |
4783 | #define PANEL_PORT_SELECT_DPC (2 << 30) | 4784 | #define PANEL_PORT_SELECT_DPC (2 << 30) |
4784 | #define PANEL_PORT_SELECT_DPD (3 << 30) | 4785 | #define PANEL_PORT_SELECT_DPD (3 << 30) |
4785 | #define PANEL_POWER_UP_DELAY_MASK (0x1fff0000) | 4786 | #define PANEL_POWER_UP_DELAY_MASK (0x1fff0000) |
4786 | #define PANEL_POWER_UP_DELAY_SHIFT 16 | 4787 | #define PANEL_POWER_UP_DELAY_SHIFT 16 |
4787 | #define PANEL_LIGHT_ON_DELAY_MASK (0x1fff) | 4788 | #define PANEL_LIGHT_ON_DELAY_MASK (0x1fff) |
4788 | #define PANEL_LIGHT_ON_DELAY_SHIFT 0 | 4789 | #define PANEL_LIGHT_ON_DELAY_SHIFT 0 |
4789 | 4790 | ||
4790 | #define PCH_PP_OFF_DELAYS 0xc720c | 4791 | #define PCH_PP_OFF_DELAYS 0xc720c |
4791 | #define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) | 4792 | #define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) |
4792 | #define PANEL_POWER_DOWN_DELAY_SHIFT 16 | 4793 | #define PANEL_POWER_DOWN_DELAY_SHIFT 16 |
4793 | #define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) | 4794 | #define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) |
4794 | #define PANEL_LIGHT_OFF_DELAY_SHIFT 0 | 4795 | #define PANEL_LIGHT_OFF_DELAY_SHIFT 0 |
4795 | 4796 | ||
4796 | #define PCH_PP_DIVISOR 0xc7210 | 4797 | #define PCH_PP_DIVISOR 0xc7210 |
4797 | #define PP_REFERENCE_DIVIDER_MASK (0xffffff00) | 4798 | #define PP_REFERENCE_DIVIDER_MASK (0xffffff00) |
4798 | #define PP_REFERENCE_DIVIDER_SHIFT 8 | 4799 | #define PP_REFERENCE_DIVIDER_SHIFT 8 |
4799 | #define PANEL_POWER_CYCLE_DELAY_MASK (0x1f) | 4800 | #define PANEL_POWER_CYCLE_DELAY_MASK (0x1f) |
4800 | #define PANEL_POWER_CYCLE_DELAY_SHIFT 0 | 4801 | #define PANEL_POWER_CYCLE_DELAY_SHIFT 0 |
4801 | 4802 | ||
4802 | #define PCH_DP_B 0xe4100 | 4803 | #define PCH_DP_B 0xe4100 |
4803 | #define PCH_DPB_AUX_CH_CTL 0xe4110 | 4804 | #define PCH_DPB_AUX_CH_CTL 0xe4110 |
4804 | #define PCH_DPB_AUX_CH_DATA1 0xe4114 | 4805 | #define PCH_DPB_AUX_CH_DATA1 0xe4114 |
4805 | #define PCH_DPB_AUX_CH_DATA2 0xe4118 | 4806 | #define PCH_DPB_AUX_CH_DATA2 0xe4118 |
4806 | #define PCH_DPB_AUX_CH_DATA3 0xe411c | 4807 | #define PCH_DPB_AUX_CH_DATA3 0xe411c |
4807 | #define PCH_DPB_AUX_CH_DATA4 0xe4120 | 4808 | #define PCH_DPB_AUX_CH_DATA4 0xe4120 |
4808 | #define PCH_DPB_AUX_CH_DATA5 0xe4124 | 4809 | #define PCH_DPB_AUX_CH_DATA5 0xe4124 |
4809 | 4810 | ||
4810 | #define PCH_DP_C 0xe4200 | 4811 | #define PCH_DP_C 0xe4200 |
4811 | #define PCH_DPC_AUX_CH_CTL 0xe4210 | 4812 | #define PCH_DPC_AUX_CH_CTL 0xe4210 |
4812 | #define PCH_DPC_AUX_CH_DATA1 0xe4214 | 4813 | #define PCH_DPC_AUX_CH_DATA1 0xe4214 |
4813 | #define PCH_DPC_AUX_CH_DATA2 0xe4218 | 4814 | #define PCH_DPC_AUX_CH_DATA2 0xe4218 |
4814 | #define PCH_DPC_AUX_CH_DATA3 0xe421c | 4815 | #define PCH_DPC_AUX_CH_DATA3 0xe421c |
4815 | #define PCH_DPC_AUX_CH_DATA4 0xe4220 | 4816 | #define PCH_DPC_AUX_CH_DATA4 0xe4220 |
4816 | #define PCH_DPC_AUX_CH_DATA5 0xe4224 | 4817 | #define PCH_DPC_AUX_CH_DATA5 0xe4224 |
4817 | 4818 | ||
4818 | #define PCH_DP_D 0xe4300 | 4819 | #define PCH_DP_D 0xe4300 |
4819 | #define PCH_DPD_AUX_CH_CTL 0xe4310 | 4820 | #define PCH_DPD_AUX_CH_CTL 0xe4310 |
4820 | #define PCH_DPD_AUX_CH_DATA1 0xe4314 | 4821 | #define PCH_DPD_AUX_CH_DATA1 0xe4314 |
4821 | #define PCH_DPD_AUX_CH_DATA2 0xe4318 | 4822 | #define PCH_DPD_AUX_CH_DATA2 0xe4318 |
4822 | #define PCH_DPD_AUX_CH_DATA3 0xe431c | 4823 | #define PCH_DPD_AUX_CH_DATA3 0xe431c |
4823 | #define PCH_DPD_AUX_CH_DATA4 0xe4320 | 4824 | #define PCH_DPD_AUX_CH_DATA4 0xe4320 |
4824 | #define PCH_DPD_AUX_CH_DATA5 0xe4324 | 4825 | #define PCH_DPD_AUX_CH_DATA5 0xe4324 |
4825 | 4826 | ||
4826 | /* CPT */ | 4827 | /* CPT */ |
4827 | #define PORT_TRANS_A_SEL_CPT 0 | 4828 | #define PORT_TRANS_A_SEL_CPT 0 |
4828 | #define PORT_TRANS_B_SEL_CPT (1<<29) | 4829 | #define PORT_TRANS_B_SEL_CPT (1<<29) |
4829 | #define PORT_TRANS_C_SEL_CPT (2<<29) | 4830 | #define PORT_TRANS_C_SEL_CPT (2<<29) |
4830 | #define PORT_TRANS_SEL_MASK (3<<29) | 4831 | #define PORT_TRANS_SEL_MASK (3<<29) |
4831 | #define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29) | 4832 | #define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29) |
4832 | #define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30) | 4833 | #define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30) |
4833 | #define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29) | 4834 | #define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29) |
4834 | 4835 | ||
4835 | #define TRANS_DP_CTL_A 0xe0300 | 4836 | #define TRANS_DP_CTL_A 0xe0300 |
4836 | #define TRANS_DP_CTL_B 0xe1300 | 4837 | #define TRANS_DP_CTL_B 0xe1300 |
4837 | #define TRANS_DP_CTL_C 0xe2300 | 4838 | #define TRANS_DP_CTL_C 0xe2300 |
4838 | #define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B) | 4839 | #define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B) |
4839 | #define TRANS_DP_OUTPUT_ENABLE (1<<31) | 4840 | #define TRANS_DP_OUTPUT_ENABLE (1<<31) |
4840 | #define TRANS_DP_PORT_SEL_B (0<<29) | 4841 | #define TRANS_DP_PORT_SEL_B (0<<29) |
4841 | #define TRANS_DP_PORT_SEL_C (1<<29) | 4842 | #define TRANS_DP_PORT_SEL_C (1<<29) |
4842 | #define TRANS_DP_PORT_SEL_D (2<<29) | 4843 | #define TRANS_DP_PORT_SEL_D (2<<29) |
4843 | #define TRANS_DP_PORT_SEL_NONE (3<<29) | 4844 | #define TRANS_DP_PORT_SEL_NONE (3<<29) |
4844 | #define TRANS_DP_PORT_SEL_MASK (3<<29) | 4845 | #define TRANS_DP_PORT_SEL_MASK (3<<29) |
4845 | #define TRANS_DP_AUDIO_ONLY (1<<26) | 4846 | #define TRANS_DP_AUDIO_ONLY (1<<26) |
4846 | #define TRANS_DP_ENH_FRAMING (1<<18) | 4847 | #define TRANS_DP_ENH_FRAMING (1<<18) |
4847 | #define TRANS_DP_8BPC (0<<9) | 4848 | #define TRANS_DP_8BPC (0<<9) |
4848 | #define TRANS_DP_10BPC (1<<9) | 4849 | #define TRANS_DP_10BPC (1<<9) |
4849 | #define TRANS_DP_6BPC (2<<9) | 4850 | #define TRANS_DP_6BPC (2<<9) |
4850 | #define TRANS_DP_12BPC (3<<9) | 4851 | #define TRANS_DP_12BPC (3<<9) |
4851 | #define TRANS_DP_BPC_MASK (3<<9) | 4852 | #define TRANS_DP_BPC_MASK (3<<9) |
4852 | #define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) | 4853 | #define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) |
4853 | #define TRANS_DP_VSYNC_ACTIVE_LOW 0 | 4854 | #define TRANS_DP_VSYNC_ACTIVE_LOW 0 |
4854 | #define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) | 4855 | #define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) |
4855 | #define TRANS_DP_HSYNC_ACTIVE_LOW 0 | 4856 | #define TRANS_DP_HSYNC_ACTIVE_LOW 0 |
4856 | #define TRANS_DP_SYNC_MASK (3<<3) | 4857 | #define TRANS_DP_SYNC_MASK (3<<3) |
4857 | 4858 | ||
4858 | /* SNB eDP training params */ | 4859 | /* SNB eDP training params */ |
4859 | /* SNB A-stepping */ | 4860 | /* SNB A-stepping */ |
4860 | #define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) | 4861 | #define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) |
4861 | #define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) | 4862 | #define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) |
4862 | #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) | 4863 | #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) |
4863 | #define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) | 4864 | #define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) |
4864 | /* SNB B-stepping */ | 4865 | /* SNB B-stepping */ |
4865 | #define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22) | 4866 | #define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22) |
4866 | #define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22) | 4867 | #define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22) |
4867 | #define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22) | 4868 | #define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22) |
4868 | #define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22) | 4869 | #define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22) |
4869 | #define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22) | 4870 | #define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22) |
4870 | #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) | 4871 | #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) |
4871 | 4872 | ||
4872 | /* IVB */ | 4873 | /* IVB */ |
4873 | #define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22) | 4874 | #define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22) |
4874 | #define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22) | 4875 | #define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22) |
4875 | #define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22) | 4876 | #define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22) |
4876 | #define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22) | 4877 | #define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22) |
4877 | #define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22) | 4878 | #define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22) |
4878 | #define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22) | 4879 | #define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22) |
4879 | #define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22) | 4880 | #define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22) |
4880 | 4881 | ||
4881 | /* legacy values */ | 4882 | /* legacy values */ |
4882 | #define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22) | 4883 | #define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22) |
4883 | #define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22) | 4884 | #define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22) |
4884 | #define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22) | 4885 | #define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22) |
4885 | #define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22) | 4886 | #define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22) |
4886 | #define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22) | 4887 | #define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22) |
4887 | 4888 | ||
4888 | #define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22) | 4889 | #define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22) |
4889 | 4890 | ||
4890 | #define FORCEWAKE 0xA18C | 4891 | #define FORCEWAKE 0xA18C |
4891 | #define FORCEWAKE_VLV 0x1300b0 | 4892 | #define FORCEWAKE_VLV 0x1300b0 |
4892 | #define FORCEWAKE_ACK_VLV 0x1300b4 | 4893 | #define FORCEWAKE_ACK_VLV 0x1300b4 |
4893 | #define FORCEWAKE_MEDIA_VLV 0x1300b8 | 4894 | #define FORCEWAKE_MEDIA_VLV 0x1300b8 |
4894 | #define FORCEWAKE_ACK_MEDIA_VLV 0x1300bc | 4895 | #define FORCEWAKE_ACK_MEDIA_VLV 0x1300bc |
4895 | #define FORCEWAKE_ACK_HSW 0x130044 | 4896 | #define FORCEWAKE_ACK_HSW 0x130044 |
4896 | #define FORCEWAKE_ACK 0x130090 | 4897 | #define FORCEWAKE_ACK 0x130090 |
4897 | #define VLV_GTLC_WAKE_CTRL 0x130090 | 4898 | #define VLV_GTLC_WAKE_CTRL 0x130090 |
4898 | #define VLV_GTLC_PW_STATUS 0x130094 | 4899 | #define VLV_GTLC_PW_STATUS 0x130094 |
4899 | #define VLV_GTLC_PW_RENDER_STATUS_MASK 0x80 | 4900 | #define VLV_GTLC_PW_RENDER_STATUS_MASK 0x80 |
4900 | #define VLV_GTLC_PW_MEDIA_STATUS_MASK 0x20 | 4901 | #define VLV_GTLC_PW_MEDIA_STATUS_MASK 0x20 |
4901 | #define FORCEWAKE_MT 0xa188 /* multi-threaded */ | 4902 | #define FORCEWAKE_MT 0xa188 /* multi-threaded */ |
4902 | #define FORCEWAKE_KERNEL 0x1 | 4903 | #define FORCEWAKE_KERNEL 0x1 |
4903 | #define FORCEWAKE_USER 0x2 | 4904 | #define FORCEWAKE_USER 0x2 |
4904 | #define FORCEWAKE_MT_ACK 0x130040 | 4905 | #define FORCEWAKE_MT_ACK 0x130040 |
4905 | #define ECOBUS 0xa180 | 4906 | #define ECOBUS 0xa180 |
4906 | #define FORCEWAKE_MT_ENABLE (1<<5) | 4907 | #define FORCEWAKE_MT_ENABLE (1<<5) |
4907 | 4908 | ||
4908 | #define GTFIFODBG 0x120000 | 4909 | #define GTFIFODBG 0x120000 |
4909 | #define GT_FIFO_SBDROPERR (1<<6) | 4910 | #define GT_FIFO_SBDROPERR (1<<6) |
4910 | #define GT_FIFO_BLOBDROPERR (1<<5) | 4911 | #define GT_FIFO_BLOBDROPERR (1<<5) |
4911 | #define GT_FIFO_SB_READ_ABORTERR (1<<4) | 4912 | #define GT_FIFO_SB_READ_ABORTERR (1<<4) |
4912 | #define GT_FIFO_DROPERR (1<<3) | 4913 | #define GT_FIFO_DROPERR (1<<3) |
4913 | #define GT_FIFO_OVFERR (1<<2) | 4914 | #define GT_FIFO_OVFERR (1<<2) |
4914 | #define GT_FIFO_IAWRERR (1<<1) | 4915 | #define GT_FIFO_IAWRERR (1<<1) |
4915 | #define GT_FIFO_IARDERR (1<<0) | 4916 | #define GT_FIFO_IARDERR (1<<0) |
4916 | 4917 | ||
4917 | #define GTFIFOCTL 0x120008 | 4918 | #define GTFIFOCTL 0x120008 |
4918 | #define GT_FIFO_FREE_ENTRIES_MASK 0x7f | 4919 | #define GT_FIFO_FREE_ENTRIES_MASK 0x7f |
4919 | #define GT_FIFO_NUM_RESERVED_ENTRIES 20 | 4920 | #define GT_FIFO_NUM_RESERVED_ENTRIES 20 |
4920 | 4921 | ||
4921 | #define HSW_IDICR 0x9008 | 4922 | #define HSW_IDICR 0x9008 |
4922 | #define IDIHASHMSK(x) (((x) & 0x3f) << 16) | 4923 | #define IDIHASHMSK(x) (((x) & 0x3f) << 16) |
4923 | #define HSW_EDRAM_PRESENT 0x120010 | 4924 | #define HSW_EDRAM_PRESENT 0x120010 |
4924 | 4925 | ||
4925 | #define GEN6_UCGCTL1 0x9400 | 4926 | #define GEN6_UCGCTL1 0x9400 |
4926 | # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) | 4927 | # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) |
4927 | # define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) | 4928 | # define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) |
4928 | 4929 | ||
4929 | #define GEN6_UCGCTL2 0x9404 | 4930 | #define GEN6_UCGCTL2 0x9404 |
4930 | # define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30) | 4931 | # define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30) |
4931 | # define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22) | 4932 | # define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22) |
4932 | # define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13) | 4933 | # define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13) |
4933 | # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) | 4934 | # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) |
4934 | # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) | 4935 | # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) |
4935 | 4936 | ||
4936 | #define GEN7_UCGCTL4 0x940c | 4937 | #define GEN7_UCGCTL4 0x940c |
4937 | #define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) | 4938 | #define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) |
4938 | 4939 | ||
4939 | #define GEN8_UCGCTL6 0x9430 | 4940 | #define GEN8_UCGCTL6 0x9430 |
4940 | #define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14) | 4941 | #define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14) |
4941 | 4942 | ||
4942 | #define GEN6_RPNSWREQ 0xA008 | 4943 | #define GEN6_RPNSWREQ 0xA008 |
4943 | #define GEN6_TURBO_DISABLE (1<<31) | 4944 | #define GEN6_TURBO_DISABLE (1<<31) |
4944 | #define GEN6_FREQUENCY(x) ((x)<<25) | 4945 | #define GEN6_FREQUENCY(x) ((x)<<25) |
4945 | #define HSW_FREQUENCY(x) ((x)<<24) | 4946 | #define HSW_FREQUENCY(x) ((x)<<24) |
4946 | #define GEN6_OFFSET(x) ((x)<<19) | 4947 | #define GEN6_OFFSET(x) ((x)<<19) |
4947 | #define GEN6_AGGRESSIVE_TURBO (0<<15) | 4948 | #define GEN6_AGGRESSIVE_TURBO (0<<15) |
4948 | #define GEN6_RC_VIDEO_FREQ 0xA00C | 4949 | #define GEN6_RC_VIDEO_FREQ 0xA00C |
4949 | #define GEN6_RC_CONTROL 0xA090 | 4950 | #define GEN6_RC_CONTROL 0xA090 |
4950 | #define GEN6_RC_CTL_RC6pp_ENABLE (1<<16) | 4951 | #define GEN6_RC_CTL_RC6pp_ENABLE (1<<16) |
4951 | #define GEN6_RC_CTL_RC6p_ENABLE (1<<17) | 4952 | #define GEN6_RC_CTL_RC6p_ENABLE (1<<17) |
4952 | #define GEN6_RC_CTL_RC6_ENABLE (1<<18) | 4953 | #define GEN6_RC_CTL_RC6_ENABLE (1<<18) |
4953 | #define GEN6_RC_CTL_RC1e_ENABLE (1<<20) | 4954 | #define GEN6_RC_CTL_RC1e_ENABLE (1<<20) |
4954 | #define GEN6_RC_CTL_RC7_ENABLE (1<<22) | 4955 | #define GEN6_RC_CTL_RC7_ENABLE (1<<22) |
4955 | #define VLV_RC_CTL_CTX_RST_PARALLEL (1<<24) | 4956 | #define VLV_RC_CTL_CTX_RST_PARALLEL (1<<24) |
4956 | #define GEN7_RC_CTL_TO_MODE (1<<28) | 4957 | #define GEN7_RC_CTL_TO_MODE (1<<28) |
4957 | #define GEN6_RC_CTL_EI_MODE(x) ((x)<<27) | 4958 | #define GEN6_RC_CTL_EI_MODE(x) ((x)<<27) |
4958 | #define GEN6_RC_CTL_HW_ENABLE (1<<31) | 4959 | #define GEN6_RC_CTL_HW_ENABLE (1<<31) |
4959 | #define GEN6_RP_DOWN_TIMEOUT 0xA010 | 4960 | #define GEN6_RP_DOWN_TIMEOUT 0xA010 |
4960 | #define GEN6_RP_INTERRUPT_LIMITS 0xA014 | 4961 | #define GEN6_RP_INTERRUPT_LIMITS 0xA014 |
4961 | #define GEN6_RPSTAT1 0xA01C | 4962 | #define GEN6_RPSTAT1 0xA01C |
4962 | #define GEN6_CAGF_SHIFT 8 | 4963 | #define GEN6_CAGF_SHIFT 8 |
4963 | #define HSW_CAGF_SHIFT 7 | 4964 | #define HSW_CAGF_SHIFT 7 |
4964 | #define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) | 4965 | #define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) |
4965 | #define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) | 4966 | #define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) |
4966 | #define GEN6_RP_CONTROL 0xA024 | 4967 | #define GEN6_RP_CONTROL 0xA024 |
4967 | #define GEN6_RP_MEDIA_TURBO (1<<11) | 4968 | #define GEN6_RP_MEDIA_TURBO (1<<11) |
4968 | #define GEN6_RP_MEDIA_MODE_MASK (3<<9) | 4969 | #define GEN6_RP_MEDIA_MODE_MASK (3<<9) |
4969 | #define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9) | 4970 | #define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9) |
4970 | #define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9) | 4971 | #define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9) |
4971 | #define GEN6_RP_MEDIA_HW_MODE (1<<9) | 4972 | #define GEN6_RP_MEDIA_HW_MODE (1<<9) |
4972 | #define GEN6_RP_MEDIA_SW_MODE (0<<9) | 4973 | #define GEN6_RP_MEDIA_SW_MODE (0<<9) |
4973 | #define GEN6_RP_MEDIA_IS_GFX (1<<8) | 4974 | #define GEN6_RP_MEDIA_IS_GFX (1<<8) |
4974 | #define GEN6_RP_ENABLE (1<<7) | 4975 | #define GEN6_RP_ENABLE (1<<7) |
4975 | #define GEN6_RP_UP_IDLE_MIN (0x1<<3) | 4976 | #define GEN6_RP_UP_IDLE_MIN (0x1<<3) |
4976 | #define GEN6_RP_UP_BUSY_AVG (0x2<<3) | 4977 | #define GEN6_RP_UP_BUSY_AVG (0x2<<3) |
4977 | #define GEN6_RP_UP_BUSY_CONT (0x4<<3) | 4978 | #define GEN6_RP_UP_BUSY_CONT (0x4<<3) |
4978 | #define GEN6_RP_DOWN_IDLE_AVG (0x2<<0) | 4979 | #define GEN6_RP_DOWN_IDLE_AVG (0x2<<0) |
4979 | #define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) | 4980 | #define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) |
4980 | #define GEN6_RP_UP_THRESHOLD 0xA02C | 4981 | #define GEN6_RP_UP_THRESHOLD 0xA02C |
4981 | #define GEN6_RP_DOWN_THRESHOLD 0xA030 | 4982 | #define GEN6_RP_DOWN_THRESHOLD 0xA030 |
4982 | #define GEN6_RP_CUR_UP_EI 0xA050 | 4983 | #define GEN6_RP_CUR_UP_EI 0xA050 |
4983 | #define GEN6_CURICONT_MASK 0xffffff | 4984 | #define GEN6_CURICONT_MASK 0xffffff |
4984 | #define GEN6_RP_CUR_UP 0xA054 | 4985 | #define GEN6_RP_CUR_UP 0xA054 |
4985 | #define GEN6_CURBSYTAVG_MASK 0xffffff | 4986 | #define GEN6_CURBSYTAVG_MASK 0xffffff |
4986 | #define GEN6_RP_PREV_UP 0xA058 | 4987 | #define GEN6_RP_PREV_UP 0xA058 |
4987 | #define GEN6_RP_CUR_DOWN_EI 0xA05C | 4988 | #define GEN6_RP_CUR_DOWN_EI 0xA05C |
4988 | #define GEN6_CURIAVG_MASK 0xffffff | 4989 | #define GEN6_CURIAVG_MASK 0xffffff |
4989 | #define GEN6_RP_CUR_DOWN 0xA060 | 4990 | #define GEN6_RP_CUR_DOWN 0xA060 |
4990 | #define GEN6_RP_PREV_DOWN 0xA064 | 4991 | #define GEN6_RP_PREV_DOWN 0xA064 |
4991 | #define GEN6_RP_UP_EI 0xA068 | 4992 | #define GEN6_RP_UP_EI 0xA068 |
4992 | #define GEN6_RP_DOWN_EI 0xA06C | 4993 | #define GEN6_RP_DOWN_EI 0xA06C |
4993 | #define GEN6_RP_IDLE_HYSTERSIS 0xA070 | 4994 | #define GEN6_RP_IDLE_HYSTERSIS 0xA070 |
4994 | #define GEN6_RC_STATE 0xA094 | 4995 | #define GEN6_RC_STATE 0xA094 |
4995 | #define GEN6_RC1_WAKE_RATE_LIMIT 0xA098 | 4996 | #define GEN6_RC1_WAKE_RATE_LIMIT 0xA098 |
4996 | #define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C | 4997 | #define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C |
4997 | #define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0 | 4998 | #define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0 |
4998 | #define GEN6_RC_EVALUATION_INTERVAL 0xA0A8 | 4999 | #define GEN6_RC_EVALUATION_INTERVAL 0xA0A8 |
4999 | #define GEN6_RC_IDLE_HYSTERSIS 0xA0AC | 5000 | #define GEN6_RC_IDLE_HYSTERSIS 0xA0AC |
5000 | #define GEN6_RC_SLEEP 0xA0B0 | 5001 | #define GEN6_RC_SLEEP 0xA0B0 |
5001 | #define GEN6_RC1e_THRESHOLD 0xA0B4 | 5002 | #define GEN6_RC1e_THRESHOLD 0xA0B4 |
5002 | #define GEN6_RC6_THRESHOLD 0xA0B8 | 5003 | #define GEN6_RC6_THRESHOLD 0xA0B8 |
5003 | #define GEN6_RC6p_THRESHOLD 0xA0BC | 5004 | #define GEN6_RC6p_THRESHOLD 0xA0BC |
5004 | #define GEN6_RC6pp_THRESHOLD 0xA0C0 | 5005 | #define GEN6_RC6pp_THRESHOLD 0xA0C0 |
5005 | #define GEN6_PMINTRMSK 0xA168 | 5006 | #define GEN6_PMINTRMSK 0xA168 |
5006 | 5007 | ||
5007 | #define GEN6_PMISR 0x44020 | 5008 | #define GEN6_PMISR 0x44020 |
5008 | #define GEN6_PMIMR 0x44024 /* rps_lock */ | 5009 | #define GEN6_PMIMR 0x44024 /* rps_lock */ |
5009 | #define GEN6_PMIIR 0x44028 | 5010 | #define GEN6_PMIIR 0x44028 |
5010 | #define GEN6_PMIER 0x4402C | 5011 | #define GEN6_PMIER 0x4402C |
5011 | #define GEN6_PM_MBOX_EVENT (1<<25) | 5012 | #define GEN6_PM_MBOX_EVENT (1<<25) |
5012 | #define GEN6_PM_THERMAL_EVENT (1<<24) | 5013 | #define GEN6_PM_THERMAL_EVENT (1<<24) |
5013 | #define GEN6_PM_RP_DOWN_TIMEOUT (1<<6) | 5014 | #define GEN6_PM_RP_DOWN_TIMEOUT (1<<6) |
5014 | #define GEN6_PM_RP_UP_THRESHOLD (1<<5) | 5015 | #define GEN6_PM_RP_UP_THRESHOLD (1<<5) |
5015 | #define GEN6_PM_RP_DOWN_THRESHOLD (1<<4) | 5016 | #define GEN6_PM_RP_DOWN_THRESHOLD (1<<4) |
5016 | #define GEN6_PM_RP_UP_EI_EXPIRED (1<<2) | 5017 | #define GEN6_PM_RP_UP_EI_EXPIRED (1<<2) |
5017 | #define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1) | 5018 | #define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1) |
5018 | #define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \ | 5019 | #define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \ |
5019 | GEN6_PM_RP_DOWN_THRESHOLD | \ | 5020 | GEN6_PM_RP_DOWN_THRESHOLD | \ |
5020 | GEN6_PM_RP_DOWN_TIMEOUT) | 5021 | GEN6_PM_RP_DOWN_TIMEOUT) |
5021 | 5022 | ||
5022 | #define VLV_GTLC_SURVIVABILITY_REG 0x130098 | 5023 | #define VLV_GTLC_SURVIVABILITY_REG 0x130098 |
5023 | #define VLV_GFX_CLK_STATUS_BIT (1<<3) | 5024 | #define VLV_GFX_CLK_STATUS_BIT (1<<3) |
5024 | #define VLV_GFX_CLK_FORCE_ON_BIT (1<<2) | 5025 | #define VLV_GFX_CLK_FORCE_ON_BIT (1<<2) |
5025 | 5026 | ||
5026 | #define GEN6_GT_GFX_RC6_LOCKED 0x138104 | 5027 | #define GEN6_GT_GFX_RC6_LOCKED 0x138104 |
5027 | #define VLV_COUNTER_CONTROL 0x138104 | 5028 | #define VLV_COUNTER_CONTROL 0x138104 |
5028 | #define VLV_COUNT_RANGE_HIGH (1<<15) | 5029 | #define VLV_COUNT_RANGE_HIGH (1<<15) |
5029 | #define VLV_MEDIA_RC6_COUNT_EN (1<<1) | 5030 | #define VLV_MEDIA_RC6_COUNT_EN (1<<1) |
5030 | #define VLV_RENDER_RC6_COUNT_EN (1<<0) | 5031 | #define VLV_RENDER_RC6_COUNT_EN (1<<0) |
5031 | #define GEN6_GT_GFX_RC6 0x138108 | 5032 | #define GEN6_GT_GFX_RC6 0x138108 |
5032 | #define GEN6_GT_GFX_RC6p 0x13810C | 5033 | #define GEN6_GT_GFX_RC6p 0x13810C |
5033 | #define GEN6_GT_GFX_RC6pp 0x138110 | 5034 | #define GEN6_GT_GFX_RC6pp 0x138110 |
5034 | 5035 | ||
5035 | #define GEN6_PCODE_MAILBOX 0x138124 | 5036 | #define GEN6_PCODE_MAILBOX 0x138124 |
5036 | #define GEN6_PCODE_READY (1<<31) | 5037 | #define GEN6_PCODE_READY (1<<31) |
5037 | #define GEN6_READ_OC_PARAMS 0xc | 5038 | #define GEN6_READ_OC_PARAMS 0xc |
5038 | #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8 | 5039 | #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8 |
5039 | #define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 | 5040 | #define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 |
5040 | #define GEN6_PCODE_WRITE_RC6VIDS 0x4 | 5041 | #define GEN6_PCODE_WRITE_RC6VIDS 0x4 |
5041 | #define GEN6_PCODE_READ_RC6VIDS 0x5 | 5042 | #define GEN6_PCODE_READ_RC6VIDS 0x5 |
5042 | #define GEN6_PCODE_READ_D_COMP 0x10 | 5043 | #define GEN6_PCODE_READ_D_COMP 0x10 |
5043 | #define GEN6_PCODE_WRITE_D_COMP 0x11 | 5044 | #define GEN6_PCODE_WRITE_D_COMP 0x11 |
5044 | #define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) | 5045 | #define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) |
5045 | #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) | 5046 | #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) |
5046 | #define DISPLAY_IPS_CONTROL 0x19 | 5047 | #define DISPLAY_IPS_CONTROL 0x19 |
5047 | #define GEN6_PCODE_DATA 0x138128 | 5048 | #define GEN6_PCODE_DATA 0x138128 |
5048 | #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 | 5049 | #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 |
5049 | #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 | 5050 | #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 |
5050 | 5051 | ||
5051 | #define GEN6_GT_CORE_STATUS 0x138060 | 5052 | #define GEN6_GT_CORE_STATUS 0x138060 |
5052 | #define GEN6_CORE_CPD_STATE_MASK (7<<4) | 5053 | #define GEN6_CORE_CPD_STATE_MASK (7<<4) |
5053 | #define GEN6_RCn_MASK 7 | 5054 | #define GEN6_RCn_MASK 7 |
5054 | #define GEN6_RC0 0 | 5055 | #define GEN6_RC0 0 |
5055 | #define GEN6_RC3 2 | 5056 | #define GEN6_RC3 2 |
5056 | #define GEN6_RC6 3 | 5057 | #define GEN6_RC6 3 |
5057 | #define GEN6_RC7 4 | 5058 | #define GEN6_RC7 4 |
5058 | 5059 | ||
5059 | #define GEN7_MISCCPCTL (0x9424) | 5060 | #define GEN7_MISCCPCTL (0x9424) |
5060 | #define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0) | 5061 | #define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0) |
5061 | 5062 | ||
5062 | /* IVYBRIDGE DPF */ | 5063 | /* IVYBRIDGE DPF */ |
5063 | #define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */ | 5064 | #define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */ |
5064 | #define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */ | 5065 | #define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */ |
5065 | #define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14) | 5066 | #define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14) |
5066 | #define GEN7_PARITY_ERROR_VALID (1<<13) | 5067 | #define GEN7_PARITY_ERROR_VALID (1<<13) |
5067 | #define GEN7_L3CDERRST1_BANK_MASK (3<<11) | 5068 | #define GEN7_L3CDERRST1_BANK_MASK (3<<11) |
5068 | #define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8) | 5069 | #define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8) |
5069 | #define GEN7_PARITY_ERROR_ROW(reg) \ | 5070 | #define GEN7_PARITY_ERROR_ROW(reg) \ |
5070 | ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14) | 5071 | ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14) |
5071 | #define GEN7_PARITY_ERROR_BANK(reg) \ | 5072 | #define GEN7_PARITY_ERROR_BANK(reg) \ |
5072 | ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11) | 5073 | ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11) |
5073 | #define GEN7_PARITY_ERROR_SUBBANK(reg) \ | 5074 | #define GEN7_PARITY_ERROR_SUBBANK(reg) \ |
5074 | ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8) | 5075 | ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8) |
5075 | #define GEN7_L3CDERRST1_ENABLE (1<<7) | 5076 | #define GEN7_L3CDERRST1_ENABLE (1<<7) |
5076 | 5077 | ||
5077 | #define GEN7_L3LOG_BASE 0xB070 | 5078 | #define GEN7_L3LOG_BASE 0xB070 |
5078 | #define HSW_L3LOG_BASE_SLICE1 0xB270 | 5079 | #define HSW_L3LOG_BASE_SLICE1 0xB270 |
5079 | #define GEN7_L3LOG_SIZE 0x80 | 5080 | #define GEN7_L3LOG_SIZE 0x80 |
5080 | 5081 | ||
5081 | #define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ | 5082 | #define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ |
5082 | #define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100 | 5083 | #define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100 |
5083 | #define GEN7_MAX_PS_THREAD_DEP (8<<12) | 5084 | #define GEN7_MAX_PS_THREAD_DEP (8<<12) |
5084 | #define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10) | 5085 | #define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10) |
5085 | #define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) | 5086 | #define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) |
5086 | 5087 | ||
5087 | #define GEN8_ROW_CHICKEN 0xe4f0 | 5088 | #define GEN8_ROW_CHICKEN 0xe4f0 |
5088 | #define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8) | 5089 | #define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8) |
5089 | #define STALL_DOP_GATING_DISABLE (1<<5) | 5090 | #define STALL_DOP_GATING_DISABLE (1<<5) |
5090 | 5091 | ||
5091 | #define GEN7_ROW_CHICKEN2 0xe4f4 | 5092 | #define GEN7_ROW_CHICKEN2 0xe4f4 |
5092 | #define GEN7_ROW_CHICKEN2_GT2 0xf4f4 | 5093 | #define GEN7_ROW_CHICKEN2_GT2 0xf4f4 |
5093 | #define DOP_CLOCK_GATING_DISABLE (1<<0) | 5094 | #define DOP_CLOCK_GATING_DISABLE (1<<0) |
5094 | 5095 | ||
5095 | #define HSW_ROW_CHICKEN3 0xe49c | 5096 | #define HSW_ROW_CHICKEN3 0xe49c |
5096 | #define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) | 5097 | #define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) |
5097 | 5098 | ||
5098 | #define HALF_SLICE_CHICKEN3 0xe184 | 5099 | #define HALF_SLICE_CHICKEN3 0xe184 |
5099 | #define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) | 5100 | #define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) |
5100 | #define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) | 5101 | #define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) |
5101 | 5102 | ||
5102 | #define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020) | 5103 | #define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020) |
5103 | #define INTEL_AUDIO_DEVCL 0x808629FB | 5104 | #define INTEL_AUDIO_DEVCL 0x808629FB |
5104 | #define INTEL_AUDIO_DEVBLC 0x80862801 | 5105 | #define INTEL_AUDIO_DEVBLC 0x80862801 |
5105 | #define INTEL_AUDIO_DEVCTG 0x80862802 | 5106 | #define INTEL_AUDIO_DEVCTG 0x80862802 |
5106 | 5107 | ||
5107 | #define G4X_AUD_CNTL_ST 0x620B4 | 5108 | #define G4X_AUD_CNTL_ST 0x620B4 |
5108 | #define G4X_ELDV_DEVCL_DEVBLC (1 << 13) | 5109 | #define G4X_ELDV_DEVCL_DEVBLC (1 << 13) |
5109 | #define G4X_ELDV_DEVCTG (1 << 14) | 5110 | #define G4X_ELDV_DEVCTG (1 << 14) |
5110 | #define G4X_ELD_ADDR (0xf << 5) | 5111 | #define G4X_ELD_ADDR (0xf << 5) |
5111 | #define G4X_ELD_ACK (1 << 4) | 5112 | #define G4X_ELD_ACK (1 << 4) |
5112 | #define G4X_HDMIW_HDMIEDID 0x6210C | 5113 | #define G4X_HDMIW_HDMIEDID 0x6210C |
5113 | 5114 | ||
5114 | #define IBX_HDMIW_HDMIEDID_A 0xE2050 | 5115 | #define IBX_HDMIW_HDMIEDID_A 0xE2050 |
5115 | #define IBX_HDMIW_HDMIEDID_B 0xE2150 | 5116 | #define IBX_HDMIW_HDMIEDID_B 0xE2150 |
5116 | #define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ | 5117 | #define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ |
5117 | IBX_HDMIW_HDMIEDID_A, \ | 5118 | IBX_HDMIW_HDMIEDID_A, \ |
5118 | IBX_HDMIW_HDMIEDID_B) | 5119 | IBX_HDMIW_HDMIEDID_B) |
5119 | #define IBX_AUD_CNTL_ST_A 0xE20B4 | 5120 | #define IBX_AUD_CNTL_ST_A 0xE20B4 |
5120 | #define IBX_AUD_CNTL_ST_B 0xE21B4 | 5121 | #define IBX_AUD_CNTL_ST_B 0xE21B4 |
5121 | #define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \ | 5122 | #define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \ |
5122 | IBX_AUD_CNTL_ST_A, \ | 5123 | IBX_AUD_CNTL_ST_A, \ |
5123 | IBX_AUD_CNTL_ST_B) | 5124 | IBX_AUD_CNTL_ST_B) |
5124 | #define IBX_ELD_BUFFER_SIZE (0x1f << 10) | 5125 | #define IBX_ELD_BUFFER_SIZE (0x1f << 10) |
5125 | #define IBX_ELD_ADDRESS (0x1f << 5) | 5126 | #define IBX_ELD_ADDRESS (0x1f << 5) |
5126 | #define IBX_ELD_ACK (1 << 4) | 5127 | #define IBX_ELD_ACK (1 << 4) |
5127 | #define IBX_AUD_CNTL_ST2 0xE20C0 | 5128 | #define IBX_AUD_CNTL_ST2 0xE20C0 |
5128 | #define IBX_ELD_VALIDB (1 << 0) | 5129 | #define IBX_ELD_VALIDB (1 << 0) |
5129 | #define IBX_CP_READYB (1 << 1) | 5130 | #define IBX_CP_READYB (1 << 1) |
5130 | 5131 | ||
5131 | #define CPT_HDMIW_HDMIEDID_A 0xE5050 | 5132 | #define CPT_HDMIW_HDMIEDID_A 0xE5050 |
5132 | #define CPT_HDMIW_HDMIEDID_B 0xE5150 | 5133 | #define CPT_HDMIW_HDMIEDID_B 0xE5150 |
5133 | #define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ | 5134 | #define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ |
5134 | CPT_HDMIW_HDMIEDID_A, \ | 5135 | CPT_HDMIW_HDMIEDID_A, \ |
5135 | CPT_HDMIW_HDMIEDID_B) | 5136 | CPT_HDMIW_HDMIEDID_B) |
5136 | #define CPT_AUD_CNTL_ST_A 0xE50B4 | 5137 | #define CPT_AUD_CNTL_ST_A 0xE50B4 |
5137 | #define CPT_AUD_CNTL_ST_B 0xE51B4 | 5138 | #define CPT_AUD_CNTL_ST_B 0xE51B4 |
5138 | #define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \ | 5139 | #define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \ |
5139 | CPT_AUD_CNTL_ST_A, \ | 5140 | CPT_AUD_CNTL_ST_A, \ |
5140 | CPT_AUD_CNTL_ST_B) | 5141 | CPT_AUD_CNTL_ST_B) |
5141 | #define CPT_AUD_CNTRL_ST2 0xE50C0 | 5142 | #define CPT_AUD_CNTRL_ST2 0xE50C0 |
5142 | 5143 | ||
5143 | #define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050) | 5144 | #define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050) |
5144 | #define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150) | 5145 | #define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150) |
5145 | #define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ | 5146 | #define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ |
5146 | VLV_HDMIW_HDMIEDID_A, \ | 5147 | VLV_HDMIW_HDMIEDID_A, \ |
5147 | VLV_HDMIW_HDMIEDID_B) | 5148 | VLV_HDMIW_HDMIEDID_B) |
5148 | #define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4) | 5149 | #define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4) |
5149 | #define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4) | 5150 | #define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4) |
5150 | #define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \ | 5151 | #define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \ |
5151 | VLV_AUD_CNTL_ST_A, \ | 5152 | VLV_AUD_CNTL_ST_A, \ |
5152 | VLV_AUD_CNTL_ST_B) | 5153 | VLV_AUD_CNTL_ST_B) |
5153 | #define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0) | 5154 | #define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0) |
5154 | 5155 | ||
5155 | /* These are the 4 32-bit write offset registers for each stream | 5156 | /* These are the 4 32-bit write offset registers for each stream |
5156 | * output buffer. It determines the offset from the | 5157 | * output buffer. It determines the offset from the |
5157 | * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to. | 5158 | * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to. |
5158 | */ | 5159 | */ |
5159 | #define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) | 5160 | #define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) |
5160 | 5161 | ||
5161 | #define IBX_AUD_CONFIG_A 0xe2000 | 5162 | #define IBX_AUD_CONFIG_A 0xe2000 |
5162 | #define IBX_AUD_CONFIG_B 0xe2100 | 5163 | #define IBX_AUD_CONFIG_B 0xe2100 |
5163 | #define IBX_AUD_CFG(pipe) _PIPE(pipe, \ | 5164 | #define IBX_AUD_CFG(pipe) _PIPE(pipe, \ |
5164 | IBX_AUD_CONFIG_A, \ | 5165 | IBX_AUD_CONFIG_A, \ |
5165 | IBX_AUD_CONFIG_B) | 5166 | IBX_AUD_CONFIG_B) |
5166 | #define CPT_AUD_CONFIG_A 0xe5000 | 5167 | #define CPT_AUD_CONFIG_A 0xe5000 |
5167 | #define CPT_AUD_CONFIG_B 0xe5100 | 5168 | #define CPT_AUD_CONFIG_B 0xe5100 |
5168 | #define CPT_AUD_CFG(pipe) _PIPE(pipe, \ | 5169 | #define CPT_AUD_CFG(pipe) _PIPE(pipe, \ |
5169 | CPT_AUD_CONFIG_A, \ | 5170 | CPT_AUD_CONFIG_A, \ |
5170 | CPT_AUD_CONFIG_B) | 5171 | CPT_AUD_CONFIG_B) |
5171 | #define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000) | 5172 | #define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000) |
5172 | #define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100) | 5173 | #define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100) |
5173 | #define VLV_AUD_CFG(pipe) _PIPE(pipe, \ | 5174 | #define VLV_AUD_CFG(pipe) _PIPE(pipe, \ |
5174 | VLV_AUD_CONFIG_A, \ | 5175 | VLV_AUD_CONFIG_A, \ |
5175 | VLV_AUD_CONFIG_B) | 5176 | VLV_AUD_CONFIG_B) |
5176 | 5177 | ||
5177 | #define AUD_CONFIG_N_VALUE_INDEX (1 << 29) | 5178 | #define AUD_CONFIG_N_VALUE_INDEX (1 << 29) |
5178 | #define AUD_CONFIG_N_PROG_ENABLE (1 << 28) | 5179 | #define AUD_CONFIG_N_PROG_ENABLE (1 << 28) |
5179 | #define AUD_CONFIG_UPPER_N_SHIFT 20 | 5180 | #define AUD_CONFIG_UPPER_N_SHIFT 20 |
5180 | #define AUD_CONFIG_UPPER_N_VALUE (0xff << 20) | 5181 | #define AUD_CONFIG_UPPER_N_VALUE (0xff << 20) |
5181 | #define AUD_CONFIG_LOWER_N_SHIFT 4 | 5182 | #define AUD_CONFIG_LOWER_N_SHIFT 4 |
5182 | #define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4) | 5183 | #define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4) |
5183 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16 | 5184 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16 |
5184 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16) | 5185 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16) |
5185 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16) | 5186 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16) |
5186 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16) | 5187 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16) |
5187 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16) | 5188 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16) |
5188 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16) | 5189 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16) |
5189 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16) | 5190 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16) |
5190 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16) | 5191 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16) |
5191 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16) | 5192 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16) |
5192 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16) | 5193 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16) |
5193 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16) | 5194 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16) |
5194 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16) | 5195 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16) |
5195 | #define AUD_CONFIG_DISABLE_NCTS (1 << 3) | 5196 | #define AUD_CONFIG_DISABLE_NCTS (1 << 3) |
5196 | 5197 | ||
5197 | /* HSW Audio */ | 5198 | /* HSW Audio */ |
5198 | #define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */ | 5199 | #define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */ |
5199 | #define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */ | 5200 | #define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */ |
5200 | #define HSW_AUD_CFG(pipe) _PIPE(pipe, \ | 5201 | #define HSW_AUD_CFG(pipe) _PIPE(pipe, \ |
5201 | HSW_AUD_CONFIG_A, \ | 5202 | HSW_AUD_CONFIG_A, \ |
5202 | HSW_AUD_CONFIG_B) | 5203 | HSW_AUD_CONFIG_B) |
5203 | 5204 | ||
5204 | #define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */ | 5205 | #define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */ |
5205 | #define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */ | 5206 | #define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */ |
5206 | #define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \ | 5207 | #define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \ |
5207 | HSW_AUD_MISC_CTRL_A, \ | 5208 | HSW_AUD_MISC_CTRL_A, \ |
5208 | HSW_AUD_MISC_CTRL_B) | 5209 | HSW_AUD_MISC_CTRL_B) |
5209 | 5210 | ||
5210 | #define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */ | 5211 | #define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */ |
5211 | #define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */ | 5212 | #define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */ |
5212 | #define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \ | 5213 | #define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \ |
5213 | HSW_AUD_DIP_ELD_CTRL_ST_A, \ | 5214 | HSW_AUD_DIP_ELD_CTRL_ST_A, \ |
5214 | HSW_AUD_DIP_ELD_CTRL_ST_B) | 5215 | HSW_AUD_DIP_ELD_CTRL_ST_B) |
5215 | 5216 | ||
5216 | /* Audio Digital Converter */ | 5217 | /* Audio Digital Converter */ |
5217 | #define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */ | 5218 | #define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */ |
5218 | #define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 1 */ | 5219 | #define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 1 */ |
5219 | #define AUD_DIG_CNVT(pipe) _PIPE(pipe, \ | 5220 | #define AUD_DIG_CNVT(pipe) _PIPE(pipe, \ |
5220 | HSW_AUD_DIG_CNVT_1, \ | 5221 | HSW_AUD_DIG_CNVT_1, \ |
5221 | HSW_AUD_DIG_CNVT_2) | 5222 | HSW_AUD_DIG_CNVT_2) |
5222 | #define DIP_PORT_SEL_MASK 0x3 | 5223 | #define DIP_PORT_SEL_MASK 0x3 |
5223 | 5224 | ||
5224 | #define HSW_AUD_EDID_DATA_A 0x65050 | 5225 | #define HSW_AUD_EDID_DATA_A 0x65050 |
5225 | #define HSW_AUD_EDID_DATA_B 0x65150 | 5226 | #define HSW_AUD_EDID_DATA_B 0x65150 |
5226 | #define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \ | 5227 | #define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \ |
5227 | HSW_AUD_EDID_DATA_A, \ | 5228 | HSW_AUD_EDID_DATA_A, \ |
5228 | HSW_AUD_EDID_DATA_B) | 5229 | HSW_AUD_EDID_DATA_B) |
5229 | 5230 | ||
5230 | #define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */ | 5231 | #define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */ |
5231 | #define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */ | 5232 | #define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */ |
5232 | #define AUDIO_INACTIVE_C (1<<11) | 5233 | #define AUDIO_INACTIVE_C (1<<11) |
5233 | #define AUDIO_INACTIVE_B (1<<7) | 5234 | #define AUDIO_INACTIVE_B (1<<7) |
5234 | #define AUDIO_INACTIVE_A (1<<3) | 5235 | #define AUDIO_INACTIVE_A (1<<3) |
5235 | #define AUDIO_OUTPUT_ENABLE_A (1<<2) | 5236 | #define AUDIO_OUTPUT_ENABLE_A (1<<2) |
5236 | #define AUDIO_OUTPUT_ENABLE_B (1<<6) | 5237 | #define AUDIO_OUTPUT_ENABLE_B (1<<6) |
5237 | #define AUDIO_OUTPUT_ENABLE_C (1<<10) | 5238 | #define AUDIO_OUTPUT_ENABLE_C (1<<10) |
5238 | #define AUDIO_ELD_VALID_A (1<<0) | 5239 | #define AUDIO_ELD_VALID_A (1<<0) |
5239 | #define AUDIO_ELD_VALID_B (1<<4) | 5240 | #define AUDIO_ELD_VALID_B (1<<4) |
5240 | #define AUDIO_ELD_VALID_C (1<<8) | 5241 | #define AUDIO_ELD_VALID_C (1<<8) |
5241 | #define AUDIO_CP_READY_A (1<<1) | 5242 | #define AUDIO_CP_READY_A (1<<1) |
5242 | #define AUDIO_CP_READY_B (1<<5) | 5243 | #define AUDIO_CP_READY_B (1<<5) |
5243 | #define AUDIO_CP_READY_C (1<<9) | 5244 | #define AUDIO_CP_READY_C (1<<9) |
5244 | 5245 | ||
5245 | /* HSW Power Wells */ | 5246 | /* HSW Power Wells */ |
5246 | #define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */ | 5247 | #define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */ |
5247 | #define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */ | 5248 | #define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */ |
5248 | #define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */ | 5249 | #define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */ |
5249 | #define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */ | 5250 | #define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */ |
5250 | #define HSW_PWR_WELL_ENABLE_REQUEST (1<<31) | 5251 | #define HSW_PWR_WELL_ENABLE_REQUEST (1<<31) |
5251 | #define HSW_PWR_WELL_STATE_ENABLED (1<<30) | 5252 | #define HSW_PWR_WELL_STATE_ENABLED (1<<30) |
5252 | #define HSW_PWR_WELL_CTL5 0x45410 | 5253 | #define HSW_PWR_WELL_CTL5 0x45410 |
5253 | #define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) | 5254 | #define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) |
5254 | #define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) | 5255 | #define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) |
5255 | #define HSW_PWR_WELL_FORCE_ON (1<<19) | 5256 | #define HSW_PWR_WELL_FORCE_ON (1<<19) |
5256 | #define HSW_PWR_WELL_CTL6 0x45414 | 5257 | #define HSW_PWR_WELL_CTL6 0x45414 |
5257 | 5258 | ||
5258 | /* Per-pipe DDI Function Control */ | 5259 | /* Per-pipe DDI Function Control */ |
5259 | #define TRANS_DDI_FUNC_CTL_A 0x60400 | 5260 | #define TRANS_DDI_FUNC_CTL_A 0x60400 |
5260 | #define TRANS_DDI_FUNC_CTL_B 0x61400 | 5261 | #define TRANS_DDI_FUNC_CTL_B 0x61400 |
5261 | #define TRANS_DDI_FUNC_CTL_C 0x62400 | 5262 | #define TRANS_DDI_FUNC_CTL_C 0x62400 |
5262 | #define TRANS_DDI_FUNC_CTL_EDP 0x6F400 | 5263 | #define TRANS_DDI_FUNC_CTL_EDP 0x6F400 |
5263 | #define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A) | 5264 | #define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A) |
5264 | 5265 | ||
5265 | #define TRANS_DDI_FUNC_ENABLE (1<<31) | 5266 | #define TRANS_DDI_FUNC_ENABLE (1<<31) |
5266 | /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ | 5267 | /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ |
5267 | #define TRANS_DDI_PORT_MASK (7<<28) | 5268 | #define TRANS_DDI_PORT_MASK (7<<28) |
5268 | #define TRANS_DDI_SELECT_PORT(x) ((x)<<28) | 5269 | #define TRANS_DDI_SELECT_PORT(x) ((x)<<28) |
5269 | #define TRANS_DDI_PORT_NONE (0<<28) | 5270 | #define TRANS_DDI_PORT_NONE (0<<28) |
5270 | #define TRANS_DDI_MODE_SELECT_MASK (7<<24) | 5271 | #define TRANS_DDI_MODE_SELECT_MASK (7<<24) |
5271 | #define TRANS_DDI_MODE_SELECT_HDMI (0<<24) | 5272 | #define TRANS_DDI_MODE_SELECT_HDMI (0<<24) |
5272 | #define TRANS_DDI_MODE_SELECT_DVI (1<<24) | 5273 | #define TRANS_DDI_MODE_SELECT_DVI (1<<24) |
5273 | #define TRANS_DDI_MODE_SELECT_DP_SST (2<<24) | 5274 | #define TRANS_DDI_MODE_SELECT_DP_SST (2<<24) |
5274 | #define TRANS_DDI_MODE_SELECT_DP_MST (3<<24) | 5275 | #define TRANS_DDI_MODE_SELECT_DP_MST (3<<24) |
5275 | #define TRANS_DDI_MODE_SELECT_FDI (4<<24) | 5276 | #define TRANS_DDI_MODE_SELECT_FDI (4<<24) |
5276 | #define TRANS_DDI_BPC_MASK (7<<20) | 5277 | #define TRANS_DDI_BPC_MASK (7<<20) |
5277 | #define TRANS_DDI_BPC_8 (0<<20) | 5278 | #define TRANS_DDI_BPC_8 (0<<20) |
5278 | #define TRANS_DDI_BPC_10 (1<<20) | 5279 | #define TRANS_DDI_BPC_10 (1<<20) |
5279 | #define TRANS_DDI_BPC_6 (2<<20) | 5280 | #define TRANS_DDI_BPC_6 (2<<20) |
5280 | #define TRANS_DDI_BPC_12 (3<<20) | 5281 | #define TRANS_DDI_BPC_12 (3<<20) |
5281 | #define TRANS_DDI_PVSYNC (1<<17) | 5282 | #define TRANS_DDI_PVSYNC (1<<17) |
5282 | #define TRANS_DDI_PHSYNC (1<<16) | 5283 | #define TRANS_DDI_PHSYNC (1<<16) |
5283 | #define TRANS_DDI_EDP_INPUT_MASK (7<<12) | 5284 | #define TRANS_DDI_EDP_INPUT_MASK (7<<12) |
5284 | #define TRANS_DDI_EDP_INPUT_A_ON (0<<12) | 5285 | #define TRANS_DDI_EDP_INPUT_A_ON (0<<12) |
5285 | #define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12) | 5286 | #define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12) |
5286 | #define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) | 5287 | #define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) |
5287 | #define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) | 5288 | #define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) |
5288 | #define TRANS_DDI_BFI_ENABLE (1<<4) | 5289 | #define TRANS_DDI_BFI_ENABLE (1<<4) |
5289 | 5290 | ||
5290 | /* DisplayPort Transport Control */ | 5291 | /* DisplayPort Transport Control */ |
5291 | #define DP_TP_CTL_A 0x64040 | 5292 | #define DP_TP_CTL_A 0x64040 |
5292 | #define DP_TP_CTL_B 0x64140 | 5293 | #define DP_TP_CTL_B 0x64140 |
5293 | #define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B) | 5294 | #define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B) |
5294 | #define DP_TP_CTL_ENABLE (1<<31) | 5295 | #define DP_TP_CTL_ENABLE (1<<31) |
5295 | #define DP_TP_CTL_MODE_SST (0<<27) | 5296 | #define DP_TP_CTL_MODE_SST (0<<27) |
5296 | #define DP_TP_CTL_MODE_MST (1<<27) | 5297 | #define DP_TP_CTL_MODE_MST (1<<27) |
5297 | #define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18) | 5298 | #define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18) |
5298 | #define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) | 5299 | #define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) |
5299 | #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) | 5300 | #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) |
5300 | #define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) | 5301 | #define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) |
5301 | #define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) | 5302 | #define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) |
5302 | #define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8) | 5303 | #define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8) |
5303 | #define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8) | 5304 | #define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8) |
5304 | #define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) | 5305 | #define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) |
5305 | #define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7) | 5306 | #define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7) |
5306 | 5307 | ||
5307 | /* DisplayPort Transport Status */ | 5308 | /* DisplayPort Transport Status */ |
5308 | #define DP_TP_STATUS_A 0x64044 | 5309 | #define DP_TP_STATUS_A 0x64044 |
5309 | #define DP_TP_STATUS_B 0x64144 | 5310 | #define DP_TP_STATUS_B 0x64144 |
5310 | #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) | 5311 | #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) |
5311 | #define DP_TP_STATUS_IDLE_DONE (1<<25) | 5312 | #define DP_TP_STATUS_IDLE_DONE (1<<25) |
5312 | #define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) | 5313 | #define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) |
5313 | 5314 | ||
5314 | /* DDI Buffer Control */ | 5315 | /* DDI Buffer Control */ |
5315 | #define DDI_BUF_CTL_A 0x64000 | 5316 | #define DDI_BUF_CTL_A 0x64000 |
5316 | #define DDI_BUF_CTL_B 0x64100 | 5317 | #define DDI_BUF_CTL_B 0x64100 |
5317 | #define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) | 5318 | #define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) |
5318 | #define DDI_BUF_CTL_ENABLE (1<<31) | 5319 | #define DDI_BUF_CTL_ENABLE (1<<31) |
5319 | /* Haswell */ | 5320 | /* Haswell */ |
5320 | #define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ | 5321 | #define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ |
5321 | #define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ | 5322 | #define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ |
5322 | #define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ | 5323 | #define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ |
5323 | #define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */ | 5324 | #define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */ |
5324 | #define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */ | 5325 | #define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */ |
5325 | #define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */ | 5326 | #define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */ |
5326 | #define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ | 5327 | #define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ |
5327 | #define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ | 5328 | #define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ |
5328 | #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ | 5329 | #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ |
5329 | /* Broadwell */ | 5330 | /* Broadwell */ |
5330 | #define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */ | 5331 | #define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */ |
5331 | #define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */ | 5332 | #define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */ |
5332 | #define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */ | 5333 | #define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */ |
5333 | #define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */ | 5334 | #define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */ |
5334 | #define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */ | 5335 | #define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */ |
5335 | #define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */ | 5336 | #define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */ |
5336 | #define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */ | 5337 | #define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */ |
5337 | #define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */ | 5338 | #define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */ |
5338 | #define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */ | 5339 | #define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */ |
5339 | #define DDI_BUF_EMP_MASK (0xf<<24) | 5340 | #define DDI_BUF_EMP_MASK (0xf<<24) |
5340 | #define DDI_BUF_PORT_REVERSAL (1<<16) | 5341 | #define DDI_BUF_PORT_REVERSAL (1<<16) |
5341 | #define DDI_BUF_IS_IDLE (1<<7) | 5342 | #define DDI_BUF_IS_IDLE (1<<7) |
5342 | #define DDI_A_4_LANES (1<<4) | 5343 | #define DDI_A_4_LANES (1<<4) |
5343 | #define DDI_PORT_WIDTH(width) (((width) - 1) << 1) | 5344 | #define DDI_PORT_WIDTH(width) (((width) - 1) << 1) |
5344 | #define DDI_INIT_DISPLAY_DETECTED (1<<0) | 5345 | #define DDI_INIT_DISPLAY_DETECTED (1<<0) |
5345 | 5346 | ||
5346 | /* DDI Buffer Translations */ | 5347 | /* DDI Buffer Translations */ |
5347 | #define DDI_BUF_TRANS_A 0x64E00 | 5348 | #define DDI_BUF_TRANS_A 0x64E00 |
5348 | #define DDI_BUF_TRANS_B 0x64E60 | 5349 | #define DDI_BUF_TRANS_B 0x64E60 |
5349 | #define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) | 5350 | #define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) |
5350 | 5351 | ||
5351 | /* Sideband Interface (SBI) is programmed indirectly, via | 5352 | /* Sideband Interface (SBI) is programmed indirectly, via |
5352 | * SBI_ADDR, which contains the register offset; and SBI_DATA, | 5353 | * SBI_ADDR, which contains the register offset; and SBI_DATA, |
5353 | * which contains the payload */ | 5354 | * which contains the payload */ |
5354 | #define SBI_ADDR 0xC6000 | 5355 | #define SBI_ADDR 0xC6000 |
5355 | #define SBI_DATA 0xC6004 | 5356 | #define SBI_DATA 0xC6004 |
5356 | #define SBI_CTL_STAT 0xC6008 | 5357 | #define SBI_CTL_STAT 0xC6008 |
5357 | #define SBI_CTL_DEST_ICLK (0x0<<16) | 5358 | #define SBI_CTL_DEST_ICLK (0x0<<16) |
5358 | #define SBI_CTL_DEST_MPHY (0x1<<16) | 5359 | #define SBI_CTL_DEST_MPHY (0x1<<16) |
5359 | #define SBI_CTL_OP_IORD (0x2<<8) | 5360 | #define SBI_CTL_OP_IORD (0x2<<8) |
5360 | #define SBI_CTL_OP_IOWR (0x3<<8) | 5361 | #define SBI_CTL_OP_IOWR (0x3<<8) |
5361 | #define SBI_CTL_OP_CRRD (0x6<<8) | 5362 | #define SBI_CTL_OP_CRRD (0x6<<8) |
5362 | #define SBI_CTL_OP_CRWR (0x7<<8) | 5363 | #define SBI_CTL_OP_CRWR (0x7<<8) |
5363 | #define SBI_RESPONSE_FAIL (0x1<<1) | 5364 | #define SBI_RESPONSE_FAIL (0x1<<1) |
5364 | #define SBI_RESPONSE_SUCCESS (0x0<<1) | 5365 | #define SBI_RESPONSE_SUCCESS (0x0<<1) |
5365 | #define SBI_BUSY (0x1<<0) | 5366 | #define SBI_BUSY (0x1<<0) |
5366 | #define SBI_READY (0x0<<0) | 5367 | #define SBI_READY (0x0<<0) |
5367 | 5368 | ||
5368 | /* SBI offsets */ | 5369 | /* SBI offsets */ |
5369 | #define SBI_SSCDIVINTPHASE6 0x0600 | 5370 | #define SBI_SSCDIVINTPHASE6 0x0600 |
5370 | #define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1) | 5371 | #define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1) |
5371 | #define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) | 5372 | #define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) |
5372 | #define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8) | 5373 | #define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8) |
5373 | #define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) | 5374 | #define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) |
5374 | #define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) | 5375 | #define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) |
5375 | #define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) | 5376 | #define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) |
5376 | #define SBI_SSCCTL 0x020c | 5377 | #define SBI_SSCCTL 0x020c |
5377 | #define SBI_SSCCTL6 0x060C | 5378 | #define SBI_SSCCTL6 0x060C |
5378 | #define SBI_SSCCTL_PATHALT (1<<3) | 5379 | #define SBI_SSCCTL_PATHALT (1<<3) |
5379 | #define SBI_SSCCTL_DISABLE (1<<0) | 5380 | #define SBI_SSCCTL_DISABLE (1<<0) |
5380 | #define SBI_SSCAUXDIV6 0x0610 | 5381 | #define SBI_SSCAUXDIV6 0x0610 |
5381 | #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) | 5382 | #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) |
5382 | #define SBI_DBUFF0 0x2a00 | 5383 | #define SBI_DBUFF0 0x2a00 |
5383 | #define SBI_GEN0 0x1f00 | 5384 | #define SBI_GEN0 0x1f00 |
5384 | #define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0) | 5385 | #define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0) |
5385 | 5386 | ||
5386 | /* LPT PIXCLK_GATE */ | 5387 | /* LPT PIXCLK_GATE */ |
5387 | #define PIXCLK_GATE 0xC6020 | 5388 | #define PIXCLK_GATE 0xC6020 |
5388 | #define PIXCLK_GATE_UNGATE (1<<0) | 5389 | #define PIXCLK_GATE_UNGATE (1<<0) |
5389 | #define PIXCLK_GATE_GATE (0<<0) | 5390 | #define PIXCLK_GATE_GATE (0<<0) |
5390 | 5391 | ||
5391 | /* SPLL */ | 5392 | /* SPLL */ |
5392 | #define SPLL_CTL 0x46020 | 5393 | #define SPLL_CTL 0x46020 |
5393 | #define SPLL_PLL_ENABLE (1<<31) | 5394 | #define SPLL_PLL_ENABLE (1<<31) |
5394 | #define SPLL_PLL_SSC (1<<28) | 5395 | #define SPLL_PLL_SSC (1<<28) |
5395 | #define SPLL_PLL_NON_SSC (2<<28) | 5396 | #define SPLL_PLL_NON_SSC (2<<28) |
5396 | #define SPLL_PLL_LCPLL (3<<28) | 5397 | #define SPLL_PLL_LCPLL (3<<28) |
5397 | #define SPLL_PLL_REF_MASK (3<<28) | 5398 | #define SPLL_PLL_REF_MASK (3<<28) |
5398 | #define SPLL_PLL_FREQ_810MHz (0<<26) | 5399 | #define SPLL_PLL_FREQ_810MHz (0<<26) |
5399 | #define SPLL_PLL_FREQ_1350MHz (1<<26) | 5400 | #define SPLL_PLL_FREQ_1350MHz (1<<26) |
5400 | #define SPLL_PLL_FREQ_2700MHz (2<<26) | 5401 | #define SPLL_PLL_FREQ_2700MHz (2<<26) |
5401 | #define SPLL_PLL_FREQ_MASK (3<<26) | 5402 | #define SPLL_PLL_FREQ_MASK (3<<26) |
5402 | 5403 | ||
5403 | /* WRPLL */ | 5404 | /* WRPLL */ |
5404 | #define WRPLL_CTL1 0x46040 | 5405 | #define WRPLL_CTL1 0x46040 |
5405 | #define WRPLL_CTL2 0x46060 | 5406 | #define WRPLL_CTL2 0x46060 |
5406 | #define WRPLL_PLL_ENABLE (1<<31) | 5407 | #define WRPLL_PLL_ENABLE (1<<31) |
5407 | #define WRPLL_PLL_SELECT_SSC (0x01<<28) | 5408 | #define WRPLL_PLL_SELECT_SSC (0x01<<28) |
5408 | #define WRPLL_PLL_SELECT_NON_SSC (0x02<<28) | 5409 | #define WRPLL_PLL_SELECT_NON_SSC (0x02<<28) |
5409 | #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) | 5410 | #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) |
5410 | /* WRPLL divider programming */ | 5411 | /* WRPLL divider programming */ |
5411 | #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) | 5412 | #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) |
5412 | #define WRPLL_DIVIDER_REF_MASK (0xff) | 5413 | #define WRPLL_DIVIDER_REF_MASK (0xff) |
5413 | #define WRPLL_DIVIDER_POST(x) ((x)<<8) | 5414 | #define WRPLL_DIVIDER_POST(x) ((x)<<8) |
5414 | #define WRPLL_DIVIDER_POST_MASK (0x3f<<8) | 5415 | #define WRPLL_DIVIDER_POST_MASK (0x3f<<8) |
5415 | #define WRPLL_DIVIDER_POST_SHIFT 8 | 5416 | #define WRPLL_DIVIDER_POST_SHIFT 8 |
5416 | #define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) | 5417 | #define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) |
5417 | #define WRPLL_DIVIDER_FB_SHIFT 16 | 5418 | #define WRPLL_DIVIDER_FB_SHIFT 16 |
5418 | #define WRPLL_DIVIDER_FB_MASK (0xff<<16) | 5419 | #define WRPLL_DIVIDER_FB_MASK (0xff<<16) |
5419 | 5420 | ||
5420 | /* Port clock selection */ | 5421 | /* Port clock selection */ |
5421 | #define PORT_CLK_SEL_A 0x46100 | 5422 | #define PORT_CLK_SEL_A 0x46100 |
5422 | #define PORT_CLK_SEL_B 0x46104 | 5423 | #define PORT_CLK_SEL_B 0x46104 |
5423 | #define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B) | 5424 | #define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B) |
5424 | #define PORT_CLK_SEL_LCPLL_2700 (0<<29) | 5425 | #define PORT_CLK_SEL_LCPLL_2700 (0<<29) |
5425 | #define PORT_CLK_SEL_LCPLL_1350 (1<<29) | 5426 | #define PORT_CLK_SEL_LCPLL_1350 (1<<29) |
5426 | #define PORT_CLK_SEL_LCPLL_810 (2<<29) | 5427 | #define PORT_CLK_SEL_LCPLL_810 (2<<29) |
5427 | #define PORT_CLK_SEL_SPLL (3<<29) | 5428 | #define PORT_CLK_SEL_SPLL (3<<29) |
5428 | #define PORT_CLK_SEL_WRPLL1 (4<<29) | 5429 | #define PORT_CLK_SEL_WRPLL1 (4<<29) |
5429 | #define PORT_CLK_SEL_WRPLL2 (5<<29) | 5430 | #define PORT_CLK_SEL_WRPLL2 (5<<29) |
5430 | #define PORT_CLK_SEL_NONE (7<<29) | 5431 | #define PORT_CLK_SEL_NONE (7<<29) |
5431 | #define PORT_CLK_SEL_MASK (7<<29) | 5432 | #define PORT_CLK_SEL_MASK (7<<29) |
5432 | 5433 | ||
5433 | /* Transcoder clock selection */ | 5434 | /* Transcoder clock selection */ |
5434 | #define TRANS_CLK_SEL_A 0x46140 | 5435 | #define TRANS_CLK_SEL_A 0x46140 |
5435 | #define TRANS_CLK_SEL_B 0x46144 | 5436 | #define TRANS_CLK_SEL_B 0x46144 |
5436 | #define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B) | 5437 | #define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B) |
5437 | /* For each transcoder, we need to select the corresponding port clock */ | 5438 | /* For each transcoder, we need to select the corresponding port clock */ |
5438 | #define TRANS_CLK_SEL_DISABLED (0x0<<29) | 5439 | #define TRANS_CLK_SEL_DISABLED (0x0<<29) |
5439 | #define TRANS_CLK_SEL_PORT(x) ((x+1)<<29) | 5440 | #define TRANS_CLK_SEL_PORT(x) ((x+1)<<29) |
5440 | 5441 | ||
/* DisplayPort Main Stream Attribute (MSA) MISC, one register per transcoder */
#define TRANSA_MSA_MISC			0x60410
#define TRANSB_MSA_MISC			0x61410
#define TRANSC_MSA_MISC			0x62410
#define TRANS_EDP_MSA_MISC		0x6f410
#define TRANS_MSA_MISC(tran)		_TRANSCODER2(tran, TRANSA_MSA_MISC)

#define  TRANS_MSA_SYNC_CLK		(1<<0)
/* colour depth encoding in bits 7:5 */
#define  TRANS_MSA_6_BPC		(0<<5)
#define  TRANS_MSA_8_BPC		(1<<5)
#define  TRANS_MSA_10_BPC		(2<<5)
#define  TRANS_MSA_12_BPC		(3<<5)
#define  TRANS_MSA_16_BPC		(4<<5)
5453 | 5454 | ||
/* LCPLL Control (HSW/BDW display PLL feeding CDCLK and DDI port clocks) */
#define LCPLL_CTL			0x130040
/* NOTE(review): bit-31 defines use (1<<31) per existing file convention; this
 * relies on implementation-defined signed-int behavior but matches all
 * surrounding register macros, so it is kept unchanged. */
#define  LCPLL_PLL_DISABLE		(1<<31)
#define  LCPLL_PLL_LOCK			(1<<30)
#define  LCPLL_CLK_FREQ_MASK		(3<<26)
#define  LCPLL_CLK_FREQ_450		(0<<26)
/* "54O" keeps a historical letter-O typo (presumably 540 MHz on BDW); the name
 * is part of the interface and must not be renamed here. */
#define  LCPLL_CLK_FREQ_54O_BDW		(1<<26)
#define  LCPLL_CLK_FREQ_337_5_BDW	(2<<26)
#define  LCPLL_CLK_FREQ_675_BDW		(3<<26)
#define  LCPLL_CD_CLOCK_DISABLE		(1<<25)
#define  LCPLL_CD2X_CLOCK_DISABLE	(1<<23)
#define  LCPLL_POWER_DOWN_ALLOW		(1<<22)
#define  LCPLL_CD_SOURCE_FCLK		(1<<21)
#define  LCPLL_CD_SOURCE_FCLK_DONE	(1<<19)
5468 | 5469 | ||
/* D_COMP: resistance compensation control, accessed via the MCHBAR mirror */
#define D_COMP				(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define  D_COMP_RCOMP_IN_PROGRESS	(1<<9)
#define  D_COMP_COMP_FORCE		(1<<8)
#define  D_COMP_COMP_DISABLE		(1<<0)
5473 | 5474 | ||
/* Pipe WM_LINETIME - watermark line time; linetime in bits 8:0, IPS linetime
 * in bits 24:16 */
#define PIPE_WM_LINETIME_A		0x45270
#define PIPE_WM_LINETIME_B		0x45274
#define PIPE_WM_LINETIME(pipe)		_PIPE(pipe, PIPE_WM_LINETIME_A, \
					      PIPE_WM_LINETIME_B)
#define  PIPE_WM_LINETIME_MASK			(0x1ff)
#define  PIPE_WM_LINETIME_TIME(x)		((x))
#define  PIPE_WM_LINETIME_IPS_LINETIME_MASK	(0x1ff<<16)
#define  PIPE_WM_LINETIME_IPS_LINETIME(x)	((x)<<16)
5483 | 5484 | ||
/* SFUSE_STRAP: fused-off capability straps (display presence, DDI detection) */
#define SFUSE_STRAP			0xc2014
#define  SFUSE_STRAP_FUSE_LOCK		(1<<13)
#define  SFUSE_STRAP_DISPLAY_DISABLED	(1<<7)
#define  SFUSE_STRAP_DDIB_DETECTED	(1<<2)
#define  SFUSE_STRAP_DDIC_DETECTED	(1<<1)
#define  SFUSE_STRAP_DDID_DETECTED	(1<<0)

/* watermark memory partitioning control */
#define WM_MISC				0x45260
#define  WM_MISC_DATA_PARTITION_5_6	(1 << 0)

/* watermark debug overrides */
#define WM_DBG				0x45280
#define  WM_DBG_DISALLOW_MULTIPLE_LP	(1<<0)
#define  WM_DBG_DISALLOW_MAXFIFO	(1<<1)
#define  WM_DBG_DISALLOW_SPRITE		(1<<2)
5499 | 5500 | ||
/* pipe CSC: per-pipe colour space conversion — 3x3 coefficient matrix plus
 * pre/post offsets, selected per-pipe through the _PIPE() helper below */
#define _PIPE_A_CSC_COEFF_RY_GY	0x49010
#define _PIPE_A_CSC_COEFF_BY	0x49014
#define _PIPE_A_CSC_COEFF_RU_GU	0x49018
#define _PIPE_A_CSC_COEFF_BU	0x4901c
#define _PIPE_A_CSC_COEFF_RV_GV	0x49020
#define _PIPE_A_CSC_COEFF_BV	0x49024
#define _PIPE_A_CSC_MODE	0x49028
#define   CSC_BLACK_SCREEN_OFFSET	(1 << 2)
#define   CSC_POSITION_BEFORE_GAMMA	(1 << 1)
#define   CSC_MODE_YUV_TO_RGB		(1 << 0)
#define _PIPE_A_CSC_PREOFF_HI	0x49030
#define _PIPE_A_CSC_PREOFF_ME	0x49034
#define _PIPE_A_CSC_PREOFF_LO	0x49038
#define _PIPE_A_CSC_POSTOFF_HI	0x49040
#define _PIPE_A_CSC_POSTOFF_ME	0x49044
#define _PIPE_A_CSC_POSTOFF_LO	0x49048

#define _PIPE_B_CSC_COEFF_RY_GY	0x49110
#define _PIPE_B_CSC_COEFF_BY	0x49114
#define _PIPE_B_CSC_COEFF_RU_GU	0x49118
#define _PIPE_B_CSC_COEFF_BU	0x4911c
#define _PIPE_B_CSC_COEFF_RV_GV	0x49120
#define _PIPE_B_CSC_COEFF_BV	0x49124
#define _PIPE_B_CSC_MODE	0x49128
#define _PIPE_B_CSC_PREOFF_HI	0x49130
#define _PIPE_B_CSC_PREOFF_ME	0x49134
#define _PIPE_B_CSC_PREOFF_LO	0x49138
#define _PIPE_B_CSC_POSTOFF_HI	0x49140
#define _PIPE_B_CSC_POSTOFF_ME	0x49144
#define _PIPE_B_CSC_POSTOFF_LO	0x49148

#define PIPE_CSC_COEFF_RY_GY(pipe)	_PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
#define PIPE_CSC_COEFF_BY(pipe)		_PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
#define PIPE_CSC_COEFF_RU_GU(pipe)	_PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
#define PIPE_CSC_COEFF_BU(pipe)		_PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
#define PIPE_CSC_COEFF_RV_GV(pipe)	_PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
#define PIPE_CSC_COEFF_BV(pipe)		_PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
#define PIPE_CSC_MODE(pipe)		_PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
#define PIPE_CSC_PREOFF_HI(pipe)	_PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
#define PIPE_CSC_PREOFF_ME(pipe)	_PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
#define PIPE_CSC_PREOFF_LO(pipe)	_PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
#define PIPE_CSC_POSTOFF_HI(pipe)	_PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
#define PIPE_CSC_POSTOFF_ME(pipe)	_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe)	_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
5545 | 5546 | ||
/* VLV MIPI registers */

#define _MIPIA_PORT_CTRL			(VLV_DISPLAY_BASE + 0x61190)
#define _MIPIB_PORT_CTRL			(VLV_DISPLAY_BASE + 0x61700)
#define MIPI_PORT_CTRL(pipe)	_PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
#define  DPI_ENABLE					(1 << 31) /* A + B */
#define  MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT		27
#define  MIPIA_MIPI4DPHY_DELAY_COUNT_MASK		(0xf << 27)
#define  DUAL_LINK_MODE_MASK				(1 << 26)
#define  DUAL_LINK_MODE_FRONT_BACK			(0 << 26)
#define  DUAL_LINK_MODE_PIXEL_ALTERNATIVE		(1 << 26)
#define  DITHERING_ENABLE				(1 << 25) /* A + B */
#define  FLOPPED_HSTX					(1 << 23)
#define  DE_INVERT					(1 << 19) /* XXX */
#define  MIPIA_FLISDSI_DELAY_COUNT_SHIFT		18
#define  MIPIA_FLISDSI_DELAY_COUNT_MASK			(0xf << 18)
#define  AFE_LATCHOUT					(1 << 17)
#define  LP_OUTPUT_HOLD					(1 << 16)
#define  MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT		15
#define  MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK		(1 << 15)
#define  MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT		11
#define  MIPIB_MIPI4DPHY_DELAY_COUNT_MASK		(0xf << 11)
#define  CSB_SHIFT					9
#define  CSB_MASK					(3 << 9)
#define  CSB_20MHZ					(0 << 9)
#define  CSB_10MHZ					(1 << 9)
#define  CSB_40MHZ					(2 << 9)
#define  BANDGAP_MASK					(1 << 8)
#define  BANDGAP_PNW_CIRCUIT				(0 << 8)
#define  BANDGAP_LNC_CIRCUIT				(1 << 8)
#define  MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT		5
#define  MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK		(7 << 5)
#define  TEARING_EFFECT_DELAY				(1 << 4) /* A + B */
#define  TEARING_EFFECT_SHIFT				2 /* A + B */
#define  TEARING_EFFECT_MASK				(3 << 2)
#define  TEARING_EFFECT_OFF				(0 << 2)
#define  TEARING_EFFECT_DSI				(1 << 2)
#define  TEARING_EFFECT_GPIO				(2 << 2)
#define  LANE_CONFIGURATION_SHIFT			0
#define  LANE_CONFIGURATION_MASK			(3 << 0)
#define  LANE_CONFIGURATION_4LANE			(0 << 0)
#define  LANE_CONFIGURATION_DUAL_LINK_A			(1 << 0)
#define  LANE_CONFIGURATION_DUAL_LINK_B			(2 << 0)

#define _MIPIA_TEARING_CTRL			(VLV_DISPLAY_BASE + 0x61194)
#define _MIPIB_TEARING_CTRL			(VLV_DISPLAY_BASE + 0x61704)
#define MIPI_TEARING_CTRL(pipe)	_PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
#define  TEARING_EFFECT_DELAY_SHIFT			0
#define  TEARING_EFFECT_DELAY_MASK			(0xffff << 0)

/* XXX: all bits reserved */
#define _MIPIA_AUTOPWG				(VLV_DISPLAY_BASE + 0x611a0)
5598 | 5599 | ||
/* MIPI DSI Controller and D-PHY registers */

#define _MIPIA_DEVICE_READY		(VLV_DISPLAY_BASE + 0xb000)
#define _MIPIB_DEVICE_READY		(VLV_DISPLAY_BASE + 0xb800)
#define MIPI_DEVICE_READY(pipe)	_PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
#define  BUS_POSSESSION					(1 << 3) /* set to give bus to receiver */
/* ULPS (ultra-low-power state) request field, bits 2:1 */
#define  ULPS_STATE_MASK				(3 << 1)
#define  ULPS_STATE_ENTER				(2 << 1)
#define  ULPS_STATE_EXIT				(1 << 1)
#define  ULPS_STATE_NORMAL_OPERATION			(0 << 1)
#define  DEVICE_READY					(1 << 0)
5610 | 5611 | ||
/* DSI interrupt status and enable; STAT and EN share the same bit layout */
#define _MIPIA_INTR_STAT		(VLV_DISPLAY_BASE + 0xb004)
#define _MIPIB_INTR_STAT		(VLV_DISPLAY_BASE + 0xb804)
#define MIPI_INTR_STAT(pipe)	_PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
#define _MIPIA_INTR_EN			(VLV_DISPLAY_BASE + 0xb008)
#define _MIPIB_INTR_EN			(VLV_DISPLAY_BASE + 0xb808)
#define MIPI_INTR_EN(pipe)	_PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
#define  TEARING_EFFECT					(1 << 31)
#define  SPL_PKT_SENT_INTERRUPT				(1 << 30)
#define  GEN_READ_DATA_AVAIL				(1 << 29)
#define  LP_GENERIC_WR_FIFO_FULL			(1 << 28)
#define  HS_GENERIC_WR_FIFO_FULL			(1 << 27)
#define  RX_PROT_VIOLATION				(1 << 26)
#define  RX_INVALID_TX_LENGTH				(1 << 25)
#define  ACK_WITH_NO_ERROR				(1 << 24)
#define  TURN_AROUND_ACK_TIMEOUT			(1 << 23)
#define  LP_RX_TIMEOUT					(1 << 22)
#define  HS_TX_TIMEOUT					(1 << 21)
#define  DPI_FIFO_UNDERRUN				(1 << 20)
#define  LOW_CONTENTION					(1 << 19)
#define  HIGH_CONTENTION				(1 << 18)
#define  TXDSI_VC_ID_INVALID				(1 << 17)
#define  TXDSI_DATA_TYPE_NOT_RECOGNISED			(1 << 16)
#define  TXCHECKSUM_ERROR				(1 << 15)
#define  TXECC_MULTIBIT_ERROR				(1 << 14)
#define  TXECC_SINGLE_BIT_ERROR				(1 << 13)
#define  TXFALSE_CONTROL_ERROR				(1 << 12)
#define  RXDSI_VC_ID_INVALID				(1 << 11)
#define  RXDSI_DATA_TYPE_NOT_REGOGNISED			(1 << 10)
#define  RXCHECKSUM_ERROR				(1 << 9)
#define  RXECC_MULTIBIT_ERROR				(1 << 8)
#define  RXECC_SINGLE_BIT_ERROR				(1 << 7)
#define  RXFALSE_CONTROL_ERROR				(1 << 6)
#define  RXHS_RECEIVE_TIMEOUT_ERROR			(1 << 5)
#define  RX_LP_TX_SYNC_ERROR				(1 << 4)
#define  RXEXCAPE_MODE_ENTRY_ERROR			(1 << 3)
#define  RXEOT_SYNC_ERROR				(1 << 2)
#define  RXSOT_SYNC_ERROR				(1 << 1)
#define  RXSOT_ERROR					(1 << 0)
5649 | 5650 | ||
/* DSI function programming: command/video mode data width, pixel format,
 * virtual channel numbers and data-lane count */
#define _MIPIA_DSI_FUNC_PRG		(VLV_DISPLAY_BASE + 0xb00c)
#define _MIPIB_DSI_FUNC_PRG		(VLV_DISPLAY_BASE + 0xb80c)
#define MIPI_DSI_FUNC_PRG(pipe)	_PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
#define  CMD_MODE_DATA_WIDTH_MASK			(7 << 13)
#define  CMD_MODE_NOT_SUPPORTED				(0 << 13)
#define  CMD_MODE_DATA_WIDTH_16_BIT			(1 << 13)
#define  CMD_MODE_DATA_WIDTH_9_BIT			(2 << 13)
#define  CMD_MODE_DATA_WIDTH_8_BIT			(3 << 13)
#define  CMD_MODE_DATA_WIDTH_OPTION1			(4 << 13)
#define  CMD_MODE_DATA_WIDTH_OPTION2			(5 << 13)
#define  VID_MODE_FORMAT_MASK				(0xf << 7)
#define  VID_MODE_NOT_SUPPORTED				(0 << 7)
#define  VID_MODE_FORMAT_RGB565				(1 << 7)
#define  VID_MODE_FORMAT_RGB666				(2 << 7)
#define  VID_MODE_FORMAT_RGB666_LOOSE			(3 << 7)
#define  VID_MODE_FORMAT_RGB888				(4 << 7)
#define  CMD_MODE_CHANNEL_NUMBER_SHIFT			5
#define  CMD_MODE_CHANNEL_NUMBER_MASK			(3 << 5)
#define  VID_MODE_CHANNEL_NUMBER_SHIFT			3
#define  VID_MODE_CHANNEL_NUMBER_MASK			(3 << 3)
#define  DATA_LANES_PRG_REG_SHIFT			0
#define  DATA_LANES_PRG_REG_MASK			(7 << 0)
5672 | 5673 | ||
/* DSI timeout / reset-timer counters */
#define _MIPIA_HS_TX_TIMEOUT		(VLV_DISPLAY_BASE + 0xb010)
#define _MIPIB_HS_TX_TIMEOUT		(VLV_DISPLAY_BASE + 0xb810)
#define MIPI_HS_TX_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
#define  HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK		0xffffff

#define _MIPIA_LP_RX_TIMEOUT		(VLV_DISPLAY_BASE + 0xb014)
#define _MIPIB_LP_RX_TIMEOUT		(VLV_DISPLAY_BASE + 0xb814)
#define MIPI_LP_RX_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
#define  LOW_POWER_RX_TIMEOUT_COUNTER_MASK		0xffffff

#define _MIPIA_TURN_AROUND_TIMEOUT	(VLV_DISPLAY_BASE + 0xb018)
#define _MIPIB_TURN_AROUND_TIMEOUT	(VLV_DISPLAY_BASE + 0xb818)
#define MIPI_TURN_AROUND_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
#define  TURN_AROUND_TIMEOUT_MASK			0x3f

#define _MIPIA_DEVICE_RESET_TIMER	(VLV_DISPLAY_BASE + 0xb01c)
#define _MIPIB_DEVICE_RESET_TIMER	(VLV_DISPLAY_BASE + 0xb81c)
#define MIPI_DEVICE_RESET_TIMER(pipe)	_PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
#define  DEVICE_RESET_TIMER_MASK			0xffff
5692 | 5693 | ||
/* DPI (video-mode) resolution: vertical address in bits 31:16, horizontal in
 * bits 15:0 */
#define _MIPIA_DPI_RESOLUTION		(VLV_DISPLAY_BASE + 0xb020)
#define _MIPIB_DPI_RESOLUTION		(VLV_DISPLAY_BASE + 0xb820)
#define MIPI_DPI_RESOLUTION(pipe)	_PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
#define  VERTICAL_ADDRESS_SHIFT				16
#define  VERTICAL_ADDRESS_MASK				(0xffff << 16)
#define  HORIZONTAL_ADDRESS_SHIFT			0
#define  HORIZONTAL_ADDRESS_MASK			0xffff

/* DBI FIFO empty-watermark select */
#define _MIPIA_DBI_FIFO_THROTTLE	(VLV_DISPLAY_BASE + 0xb024)
#define _MIPIB_DBI_FIFO_THROTTLE	(VLV_DISPLAY_BASE + 0xb824)
#define MIPI_DBI_FIFO_THROTTLE(pipe)	_PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
#define  DBI_FIFO_EMPTY_HALF				(0 << 0)
#define  DBI_FIFO_EMPTY_QUARTER				(1 << 0)
#define  DBI_FIFO_EMPTY_7_LOCATIONS			(2 << 0)
5707 | 5708 | ||
/* DPI video-mode timing counters; regs below are bits 15:0 */
#define _MIPIA_HSYNC_PADDING_COUNT	(VLV_DISPLAY_BASE + 0xb028)
#define _MIPIB_HSYNC_PADDING_COUNT	(VLV_DISPLAY_BASE + 0xb828)
#define MIPI_HSYNC_PADDING_COUNT(pipe)	_PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)

#define _MIPIA_HBP_COUNT		(VLV_DISPLAY_BASE + 0xb02c)
#define _MIPIB_HBP_COUNT		(VLV_DISPLAY_BASE + 0xb82c)
#define MIPI_HBP_COUNT(pipe)		_PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)

#define _MIPIA_HFP_COUNT		(VLV_DISPLAY_BASE + 0xb030)
#define _MIPIB_HFP_COUNT		(VLV_DISPLAY_BASE + 0xb830)
#define MIPI_HFP_COUNT(pipe)		_PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)

#define _MIPIA_HACTIVE_AREA_COUNT	(VLV_DISPLAY_BASE + 0xb034)
#define _MIPIB_HACTIVE_AREA_COUNT	(VLV_DISPLAY_BASE + 0xb834)
#define MIPI_HACTIVE_AREA_COUNT(pipe)	_PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)

#define _MIPIA_VSYNC_PADDING_COUNT	(VLV_DISPLAY_BASE + 0xb038)
#define _MIPIB_VSYNC_PADDING_COUNT	(VLV_DISPLAY_BASE + 0xb838)
#define MIPI_VSYNC_PADDING_COUNT(pipe)	_PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)

#define _MIPIA_VBP_COUNT		(VLV_DISPLAY_BASE + 0xb03c)
#define _MIPIB_VBP_COUNT		(VLV_DISPLAY_BASE + 0xb83c)
#define MIPI_VBP_COUNT(pipe)		_PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)

#define _MIPIA_VFP_COUNT		(VLV_DISPLAY_BASE + 0xb040)
#define _MIPIB_VFP_COUNT		(VLV_DISPLAY_BASE + 0xb840)
#define MIPI_VFP_COUNT(pipe)		_PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)

#define _MIPIA_HIGH_LOW_SWITCH_COUNT	(VLV_DISPLAY_BASE + 0xb044)
#define _MIPIB_HIGH_LOW_SWITCH_COUNT	(VLV_DISPLAY_BASE + 0xb844)
#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe)	_PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
/* regs above are bits 15:0 */
5741 | 5742 | ||
/* DPI control: write-one-shot command bits (turn on, shutdown, backlight,
 * colour mode) plus LP-mode select */
#define _MIPIA_DPI_CONTROL		(VLV_DISPLAY_BASE + 0xb048)
#define _MIPIB_DPI_CONTROL		(VLV_DISPLAY_BASE + 0xb848)
#define MIPI_DPI_CONTROL(pipe)		_PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
#define  DPI_LP_MODE					(1 << 6)
#define  BACKLIGHT_OFF					(1 << 5)
#define  BACKLIGHT_ON					(1 << 4)
#define  COLOR_MODE_OFF					(1 << 3)
#define  COLOR_MODE_ON					(1 << 2)
#define  TURN_ON					(1 << 1)
#define  SHUTDOWN					(1 << 0)
5752 | 5753 | ||
5753 | #define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c) | 5754 | #define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c) |
5754 | #define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c) | 5755 | #define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c) |
5755 | #define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA) | 5756 | #define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA) |
5756 | #define COMMAND_BYTE_SHIFT 0 | 5757 | #define COMMAND_BYTE_SHIFT 0 |
5757 | #define COMMAND_BYTE_MASK (0x3f << 0) | 5758 | #define COMMAND_BYTE_MASK (0x3f << 0) |
5758 | 5759 | ||
5759 | #define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050) | 5760 | #define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050) |
5760 | #define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850) | 5761 | #define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850) |
5761 | #define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT) | 5762 | #define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT) |
5762 | #define MASTER_INIT_TIMER_SHIFT 0 | 5763 | #define MASTER_INIT_TIMER_SHIFT 0 |
5763 | #define MASTER_INIT_TIMER_MASK (0xffff << 0) | 5764 | #define MASTER_INIT_TIMER_MASK (0xffff << 0) |
5764 | 5765 | ||
5765 | #define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054) | 5766 | #define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054) |
5766 | #define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854) | 5767 | #define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854) |
5767 | #define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE) | 5768 | #define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE) |
5768 | #define MAX_RETURN_PKT_SIZE_SHIFT 0 | 5769 | #define MAX_RETURN_PKT_SIZE_SHIFT 0 |
5769 | #define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) | 5770 | #define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) |
5770 | 5771 | ||
5771 | #define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058) | 5772 | #define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058) |
5772 | #define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858) | 5773 | #define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858) |
5773 | #define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT) | 5774 | #define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT) |
5774 | #define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) | 5775 | #define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) |
5775 | #define DISABLE_VIDEO_BTA (1 << 3) | 5776 | #define DISABLE_VIDEO_BTA (1 << 3) |
5776 | #define IP_TG_CONFIG (1 << 2) | 5777 | #define IP_TG_CONFIG (1 << 2) |
5777 | #define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0) | 5778 | #define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0) |
5778 | #define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0) | 5779 | #define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0) |
5779 | #define VIDEO_MODE_BURST (3 << 0) | 5780 | #define VIDEO_MODE_BURST (3 << 0) |
5780 | 5781 | ||
5781 | #define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c) | 5782 | #define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c) |
5782 | #define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c) | 5783 | #define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c) |
5783 | #define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE) | 5784 | #define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE) |
5784 | #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) | 5785 | #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) |
5785 | #define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) | 5786 | #define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) |
5786 | #define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) | 5787 | #define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) |
5787 | #define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4) | 5788 | #define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4) |
5788 | #define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3) | 5789 | #define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3) |
5789 | #define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2) | 5790 | #define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2) |
5790 | #define CLOCKSTOP (1 << 1) | 5791 | #define CLOCKSTOP (1 << 1) |
5791 | #define EOT_DISABLE (1 << 0) | 5792 | #define EOT_DISABLE (1 << 0) |
5792 | 5793 | ||
5793 | #define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060) | 5794 | #define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060) |
5794 | #define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860) | 5795 | #define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860) |
5795 | #define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK) | 5796 | #define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK) |
5796 | #define LP_BYTECLK_SHIFT 0 | 5797 | #define LP_BYTECLK_SHIFT 0 |
5797 | #define LP_BYTECLK_MASK (0xffff << 0) | 5798 | #define LP_BYTECLK_MASK (0xffff << 0) |
5798 | 5799 | ||
5799 | /* bits 31:0 */ | 5800 | /* bits 31:0 */ |
5800 | #define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064) | 5801 | #define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064) |
5801 | #define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864) | 5802 | #define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864) |
5802 | #define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA) | 5803 | #define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA) |
5803 | 5804 | ||
5804 | /* bits 31:0 */ | 5805 | /* bits 31:0 */ |
5805 | #define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068) | 5806 | #define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068) |
5806 | #define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868) | 5807 | #define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868) |
5807 | #define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA) | 5808 | #define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA) |
5808 | 5809 | ||
5809 | #define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c) | 5810 | #define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c) |
5810 | #define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c) | 5811 | #define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c) |
5811 | #define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL) | 5812 | #define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL) |
5812 | #define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070) | 5813 | #define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070) |
5813 | #define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870) | 5814 | #define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870) |
5814 | #define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL) | 5815 | #define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL) |
5815 | #define LONG_PACKET_WORD_COUNT_SHIFT 8 | 5816 | #define LONG_PACKET_WORD_COUNT_SHIFT 8 |
5816 | #define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) | 5817 | #define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) |
5817 | #define SHORT_PACKET_PARAM_SHIFT 8 | 5818 | #define SHORT_PACKET_PARAM_SHIFT 8 |
5818 | #define SHORT_PACKET_PARAM_MASK (0xffff << 8) | 5819 | #define SHORT_PACKET_PARAM_MASK (0xffff << 8) |
5819 | #define VIRTUAL_CHANNEL_SHIFT 6 | 5820 | #define VIRTUAL_CHANNEL_SHIFT 6 |
5820 | #define VIRTUAL_CHANNEL_MASK (3 << 6) | 5821 | #define VIRTUAL_CHANNEL_MASK (3 << 6) |
5821 | #define DATA_TYPE_SHIFT 0 | 5822 | #define DATA_TYPE_SHIFT 0 |
5822 | #define DATA_TYPE_MASK (3f << 0) | 5823 | #define DATA_TYPE_MASK (3f << 0) |
5823 | /* data type values, see include/video/mipi_display.h */ | 5824 | /* data type values, see include/video/mipi_display.h */ |
5824 | 5825 | ||
5825 | #define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074) | 5826 | #define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074) |
5826 | #define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874) | 5827 | #define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874) |
5827 | #define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT) | 5828 | #define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT) |
5828 | #define DPI_FIFO_EMPTY (1 << 28) | 5829 | #define DPI_FIFO_EMPTY (1 << 28) |
5829 | #define DBI_FIFO_EMPTY (1 << 27) | 5830 | #define DBI_FIFO_EMPTY (1 << 27) |
5830 | #define LP_CTRL_FIFO_EMPTY (1 << 26) | 5831 | #define LP_CTRL_FIFO_EMPTY (1 << 26) |
5831 | #define LP_CTRL_FIFO_HALF_EMPTY (1 << 25) | 5832 | #define LP_CTRL_FIFO_HALF_EMPTY (1 << 25) |
5832 | #define LP_CTRL_FIFO_FULL (1 << 24) | 5833 | #define LP_CTRL_FIFO_FULL (1 << 24) |
5833 | #define HS_CTRL_FIFO_EMPTY (1 << 18) | 5834 | #define HS_CTRL_FIFO_EMPTY (1 << 18) |
5834 | #define HS_CTRL_FIFO_HALF_EMPTY (1 << 17) | 5835 | #define HS_CTRL_FIFO_HALF_EMPTY (1 << 17) |
5835 | #define HS_CTRL_FIFO_FULL (1 << 16) | 5836 | #define HS_CTRL_FIFO_FULL (1 << 16) |
5836 | #define LP_DATA_FIFO_EMPTY (1 << 10) | 5837 | #define LP_DATA_FIFO_EMPTY (1 << 10) |
5837 | #define LP_DATA_FIFO_HALF_EMPTY (1 << 9) | 5838 | #define LP_DATA_FIFO_HALF_EMPTY (1 << 9) |
5838 | #define LP_DATA_FIFO_FULL (1 << 8) | 5839 | #define LP_DATA_FIFO_FULL (1 << 8) |
5839 | #define HS_DATA_FIFO_EMPTY (1 << 2) | 5840 | #define HS_DATA_FIFO_EMPTY (1 << 2) |
5840 | #define HS_DATA_FIFO_HALF_EMPTY (1 << 1) | 5841 | #define HS_DATA_FIFO_HALF_EMPTY (1 << 1) |
5841 | #define HS_DATA_FIFO_FULL (1 << 0) | 5842 | #define HS_DATA_FIFO_FULL (1 << 0) |
5842 | 5843 | ||
5843 | #define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078) | 5844 | #define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078) |
5844 | #define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878) | 5845 | #define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878) |
5845 | #define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE) | 5846 | #define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE) |
5846 | #define DBI_HS_LP_MODE_MASK (1 << 0) | 5847 | #define DBI_HS_LP_MODE_MASK (1 << 0) |
5847 | #define DBI_LP_MODE (1 << 0) | 5848 | #define DBI_LP_MODE (1 << 0) |
5848 | #define DBI_HS_MODE (0 << 0) | 5849 | #define DBI_HS_MODE (0 << 0) |
5849 | 5850 | ||
5850 | #define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080) | 5851 | #define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080) |
5851 | #define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880) | 5852 | #define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880) |
5852 | #define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM) | 5853 | #define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM) |
5853 | #define EXIT_ZERO_COUNT_SHIFT 24 | 5854 | #define EXIT_ZERO_COUNT_SHIFT 24 |
5854 | #define EXIT_ZERO_COUNT_MASK (0x3f << 24) | 5855 | #define EXIT_ZERO_COUNT_MASK (0x3f << 24) |
5855 | #define TRAIL_COUNT_SHIFT 16 | 5856 | #define TRAIL_COUNT_SHIFT 16 |
5856 | #define TRAIL_COUNT_MASK (0x1f << 16) | 5857 | #define TRAIL_COUNT_MASK (0x1f << 16) |
5857 | #define CLK_ZERO_COUNT_SHIFT 8 | 5858 | #define CLK_ZERO_COUNT_SHIFT 8 |
5858 | #define CLK_ZERO_COUNT_MASK (0xff << 8) | 5859 | #define CLK_ZERO_COUNT_MASK (0xff << 8) |
5859 | #define PREPARE_COUNT_SHIFT 0 | 5860 | #define PREPARE_COUNT_SHIFT 0 |
5860 | #define PREPARE_COUNT_MASK (0x3f << 0) | 5861 | #define PREPARE_COUNT_MASK (0x3f << 0) |
5861 | 5862 | ||
5862 | /* bits 31:0 */ | 5863 | /* bits 31:0 */ |
5863 | #define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084) | 5864 | #define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084) |
5864 | #define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884) | 5865 | #define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884) |
5865 | #define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL) | 5866 | #define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL) |
5866 | 5867 | ||
5867 | #define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088) | 5868 | #define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088) |
5868 | #define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888) | 5869 | #define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888) |
5869 | #define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT) | 5870 | #define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT) |
5870 | #define LP_HS_SSW_CNT_SHIFT 16 | 5871 | #define LP_HS_SSW_CNT_SHIFT 16 |
5871 | #define LP_HS_SSW_CNT_MASK (0xffff << 16) | 5872 | #define LP_HS_SSW_CNT_MASK (0xffff << 16) |
5872 | #define HS_LP_PWR_SW_CNT_SHIFT 0 | 5873 | #define HS_LP_PWR_SW_CNT_SHIFT 0 |
5873 | #define HS_LP_PWR_SW_CNT_MASK (0xffff << 0) | 5874 | #define HS_LP_PWR_SW_CNT_MASK (0xffff << 0) |
5874 | 5875 | ||
5875 | #define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c) | 5876 | #define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c) |
5876 | #define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c) | 5877 | #define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c) |
5877 | #define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL) | 5878 | #define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL) |
5878 | #define STOP_STATE_STALL_COUNTER_SHIFT 0 | 5879 | #define STOP_STATE_STALL_COUNTER_SHIFT 0 |
5879 | #define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) | 5880 | #define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) |
5880 | 5881 | ||
5881 | #define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090) | 5882 | #define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090) |
5882 | #define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890) | 5883 | #define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890) |
5883 | #define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1) | 5884 | #define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1) |
5884 | #define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094) | 5885 | #define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094) |
5885 | #define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894) | 5886 | #define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894) |
5886 | #define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1) | 5887 | #define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1) |
5887 | #define RX_CONTENTION_DETECTED (1 << 0) | 5888 | #define RX_CONTENTION_DETECTED (1 << 0) |
5888 | 5889 | ||
5889 | /* XXX: only pipe A ?!? */ | 5890 | /* XXX: only pipe A ?!? */ |
5890 | #define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100) | 5891 | #define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100) |
5891 | #define DBI_TYPEC_ENABLE (1 << 31) | 5892 | #define DBI_TYPEC_ENABLE (1 << 31) |
5892 | #define DBI_TYPEC_WIP (1 << 30) | 5893 | #define DBI_TYPEC_WIP (1 << 30) |
5893 | #define DBI_TYPEC_OPTION_SHIFT 28 | 5894 | #define DBI_TYPEC_OPTION_SHIFT 28 |
5894 | #define DBI_TYPEC_OPTION_MASK (3 << 28) | 5895 | #define DBI_TYPEC_OPTION_MASK (3 << 28) |
5895 | #define DBI_TYPEC_FREQ_SHIFT 24 | 5896 | #define DBI_TYPEC_FREQ_SHIFT 24 |
5896 | #define DBI_TYPEC_FREQ_MASK (0xf << 24) | 5897 | #define DBI_TYPEC_FREQ_MASK (0xf << 24) |
5897 | #define DBI_TYPEC_OVERRIDE (1 << 8) | 5898 | #define DBI_TYPEC_OVERRIDE (1 << 8) |
5898 | #define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0 | 5899 | #define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0 |
5899 | #define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0) | 5900 | #define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0) |
5900 | 5901 | ||
5901 | 5902 | ||
5902 | /* MIPI adapter registers */ | 5903 | /* MIPI adapter registers */ |
5903 | 5904 | ||
5904 | #define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104) | 5905 | #define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104) |
5905 | #define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904) | 5906 | #define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904) |
5906 | #define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL) | 5907 | #define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL) |
5907 | #define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ | 5908 | #define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ |
5908 | #define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) | 5909 | #define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) |
5909 | #define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) | 5910 | #define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) |
5910 | #define ESCAPE_CLOCK_DIVIDER_2 (1 << 5) | 5911 | #define ESCAPE_CLOCK_DIVIDER_2 (1 << 5) |
5911 | #define ESCAPE_CLOCK_DIVIDER_4 (2 << 5) | 5912 | #define ESCAPE_CLOCK_DIVIDER_4 (2 << 5) |
5912 | #define READ_REQUEST_PRIORITY_SHIFT 3 | 5913 | #define READ_REQUEST_PRIORITY_SHIFT 3 |
5913 | #define READ_REQUEST_PRIORITY_MASK (3 << 3) | 5914 | #define READ_REQUEST_PRIORITY_MASK (3 << 3) |
5914 | #define READ_REQUEST_PRIORITY_LOW (0 << 3) | 5915 | #define READ_REQUEST_PRIORITY_LOW (0 << 3) |
5915 | #define READ_REQUEST_PRIORITY_HIGH (3 << 3) | 5916 | #define READ_REQUEST_PRIORITY_HIGH (3 << 3) |
5916 | #define RGB_FLIP_TO_BGR (1 << 2) | 5917 | #define RGB_FLIP_TO_BGR (1 << 2) |
5917 | 5918 | ||
5918 | #define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108) | 5919 | #define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108) |
5919 | #define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908) | 5920 | #define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908) |
5920 | #define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS) | 5921 | #define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS) |
5921 | #define DATA_MEM_ADDRESS_SHIFT 5 | 5922 | #define DATA_MEM_ADDRESS_SHIFT 5 |
5922 | #define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) | 5923 | #define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) |
5923 | #define DATA_VALID (1 << 0) | 5924 | #define DATA_VALID (1 << 0) |
5924 | 5925 | ||
5925 | #define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c) | 5926 | #define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c) |
5926 | #define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c) | 5927 | #define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c) |
5927 | #define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH) | 5928 | #define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH) |
5928 | #define DATA_LENGTH_SHIFT 0 | 5929 | #define DATA_LENGTH_SHIFT 0 |
5929 | #define DATA_LENGTH_MASK (0xfffff << 0) | 5930 | #define DATA_LENGTH_MASK (0xfffff << 0) |
5930 | 5931 | ||
5931 | #define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110) | 5932 | #define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110) |
5932 | #define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910) | 5933 | #define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910) |
5933 | #define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS) | 5934 | #define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS) |
5934 | #define COMMAND_MEM_ADDRESS_SHIFT 5 | 5935 | #define COMMAND_MEM_ADDRESS_SHIFT 5 |
5935 | #define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) | 5936 | #define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) |
5936 | #define AUTO_PWG_ENABLE (1 << 2) | 5937 | #define AUTO_PWG_ENABLE (1 << 2) |
5937 | #define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1) | 5938 | #define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1) |
5938 | #define COMMAND_VALID (1 << 0) | 5939 | #define COMMAND_VALID (1 << 0) |
5939 | 5940 | ||
5940 | #define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114) | 5941 | #define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114) |
5941 | #define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914) | 5942 | #define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914) |
5942 | #define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH) | 5943 | #define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH) |
5943 | #define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ | 5944 | #define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ |
5944 | #define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) | 5945 | #define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) |
5945 | 5946 | ||
5946 | #define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118) | 5947 | #define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118) |
5947 | #define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918) | 5948 | #define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918) |
5948 | #define MIPI_READ_DATA_RETURN(pipe, n) \ | 5949 | #define MIPI_READ_DATA_RETURN(pipe, n) \ |
5949 | (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */ | 5950 | (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */ |
5950 | 5951 | ||
5951 | #define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138) | 5952 | #define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138) |
5952 | #define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938) | 5953 | #define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938) |
5953 | #define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID) | 5954 | #define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID) |
5954 | #define READ_DATA_VALID(n) (1 << (n)) | 5955 | #define READ_DATA_VALID(n) (1 << (n)) |
5955 | 5956 | ||
5956 | /* For UMS only (deprecated): */ | 5957 | /* For UMS only (deprecated): */ |
5957 | #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000) | 5958 | #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000) |
5958 | #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) | 5959 | #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) |
5959 | #define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014) | 5960 | #define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014) |
5960 | #define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018) | 5961 | #define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018) |
5961 | #define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c) | 5962 | #define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c) |
5962 | #define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020) | 5963 | #define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020) |
5963 | 5964 | ||
5964 | #endif /* _I915_REG_H_ */ | 5965 | #endif /* _I915_REG_H_ */ |
5965 | 5966 |
drivers/gpu/drm/i915/intel_display.c
1 | /* | 1 | /* |
2 | * Copyright © 2006-2007 Intel Corporation | 2 | * Copyright © 2006-2007 Intel Corporation |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation | 6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the | 8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: | 9 | * Software is furnished to do so, subject to the following conditions: |
10 | * | 10 | * |
11 | * The above copyright notice and this permission notice (including the next | 11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the | 12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. | 13 | * Software. |
14 | * | 14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
21 | * DEALINGS IN THE SOFTWARE. | 21 | * DEALINGS IN THE SOFTWARE. |
22 | * | 22 | * |
23 | * Authors: | 23 | * Authors: |
24 | * Eric Anholt <eric@anholt.net> | 24 | * Eric Anholt <eric@anholt.net> |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/dmi.h> | 27 | #include <linux/dmi.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/input.h> | 29 | #include <linux/input.h> |
30 | #include <linux/i2c.h> | 30 | #include <linux/i2c.h> |
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/vgaarb.h> | 33 | #include <linux/vgaarb.h> |
34 | #include <drm/drm_edid.h> | 34 | #include <drm/drm_edid.h> |
35 | #include <drm/drmP.h> | 35 | #include <drm/drmP.h> |
36 | #include "intel_drv.h" | 36 | #include "intel_drv.h" |
37 | #include <drm/i915_drm.h> | 37 | #include <drm/i915_drm.h> |
38 | #include "i915_drv.h" | 38 | #include "i915_drv.h" |
39 | #include "i915_trace.h" | 39 | #include "i915_trace.h" |
40 | #include <drm/drm_dp_helper.h> | 40 | #include <drm/drm_dp_helper.h> |
41 | #include <drm/drm_crtc_helper.h> | 41 | #include <drm/drm_crtc_helper.h> |
42 | #include <linux/dma_remapping.h> | 42 | #include <linux/dma_remapping.h> |
43 | 43 | ||
44 | static void intel_increase_pllclock(struct drm_crtc *crtc); | 44 | static void intel_increase_pllclock(struct drm_crtc *crtc); |
45 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); | 45 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); |
46 | 46 | ||
47 | static void i9xx_crtc_clock_get(struct intel_crtc *crtc, | 47 | static void i9xx_crtc_clock_get(struct intel_crtc *crtc, |
48 | struct intel_crtc_config *pipe_config); | 48 | struct intel_crtc_config *pipe_config); |
49 | static void ironlake_pch_clock_get(struct intel_crtc *crtc, | 49 | static void ironlake_pch_clock_get(struct intel_crtc *crtc, |
50 | struct intel_crtc_config *pipe_config); | 50 | struct intel_crtc_config *pipe_config); |
51 | 51 | ||
52 | static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, | 52 | static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, |
53 | int x, int y, struct drm_framebuffer *old_fb); | 53 | int x, int y, struct drm_framebuffer *old_fb); |
54 | static int intel_framebuffer_init(struct drm_device *dev, | 54 | static int intel_framebuffer_init(struct drm_device *dev, |
55 | struct intel_framebuffer *ifb, | 55 | struct intel_framebuffer *ifb, |
56 | struct drm_mode_fb_cmd2 *mode_cmd, | 56 | struct drm_mode_fb_cmd2 *mode_cmd, |
57 | struct drm_i915_gem_object *obj); | 57 | struct drm_i915_gem_object *obj); |
58 | 58 | ||
59 | typedef struct { | 59 | typedef struct { |
60 | int min, max; | 60 | int min, max; |
61 | } intel_range_t; | 61 | } intel_range_t; |
62 | 62 | ||
63 | typedef struct { | 63 | typedef struct { |
64 | int dot_limit; | 64 | int dot_limit; |
65 | int p2_slow, p2_fast; | 65 | int p2_slow, p2_fast; |
66 | } intel_p2_t; | 66 | } intel_p2_t; |
67 | 67 | ||
68 | typedef struct intel_limit intel_limit_t; | 68 | typedef struct intel_limit intel_limit_t; |
69 | struct intel_limit { | 69 | struct intel_limit { |
70 | intel_range_t dot, vco, n, m, m1, m2, p, p1; | 70 | intel_range_t dot, vco, n, m, m1, m2, p, p1; |
71 | intel_p2_t p2; | 71 | intel_p2_t p2; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | int | 74 | int |
75 | intel_pch_rawclk(struct drm_device *dev) | 75 | intel_pch_rawclk(struct drm_device *dev) |
76 | { | 76 | { |
77 | struct drm_i915_private *dev_priv = dev->dev_private; | 77 | struct drm_i915_private *dev_priv = dev->dev_private; |
78 | 78 | ||
79 | WARN_ON(!HAS_PCH_SPLIT(dev)); | 79 | WARN_ON(!HAS_PCH_SPLIT(dev)); |
80 | 80 | ||
81 | return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; | 81 | return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; |
82 | } | 82 | } |
83 | 83 | ||
84 | static inline u32 /* units of 100MHz */ | 84 | static inline u32 /* units of 100MHz */ |
85 | intel_fdi_link_freq(struct drm_device *dev) | 85 | intel_fdi_link_freq(struct drm_device *dev) |
86 | { | 86 | { |
87 | if (IS_GEN5(dev)) { | 87 | if (IS_GEN5(dev)) { |
88 | struct drm_i915_private *dev_priv = dev->dev_private; | 88 | struct drm_i915_private *dev_priv = dev->dev_private; |
89 | return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; | 89 | return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; |
90 | } else | 90 | } else |
91 | return 27; | 91 | return 27; |
92 | } | 92 | } |
93 | 93 | ||
94 | static const intel_limit_t intel_limits_i8xx_dac = { | 94 | static const intel_limit_t intel_limits_i8xx_dac = { |
95 | .dot = { .min = 25000, .max = 350000 }, | 95 | .dot = { .min = 25000, .max = 350000 }, |
96 | .vco = { .min = 908000, .max = 1512000 }, | 96 | .vco = { .min = 908000, .max = 1512000 }, |
97 | .n = { .min = 2, .max = 16 }, | 97 | .n = { .min = 2, .max = 16 }, |
98 | .m = { .min = 96, .max = 140 }, | 98 | .m = { .min = 96, .max = 140 }, |
99 | .m1 = { .min = 18, .max = 26 }, | 99 | .m1 = { .min = 18, .max = 26 }, |
100 | .m2 = { .min = 6, .max = 16 }, | 100 | .m2 = { .min = 6, .max = 16 }, |
101 | .p = { .min = 4, .max = 128 }, | 101 | .p = { .min = 4, .max = 128 }, |
102 | .p1 = { .min = 2, .max = 33 }, | 102 | .p1 = { .min = 2, .max = 33 }, |
103 | .p2 = { .dot_limit = 165000, | 103 | .p2 = { .dot_limit = 165000, |
104 | .p2_slow = 4, .p2_fast = 2 }, | 104 | .p2_slow = 4, .p2_fast = 2 }, |
105 | }; | 105 | }; |
106 | 106 | ||
107 | static const intel_limit_t intel_limits_i8xx_dvo = { | 107 | static const intel_limit_t intel_limits_i8xx_dvo = { |
108 | .dot = { .min = 25000, .max = 350000 }, | 108 | .dot = { .min = 25000, .max = 350000 }, |
109 | .vco = { .min = 908000, .max = 1512000 }, | 109 | .vco = { .min = 908000, .max = 1512000 }, |
110 | .n = { .min = 2, .max = 16 }, | 110 | .n = { .min = 2, .max = 16 }, |
111 | .m = { .min = 96, .max = 140 }, | 111 | .m = { .min = 96, .max = 140 }, |
112 | .m1 = { .min = 18, .max = 26 }, | 112 | .m1 = { .min = 18, .max = 26 }, |
113 | .m2 = { .min = 6, .max = 16 }, | 113 | .m2 = { .min = 6, .max = 16 }, |
114 | .p = { .min = 4, .max = 128 }, | 114 | .p = { .min = 4, .max = 128 }, |
115 | .p1 = { .min = 2, .max = 33 }, | 115 | .p1 = { .min = 2, .max = 33 }, |
116 | .p2 = { .dot_limit = 165000, | 116 | .p2 = { .dot_limit = 165000, |
117 | .p2_slow = 4, .p2_fast = 4 }, | 117 | .p2_slow = 4, .p2_fast = 4 }, |
118 | }; | 118 | }; |
119 | 119 | ||
/*
 * DPLL divisor limits for gen2 (i8xx) pipes driving LVDS.
 * Note the wider p2 dividers (14/7) compared to the DAC/DVO tables.
 */
static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
132 | 132 | ||
/*
 * DPLL divisor limits for gen3+ (i9xx) SDVO/DAC outputs.
 * Also used as the fallback for unhandled output types on g4x
 * (see intel_g4x_limit()).
 */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};
145 | 145 | ||
/* DPLL divisor limits for gen3+ (i9xx) pipes driving LVDS. */
static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
158 | 158 | ||
159 | 159 | ||
/* DPLL divisor limits for G4x pipes driving SDVO. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};
174 | 174 | ||
/*
 * DPLL divisor limits for G4x pipes driving HDMI or analog outputs
 * (both share this table, see intel_g4x_limit()).
 */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};
187 | 187 | ||
/*
 * DPLL divisor limits for G4x single-channel LVDS.
 * dot_limit == 0 with p2_slow == p2_fast means p2 is effectively fixed.
 */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};
201 | 201 | ||
/* DPLL divisor limits for G4x dual-channel LVDS (p2 fixed at 7). */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
215 | 215 | ||
/*
 * DPLL divisor limits for Pineview SDVO. Pineview's divider layout
 * differs from other gen3 parts: m1 is unused (reserved as 0) and the
 * single combined m divider lives in m2; see pineview_clock().
 */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};
230 | 230 | ||
/* DPLL divisor limits for Pineview LVDS (same m1/m2 quirk as SDVO above). */
static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
243 | 243 | ||
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
/* DPLL divisor limits for Ironlake/Sandybridge DAC (and non-LVDS) outputs. */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};
261 | 261 | ||
/* DPLL divisor limits for Ironlake single-link LVDS (120MHz refclk). */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};
274 | 274 | ||
/* DPLL divisor limits for Ironlake dual-link LVDS (120MHz refclk). */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
287 | 287 | ||
/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};
301 | 301 | ||
/* DPLL divisor limits for Ironlake dual-link LVDS with 100MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
314 | 314 | ||
/*
 * DPLL divisor limits for Valleyview. The p/m aggregate ranges are not
 * used here (intel_PLL_is_valid() skips them on VLV).
 */
static const intel_limit_t intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
330 | 330 | ||
/*
 * Compute the derived clock fields (m, p, vco, dot) for Valleyview from
 * the raw divisors in @clock. On VLV, m = m1 * m2 directly (no register
 * offset as on Ironlake). Bails out (with a WARN) rather than dividing
 * by zero if n or p is 0.
 */
static void vlv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
340 | 340 | ||
/**
 * Returns whether any output on the specified pipe is of the specified type
 *
 * Walks every encoder currently attached to @crtc and reports true as
 * soon as one matches @type (an INTEL_OUTPUT_* value).
 */
static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->type == type)
			return true;

	return false;
}
355 | 355 | ||
356 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, | 356 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, |
357 | int refclk) | 357 | int refclk) |
358 | { | 358 | { |
359 | struct drm_device *dev = crtc->dev; | 359 | struct drm_device *dev = crtc->dev; |
360 | const intel_limit_t *limit; | 360 | const intel_limit_t *limit; |
361 | 361 | ||
362 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 362 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
363 | if (intel_is_dual_link_lvds(dev)) { | 363 | if (intel_is_dual_link_lvds(dev)) { |
364 | if (refclk == 100000) | 364 | if (refclk == 100000) |
365 | limit = &intel_limits_ironlake_dual_lvds_100m; | 365 | limit = &intel_limits_ironlake_dual_lvds_100m; |
366 | else | 366 | else |
367 | limit = &intel_limits_ironlake_dual_lvds; | 367 | limit = &intel_limits_ironlake_dual_lvds; |
368 | } else { | 368 | } else { |
369 | if (refclk == 100000) | 369 | if (refclk == 100000) |
370 | limit = &intel_limits_ironlake_single_lvds_100m; | 370 | limit = &intel_limits_ironlake_single_lvds_100m; |
371 | else | 371 | else |
372 | limit = &intel_limits_ironlake_single_lvds; | 372 | limit = &intel_limits_ironlake_single_lvds; |
373 | } | 373 | } |
374 | } else | 374 | } else |
375 | limit = &intel_limits_ironlake_dac; | 375 | limit = &intel_limits_ironlake_dac; |
376 | 376 | ||
377 | return limit; | 377 | return limit; |
378 | } | 378 | } |
379 | 379 | ||
380 | static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) | 380 | static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) |
381 | { | 381 | { |
382 | struct drm_device *dev = crtc->dev; | 382 | struct drm_device *dev = crtc->dev; |
383 | const intel_limit_t *limit; | 383 | const intel_limit_t *limit; |
384 | 384 | ||
385 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 385 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
386 | if (intel_is_dual_link_lvds(dev)) | 386 | if (intel_is_dual_link_lvds(dev)) |
387 | limit = &intel_limits_g4x_dual_channel_lvds; | 387 | limit = &intel_limits_g4x_dual_channel_lvds; |
388 | else | 388 | else |
389 | limit = &intel_limits_g4x_single_channel_lvds; | 389 | limit = &intel_limits_g4x_single_channel_lvds; |
390 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || | 390 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || |
391 | intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { | 391 | intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { |
392 | limit = &intel_limits_g4x_hdmi; | 392 | limit = &intel_limits_g4x_hdmi; |
393 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { | 393 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { |
394 | limit = &intel_limits_g4x_sdvo; | 394 | limit = &intel_limits_g4x_sdvo; |
395 | } else /* The option is for other outputs */ | 395 | } else /* The option is for other outputs */ |
396 | limit = &intel_limits_i9xx_sdvo; | 396 | limit = &intel_limits_i9xx_sdvo; |
397 | 397 | ||
398 | return limit; | 398 | return limit; |
399 | } | 399 | } |
400 | 400 | ||
401 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) | 401 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) |
402 | { | 402 | { |
403 | struct drm_device *dev = crtc->dev; | 403 | struct drm_device *dev = crtc->dev; |
404 | const intel_limit_t *limit; | 404 | const intel_limit_t *limit; |
405 | 405 | ||
406 | if (HAS_PCH_SPLIT(dev)) | 406 | if (HAS_PCH_SPLIT(dev)) |
407 | limit = intel_ironlake_limit(crtc, refclk); | 407 | limit = intel_ironlake_limit(crtc, refclk); |
408 | else if (IS_G4X(dev)) { | 408 | else if (IS_G4X(dev)) { |
409 | limit = intel_g4x_limit(crtc); | 409 | limit = intel_g4x_limit(crtc); |
410 | } else if (IS_PINEVIEW(dev)) { | 410 | } else if (IS_PINEVIEW(dev)) { |
411 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 411 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
412 | limit = &intel_limits_pineview_lvds; | 412 | limit = &intel_limits_pineview_lvds; |
413 | else | 413 | else |
414 | limit = &intel_limits_pineview_sdvo; | 414 | limit = &intel_limits_pineview_sdvo; |
415 | } else if (IS_VALLEYVIEW(dev)) { | 415 | } else if (IS_VALLEYVIEW(dev)) { |
416 | limit = &intel_limits_vlv; | 416 | limit = &intel_limits_vlv; |
417 | } else if (!IS_GEN2(dev)) { | 417 | } else if (!IS_GEN2(dev)) { |
418 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 418 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
419 | limit = &intel_limits_i9xx_lvds; | 419 | limit = &intel_limits_i9xx_lvds; |
420 | else | 420 | else |
421 | limit = &intel_limits_i9xx_sdvo; | 421 | limit = &intel_limits_i9xx_sdvo; |
422 | } else { | 422 | } else { |
423 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 423 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
424 | limit = &intel_limits_i8xx_lvds; | 424 | limit = &intel_limits_i8xx_lvds; |
425 | else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO)) | 425 | else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO)) |
426 | limit = &intel_limits_i8xx_dvo; | 426 | limit = &intel_limits_i8xx_dvo; |
427 | else | 427 | else |
428 | limit = &intel_limits_i8xx_dac; | 428 | limit = &intel_limits_i8xx_dac; |
429 | } | 429 | } |
430 | return limit; | 430 | return limit; |
431 | } | 431 | } |
432 | 432 | ||
/* m1 is reserved as 0 in Pineview, n is a ring counter */
/*
 * Compute the derived clock fields for Pineview. The single combined
 * m divider is stored in m2 and carries a +2 register offset; n is used
 * directly (no +2, unlike i9xx_clock()). WARNs and bails instead of
 * dividing by zero.
 */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
443 | 443 | ||
/*
 * Effective m divider for i9xx-style DPLLs: m = 5*(m1+2) + (m2+2),
 * where the +2 offsets account for the register encoding (see the
 * Ironlake/Sandybridge comment above the limit tables).
 */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
448 | 448 | ||
/*
 * Compute the derived clock fields for i9xx-style DPLLs. m comes from
 * i9xx_dpll_compute_m() and n carries a +2 register offset, hence the
 * (n + 2) divisor. WARNs and bails instead of dividing by zero.
 */
static void i9xx_clock(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
458 | 458 | ||
/*
 * NOTE: this macro performs a non-local "return false" from the
 * *enclosing function* — only use it inside intel_PLL_is_valid().
 * The debug print is compiled out.
 */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/*
	 * Pineview has no separate m1 (reserved as 0) and VLV computes m
	 * differently, so the m1 > m2 requirement only applies elsewhere.
	 */
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV limit tables don't populate the aggregate p/m ranges. */
	if (!IS_VALLEYVIEW(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
499 | 499 | ||
/*
 * Exhaustively search the i9xx divisor space for the combination whose
 * resulting dot clock is closest to @target (in kHz, presumably — TODO
 * confirm units against callers).
 *
 * @limit:       platform/output divisor limits (from intel_limit())
 * @crtc:        pipe being configured; used to detect LVDS, whose p2
 *               follows the current single/dual-link state instead of
 *               the dot_limit threshold
 * @refclk:      reference clock fed to i9xx_clock()
 * @match_clock: if non-NULL, only accept candidates with the same p
 * @best_clock:  out parameter; zeroed first, then set to the best hit
 *
 * Returns true if any valid divisor set was found (err moved below the
 * initial value of @target). Ties keep the earliest candidate in loop
 * order, so the iteration order is part of the behavior.
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m2 must stay below m1 (see intel_PLL_is_valid) */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
560 | 560 | ||
/*
 * pnv_find_best_dpll - exhaustive Pineview DPLL divider search
 * @limit: platform divider and frequency limits
 * @crtc: CRTC the PLL will drive (used to detect LVDS outputs)
 * @target: desired dot clock
 * @refclk: reference clock
 * @match_clock: if non-NULL, only candidates with the same post divider
 *               (clock.p) are accepted
 * @best_clock: out parameter, receives the best divider combination
 *
 * Returns true if any valid divider combination was found.  Only a
 * strictly smaller error replaces the current best, so iteration order
 * doubles as the tie-breaker — do not reorder the loops.
 */
static bool
pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target; /* worst acceptable error; |dot - target| must beat it */

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		/* Non-LVDS: pick p2 purely from the dot-clock threshold. */
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					/* Derive the resulting dot clock for
					 * this divider set. */
					pineview_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err only moved below target if at least one candidate was kept. */
	return (err != target);
}
619 | 619 | ||
/*
 * g4x_find_best_dpll - G4x DPLL divider search
 * @limit: platform divider and frequency limits
 * @crtc: CRTC the PLL will drive (used to detect LVDS outputs)
 * @target: desired dot clock
 * @refclk: reference clock
 * @match_clock: accepted for signature parity with the other searches but
 *               unused here
 * @best_clock: out parameter, receives the best divider combination
 *
 * Unlike the i9xx/pnv variants this search accepts any candidate within
 * roughly 0.585% of @target (err_most) and biases toward small n and
 * large m1/m2 via the loop directions — do not reorder the loops.
 * Returns true if such a candidate was found.
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/* Trust the current dual-link state instead of re-deriving it. */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* Once a fit exists, never
						 * search a larger n. */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
676 | 676 | ||
/*
 * vlv_find_best_dpll - ValleyView DPLL divider search
 * @limit: platform divider and frequency limits
 * @crtc: CRTC the PLL will drive (unused beyond its drm_device here)
 * @target: desired dot clock; internally multiplied by 5 for the fast clock
 * @refclk: reference clock
 * @match_clock: accepted for signature parity but unused here
 * @best_clock: out parameter, receives the best divider combination
 *
 * m2 is solved directly from the other dividers (rounded to nearest)
 * instead of being iterated.  Acceptance is ppm-based and intentionally
 * two-tiered: a candidate under 100 ppm with a larger post divider
 * always wins (and pins bestppm to 0); otherwise a candidate must beat
 * the best ppm so far by more than 10 ppm.  Both checks run on each
 * candidate, so do not reorder or merge them.
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			/* p2 steps by 2 above 10, by 1 below (valid p2 set). */
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm, diff;

					/* Solve m2 for an exact hit, rounded
					 * to the closest integer. */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_clock(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					diff = abs(clock.dot - target);
					ppm = div_u64(1000000ULL * diff, target);

					if (ppm < 100 && clock.p > best_clock->p) {
						bestppm = 0;
						*best_clock = clock;
						found = true;
					}

					if (bestppm >= 10 && ppm < bestppm - 10) {
						bestppm = ppm;
						*best_clock = clock;
						found = true;
					}
				}
			}
		}
	}

	return found;
}
733 | 733 | ||
734 | bool intel_crtc_active(struct drm_crtc *crtc) | 734 | bool intel_crtc_active(struct drm_crtc *crtc) |
735 | { | 735 | { |
736 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 736 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
737 | 737 | ||
738 | /* Be paranoid as we can arrive here with only partial | 738 | /* Be paranoid as we can arrive here with only partial |
739 | * state retrieved from the hardware during setup. | 739 | * state retrieved from the hardware during setup. |
740 | * | 740 | * |
741 | * We can ditch the adjusted_mode.crtc_clock check as soon | 741 | * We can ditch the adjusted_mode.crtc_clock check as soon |
742 | * as Haswell has gained clock readout/fastboot support. | 742 | * as Haswell has gained clock readout/fastboot support. |
743 | * | 743 | * |
744 | * We can ditch the crtc->primary->fb check as soon as we can | 744 | * We can ditch the crtc->primary->fb check as soon as we can |
745 | * properly reconstruct framebuffers. | 745 | * properly reconstruct framebuffers. |
746 | */ | 746 | */ |
747 | return intel_crtc->active && crtc->primary->fb && | 747 | return intel_crtc->active && crtc->primary->fb && |
748 | intel_crtc->config.adjusted_mode.crtc_clock; | 748 | intel_crtc->config.adjusted_mode.crtc_clock; |
749 | } | 749 | } |
750 | 750 | ||
751 | enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, | 751 | enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, |
752 | enum pipe pipe) | 752 | enum pipe pipe) |
753 | { | 753 | { |
754 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 754 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
755 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 755 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
756 | 756 | ||
757 | return intel_crtc->config.cpu_transcoder; | 757 | return intel_crtc->config.cpu_transcoder; |
758 | } | 758 | } |
759 | 759 | ||
760 | static void g4x_wait_for_vblank(struct drm_device *dev, int pipe) | 760 | static void g4x_wait_for_vblank(struct drm_device *dev, int pipe) |
761 | { | 761 | { |
762 | struct drm_i915_private *dev_priv = dev->dev_private; | 762 | struct drm_i915_private *dev_priv = dev->dev_private; |
763 | u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe); | 763 | u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe); |
764 | 764 | ||
765 | frame = I915_READ(frame_reg); | 765 | frame = I915_READ(frame_reg); |
766 | 766 | ||
767 | if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50)) | 767 | if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50)) |
768 | DRM_DEBUG_KMS("vblank wait timed out\n"); | 768 | DRM_DEBUG_KMS("vblank wait timed out\n"); |
769 | } | 769 | } |
770 | 770 | ||
771 | /** | 771 | /** |
772 | * intel_wait_for_vblank - wait for vblank on a given pipe | 772 | * intel_wait_for_vblank - wait for vblank on a given pipe |
773 | * @dev: drm device | 773 | * @dev: drm device |
774 | * @pipe: pipe to wait for | 774 | * @pipe: pipe to wait for |
775 | * | 775 | * |
776 | * Wait for vblank to occur on a given pipe. Needed for various bits of | 776 | * Wait for vblank to occur on a given pipe. Needed for various bits of |
777 | * mode setting code. | 777 | * mode setting code. |
778 | */ | 778 | */ |
779 | void intel_wait_for_vblank(struct drm_device *dev, int pipe) | 779 | void intel_wait_for_vblank(struct drm_device *dev, int pipe) |
780 | { | 780 | { |
781 | struct drm_i915_private *dev_priv = dev->dev_private; | 781 | struct drm_i915_private *dev_priv = dev->dev_private; |
782 | int pipestat_reg = PIPESTAT(pipe); | 782 | int pipestat_reg = PIPESTAT(pipe); |
783 | 783 | ||
784 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { | 784 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { |
785 | g4x_wait_for_vblank(dev, pipe); | 785 | g4x_wait_for_vblank(dev, pipe); |
786 | return; | 786 | return; |
787 | } | 787 | } |
788 | 788 | ||
789 | /* Clear existing vblank status. Note this will clear any other | 789 | /* Clear existing vblank status. Note this will clear any other |
790 | * sticky status fields as well. | 790 | * sticky status fields as well. |
791 | * | 791 | * |
792 | * This races with i915_driver_irq_handler() with the result | 792 | * This races with i915_driver_irq_handler() with the result |
793 | * that either function could miss a vblank event. Here it is not | 793 | * that either function could miss a vblank event. Here it is not |
794 | * fatal, as we will either wait upon the next vblank interrupt or | 794 | * fatal, as we will either wait upon the next vblank interrupt or |
795 | * timeout. Generally speaking intel_wait_for_vblank() is only | 795 | * timeout. Generally speaking intel_wait_for_vblank() is only |
796 | * called during modeset at which time the GPU should be idle and | 796 | * called during modeset at which time the GPU should be idle and |
797 | * should *not* be performing page flips and thus not waiting on | 797 | * should *not* be performing page flips and thus not waiting on |
798 | * vblanks... | 798 | * vblanks... |
799 | * Currently, the result of us stealing a vblank from the irq | 799 | * Currently, the result of us stealing a vblank from the irq |
800 | * handler is that a single frame will be skipped during swapbuffers. | 800 | * handler is that a single frame will be skipped during swapbuffers. |
801 | */ | 801 | */ |
802 | I915_WRITE(pipestat_reg, | 802 | I915_WRITE(pipestat_reg, |
803 | I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); | 803 | I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); |
804 | 804 | ||
805 | /* Wait for vblank interrupt bit to set */ | 805 | /* Wait for vblank interrupt bit to set */ |
806 | if (wait_for(I915_READ(pipestat_reg) & | 806 | if (wait_for(I915_READ(pipestat_reg) & |
807 | PIPE_VBLANK_INTERRUPT_STATUS, | 807 | PIPE_VBLANK_INTERRUPT_STATUS, |
808 | 50)) | 808 | 50)) |
809 | DRM_DEBUG_KMS("vblank wait timed out\n"); | 809 | DRM_DEBUG_KMS("vblank wait timed out\n"); |
810 | } | 810 | } |
811 | 811 | ||
812 | static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) | 812 | static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) |
813 | { | 813 | { |
814 | struct drm_i915_private *dev_priv = dev->dev_private; | 814 | struct drm_i915_private *dev_priv = dev->dev_private; |
815 | u32 reg = PIPEDSL(pipe); | 815 | u32 reg = PIPEDSL(pipe); |
816 | u32 line1, line2; | 816 | u32 line1, line2; |
817 | u32 line_mask; | 817 | u32 line_mask; |
818 | 818 | ||
819 | if (IS_GEN2(dev)) | 819 | if (IS_GEN2(dev)) |
820 | line_mask = DSL_LINEMASK_GEN2; | 820 | line_mask = DSL_LINEMASK_GEN2; |
821 | else | 821 | else |
822 | line_mask = DSL_LINEMASK_GEN3; | 822 | line_mask = DSL_LINEMASK_GEN3; |
823 | 823 | ||
824 | line1 = I915_READ(reg) & line_mask; | 824 | line1 = I915_READ(reg) & line_mask; |
825 | mdelay(5); | 825 | mdelay(5); |
826 | line2 = I915_READ(reg) & line_mask; | 826 | line2 = I915_READ(reg) & line_mask; |
827 | 827 | ||
828 | return line1 == line2; | 828 | return line1 == line2; |
829 | } | 829 | } |
830 | 830 | ||
831 | /* | 831 | /* |
832 | * intel_wait_for_pipe_off - wait for pipe to turn off | 832 | * intel_wait_for_pipe_off - wait for pipe to turn off |
833 | * @dev: drm device | 833 | * @dev: drm device |
834 | * @pipe: pipe to wait for | 834 | * @pipe: pipe to wait for |
835 | * | 835 | * |
836 | * After disabling a pipe, we can't wait for vblank in the usual way, | 836 | * After disabling a pipe, we can't wait for vblank in the usual way, |
837 | * spinning on the vblank interrupt status bit, since we won't actually | 837 | * spinning on the vblank interrupt status bit, since we won't actually |
838 | * see an interrupt when the pipe is disabled. | 838 | * see an interrupt when the pipe is disabled. |
839 | * | 839 | * |
840 | * On Gen4 and above: | 840 | * On Gen4 and above: |
841 | * wait for the pipe register state bit to turn off | 841 | * wait for the pipe register state bit to turn off |
842 | * | 842 | * |
843 | * Otherwise: | 843 | * Otherwise: |
844 | * wait for the display line value to settle (it usually | 844 | * wait for the display line value to settle (it usually |
845 | * ends up stopping at the start of the next frame). | 845 | * ends up stopping at the start of the next frame). |
846 | * | 846 | * |
847 | */ | 847 | */ |
848 | void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) | 848 | void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) |
849 | { | 849 | { |
850 | struct drm_i915_private *dev_priv = dev->dev_private; | 850 | struct drm_i915_private *dev_priv = dev->dev_private; |
851 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | 851 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
852 | pipe); | 852 | pipe); |
853 | 853 | ||
854 | if (INTEL_INFO(dev)->gen >= 4) { | 854 | if (INTEL_INFO(dev)->gen >= 4) { |
855 | int reg = PIPECONF(cpu_transcoder); | 855 | int reg = PIPECONF(cpu_transcoder); |
856 | 856 | ||
857 | /* Wait for the Pipe State to go off */ | 857 | /* Wait for the Pipe State to go off */ |
858 | if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, | 858 | if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, |
859 | 100)) | 859 | 100)) |
860 | WARN(1, "pipe_off wait timed out\n"); | 860 | WARN(1, "pipe_off wait timed out\n"); |
861 | } else { | 861 | } else { |
862 | /* Wait for the display line to settle */ | 862 | /* Wait for the display line to settle */ |
863 | if (wait_for(pipe_dsl_stopped(dev, pipe), 100)) | 863 | if (wait_for(pipe_dsl_stopped(dev, pipe), 100)) |
864 | WARN(1, "pipe_off wait timed out\n"); | 864 | WARN(1, "pipe_off wait timed out\n"); |
865 | } | 865 | } |
866 | } | 866 | } |
867 | 867 | ||
868 | /* | 868 | /* |
869 | * ibx_digital_port_connected - is the specified port connected? | 869 | * ibx_digital_port_connected - is the specified port connected? |
870 | * @dev_priv: i915 private structure | 870 | * @dev_priv: i915 private structure |
871 | * @port: the port to test | 871 | * @port: the port to test |
872 | * | 872 | * |
873 | * Returns true if @port is connected, false otherwise. | 873 | * Returns true if @port is connected, false otherwise. |
874 | */ | 874 | */ |
875 | bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, | 875 | bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, |
876 | struct intel_digital_port *port) | 876 | struct intel_digital_port *port) |
877 | { | 877 | { |
878 | u32 bit; | 878 | u32 bit; |
879 | 879 | ||
880 | if (HAS_PCH_IBX(dev_priv->dev)) { | 880 | if (HAS_PCH_IBX(dev_priv->dev)) { |
881 | switch(port->port) { | 881 | switch(port->port) { |
882 | case PORT_B: | 882 | case PORT_B: |
883 | bit = SDE_PORTB_HOTPLUG; | 883 | bit = SDE_PORTB_HOTPLUG; |
884 | break; | 884 | break; |
885 | case PORT_C: | 885 | case PORT_C: |
886 | bit = SDE_PORTC_HOTPLUG; | 886 | bit = SDE_PORTC_HOTPLUG; |
887 | break; | 887 | break; |
888 | case PORT_D: | 888 | case PORT_D: |
889 | bit = SDE_PORTD_HOTPLUG; | 889 | bit = SDE_PORTD_HOTPLUG; |
890 | break; | 890 | break; |
891 | default: | 891 | default: |
892 | return true; | 892 | return true; |
893 | } | 893 | } |
894 | } else { | 894 | } else { |
895 | switch(port->port) { | 895 | switch(port->port) { |
896 | case PORT_B: | 896 | case PORT_B: |
897 | bit = SDE_PORTB_HOTPLUG_CPT; | 897 | bit = SDE_PORTB_HOTPLUG_CPT; |
898 | break; | 898 | break; |
899 | case PORT_C: | 899 | case PORT_C: |
900 | bit = SDE_PORTC_HOTPLUG_CPT; | 900 | bit = SDE_PORTC_HOTPLUG_CPT; |
901 | break; | 901 | break; |
902 | case PORT_D: | 902 | case PORT_D: |
903 | bit = SDE_PORTD_HOTPLUG_CPT; | 903 | bit = SDE_PORTD_HOTPLUG_CPT; |
904 | break; | 904 | break; |
905 | default: | 905 | default: |
906 | return true; | 906 | return true; |
907 | } | 907 | } |
908 | } | 908 | } |
909 | 909 | ||
910 | return I915_READ(SDEISR) & bit; | 910 | return I915_READ(SDEISR) & bit; |
911 | } | 911 | } |
912 | 912 | ||
/* Human-readable form of an enabled/disabled state for assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";

	return "off";
}
917 | 917 | ||
918 | /* Only for pre-ILK configs */ | 918 | /* Only for pre-ILK configs */ |
919 | void assert_pll(struct drm_i915_private *dev_priv, | 919 | void assert_pll(struct drm_i915_private *dev_priv, |
920 | enum pipe pipe, bool state) | 920 | enum pipe pipe, bool state) |
921 | { | 921 | { |
922 | int reg; | 922 | int reg; |
923 | u32 val; | 923 | u32 val; |
924 | bool cur_state; | 924 | bool cur_state; |
925 | 925 | ||
926 | reg = DPLL(pipe); | 926 | reg = DPLL(pipe); |
927 | val = I915_READ(reg); | 927 | val = I915_READ(reg); |
928 | cur_state = !!(val & DPLL_VCO_ENABLE); | 928 | cur_state = !!(val & DPLL_VCO_ENABLE); |
929 | WARN(cur_state != state, | 929 | WARN(cur_state != state, |
930 | "PLL state assertion failure (expected %s, current %s)\n", | 930 | "PLL state assertion failure (expected %s, current %s)\n", |
931 | state_string(state), state_string(cur_state)); | 931 | state_string(state), state_string(cur_state)); |
932 | } | 932 | } |
933 | 933 | ||
934 | /* XXX: the dsi pll is shared between MIPI DSI ports */ | 934 | /* XXX: the dsi pll is shared between MIPI DSI ports */ |
935 | static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) | 935 | static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) |
936 | { | 936 | { |
937 | u32 val; | 937 | u32 val; |
938 | bool cur_state; | 938 | bool cur_state; |
939 | 939 | ||
940 | mutex_lock(&dev_priv->dpio_lock); | 940 | mutex_lock(&dev_priv->dpio_lock); |
941 | val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); | 941 | val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); |
942 | mutex_unlock(&dev_priv->dpio_lock); | 942 | mutex_unlock(&dev_priv->dpio_lock); |
943 | 943 | ||
944 | cur_state = val & DSI_PLL_VCO_EN; | 944 | cur_state = val & DSI_PLL_VCO_EN; |
945 | WARN(cur_state != state, | 945 | WARN(cur_state != state, |
946 | "DSI PLL state assertion failure (expected %s, current %s)\n", | 946 | "DSI PLL state assertion failure (expected %s, current %s)\n", |
947 | state_string(state), state_string(cur_state)); | 947 | state_string(state), state_string(cur_state)); |
948 | } | 948 | } |
949 | #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true) | 949 | #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true) |
950 | #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false) | 950 | #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false) |
951 | 951 | ||
952 | struct intel_shared_dpll * | 952 | struct intel_shared_dpll * |
953 | intel_crtc_to_shared_dpll(struct intel_crtc *crtc) | 953 | intel_crtc_to_shared_dpll(struct intel_crtc *crtc) |
954 | { | 954 | { |
955 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | 955 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
956 | 956 | ||
957 | if (crtc->config.shared_dpll < 0) | 957 | if (crtc->config.shared_dpll < 0) |
958 | return NULL; | 958 | return NULL; |
959 | 959 | ||
960 | return &dev_priv->shared_dplls[crtc->config.shared_dpll]; | 960 | return &dev_priv->shared_dplls[crtc->config.shared_dpll]; |
961 | } | 961 | } |
962 | 962 | ||
963 | /* For ILK+ */ | 963 | /* For ILK+ */ |
964 | void assert_shared_dpll(struct drm_i915_private *dev_priv, | 964 | void assert_shared_dpll(struct drm_i915_private *dev_priv, |
965 | struct intel_shared_dpll *pll, | 965 | struct intel_shared_dpll *pll, |
966 | bool state) | 966 | bool state) |
967 | { | 967 | { |
968 | bool cur_state; | 968 | bool cur_state; |
969 | struct intel_dpll_hw_state hw_state; | 969 | struct intel_dpll_hw_state hw_state; |
970 | 970 | ||
971 | if (HAS_PCH_LPT(dev_priv->dev)) { | 971 | if (HAS_PCH_LPT(dev_priv->dev)) { |
972 | DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n"); | 972 | DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n"); |
973 | return; | 973 | return; |
974 | } | 974 | } |
975 | 975 | ||
976 | if (WARN (!pll, | 976 | if (WARN (!pll, |
977 | "asserting DPLL %s with no DPLL\n", state_string(state))) | 977 | "asserting DPLL %s with no DPLL\n", state_string(state))) |
978 | return; | 978 | return; |
979 | 979 | ||
980 | cur_state = pll->get_hw_state(dev_priv, pll, &hw_state); | 980 | cur_state = pll->get_hw_state(dev_priv, pll, &hw_state); |
981 | WARN(cur_state != state, | 981 | WARN(cur_state != state, |
982 | "%s assertion failure (expected %s, current %s)\n", | 982 | "%s assertion failure (expected %s, current %s)\n", |
983 | pll->name, state_string(state), state_string(cur_state)); | 983 | pll->name, state_string(state), state_string(cur_state)); |
984 | } | 984 | } |
985 | 985 | ||
986 | static void assert_fdi_tx(struct drm_i915_private *dev_priv, | 986 | static void assert_fdi_tx(struct drm_i915_private *dev_priv, |
987 | enum pipe pipe, bool state) | 987 | enum pipe pipe, bool state) |
988 | { | 988 | { |
989 | int reg; | 989 | int reg; |
990 | u32 val; | 990 | u32 val; |
991 | bool cur_state; | 991 | bool cur_state; |
992 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | 992 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
993 | pipe); | 993 | pipe); |
994 | 994 | ||
995 | if (HAS_DDI(dev_priv->dev)) { | 995 | if (HAS_DDI(dev_priv->dev)) { |
996 | /* DDI does not have a specific FDI_TX register */ | 996 | /* DDI does not have a specific FDI_TX register */ |
997 | reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); | 997 | reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); |
998 | val = I915_READ(reg); | 998 | val = I915_READ(reg); |
999 | cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); | 999 | cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); |
1000 | } else { | 1000 | } else { |
1001 | reg = FDI_TX_CTL(pipe); | 1001 | reg = FDI_TX_CTL(pipe); |
1002 | val = I915_READ(reg); | 1002 | val = I915_READ(reg); |
1003 | cur_state = !!(val & FDI_TX_ENABLE); | 1003 | cur_state = !!(val & FDI_TX_ENABLE); |
1004 | } | 1004 | } |
1005 | WARN(cur_state != state, | 1005 | WARN(cur_state != state, |
1006 | "FDI TX state assertion failure (expected %s, current %s)\n", | 1006 | "FDI TX state assertion failure (expected %s, current %s)\n", |
1007 | state_string(state), state_string(cur_state)); | 1007 | state_string(state), state_string(cur_state)); |
1008 | } | 1008 | } |
1009 | #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) | 1009 | #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) |
1010 | #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) | 1010 | #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) |
1011 | 1011 | ||
1012 | static void assert_fdi_rx(struct drm_i915_private *dev_priv, | 1012 | static void assert_fdi_rx(struct drm_i915_private *dev_priv, |
1013 | enum pipe pipe, bool state) | 1013 | enum pipe pipe, bool state) |
1014 | { | 1014 | { |
1015 | int reg; | 1015 | int reg; |
1016 | u32 val; | 1016 | u32 val; |
1017 | bool cur_state; | 1017 | bool cur_state; |
1018 | 1018 | ||
1019 | reg = FDI_RX_CTL(pipe); | 1019 | reg = FDI_RX_CTL(pipe); |
1020 | val = I915_READ(reg); | 1020 | val = I915_READ(reg); |
1021 | cur_state = !!(val & FDI_RX_ENABLE); | 1021 | cur_state = !!(val & FDI_RX_ENABLE); |
1022 | WARN(cur_state != state, | 1022 | WARN(cur_state != state, |
1023 | "FDI RX state assertion failure (expected %s, current %s)\n", | 1023 | "FDI RX state assertion failure (expected %s, current %s)\n", |
1024 | state_string(state), state_string(cur_state)); | 1024 | state_string(state), state_string(cur_state)); |
1025 | } | 1025 | } |
1026 | #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) | 1026 | #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) |
1027 | #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) | 1027 | #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) |
1028 | 1028 | ||
1029 | static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, | 1029 | static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, |
1030 | enum pipe pipe) | 1030 | enum pipe pipe) |
1031 | { | 1031 | { |
1032 | int reg; | 1032 | int reg; |
1033 | u32 val; | 1033 | u32 val; |
1034 | 1034 | ||
1035 | /* ILK FDI PLL is always enabled */ | 1035 | /* ILK FDI PLL is always enabled */ |
1036 | if (INTEL_INFO(dev_priv->dev)->gen == 5) | 1036 | if (INTEL_INFO(dev_priv->dev)->gen == 5) |
1037 | return; | 1037 | return; |
1038 | 1038 | ||
1039 | /* On Haswell, DDI ports are responsible for the FDI PLL setup */ | 1039 | /* On Haswell, DDI ports are responsible for the FDI PLL setup */ |
1040 | if (HAS_DDI(dev_priv->dev)) | 1040 | if (HAS_DDI(dev_priv->dev)) |
1041 | return; | 1041 | return; |
1042 | 1042 | ||
1043 | reg = FDI_TX_CTL(pipe); | 1043 | reg = FDI_TX_CTL(pipe); |
1044 | val = I915_READ(reg); | 1044 | val = I915_READ(reg); |
1045 | WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); | 1045 | WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); |
1046 | } | 1046 | } |
1047 | 1047 | ||
1048 | void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, | 1048 | void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, |
1049 | enum pipe pipe, bool state) | 1049 | enum pipe pipe, bool state) |
1050 | { | 1050 | { |
1051 | int reg; | 1051 | int reg; |
1052 | u32 val; | 1052 | u32 val; |
1053 | bool cur_state; | 1053 | bool cur_state; |
1054 | 1054 | ||
1055 | reg = FDI_RX_CTL(pipe); | 1055 | reg = FDI_RX_CTL(pipe); |
1056 | val = I915_READ(reg); | 1056 | val = I915_READ(reg); |
1057 | cur_state = !!(val & FDI_RX_PLL_ENABLE); | 1057 | cur_state = !!(val & FDI_RX_PLL_ENABLE); |
1058 | WARN(cur_state != state, | 1058 | WARN(cur_state != state, |
1059 | "FDI RX PLL assertion failure (expected %s, current %s)\n", | 1059 | "FDI RX PLL assertion failure (expected %s, current %s)\n", |
1060 | state_string(state), state_string(cur_state)); | 1060 | state_string(state), state_string(cur_state)); |
1061 | } | 1061 | } |
1062 | 1062 | ||
/*
 * Assert (via WARN) that the panel power sequencer registers driving
 * @pipe are writable.  "Locked" here means the panel is powered on and
 * the unlock key is not present in the panel power control register;
 * writes to protected registers (e.g. the DPLL) would then be ignored.
 */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* Register locations differ between PCH-split and gmch platforms. */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	/* Unlocked if the panel is off, or the unlock key is written. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	/* Work out which pipe the LVDS panel is actually attached to. */
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	/* Only complain if the locked panel sits on the pipe in question. */
	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1091 | 1091 | ||
/*
 * Assert (via WARN) that the hardware cursor on @pipe matches the
 * expected enabled/disabled @state.  The cursor-enable bit lives in
 * different registers/fields depending on the hardware generation.
 */
static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	if (IS_845G(dev) || IS_I865G(dev))
		/* 845/865: single cursor register with a plain enable bit. */
		cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
	else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
		/* Gen2-6 and VLV: per-pipe control, mode field != 0 means on. */
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
	else
		/* IVB+ use a different per-pipe register layout. */
		cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;

	WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1111 | 1111 | ||
/*
 * Assert (via WARN) that @pipe matches the expected enabled/disabled
 * @state.  Handles two special cases: the pipe A force-on quirk, and
 * powered-down transcoders, whose registers cannot be read and are
 * therefore reported as disabled.  Exported via intel_drv.h.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	if (!intel_display_power_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
		/* Power domain off: treat the pipe as disabled, don't touch
		 * the (unreadable) register. */
		cur_state = false;
	} else {
		reg = PIPECONF(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & PIPECONF_ENABLE);
	}

	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
1138 | 1138 | ||
1139 | static void assert_plane(struct drm_i915_private *dev_priv, | 1139 | static void assert_plane(struct drm_i915_private *dev_priv, |
1140 | enum plane plane, bool state) | 1140 | enum plane plane, bool state) |
1141 | { | 1141 | { |
1142 | int reg; | 1142 | int reg; |
1143 | u32 val; | 1143 | u32 val; |
1144 | bool cur_state; | 1144 | bool cur_state; |
1145 | 1145 | ||
1146 | reg = DSPCNTR(plane); | 1146 | reg = DSPCNTR(plane); |
1147 | val = I915_READ(reg); | 1147 | val = I915_READ(reg); |
1148 | cur_state = !!(val & DISPLAY_PLANE_ENABLE); | 1148 | cur_state = !!(val & DISPLAY_PLANE_ENABLE); |
1149 | WARN(cur_state != state, | 1149 | WARN(cur_state != state, |
1150 | "plane %c assertion failure (expected %s, current %s)\n", | 1150 | "plane %c assertion failure (expected %s, current %s)\n", |
1151 | plane_name(plane), state_string(state), state_string(cur_state)); | 1151 | plane_name(plane), state_string(state), state_string(cur_state)); |
1152 | } | 1152 | } |
1153 | 1153 | ||
1154 | #define assert_plane_enabled(d, p) assert_plane(d, p, true) | 1154 | #define assert_plane_enabled(d, p) assert_plane(d, p, true) |
1155 | #define assert_plane_disabled(d, p) assert_plane(d, p, false) | 1155 | #define assert_plane_disabled(d, p) assert_plane(d, p, false) |
1156 | 1156 | ||
/*
 * Assert (via WARN) that no primary plane is scanning out from @pipe.
 * On gen4+ planes are hardwired to pipes, so only one register needs
 * checking; on older hardware every plane's pipe-select field must be
 * inspected.
 */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(i) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		/* A plane only matters if it is enabled AND selects @pipe. */
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}
1186 | 1186 | ||
/*
 * Assert (via WARN) that all sprite planes on @pipe are disabled.
 * The sprite control register and enable bit differ per platform:
 * VLV has multiple sprites per pipe (SPCNTR), gen7+ uses SPRCTL and
 * gen5/6 use DVSCNTR.  Pre-gen5 hardware is not checked here.
 */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, sprite;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		/* VLV: iterate every sprite attached to this pipe. */
		for_each_sprite(pipe, sprite) {
			reg = SPCNTR(pipe, sprite);
			val = I915_READ(reg);
			WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		reg = SPRCTL(pipe);
		val = I915_READ(reg);
		WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		reg = DVSCNTR(pipe);
		val = I915_READ(reg);
		WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}
1216 | 1216 | ||
/*
 * Assert (via WARN) that at least one PCH reference clock source (SSC,
 * non-spread or superspread) is enabled in PCH_DREF_CONTROL.  Only
 * meaningful on IBX/CPT PCHs, which the first WARN_ON enforces.
 */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
1229 | 1229 | ||
1230 | static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, | 1230 | static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, |
1231 | enum pipe pipe) | 1231 | enum pipe pipe) |
1232 | { | 1232 | { |
1233 | int reg; | 1233 | int reg; |
1234 | u32 val; | 1234 | u32 val; |
1235 | bool enabled; | 1235 | bool enabled; |
1236 | 1236 | ||
1237 | reg = PCH_TRANSCONF(pipe); | 1237 | reg = PCH_TRANSCONF(pipe); |
1238 | val = I915_READ(reg); | 1238 | val = I915_READ(reg); |
1239 | enabled = !!(val & TRANS_ENABLE); | 1239 | enabled = !!(val & TRANS_ENABLE); |
1240 | WARN(enabled, | 1240 | WARN(enabled, |
1241 | "transcoder assertion failed, should be off on pipe %c but is still active\n", | 1241 | "transcoder assertion failed, should be off on pipe %c but is still active\n", |
1242 | pipe_name(pipe)); | 1242 | pipe_name(pipe)); |
1243 | } | 1243 | } |
1244 | 1244 | ||
1245 | static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, | 1245 | static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, |
1246 | enum pipe pipe, u32 port_sel, u32 val) | 1246 | enum pipe pipe, u32 port_sel, u32 val) |
1247 | { | 1247 | { |
1248 | if ((val & DP_PORT_EN) == 0) | 1248 | if ((val & DP_PORT_EN) == 0) |
1249 | return false; | 1249 | return false; |
1250 | 1250 | ||
1251 | if (HAS_PCH_CPT(dev_priv->dev)) { | 1251 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1252 | u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); | 1252 | u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); |
1253 | u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); | 1253 | u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); |
1254 | if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) | 1254 | if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) |
1255 | return false; | 1255 | return false; |
1256 | } else { | 1256 | } else { |
1257 | if ((val & DP_PIPE_MASK) != (pipe << 30)) | 1257 | if ((val & DP_PIPE_MASK) != (pipe << 30)) |
1258 | return false; | 1258 | return false; |
1259 | } | 1259 | } |
1260 | return true; | 1260 | return true; |
1261 | } | 1261 | } |
1262 | 1262 | ||
1263 | static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, | 1263 | static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, |
1264 | enum pipe pipe, u32 val) | 1264 | enum pipe pipe, u32 val) |
1265 | { | 1265 | { |
1266 | if ((val & SDVO_ENABLE) == 0) | 1266 | if ((val & SDVO_ENABLE) == 0) |
1267 | return false; | 1267 | return false; |
1268 | 1268 | ||
1269 | if (HAS_PCH_CPT(dev_priv->dev)) { | 1269 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1270 | if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe)) | 1270 | if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe)) |
1271 | return false; | 1271 | return false; |
1272 | } else { | 1272 | } else { |
1273 | if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe)) | 1273 | if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe)) |
1274 | return false; | 1274 | return false; |
1275 | } | 1275 | } |
1276 | return true; | 1276 | return true; |
1277 | } | 1277 | } |
1278 | 1278 | ||
1279 | static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, | 1279 | static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, |
1280 | enum pipe pipe, u32 val) | 1280 | enum pipe pipe, u32 val) |
1281 | { | 1281 | { |
1282 | if ((val & LVDS_PORT_EN) == 0) | 1282 | if ((val & LVDS_PORT_EN) == 0) |
1283 | return false; | 1283 | return false; |
1284 | 1284 | ||
1285 | if (HAS_PCH_CPT(dev_priv->dev)) { | 1285 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1286 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) | 1286 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) |
1287 | return false; | 1287 | return false; |
1288 | } else { | 1288 | } else { |
1289 | if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) | 1289 | if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) |
1290 | return false; | 1290 | return false; |
1291 | } | 1291 | } |
1292 | return true; | 1292 | return true; |
1293 | } | 1293 | } |
1294 | 1294 | ||
1295 | static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, | 1295 | static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, |
1296 | enum pipe pipe, u32 val) | 1296 | enum pipe pipe, u32 val) |
1297 | { | 1297 | { |
1298 | if ((val & ADPA_DAC_ENABLE) == 0) | 1298 | if ((val & ADPA_DAC_ENABLE) == 0) |
1299 | return false; | 1299 | return false; |
1300 | if (HAS_PCH_CPT(dev_priv->dev)) { | 1300 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1301 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) | 1301 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) |
1302 | return false; | 1302 | return false; |
1303 | } else { | 1303 | } else { |
1304 | if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) | 1304 | if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) |
1305 | return false; | 1305 | return false; |
1306 | } | 1306 | } |
1307 | return true; | 1307 | return true; |
1308 | } | 1308 | } |
1309 | 1309 | ||
/*
 * Assert (via WARN) that the PCH DP port at register @reg (identified
 * by @port_sel on CPT) is not driving @pipe.  Also checks the IBX
 * erratum case where a disabled port must not be left pointing at
 * transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}
1322 | 1322 | ||
/*
 * Assert (via WARN) that the PCH HDMI port at register @reg is not
 * driving @pipe.  Like the DP variant, also flags the IBX case of a
 * disabled port still selecting pipe/transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}
1335 | 1335 | ||
/*
 * Assert (via WARN) that no PCH port - DP B/C/D, VGA (ADPA), LVDS or
 * HDMI B/C/D - is still driving @pipe.  Used before shutting down a
 * PCH transcoder.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* The three PCH DP ports, each with its CPT port-select value. */
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	/* Analog VGA output. */
	reg = PCH_ADPA;
	val = I915_READ(reg);
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	/* LVDS panel output. */
	reg = PCH_LVDS;
	val = I915_READ(reg);
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	/* The three PCH HDMI ports. */
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
1362 | 1362 | ||
/*
 * One-time DPIO setup: record the IOSF sideband port used to reach the
 * PHY.  Only Valleyview has DPIO; all other platforms return early.
 */
static void intel_init_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_VALLEYVIEW(dev))
		return;

	DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
}
1372 | 1372 | ||
/*
 * Reset the VLV DPIO PHY: enable the CRI clock source on pipe B's DPLL
 * and then de-assert the common-lane reset.  Must only run with the
 * PLLs disabled (init / resume-from-S3) - see the note below.  No-op on
 * non-Valleyview hardware.
 */
static void intel_reset_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_VALLEYVIEW(dev))
		return;

	/*
	 * Enable the CRI clock source so we can get at the display and the
	 * reference clock for VGA hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV |
		   DPLL_INTEGRATED_CRI_CLK_VLV);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all be set
	 *       to 0.
	 *
	 * This should only be done on init and resume from S3 with both
	 * PLLs disabled, or we risk losing DPIO and PLL synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1400 | 1400 | ||
/*
 * Enable the Valleyview DPLL for @crtc using the precomputed value in
 * crtc->config.dpll_hw_state.  The pipe must already be disabled.
 * NOTE(review): the write/udelay/re-write sequence below follows the
 * hardware enable procedure - do not reorder or coalesce the writes.
 */
static void vlv_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config.dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	/* The DPLL reports lock via DPLL_LOCK_VLV; 1ms timeout. */
	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1438 | 1438 | ||
/*
 * Enable the pre-ILK (gen2-4) DPLL for @crtc using the precomputed
 * value in crtc->config.dpll_hw_state.  The pipe must already be
 * disabled.  NOTE(review): the repeated write/udelay warmup sequence is
 * part of the hardware enable procedure - do not reorder or coalesce.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config.dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Gen4+ has a separate register for the pixel multiplier. */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config.dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1484 | 1484 | ||
1485 | /** | 1485 | /** |
1486 | * i9xx_disable_pll - disable a PLL | 1486 | * i9xx_disable_pll - disable a PLL |
1487 | * @dev_priv: i915 private structure | 1487 | * @dev_priv: i915 private structure |
1488 | * @pipe: pipe PLL to disable | 1488 | * @pipe: pipe PLL to disable |
1489 | * | 1489 | * |
1490 | * Disable the PLL for @pipe, making sure the pipe is off first. | 1490 | * Disable the PLL for @pipe, making sure the pipe is off first. |
1491 | * | 1491 | * |
1492 | * Note! This is for pre-ILK only. | 1492 | * Note! This is for pre-ILK only. |
1493 | */ | 1493 | */ |
1494 | static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | 1494 | static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) |
1495 | { | 1495 | { |
1496 | /* Don't disable pipe A or pipe A PLLs if needed */ | 1496 | /* Don't disable pipe A or pipe A PLLs if needed */ |
1497 | if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | 1497 | if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) |
1498 | return; | 1498 | return; |
1499 | 1499 | ||
1500 | /* Make sure the pipe isn't still relying on us */ | 1500 | /* Make sure the pipe isn't still relying on us */ |
1501 | assert_pipe_disabled(dev_priv, pipe); | 1501 | assert_pipe_disabled(dev_priv, pipe); |
1502 | 1502 | ||
1503 | I915_WRITE(DPLL(pipe), 0); | 1503 | I915_WRITE(DPLL(pipe), 0); |
1504 | POSTING_READ(DPLL(pipe)); | 1504 | POSTING_READ(DPLL(pipe)); |
1505 | } | 1505 | } |
1506 | 1506 | ||
/*
 * Disable the Valleyview DPLL for @pipe.  The pipe must already be
 * disabled.  Pipe B keeps its CRI / reference clock bits set so that
 * VGA hotplug and manual detection keep working.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val = 0;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/*
	 * Leave integrated clock source and reference clock enabled for pipe B.
	 * The latter is needed for VGA hotplug / manual detection.
	 */
	if (pipe == PIPE_B)
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1523 | 1523 | ||
1524 | void vlv_wait_port_ready(struct drm_i915_private *dev_priv, | 1524 | void vlv_wait_port_ready(struct drm_i915_private *dev_priv, |
1525 | struct intel_digital_port *dport) | 1525 | struct intel_digital_port *dport) |
1526 | { | 1526 | { |
1527 | u32 port_mask; | 1527 | u32 port_mask; |
1528 | 1528 | ||
1529 | switch (dport->port) { | 1529 | switch (dport->port) { |
1530 | case PORT_B: | 1530 | case PORT_B: |
1531 | port_mask = DPLL_PORTB_READY_MASK; | 1531 | port_mask = DPLL_PORTB_READY_MASK; |
1532 | break; | 1532 | break; |
1533 | case PORT_C: | 1533 | case PORT_C: |
1534 | port_mask = DPLL_PORTC_READY_MASK; | 1534 | port_mask = DPLL_PORTC_READY_MASK; |
1535 | break; | 1535 | break; |
1536 | default: | 1536 | default: |
1537 | BUG(); | 1537 | BUG(); |
1538 | } | 1538 | } |
1539 | 1539 | ||
1540 | if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000)) | 1540 | if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000)) |
1541 | WARN(1, "timed out waiting for port %c ready: 0x%08x\n", | 1541 | WARN(1, "timed out waiting for port %c ready: 0x%08x\n", |
1542 | port_name(dport->port), I915_READ(DPLL(0))); | 1542 | port_name(dport->port), I915_READ(DPLL(0))); |
1543 | } | 1543 | } |
1544 | 1544 | ||
/**
 * ironlake_enable_shared_dpll - enable a shared (PCH) DPLL
 * @crtc: crtc whose assigned shared DPLL should be enabled
 *
 * Take an active reference on the shared DPLL used by @crtc and turn the
 * hardware PLL on when this is its first active user.  The PCH PLL needs
 * to be enabled before the PCH transcoder, since it drives the transcoder
 * clock.
 */
static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	/* PCH PLLs only available on ILK, SNB and IVB */
	BUG_ON(INTEL_INFO(dev)->gen < 5);
	if (WARN_ON(pll == NULL))
		return;

	/* A crtc must hold a refcount on the PLL before enabling it. */
	if (WARN_ON(pll->refcount == 0))
		return;

	DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	/* Another crtc already has the PLL running; just sanity-check it. */
	if (pll->active++) {
		WARN_ON(!pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		return;
	}
	WARN_ON(pll->on);

	DRM_DEBUG_KMS("enabling %s\n", pll->name);
	pll->enable(dev_priv, pll);
	pll->on = true;
}
1582 | 1582 | ||
/*
 * Drop an active reference on the shared DPLL used by @crtc and turn the
 * hardware PLL off once the last active user is gone.
 */
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	/* PCH only available on ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen < 5);
	if (WARN_ON(pll == NULL))
		return;

	if (WARN_ON(pll->refcount == 0))
		return;

	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	/* Unbalanced disable call; the PLL should already be off. */
	if (WARN_ON(pll->active == 0)) {
		assert_shared_dpll_disabled(dev_priv, pll);
		return;
	}

	assert_shared_dpll_enabled(dev_priv, pll);
	WARN_ON(!pll->on);
	/* Other crtcs still use the PLL; keep it running. */
	if (--pll->active)
		return;

	DRM_DEBUG_KMS("disabling %s\n", pll->name);
	pll->disable(dev_priv, pll);
	pll->on = false;
}
1615 | 1615 | ||
1616 | static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, | 1616 | static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, |
1617 | enum pipe pipe) | 1617 | enum pipe pipe) |
1618 | { | 1618 | { |
1619 | struct drm_device *dev = dev_priv->dev; | 1619 | struct drm_device *dev = dev_priv->dev; |
1620 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 1620 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
1621 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1621 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1622 | uint32_t reg, val, pipeconf_val; | 1622 | uint32_t reg, val, pipeconf_val; |
1623 | 1623 | ||
1624 | /* PCH only available on ILK+ */ | 1624 | /* PCH only available on ILK+ */ |
1625 | BUG_ON(INTEL_INFO(dev)->gen < 5); | 1625 | BUG_ON(INTEL_INFO(dev)->gen < 5); |
1626 | 1626 | ||
1627 | /* Make sure PCH DPLL is enabled */ | 1627 | /* Make sure PCH DPLL is enabled */ |
1628 | assert_shared_dpll_enabled(dev_priv, | 1628 | assert_shared_dpll_enabled(dev_priv, |
1629 | intel_crtc_to_shared_dpll(intel_crtc)); | 1629 | intel_crtc_to_shared_dpll(intel_crtc)); |
1630 | 1630 | ||
1631 | /* FDI must be feeding us bits for PCH ports */ | 1631 | /* FDI must be feeding us bits for PCH ports */ |
1632 | assert_fdi_tx_enabled(dev_priv, pipe); | 1632 | assert_fdi_tx_enabled(dev_priv, pipe); |
1633 | assert_fdi_rx_enabled(dev_priv, pipe); | 1633 | assert_fdi_rx_enabled(dev_priv, pipe); |
1634 | 1634 | ||
1635 | if (HAS_PCH_CPT(dev)) { | 1635 | if (HAS_PCH_CPT(dev)) { |
1636 | /* Workaround: Set the timing override bit before enabling the | 1636 | /* Workaround: Set the timing override bit before enabling the |
1637 | * pch transcoder. */ | 1637 | * pch transcoder. */ |
1638 | reg = TRANS_CHICKEN2(pipe); | 1638 | reg = TRANS_CHICKEN2(pipe); |
1639 | val = I915_READ(reg); | 1639 | val = I915_READ(reg); |
1640 | val |= TRANS_CHICKEN2_TIMING_OVERRIDE; | 1640 | val |= TRANS_CHICKEN2_TIMING_OVERRIDE; |
1641 | I915_WRITE(reg, val); | 1641 | I915_WRITE(reg, val); |
1642 | } | 1642 | } |
1643 | 1643 | ||
1644 | reg = PCH_TRANSCONF(pipe); | 1644 | reg = PCH_TRANSCONF(pipe); |
1645 | val = I915_READ(reg); | 1645 | val = I915_READ(reg); |
1646 | pipeconf_val = I915_READ(PIPECONF(pipe)); | 1646 | pipeconf_val = I915_READ(PIPECONF(pipe)); |
1647 | 1647 | ||
1648 | if (HAS_PCH_IBX(dev_priv->dev)) { | 1648 | if (HAS_PCH_IBX(dev_priv->dev)) { |
1649 | /* | 1649 | /* |
1650 | * make the BPC in transcoder be consistent with | 1650 | * make the BPC in transcoder be consistent with |
1651 | * that in pipeconf reg. | 1651 | * that in pipeconf reg. |
1652 | */ | 1652 | */ |
1653 | val &= ~PIPECONF_BPC_MASK; | 1653 | val &= ~PIPECONF_BPC_MASK; |
1654 | val |= pipeconf_val & PIPECONF_BPC_MASK; | 1654 | val |= pipeconf_val & PIPECONF_BPC_MASK; |
1655 | } | 1655 | } |
1656 | 1656 | ||
1657 | val &= ~TRANS_INTERLACE_MASK; | 1657 | val &= ~TRANS_INTERLACE_MASK; |
1658 | if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) | 1658 | if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) |
1659 | if (HAS_PCH_IBX(dev_priv->dev) && | 1659 | if (HAS_PCH_IBX(dev_priv->dev) && |
1660 | intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) | 1660 | intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) |
1661 | val |= TRANS_LEGACY_INTERLACED_ILK; | 1661 | val |= TRANS_LEGACY_INTERLACED_ILK; |
1662 | else | 1662 | else |
1663 | val |= TRANS_INTERLACED; | 1663 | val |= TRANS_INTERLACED; |
1664 | else | 1664 | else |
1665 | val |= TRANS_PROGRESSIVE; | 1665 | val |= TRANS_PROGRESSIVE; |
1666 | 1666 | ||
1667 | I915_WRITE(reg, val | TRANS_ENABLE); | 1667 | I915_WRITE(reg, val | TRANS_ENABLE); |
1668 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) | 1668 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) |
1669 | DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); | 1669 | DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); |
1670 | } | 1670 | } |
1671 | 1671 | ||
1672 | static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, | 1672 | static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, |
1673 | enum transcoder cpu_transcoder) | 1673 | enum transcoder cpu_transcoder) |
1674 | { | 1674 | { |
1675 | u32 val, pipeconf_val; | 1675 | u32 val, pipeconf_val; |
1676 | 1676 | ||
1677 | /* PCH only available on ILK+ */ | 1677 | /* PCH only available on ILK+ */ |
1678 | BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5); | 1678 | BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5); |
1679 | 1679 | ||
1680 | /* FDI must be feeding us bits for PCH ports */ | 1680 | /* FDI must be feeding us bits for PCH ports */ |
1681 | assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); | 1681 | assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); |
1682 | assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); | 1682 | assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); |
1683 | 1683 | ||
1684 | /* Workaround: set timing override bit. */ | 1684 | /* Workaround: set timing override bit. */ |
1685 | val = I915_READ(_TRANSA_CHICKEN2); | 1685 | val = I915_READ(_TRANSA_CHICKEN2); |
1686 | val |= TRANS_CHICKEN2_TIMING_OVERRIDE; | 1686 | val |= TRANS_CHICKEN2_TIMING_OVERRIDE; |
1687 | I915_WRITE(_TRANSA_CHICKEN2, val); | 1687 | I915_WRITE(_TRANSA_CHICKEN2, val); |
1688 | 1688 | ||
1689 | val = TRANS_ENABLE; | 1689 | val = TRANS_ENABLE; |
1690 | pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); | 1690 | pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); |
1691 | 1691 | ||
1692 | if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == | 1692 | if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == |
1693 | PIPECONF_INTERLACED_ILK) | 1693 | PIPECONF_INTERLACED_ILK) |
1694 | val |= TRANS_INTERLACED; | 1694 | val |= TRANS_INTERLACED; |
1695 | else | 1695 | else |
1696 | val |= TRANS_PROGRESSIVE; | 1696 | val |= TRANS_PROGRESSIVE; |
1697 | 1697 | ||
1698 | I915_WRITE(LPT_TRANSCONF, val); | 1698 | I915_WRITE(LPT_TRANSCONF, val); |
1699 | if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100)) | 1699 | if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100)) |
1700 | DRM_ERROR("Failed to enable PCH transcoder\n"); | 1700 | DRM_ERROR("Failed to enable PCH transcoder\n"); |
1701 | } | 1701 | } |
1702 | 1702 | ||
1703 | static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, | 1703 | static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, |
1704 | enum pipe pipe) | 1704 | enum pipe pipe) |
1705 | { | 1705 | { |
1706 | struct drm_device *dev = dev_priv->dev; | 1706 | struct drm_device *dev = dev_priv->dev; |
1707 | uint32_t reg, val; | 1707 | uint32_t reg, val; |
1708 | 1708 | ||
1709 | /* FDI relies on the transcoder */ | 1709 | /* FDI relies on the transcoder */ |
1710 | assert_fdi_tx_disabled(dev_priv, pipe); | 1710 | assert_fdi_tx_disabled(dev_priv, pipe); |
1711 | assert_fdi_rx_disabled(dev_priv, pipe); | 1711 | assert_fdi_rx_disabled(dev_priv, pipe); |
1712 | 1712 | ||
1713 | /* Ports must be off as well */ | 1713 | /* Ports must be off as well */ |
1714 | assert_pch_ports_disabled(dev_priv, pipe); | 1714 | assert_pch_ports_disabled(dev_priv, pipe); |
1715 | 1715 | ||
1716 | reg = PCH_TRANSCONF(pipe); | 1716 | reg = PCH_TRANSCONF(pipe); |
1717 | val = I915_READ(reg); | 1717 | val = I915_READ(reg); |
1718 | val &= ~TRANS_ENABLE; | 1718 | val &= ~TRANS_ENABLE; |
1719 | I915_WRITE(reg, val); | 1719 | I915_WRITE(reg, val); |
1720 | /* wait for PCH transcoder off, transcoder state */ | 1720 | /* wait for PCH transcoder off, transcoder state */ |
1721 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) | 1721 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) |
1722 | DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); | 1722 | DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); |
1723 | 1723 | ||
1724 | if (!HAS_PCH_IBX(dev)) { | 1724 | if (!HAS_PCH_IBX(dev)) { |
1725 | /* Workaround: Clear the timing override chicken bit again. */ | 1725 | /* Workaround: Clear the timing override chicken bit again. */ |
1726 | reg = TRANS_CHICKEN2(pipe); | 1726 | reg = TRANS_CHICKEN2(pipe); |
1727 | val = I915_READ(reg); | 1727 | val = I915_READ(reg); |
1728 | val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; | 1728 | val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; |
1729 | I915_WRITE(reg, val); | 1729 | I915_WRITE(reg, val); |
1730 | } | 1730 | } |
1731 | } | 1731 | } |
1732 | 1732 | ||
1733 | static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) | 1733 | static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) |
1734 | { | 1734 | { |
1735 | u32 val; | 1735 | u32 val; |
1736 | 1736 | ||
1737 | val = I915_READ(LPT_TRANSCONF); | 1737 | val = I915_READ(LPT_TRANSCONF); |
1738 | val &= ~TRANS_ENABLE; | 1738 | val &= ~TRANS_ENABLE; |
1739 | I915_WRITE(LPT_TRANSCONF, val); | 1739 | I915_WRITE(LPT_TRANSCONF, val); |
1740 | /* wait for PCH transcoder off, transcoder state */ | 1740 | /* wait for PCH transcoder off, transcoder state */ |
1741 | if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50)) | 1741 | if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50)) |
1742 | DRM_ERROR("Failed to disable PCH transcoder\n"); | 1742 | DRM_ERROR("Failed to disable PCH transcoder\n"); |
1743 | 1743 | ||
1744 | /* Workaround: clear timing override bit. */ | 1744 | /* Workaround: clear timing override bit. */ |
1745 | val = I915_READ(_TRANSA_CHICKEN2); | 1745 | val = I915_READ(_TRANSA_CHICKEN2); |
1746 | val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; | 1746 | val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; |
1747 | I915_WRITE(_TRANSA_CHICKEN2, val); | 1747 | I915_WRITE(_TRANSA_CHICKEN2, val); |
1748 | } | 1748 | } |
1749 | 1749 | ||
1750 | /** | 1750 | /** |
1751 | * intel_enable_pipe - enable a pipe, asserting requirements | 1751 | * intel_enable_pipe - enable a pipe, asserting requirements |
1752 | * @crtc: crtc responsible for the pipe | 1752 | * @crtc: crtc responsible for the pipe |
1753 | * | 1753 | * |
1754 | * Enable @crtc's pipe, making sure that various hardware specific requirements | 1754 | * Enable @crtc's pipe, making sure that various hardware specific requirements |
1755 | * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. | 1755 | * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. |
1756 | */ | 1756 | */ |
1757 | static void intel_enable_pipe(struct intel_crtc *crtc) | 1757 | static void intel_enable_pipe(struct intel_crtc *crtc) |
1758 | { | 1758 | { |
1759 | struct drm_device *dev = crtc->base.dev; | 1759 | struct drm_device *dev = crtc->base.dev; |
1760 | struct drm_i915_private *dev_priv = dev->dev_private; | 1760 | struct drm_i915_private *dev_priv = dev->dev_private; |
1761 | enum pipe pipe = crtc->pipe; | 1761 | enum pipe pipe = crtc->pipe; |
1762 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | 1762 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
1763 | pipe); | 1763 | pipe); |
1764 | enum pipe pch_transcoder; | 1764 | enum pipe pch_transcoder; |
1765 | int reg; | 1765 | int reg; |
1766 | u32 val; | 1766 | u32 val; |
1767 | 1767 | ||
1768 | assert_planes_disabled(dev_priv, pipe); | 1768 | assert_planes_disabled(dev_priv, pipe); |
1769 | assert_cursor_disabled(dev_priv, pipe); | 1769 | assert_cursor_disabled(dev_priv, pipe); |
1770 | assert_sprites_disabled(dev_priv, pipe); | 1770 | assert_sprites_disabled(dev_priv, pipe); |
1771 | 1771 | ||
1772 | if (HAS_PCH_LPT(dev_priv->dev)) | 1772 | if (HAS_PCH_LPT(dev_priv->dev)) |
1773 | pch_transcoder = TRANSCODER_A; | 1773 | pch_transcoder = TRANSCODER_A; |
1774 | else | 1774 | else |
1775 | pch_transcoder = pipe; | 1775 | pch_transcoder = pipe; |
1776 | 1776 | ||
1777 | /* | 1777 | /* |
1778 | * A pipe without a PLL won't actually be able to drive bits from | 1778 | * A pipe without a PLL won't actually be able to drive bits from |
1779 | * a plane. On ILK+ the pipe PLLs are integrated, so we don't | 1779 | * a plane. On ILK+ the pipe PLLs are integrated, so we don't |
1780 | * need the check. | 1780 | * need the check. |
1781 | */ | 1781 | */ |
1782 | if (!HAS_PCH_SPLIT(dev_priv->dev)) | 1782 | if (!HAS_PCH_SPLIT(dev_priv->dev)) |
1783 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI)) | 1783 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI)) |
1784 | assert_dsi_pll_enabled(dev_priv); | 1784 | assert_dsi_pll_enabled(dev_priv); |
1785 | else | 1785 | else |
1786 | assert_pll_enabled(dev_priv, pipe); | 1786 | assert_pll_enabled(dev_priv, pipe); |
1787 | else { | 1787 | else { |
1788 | if (crtc->config.has_pch_encoder) { | 1788 | if (crtc->config.has_pch_encoder) { |
1789 | /* if driving the PCH, we need FDI enabled */ | 1789 | /* if driving the PCH, we need FDI enabled */ |
1790 | assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); | 1790 | assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); |
1791 | assert_fdi_tx_pll_enabled(dev_priv, | 1791 | assert_fdi_tx_pll_enabled(dev_priv, |
1792 | (enum pipe) cpu_transcoder); | 1792 | (enum pipe) cpu_transcoder); |
1793 | } | 1793 | } |
1794 | /* FIXME: assert CPU port conditions for SNB+ */ | 1794 | /* FIXME: assert CPU port conditions for SNB+ */ |
1795 | } | 1795 | } |
1796 | 1796 | ||
1797 | reg = PIPECONF(cpu_transcoder); | 1797 | reg = PIPECONF(cpu_transcoder); |
1798 | val = I915_READ(reg); | 1798 | val = I915_READ(reg); |
1799 | if (val & PIPECONF_ENABLE) { | 1799 | if (val & PIPECONF_ENABLE) { |
1800 | WARN_ON(!(pipe == PIPE_A && | 1800 | WARN_ON(!(pipe == PIPE_A && |
1801 | dev_priv->quirks & QUIRK_PIPEA_FORCE)); | 1801 | dev_priv->quirks & QUIRK_PIPEA_FORCE)); |
1802 | return; | 1802 | return; |
1803 | } | 1803 | } |
1804 | 1804 | ||
1805 | I915_WRITE(reg, val | PIPECONF_ENABLE); | 1805 | I915_WRITE(reg, val | PIPECONF_ENABLE); |
1806 | POSTING_READ(reg); | 1806 | POSTING_READ(reg); |
1807 | 1807 | ||
1808 | /* | 1808 | /* |
1809 | * There's no guarantee the pipe will really start running now. It | 1809 | * There's no guarantee the pipe will really start running now. It |
1810 | * depends on the Gen, the output type and the relative order between | 1810 | * depends on the Gen, the output type and the relative order between |
1811 | * pipe and plane enabling. Avoid waiting on HSW+ since it's not | 1811 | * pipe and plane enabling. Avoid waiting on HSW+ since it's not |
1812 | * necessary. | 1812 | * necessary. |
1813 | * TODO: audit the previous gens. | 1813 | * TODO: audit the previous gens. |
1814 | */ | 1814 | */ |
1815 | if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) | 1815 | if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) |
1816 | intel_wait_for_vblank(dev_priv->dev, pipe); | 1816 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1817 | } | 1817 | } |
1818 | 1818 | ||
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Already off; nothing to do. */
	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
1859 | 1859 | ||
1860 | /* | 1860 | /* |
1861 | * Plane regs are double buffered, going from enabled->disabled needs a | 1861 | * Plane regs are double buffered, going from enabled->disabled needs a |
1862 | * trigger in order to latch. The display address reg provides this. | 1862 | * trigger in order to latch. The display address reg provides this. |
1863 | */ | 1863 | */ |
1864 | void intel_flush_primary_plane(struct drm_i915_private *dev_priv, | 1864 | void intel_flush_primary_plane(struct drm_i915_private *dev_priv, |
1865 | enum plane plane) | 1865 | enum plane plane) |
1866 | { | 1866 | { |
1867 | struct drm_device *dev = dev_priv->dev; | 1867 | struct drm_device *dev = dev_priv->dev; |
1868 | u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane); | 1868 | u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane); |
1869 | 1869 | ||
1870 | I915_WRITE(reg, I915_READ(reg)); | 1870 | I915_WRITE(reg, I915_READ(reg)); |
1871 | POSTING_READ(reg); | 1871 | POSTING_READ(reg); |
1872 | } | 1872 | } |
1873 | 1873 | ||
1874 | /** | 1874 | /** |
1875 | * intel_enable_primary_hw_plane - enable the primary plane on a given pipe | 1875 | * intel_enable_primary_hw_plane - enable the primary plane on a given pipe |
1876 | * @dev_priv: i915 private structure | 1876 | * @dev_priv: i915 private structure |
1877 | * @plane: plane to enable | 1877 | * @plane: plane to enable |
1878 | * @pipe: pipe being fed | 1878 | * @pipe: pipe being fed |
1879 | * | 1879 | * |
1880 | * Enable @plane on @pipe, making sure that @pipe is running first. | 1880 | * Enable @plane on @pipe, making sure that @pipe is running first. |
1881 | */ | 1881 | */ |
1882 | static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv, | 1882 | static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv, |
1883 | enum plane plane, enum pipe pipe) | 1883 | enum plane plane, enum pipe pipe) |
1884 | { | 1884 | { |
1885 | struct intel_crtc *intel_crtc = | 1885 | struct intel_crtc *intel_crtc = |
1886 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | 1886 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
1887 | int reg; | 1887 | int reg; |
1888 | u32 val; | 1888 | u32 val; |
1889 | 1889 | ||
1890 | /* If the pipe isn't enabled, we can't pump pixels and may hang */ | 1890 | /* If the pipe isn't enabled, we can't pump pixels and may hang */ |
1891 | assert_pipe_enabled(dev_priv, pipe); | 1891 | assert_pipe_enabled(dev_priv, pipe); |
1892 | 1892 | ||
1893 | WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n"); | 1893 | WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n"); |
1894 | 1894 | ||
1895 | intel_crtc->primary_enabled = true; | 1895 | intel_crtc->primary_enabled = true; |
1896 | 1896 | ||
1897 | reg = DSPCNTR(plane); | 1897 | reg = DSPCNTR(plane); |
1898 | val = I915_READ(reg); | 1898 | val = I915_READ(reg); |
1899 | if (val & DISPLAY_PLANE_ENABLE) | 1899 | if (val & DISPLAY_PLANE_ENABLE) |
1900 | return; | 1900 | return; |
1901 | 1901 | ||
1902 | I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); | 1902 | I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); |
1903 | intel_flush_primary_plane(dev_priv, plane); | 1903 | intel_flush_primary_plane(dev_priv, plane); |
1904 | intel_wait_for_vblank(dev_priv->dev, pipe); | 1904 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1905 | } | 1905 | } |
1906 | 1906 | ||
1907 | /** | 1907 | /** |
1908 | * intel_disable_primary_hw_plane - disable the primary hardware plane | 1908 | * intel_disable_primary_hw_plane - disable the primary hardware plane |
1909 | * @dev_priv: i915 private structure | 1909 | * @dev_priv: i915 private structure |
1910 | * @plane: plane to disable | 1910 | * @plane: plane to disable |
1911 | * @pipe: pipe consuming the data | 1911 | * @pipe: pipe consuming the data |
1912 | * | 1912 | * |
1913 | * Disable @plane; should be an independent operation. | 1913 | * Disable @plane; should be an independent operation. |
1914 | */ | 1914 | */ |
1915 | static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv, | 1915 | static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv, |
1916 | enum plane plane, enum pipe pipe) | 1916 | enum plane plane, enum pipe pipe) |
1917 | { | 1917 | { |
1918 | struct intel_crtc *intel_crtc = | 1918 | struct intel_crtc *intel_crtc = |
1919 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | 1919 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
1920 | int reg; | 1920 | int reg; |
1921 | u32 val; | 1921 | u32 val; |
1922 | 1922 | ||
1923 | WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n"); | 1923 | WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n"); |
1924 | 1924 | ||
1925 | intel_crtc->primary_enabled = false; | 1925 | intel_crtc->primary_enabled = false; |
1926 | 1926 | ||
1927 | reg = DSPCNTR(plane); | 1927 | reg = DSPCNTR(plane); |
1928 | val = I915_READ(reg); | 1928 | val = I915_READ(reg); |
1929 | if ((val & DISPLAY_PLANE_ENABLE) == 0) | 1929 | if ((val & DISPLAY_PLANE_ENABLE) == 0) |
1930 | return; | 1930 | return; |
1931 | 1931 | ||
1932 | I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); | 1932 | I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); |
1933 | intel_flush_primary_plane(dev_priv, plane); | 1933 | intel_flush_primary_plane(dev_priv, plane); |
1934 | intel_wait_for_vblank(dev_priv->dev, pipe); | 1934 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1935 | } | 1935 | } |
1936 | 1936 | ||
1937 | static bool need_vtd_wa(struct drm_device *dev) | 1937 | static bool need_vtd_wa(struct drm_device *dev) |
1938 | { | 1938 | { |
1939 | #ifdef CONFIG_INTEL_IOMMU | 1939 | #ifdef CONFIG_INTEL_IOMMU |
1940 | if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped) | 1940 | if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped) |
1941 | return true; | 1941 | return true; |
1942 | #endif | 1942 | #endif |
1943 | return false; | 1943 | return false; |
1944 | } | 1944 | } |
1945 | 1945 | ||
1946 | static int intel_align_height(struct drm_device *dev, int height, bool tiled) | 1946 | static int intel_align_height(struct drm_device *dev, int height, bool tiled) |
1947 | { | 1947 | { |
1948 | int tile_height; | 1948 | int tile_height; |
1949 | 1949 | ||
1950 | tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1; | 1950 | tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1; |
1951 | return ALIGN(height, tile_height); | 1951 | return ALIGN(height, tile_height); |
1952 | } | 1952 | } |
1953 | 1953 | ||
/*
 * Pin a framebuffer object for scanout and install a fence register.
 *
 * The base alignment is chosen from the object's tiling mode (tiled
 * objects get their alignment from the fence code via pin()).  Pinning
 * runs with dev_priv->mm.interruptible forced off so that a signal
 * cannot abort a modeset halfway through; the flag is restored on every
 * exit path.  Returns 0 on success or a negative error code.
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		/* Linear scanout alignment varies per platform. */
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* Y tiling is not supported for scanout here. */
		WARN(1, "Y tiled bo slipped through, driver bug!\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/* Uninterruptible from here: restore on all exit paths below. */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression. For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto err_unpin;

	i915_gem_object_pin_fence(obj);

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
2016 | 2016 | ||
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence reference first,
 * then unpin the object from the display plane.
 */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_fence(obj);
	i915_gem_object_unpin_from_display_plane(obj);
}
2022 | 2022 | ||
2023 | /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel | 2023 | /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel |
2024 | * is assumed to be a power-of-two. */ | 2024 | * is assumed to be a power-of-two. */ |
2025 | unsigned long intel_gen4_compute_page_offset(int *x, int *y, | 2025 | unsigned long intel_gen4_compute_page_offset(int *x, int *y, |
2026 | unsigned int tiling_mode, | 2026 | unsigned int tiling_mode, |
2027 | unsigned int cpp, | 2027 | unsigned int cpp, |
2028 | unsigned int pitch) | 2028 | unsigned int pitch) |
2029 | { | 2029 | { |
2030 | if (tiling_mode != I915_TILING_NONE) { | 2030 | if (tiling_mode != I915_TILING_NONE) { |
2031 | unsigned int tile_rows, tiles; | 2031 | unsigned int tile_rows, tiles; |
2032 | 2032 | ||
2033 | tile_rows = *y / 8; | 2033 | tile_rows = *y / 8; |
2034 | *y %= 8; | 2034 | *y %= 8; |
2035 | 2035 | ||
2036 | tiles = *x / (512/cpp); | 2036 | tiles = *x / (512/cpp); |
2037 | *x %= 512/cpp; | 2037 | *x %= 512/cpp; |
2038 | 2038 | ||
2039 | return tile_rows * pitch * 8 + tiles * 4096; | 2039 | return tile_rows * pitch * 8 + tiles * 4096; |
2040 | } else { | 2040 | } else { |
2041 | unsigned int offset; | 2041 | unsigned int offset; |
2042 | 2042 | ||
2043 | offset = *y * pitch + *x * cpp; | 2043 | offset = *y * pitch + *x * cpp; |
2044 | *y = 0; | 2044 | *y = 0; |
2045 | *x = (offset & 4095) / cpp; | 2045 | *x = (offset & 4095) / cpp; |
2046 | return offset & -4096; | 2046 | return offset & -4096; |
2047 | } | 2047 | } |
2048 | } | 2048 | } |
2049 | 2049 | ||
2050 | int intel_format_to_fourcc(int format) | 2050 | int intel_format_to_fourcc(int format) |
2051 | { | 2051 | { |
2052 | switch (format) { | 2052 | switch (format) { |
2053 | case DISPPLANE_8BPP: | 2053 | case DISPPLANE_8BPP: |
2054 | return DRM_FORMAT_C8; | 2054 | return DRM_FORMAT_C8; |
2055 | case DISPPLANE_BGRX555: | 2055 | case DISPPLANE_BGRX555: |
2056 | return DRM_FORMAT_XRGB1555; | 2056 | return DRM_FORMAT_XRGB1555; |
2057 | case DISPPLANE_BGRX565: | 2057 | case DISPPLANE_BGRX565: |
2058 | return DRM_FORMAT_RGB565; | 2058 | return DRM_FORMAT_RGB565; |
2059 | default: | 2059 | default: |
2060 | case DISPPLANE_BGRX888: | 2060 | case DISPPLANE_BGRX888: |
2061 | return DRM_FORMAT_XRGB8888; | 2061 | return DRM_FORMAT_XRGB8888; |
2062 | case DISPPLANE_RGBX888: | 2062 | case DISPPLANE_RGBX888: |
2063 | return DRM_FORMAT_XBGR8888; | 2063 | return DRM_FORMAT_XBGR8888; |
2064 | case DISPPLANE_BGRX101010: | 2064 | case DISPPLANE_BGRX101010: |
2065 | return DRM_FORMAT_XRGB2101010; | 2065 | return DRM_FORMAT_XRGB2101010; |
2066 | case DISPPLANE_RGBX101010: | 2066 | case DISPPLANE_RGBX101010: |
2067 | return DRM_FORMAT_XBGR2101010; | 2067 | return DRM_FORMAT_XBGR2101010; |
2068 | } | 2068 | } |
2069 | } | 2069 | } |
2070 | 2070 | ||
/*
 * Wrap the BIOS-programmed framebuffer in a GEM object backed by the
 * preallocated stolen-memory range at plane_config->base, and bind the
 * crtc's primary fb to it.  Returns true on success, false if the
 * stolen object or the framebuffer could not be set up.
 */
static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
				  struct intel_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	u32 base = plane_config->base;

	if (plane_config->size == 0)
		return false;

	/* base serves as both the stolen offset and the GTT offset here. */
	obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
							     plane_config->size);
	if (!obj)
		return false;

	if (plane_config->tiled) {
		obj->tiling_mode = I915_TILING_X;
		obj->stride = crtc->base.primary->fb->pitches[0];
	}

	/* Describe the fb the BIOS set up, taken from the pre-filled
	 * crtc->base.primary->fb. */
	mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
	mode_cmd.width = crtc->base.primary->fb->width;
	mode_cmd.height = crtc->base.primary->fb->height;
	mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];

	mutex_lock(&dev->struct_mutex);

	if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	/* Drop the stolen object; the caller frees the fb itself. */
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}
2115 | 2115 | ||
/*
 * Provide a backing object for the crtc's BIOS-inherited primary fb:
 * first try to wrap the preallocated stolen range, and failing that,
 * share the fb of another active crtc scanning out from the same base
 * address.  If neither works the primary fb is left NULL.
 */
static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
				 struct intel_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct intel_framebuffer *fb;

	if (!intel_crtc->base.primary->fb)
		return;

	if (intel_alloc_plane_obj(intel_crtc, plane_config))
		return;

	/* NOTE(review): the fb is freed with kfree() directly, which
	 * presumes it was kzalloc'ed during plane-config readout and has
	 * not been registered/refcounted yet — confirm against the
	 * get_plane_config path. */
	kfree(intel_crtc->base.primary->fb);
	intel_crtc->base.primary->fb = NULL;

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active || !c->primary->fb)
			continue;

		/* Match on the GGTT offset the other crtc scans out from. */
		fb = to_intel_framebuffer(c->primary->fb);
		if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
			drm_framebuffer_reference(c->primary->fb);
			intel_crtc->base.primary->fb = c->primary->fb;
			break;
		}
	}
}
2154 | 2154 | ||
/*
 * Program the gen2-gen4/VLV primary plane registers for the given fb
 * and (x, y) panning offset.  Writes DSPCNTR first, then the stride and
 * surface/offset registers; the POSTING_READ flushes the writes.
 * Returns 0 on success or -EINVAL for an unknown plane.
 */
static int i9xx_update_primary_plane(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg;

	/* Only planes A and B exist on these platforms. */
	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Unsupported formats must be rejected before we get here. */
		BUG();
	}

	/* The tiling bit only exists from gen4 on. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+: split the offset into a page-aligned surface base
		 * plus a residual (x, y) tile offset. */
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
						       fb->bits_per_pixel / 8,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* DSPSURF latches the update on gen4+. */
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);

	return 0;
}
2255 | 2255 | ||
/*
 * Program the Ironlake+ primary plane registers for the given fb and
 * (x, y) panning offset.  Same structure as the i9xx path but with
 * mandatory tiling support, three planes and the HSW/BDW DSPOFFSET
 * register instead of DSPTILEOFF/DSPLINOFF.
 * Returns 0 on success or -EINVAL for an unknown plane.
 */
static int ironlake_update_primary_plane(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg;

	/* Planes A, B and C exist on these platforms. */
	switch (plane) {
	case 0:
	case 1:
	case 2:
		break;
	default:
		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Unsupported formats must be rejected before we get here. */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	/* Trickle feed handling differs on HSW/BDW. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
	else
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	/* Split the offset into a page-aligned surface base plus a
	 * residual (x, y) tile offset. */
	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
					       fb->bits_per_pixel / 8,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);

	return 0;
}
2349 | 2349 | ||
2350 | /* Assume fb object is pinned & idle & fenced and just update base pointers */ | 2350 | /* Assume fb object is pinned & idle & fenced and just update base pointers */ |
2351 | static int | 2351 | static int |
2352 | intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | 2352 | intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
2353 | int x, int y, enum mode_set_atomic state) | 2353 | int x, int y, enum mode_set_atomic state) |
2354 | { | 2354 | { |
2355 | struct drm_device *dev = crtc->dev; | 2355 | struct drm_device *dev = crtc->dev; |
2356 | struct drm_i915_private *dev_priv = dev->dev_private; | 2356 | struct drm_i915_private *dev_priv = dev->dev_private; |
2357 | 2357 | ||
2358 | if (dev_priv->display.disable_fbc) | 2358 | if (dev_priv->display.disable_fbc) |
2359 | dev_priv->display.disable_fbc(dev); | 2359 | dev_priv->display.disable_fbc(dev); |
2360 | intel_increase_pllclock(crtc); | 2360 | intel_increase_pllclock(crtc); |
2361 | 2361 | ||
2362 | return dev_priv->display.update_primary_plane(crtc, fb, x, y); | 2362 | return dev_priv->display.update_primary_plane(crtc, fb, x, y); |
2363 | } | 2363 | } |
2364 | 2364 | ||
/*
 * Restore display state after a GPU reset: complete all pending page
 * flips (their ring commands were nuked by the reset) and re-program
 * every active primary plane with its current fb.
 */
void intel_display_handle_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	/*
	 * Flips in the rings have been nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 *
	 * Also update the base address of all primary
	 * planes to the last fb to make sure we're
	 * showing the correct fb after a reset.
	 *
	 * Need to make two loops over the crtcs so that we
	 * don't try to grab a crtc mutex before the
	 * pending_flip_queue really got woken up.
	 */

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum plane plane = intel_crtc->plane;

		intel_prepare_page_flip(dev, plane);
		intel_finish_page_flip_plane(dev, plane);
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		mutex_lock(&crtc->mutex);
		/*
		 * FIXME: Once we have proper support for primary planes (and
		 * disabling them without disabling the entire crtc) allow again
		 * a NULL crtc->primary->fb.
		 */
		if (intel_crtc->active && crtc->primary->fb)
			dev_priv->display.update_primary_plane(crtc,
							       crtc->primary->fb,
							       crtc->x,
							       crtc->y);
		mutex_unlock(&crtc->mutex);
	}
}
2409 | 2409 | ||
2410 | static int | 2410 | static int |
2411 | intel_finish_fb(struct drm_framebuffer *old_fb) | 2411 | intel_finish_fb(struct drm_framebuffer *old_fb) |
2412 | { | 2412 | { |
2413 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; | 2413 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; |
2414 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 2414 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
2415 | bool was_interruptible = dev_priv->mm.interruptible; | 2415 | bool was_interruptible = dev_priv->mm.interruptible; |
2416 | int ret; | 2416 | int ret; |
2417 | 2417 | ||
2418 | /* Big Hammer, we also need to ensure that any pending | 2418 | /* Big Hammer, we also need to ensure that any pending |
2419 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | 2419 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the |
2420 | * current scanout is retired before unpinning the old | 2420 | * current scanout is retired before unpinning the old |
2421 | * framebuffer. | 2421 | * framebuffer. |
2422 | * | 2422 | * |
2423 | * This should only fail upon a hung GPU, in which case we | 2423 | * This should only fail upon a hung GPU, in which case we |
2424 | * can safely continue. | 2424 | * can safely continue. |
2425 | */ | 2425 | */ |
2426 | dev_priv->mm.interruptible = false; | 2426 | dev_priv->mm.interruptible = false; |
2427 | ret = i915_gem_object_finish_gpu(obj); | 2427 | ret = i915_gem_object_finish_gpu(obj); |
2428 | dev_priv->mm.interruptible = was_interruptible; | 2428 | dev_priv->mm.interruptible = was_interruptible; |
2429 | 2429 | ||
2430 | return ret; | 2430 | return ret; |
2431 | } | 2431 | } |
2432 | 2432 | ||
2433 | static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) | 2433 | static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) |
2434 | { | 2434 | { |
2435 | struct drm_device *dev = crtc->dev; | 2435 | struct drm_device *dev = crtc->dev; |
2436 | struct drm_i915_private *dev_priv = dev->dev_private; | 2436 | struct drm_i915_private *dev_priv = dev->dev_private; |
2437 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2437 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2438 | unsigned long flags; | 2438 | unsigned long flags; |
2439 | bool pending; | 2439 | bool pending; |
2440 | 2440 | ||
2441 | if (i915_reset_in_progress(&dev_priv->gpu_error) || | 2441 | if (i915_reset_in_progress(&dev_priv->gpu_error) || |
2442 | intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) | 2442 | intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) |
2443 | return false; | 2443 | return false; |
2444 | 2444 | ||
2445 | spin_lock_irqsave(&dev->event_lock, flags); | 2445 | spin_lock_irqsave(&dev->event_lock, flags); |
2446 | pending = to_intel_crtc(crtc)->unpin_work != NULL; | 2446 | pending = to_intel_crtc(crtc)->unpin_work != NULL; |
2447 | spin_unlock_irqrestore(&dev->event_lock, flags); | 2447 | spin_unlock_irqrestore(&dev->event_lock, flags); |
2448 | 2448 | ||
2449 | return pending; | 2449 | return pending; |
2450 | } | 2450 | } |
2451 | 2451 | ||
/*
 * intel_pipe_set_base - point the pipe's primary plane at a new framebuffer
 * @crtc: CRTC to update
 * @x: x offset into @fb for the scanout origin
 * @y: y offset into @fb for the scanout origin
 * @fb: framebuffer to scan out
 *
 * Pins and fences the new framebuffer, programs the primary plane to scan
 * it out, then (after a vblank wait so the hardware has latched the new
 * base) unpins the old framebuffer.  Returns 0 on success or a negative
 * error code.
 */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_framebuffer *old_fb;
	int ret;

	/* Refuse to clobber a pageflip that hasn't completed yet. */
	if (intel_crtc_has_pending_flip(crtc)) {
		DRM_ERROR("pipe is still busy with an old pageflip\n");
		return -EBUSY;
	}

	/* no fb bound */
	if (!fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
			  plane_name(intel_crtc->plane),
			  INTEL_INFO(dev)->num_pipes);
		return -EINVAL;
	}

	/* Pin and fence the new fb; GEM state requires struct_mutex. */
	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev,
					 to_intel_framebuffer(fb)->obj,
					 NULL);
	mutex_unlock(&dev->struct_mutex);
	if (ret != 0) {
		DRM_ERROR("pin & fence failed\n");
		return ret;
	}

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 *
	 * To fix this properly, we need to hoist the checks up into
	 * compute_mode_changes (or above), check the actual pfit state and
	 * whether the platform allows pfit disable with pipe active, and only
	 * then update the pipesrc and pfit state, even on the flip path.
	 */
	if (i915.fastboot) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config.adjusted_mode;

		I915_WRITE(PIPESRC(intel_crtc->pipe),
			   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
			   (adjusted_mode->crtc_vdisplay - 1));
		/* Turn off the PCH panel fitter when it's no longer needed;
		 * only relevant for the internal panel outputs (LVDS/eDP). */
		if (!intel_crtc->config.pch_pfit.enabled &&
		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
			I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
			I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
			I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
		}
		intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
		intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
	}

	ret = dev_priv->display.update_primary_plane(crtc, fb, x, y);
	if (ret) {
		/* Plane update failed: drop the pin taken above. */
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("failed to update base address\n");
		return ret;
	}

	old_fb = crtc->primary->fb;
	crtc->primary->fb = fb;
	crtc->x = x;
	crtc->y = y;

	if (old_fb) {
		/* Wait a vblank so the hardware has latched the new base
		 * before the old fb's pages can be unpinned. */
		if (intel_crtc->active && old_fb != fb)
			intel_wait_for_vblank(dev, intel_crtc->pipe);
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
		mutex_unlock(&dev->struct_mutex);
	}

	/* FBC and PSR eligibility may change with the new fb; recompute. */
	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	intel_edp_psr_update(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
2550 | 2550 | ||
/*
 * intel_fdi_normal_train - switch the FDI link out of training mode
 * @crtc: CRTC whose FDI link should enter normal (pixel) operation
 *
 * Programs both the CPU-side TX and PCH-side RX control registers to
 * leave the training patterns and enable enhanced framing.  Register
 * field layouts differ on IVB (TX) and CPT (RX), hence the branches.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
2591 | 2591 | ||
2592 | static bool pipe_has_enabled_pch(struct intel_crtc *crtc) | 2592 | static bool pipe_has_enabled_pch(struct intel_crtc *crtc) |
2593 | { | 2593 | { |
2594 | return crtc->base.enabled && crtc->active && | 2594 | return crtc->base.enabled && crtc->active && |
2595 | crtc->config.has_pch_encoder; | 2595 | crtc->config.has_pch_encoder; |
2596 | } | 2596 | } |
2597 | 2597 | ||
/*
 * ivb_modeset_global_resources - rebalance shared FDI lanes on IVB
 * @dev: drm device
 *
 * On Ivy Bridge, FDI B and FDI C share lanes via the BC bifurcation
 * select.  Once neither pipe B nor pipe C drives an enabled PCH encoder,
 * clear the bifurcation bit so FDI B can later be enabled with all lanes.
 */
static void ivb_modeset_global_resources(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
	struct intel_crtc *pipe_C_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
	uint32_t temp;

	/*
	 * When everything is off disable fdi C so that we could enable fdi B
	 * with all lanes. Note that we don't care about enabled pipes without
	 * an enabled pch encoder.
	 */
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
		/* Sanity check: both FDI receivers must already be off. */
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

		temp = I915_READ(SOUTH_CHICKEN1);
		temp &= ~FDI_BC_BIFURCATION_SELECT;
		DRM_DEBUG_KMS("disabling fdi C rx\n");
		I915_WRITE(SOUTH_CHICKEN1, temp);
	}
}
2623 | 2623 | ||
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * ironlake_fdi_link_train - run the two-pattern FDI training sequence
 * @crtc: CRTC whose FDI link to train
 *
 * Trains the CPU-to-PCH FDI link: pattern 1 until bit lock, then
 * pattern 2 until symbol lock, polling FDI_RX_IIR for the result bits.
 * Failures are logged but not propagated (there is no recovery path here).
 * The register write order and delays follow the hardware sequence and
 * must not be reordered.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe & plane first */
	assert_pipe_enabled(dev_priv, pipe);
	assert_plane_enabled(dev_priv, plane);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg); /* posting read to flush the IMR write */
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; IIR bits are write-1-to-clear. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5) /* loop exhausted without bit lock */
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5) /* loop exhausted without symbol lock */
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
2718 | 2718 | ||
/* Voltage-swing / pre-emphasis combinations stepped through (in this
 * order) by the SNB and IVB FDI link-training loops below, via the
 * FDI_LINK_TRAIN_VOL_EMP_MASK field of FDI_TX_CTL. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
2725 | 2725 | ||
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * gen6_fdi_link_train - run FDI training with vswing/pre-emphasis stepping
 * @crtc: CRTC whose FDI link to train
 *
 * Like the ILK sequence, but for each training pattern it walks the
 * snb_b_fdi_train_param[] drive-strength table, retrying the IIR poll up
 * to 5 times per setting, until bit lock (train 1) and symbol lock
 * (train 2) are reported.  CPT PCHs use a different RX pattern field.
 * Failures are logged but not propagated.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Step through the drive-strength table until bit lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* IIR bits are write-1-to-clear. */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5) /* locked before exhausting retries */
			break;
	}
	if (i == 4) /* every vswing/emphasis setting failed */
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Step through the drive-strength table until symbol lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5) /* locked before exhausting retries */
			break;
	}
	if (i == 4) /* every vswing/emphasis setting failed */
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2857 | 2857 | ||
2858 | /* Manual link training for Ivy Bridge A0 parts */ | 2858 | /* Manual link training for Ivy Bridge A0 parts */ |
2859 | static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | 2859 | static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) |
2860 | { | 2860 | { |
2861 | struct drm_device *dev = crtc->dev; | 2861 | struct drm_device *dev = crtc->dev; |
2862 | struct drm_i915_private *dev_priv = dev->dev_private; | 2862 | struct drm_i915_private *dev_priv = dev->dev_private; |
2863 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2863 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2864 | int pipe = intel_crtc->pipe; | 2864 | int pipe = intel_crtc->pipe; |
2865 | u32 reg, temp, i, j; | 2865 | u32 reg, temp, i, j; |
2866 | 2866 | ||
2867 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit | 2867 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2868 | for train result */ | 2868 | for train result */ |
2869 | reg = FDI_RX_IMR(pipe); | 2869 | reg = FDI_RX_IMR(pipe); |
2870 | temp = I915_READ(reg); | 2870 | temp = I915_READ(reg); |
2871 | temp &= ~FDI_RX_SYMBOL_LOCK; | 2871 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2872 | temp &= ~FDI_RX_BIT_LOCK; | 2872 | temp &= ~FDI_RX_BIT_LOCK; |
2873 | I915_WRITE(reg, temp); | 2873 | I915_WRITE(reg, temp); |
2874 | 2874 | ||
2875 | POSTING_READ(reg); | 2875 | POSTING_READ(reg); |
2876 | udelay(150); | 2876 | udelay(150); |
2877 | 2877 | ||
2878 | DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", | 2878 | DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", |
2879 | I915_READ(FDI_RX_IIR(pipe))); | 2879 | I915_READ(FDI_RX_IIR(pipe))); |
2880 | 2880 | ||
2881 | /* Try each vswing and preemphasis setting twice before moving on */ | 2881 | /* Try each vswing and preemphasis setting twice before moving on */ |
2882 | for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { | 2882 | for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { |
2883 | /* disable first in case we need to retry */ | 2883 | /* disable first in case we need to retry */ |
2884 | reg = FDI_TX_CTL(pipe); | 2884 | reg = FDI_TX_CTL(pipe); |
2885 | temp = I915_READ(reg); | 2885 | temp = I915_READ(reg); |
2886 | temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); | 2886 | temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); |
2887 | temp &= ~FDI_TX_ENABLE; | 2887 | temp &= ~FDI_TX_ENABLE; |
2888 | I915_WRITE(reg, temp); | 2888 | I915_WRITE(reg, temp); |
2889 | 2889 | ||
2890 | reg = FDI_RX_CTL(pipe); | 2890 | reg = FDI_RX_CTL(pipe); |
2891 | temp = I915_READ(reg); | 2891 | temp = I915_READ(reg); |
2892 | temp &= ~FDI_LINK_TRAIN_AUTO; | 2892 | temp &= ~FDI_LINK_TRAIN_AUTO; |
2893 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | 2893 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2894 | temp &= ~FDI_RX_ENABLE; | 2894 | temp &= ~FDI_RX_ENABLE; |
2895 | I915_WRITE(reg, temp); | 2895 | I915_WRITE(reg, temp); |
2896 | 2896 | ||
2897 | /* enable CPU FDI TX and PCH FDI RX */ | 2897 | /* enable CPU FDI TX and PCH FDI RX */ |
2898 | reg = FDI_TX_CTL(pipe); | 2898 | reg = FDI_TX_CTL(pipe); |
2899 | temp = I915_READ(reg); | 2899 | temp = I915_READ(reg); |
2900 | temp &= ~FDI_DP_PORT_WIDTH_MASK; | 2900 | temp &= ~FDI_DP_PORT_WIDTH_MASK; |
2901 | temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes); | 2901 | temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes); |
2902 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; | 2902 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; |
2903 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 2903 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2904 | temp |= snb_b_fdi_train_param[j/2]; | 2904 | temp |= snb_b_fdi_train_param[j/2]; |
2905 | temp |= FDI_COMPOSITE_SYNC; | 2905 | temp |= FDI_COMPOSITE_SYNC; |
2906 | I915_WRITE(reg, temp | FDI_TX_ENABLE); | 2906 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2907 | 2907 | ||
2908 | I915_WRITE(FDI_RX_MISC(pipe), | 2908 | I915_WRITE(FDI_RX_MISC(pipe), |
2909 | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); | 2909 | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); |
2910 | 2910 | ||
2911 | reg = FDI_RX_CTL(pipe); | 2911 | reg = FDI_RX_CTL(pipe); |
2912 | temp = I915_READ(reg); | 2912 | temp = I915_READ(reg); |
2913 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | 2913 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
2914 | temp |= FDI_COMPOSITE_SYNC; | 2914 | temp |= FDI_COMPOSITE_SYNC; |
2915 | I915_WRITE(reg, temp | FDI_RX_ENABLE); | 2915 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2916 | 2916 | ||
2917 | POSTING_READ(reg); | 2917 | POSTING_READ(reg); |
2918 | udelay(1); /* should be 0.5us */ | 2918 | udelay(1); /* should be 0.5us */ |
2919 | 2919 | ||
2920 | for (i = 0; i < 4; i++) { | 2920 | for (i = 0; i < 4; i++) { |
2921 | reg = FDI_RX_IIR(pipe); | 2921 | reg = FDI_RX_IIR(pipe); |
2922 | temp = I915_READ(reg); | 2922 | temp = I915_READ(reg); |
2923 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 2923 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2924 | 2924 | ||
2925 | if (temp & FDI_RX_BIT_LOCK || | 2925 | if (temp & FDI_RX_BIT_LOCK || |
2926 | (I915_READ(reg) & FDI_RX_BIT_LOCK)) { | 2926 | (I915_READ(reg) & FDI_RX_BIT_LOCK)) { |
2927 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); | 2927 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
2928 | DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", | 2928 | DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", |
2929 | i); | 2929 | i); |
2930 | break; | 2930 | break; |
2931 | } | 2931 | } |
2932 | udelay(1); /* should be 0.5us */ | 2932 | udelay(1); /* should be 0.5us */ |
2933 | } | 2933 | } |
2934 | if (i == 4) { | 2934 | if (i == 4) { |
2935 | DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2); | 2935 | DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2); |
2936 | continue; | 2936 | continue; |
2937 | } | 2937 | } |
2938 | 2938 | ||
2939 | /* Train 2 */ | 2939 | /* Train 2 */ |
2940 | reg = FDI_TX_CTL(pipe); | 2940 | reg = FDI_TX_CTL(pipe); |
2941 | temp = I915_READ(reg); | 2941 | temp = I915_READ(reg); |
2942 | temp &= ~FDI_LINK_TRAIN_NONE_IVB; | 2942 | temp &= ~FDI_LINK_TRAIN_NONE_IVB; |
2943 | temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; | 2943 | temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; |
2944 | I915_WRITE(reg, temp); | 2944 | I915_WRITE(reg, temp); |
2945 | 2945 | ||
2946 | reg = FDI_RX_CTL(pipe); | 2946 | reg = FDI_RX_CTL(pipe); |
2947 | temp = I915_READ(reg); | 2947 | temp = I915_READ(reg); |
2948 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | 2948 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2949 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; | 2949 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; |
2950 | I915_WRITE(reg, temp); | 2950 | I915_WRITE(reg, temp); |
2951 | 2951 | ||
2952 | POSTING_READ(reg); | 2952 | POSTING_READ(reg); |
2953 | udelay(2); /* should be 1.5us */ | 2953 | udelay(2); /* should be 1.5us */ |
2954 | 2954 | ||
2955 | for (i = 0; i < 4; i++) { | 2955 | for (i = 0; i < 4; i++) { |
2956 | reg = FDI_RX_IIR(pipe); | 2956 | reg = FDI_RX_IIR(pipe); |
2957 | temp = I915_READ(reg); | 2957 | temp = I915_READ(reg); |
2958 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 2958 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2959 | 2959 | ||
2960 | if (temp & FDI_RX_SYMBOL_LOCK || | 2960 | if (temp & FDI_RX_SYMBOL_LOCK || |
2961 | (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) { | 2961 | (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) { |
2962 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); | 2962 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2963 | DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", | 2963 | DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", |
2964 | i); | 2964 | i); |
2965 | goto train_done; | 2965 | goto train_done; |
2966 | } | 2966 | } |
2967 | udelay(2); /* should be 1.5us */ | 2967 | udelay(2); /* should be 1.5us */ |
2968 | } | 2968 | } |
2969 | if (i == 4) | 2969 | if (i == 4) |
2970 | DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2); | 2970 | DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2); |
2971 | } | 2971 | } |
2972 | 2972 | ||
2973 | train_done: | 2973 | train_done: |
2974 | DRM_DEBUG_KMS("FDI train done.\n"); | 2974 | DRM_DEBUG_KMS("FDI train done.\n"); |
2975 | } | 2975 | } |
2976 | 2976 | ||
/*
 * Enable the FDI PLLs for this CRTC's pipe.
 *
 * Ordering is deliberate: the PCH-side RX PLL is powered up first (with
 * the lane count and pipe BPC programmed into FDI_RX_CTL), then the RX
 * side is switched from the raw clock to PCDclk, and finally the
 * CPU-side TX PLL is enabled if it is not already running.  The
 * POSTING_READ()+udelay() pairs give each PLL time to warm up/lock.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;


	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* clear the lane-count field and bits 18:16 (presumably the RX BPC
	 * field, matching the << 11 shift of the PIPECONF BPC bits below —
	 * TODO confirm against the register definition) */
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	/* copy the pipe's BPC setting into FDI_RX_CTL so both ends agree */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
3013 | 3013 | ||
/*
 * Tear down the FDI PLLs for this CRTC's pipe, reversing
 * ironlake_fdi_pll_enable(): switch the RX side back from PCDclk to the
 * raw clock, disable the CPU TX PLL, then disable the PCH RX PLL,
 * waiting for the clocks to settle after each PLL disable.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
3042 | 3042 | ||
/*
 * Disable the FDI link on @crtc's pipe: turn off the CPU TX side and
 * the PCH RX side, apply the Ironlake clock-pointer workaround, and
 * leave both ends parked in training pattern 1 (the state link training
 * starts from on the next enable).  The pipe BPC bits in FDI_RX_CTL are
 * kept in sync with PIPECONF throughout.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* bits 18:16 — presumably the RX BPC field, matching the << 11
	 * shift of the PIPECONF BPC bits below (TODO confirm) */
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	}

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT PCHs use a dedicated pattern field; older PCHs reuse the
	 * TX-style bits */
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
3095 | 3095 | ||
3096 | bool intel_has_pending_fb_unpin(struct drm_device *dev) | 3096 | bool intel_has_pending_fb_unpin(struct drm_device *dev) |
3097 | { | 3097 | { |
3098 | struct intel_crtc *crtc; | 3098 | struct intel_crtc *crtc; |
3099 | 3099 | ||
3100 | /* Note that we don't need to be called with mode_config.lock here | 3100 | /* Note that we don't need to be called with mode_config.lock here |
3101 | * as our list of CRTC objects is static for the lifetime of the | 3101 | * as our list of CRTC objects is static for the lifetime of the |
3102 | * device and so cannot disappear as we iterate. Similarly, we can | 3102 | * device and so cannot disappear as we iterate. Similarly, we can |
3103 | * happily treat the predicates as racy, atomic checks as userspace | 3103 | * happily treat the predicates as racy, atomic checks as userspace |
3104 | * cannot claim and pin a new fb without at least acquring the | 3104 | * cannot claim and pin a new fb without at least acquring the |
3105 | * struct_mutex and so serialising with us. | 3105 | * struct_mutex and so serialising with us. |
3106 | */ | 3106 | */ |
3107 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { | 3107 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { |
3108 | if (atomic_read(&crtc->unpin_work_count) == 0) | 3108 | if (atomic_read(&crtc->unpin_work_count) == 0) |
3109 | continue; | 3109 | continue; |
3110 | 3110 | ||
3111 | if (crtc->unpin_work) | 3111 | if (crtc->unpin_work) |
3112 | intel_wait_for_vblank(dev, crtc->pipe); | 3112 | intel_wait_for_vblank(dev, crtc->pipe); |
3113 | 3113 | ||
3114 | return true; | 3114 | return true; |
3115 | } | 3115 | } |
3116 | 3116 | ||
3117 | return false; | 3117 | return false; |
3118 | } | 3118 | } |
3119 | 3119 | ||
/*
 * Wait for all pending page flips on @crtc to complete, then wait for
 * rendering to the current primary framebuffer to finish.
 *
 * No-op if the CRTC has no framebuffer.  Takes dev->struct_mutex itself
 * around intel_finish_fb(), so callers must not already hold it.
 */
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (crtc->primary->fb == NULL)
		return;

	/* nobody should already be waiting on this queue when we start */
	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	wait_event(dev_priv->pending_flip_queue,
		   !intel_crtc_has_pending_flip(crtc));

	mutex_lock(&dev->struct_mutex);
	intel_finish_fb(crtc->primary->fb);
	mutex_unlock(&dev->struct_mutex);
}
3137 | 3137 | ||
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* All sideband (SBI) accesses below must be serialised */
	mutex_lock(&dev_priv->dpio_lock);

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
				SBI_SSCCTL_DISABLE,
			SBI_ICLK);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		/* hard-coded divisor/phase settings for exactly 20 MHz */
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock in in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		/* split the total divisor into an integer (MSB) part and a
		 * 64-step phase-increment (fractional) part */
		desired_divisor = (iclk_virtual_root_freq / clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	/* Wait for initialization time */
	udelay(24);

	/* re-gate the pixel clock now that the divisors are programmed */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->dpio_lock);
}
3226 | 3226 | ||
/*
 * Copy the CPU transcoder's timing registers (H/V total, blank, sync,
 * and vsync shift) into the PCH transcoder, so both ends of the FDI
 * link are programmed with identical mode timings.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
3250 | 3250 | ||
/*
 * Set the FDI B/C lane-sharing ("bifurcation") select bit in
 * SOUTH_CHICKEN1, if it is not already set.  The WARN_ONs assert the
 * documented precondition that both FDI B and FDI C receivers are
 * disabled while the bit is being flipped.
 */
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (temp & FDI_BC_BIFURCATION_SELECT)
		return;  /* already enabled, nothing to do */

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp |= FDI_BC_BIFURCATION_SELECT;
	DRM_DEBUG_KMS("enabling fdi C rx\n");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
3268 | 3268 | ||
3269 | static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) | 3269 | static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) |
3270 | { | 3270 | { |
3271 | struct drm_device *dev = intel_crtc->base.dev; | 3271 | struct drm_device *dev = intel_crtc->base.dev; |
3272 | struct drm_i915_private *dev_priv = dev->dev_private; | 3272 | struct drm_i915_private *dev_priv = dev->dev_private; |
3273 | 3273 | ||
3274 | switch (intel_crtc->pipe) { | 3274 | switch (intel_crtc->pipe) { |
3275 | case PIPE_A: | 3275 | case PIPE_A: |
3276 | break; | 3276 | break; |
3277 | case PIPE_B: | 3277 | case PIPE_B: |
3278 | if (intel_crtc->config.fdi_lanes > 2) | 3278 | if (intel_crtc->config.fdi_lanes > 2) |
3279 | WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); | 3279 | WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); |
3280 | else | 3280 | else |
3281 | cpt_enable_fdi_bc_bifurcation(dev); | 3281 | cpt_enable_fdi_bc_bifurcation(dev); |
3282 | 3282 | ||
3283 | break; | 3283 | break; |
3284 | case PIPE_C: | 3284 | case PIPE_C: |
3285 | cpt_enable_fdi_bc_bifurcation(dev); | 3285 | cpt_enable_fdi_bc_bifurcation(dev); |
3286 | 3286 | ||
3287 | break; | 3287 | break; |
3288 | default: | 3288 | default: |
3289 | BUG(); | 3289 | BUG(); |
3290 | } | 3290 | } |
3291 | } | 3291 | } |
3292 | 3292 | ||
/*
 * Enable PCH resources required for PCH ports:
 * - PCH PLLs
 * - FDI training & RX/TX
 * - update transcoder timings
 * - DP transcoding bits
 * - transcoder
 *
 * The ordering below is load-bearing: TU size must be written before
 * FDI link training, the DPLL clock selection before the shared DPLL is
 * enabled, and the transcoder timings before the PCH transcoder is
 * finally turned on.
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	/* IVB may need FDI B/C lane sharing set up before training */
	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * mutliplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		/* route this transcoder to DPLL A or B per the chosen
		 * shared DPLL */
		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	ironlake_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		/* propagate the mode's sync polarities */
		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* select which DP port feeds this transcoder */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
3390 | 3390 | ||
/*
 * Enable the (single) LPT PCH transcoder for @crtc.
 *
 * LPT wires its one PCH transcoder to CPU transcoder A, hence the
 * disabled-state assert on TRANSCODER_A before touching anything.
 * Sequence: program the iCLKIP input clock, copy the CPU transcoder
 * timings over, then enable the PCH transcoder.
 */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	/* Clock must be programmed before the transcoder is enabled. */
	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
3407 | 3407 | ||
3408 | static void intel_put_shared_dpll(struct intel_crtc *crtc) | 3408 | static void intel_put_shared_dpll(struct intel_crtc *crtc) |
3409 | { | 3409 | { |
3410 | struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); | 3410 | struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); |
3411 | 3411 | ||
3412 | if (pll == NULL) | 3412 | if (pll == NULL) |
3413 | return; | 3413 | return; |
3414 | 3414 | ||
3415 | if (pll->refcount == 0) { | 3415 | if (pll->refcount == 0) { |
3416 | WARN(1, "bad %s refcount\n", pll->name); | 3416 | WARN(1, "bad %s refcount\n", pll->name); |
3417 | return; | 3417 | return; |
3418 | } | 3418 | } |
3419 | 3419 | ||
3420 | if (--pll->refcount == 0) { | 3420 | if (--pll->refcount == 0) { |
3421 | WARN_ON(pll->on); | 3421 | WARN_ON(pll->on); |
3422 | WARN_ON(pll->active); | 3422 | WARN_ON(pll->active); |
3423 | } | 3423 | } |
3424 | 3424 | ||
3425 | crtc->config.shared_dpll = DPLL_ID_PRIVATE; | 3425 | crtc->config.shared_dpll = DPLL_ID_PRIVATE; |
3426 | } | 3426 | } |
3427 | 3427 | ||
3428 | static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc) | 3428 | static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc) |
3429 | { | 3429 | { |
3430 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | 3430 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
3431 | struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); | 3431 | struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); |
3432 | enum intel_dpll_id i; | 3432 | enum intel_dpll_id i; |
3433 | 3433 | ||
3434 | if (pll) { | 3434 | if (pll) { |
3435 | DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n", | 3435 | DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n", |
3436 | crtc->base.base.id, pll->name); | 3436 | crtc->base.base.id, pll->name); |
3437 | intel_put_shared_dpll(crtc); | 3437 | intel_put_shared_dpll(crtc); |
3438 | } | 3438 | } |
3439 | 3439 | ||
3440 | if (HAS_PCH_IBX(dev_priv->dev)) { | 3440 | if (HAS_PCH_IBX(dev_priv->dev)) { |
3441 | /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ | 3441 | /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ |
3442 | i = (enum intel_dpll_id) crtc->pipe; | 3442 | i = (enum intel_dpll_id) crtc->pipe; |
3443 | pll = &dev_priv->shared_dplls[i]; | 3443 | pll = &dev_priv->shared_dplls[i]; |
3444 | 3444 | ||
3445 | DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", | 3445 | DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", |
3446 | crtc->base.base.id, pll->name); | 3446 | crtc->base.base.id, pll->name); |
3447 | 3447 | ||
3448 | goto found; | 3448 | goto found; |
3449 | } | 3449 | } |
3450 | 3450 | ||
3451 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | 3451 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { |
3452 | pll = &dev_priv->shared_dplls[i]; | 3452 | pll = &dev_priv->shared_dplls[i]; |
3453 | 3453 | ||
3454 | /* Only want to check enabled timings first */ | 3454 | /* Only want to check enabled timings first */ |
3455 | if (pll->refcount == 0) | 3455 | if (pll->refcount == 0) |
3456 | continue; | 3456 | continue; |
3457 | 3457 | ||
3458 | if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state, | 3458 | if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state, |
3459 | sizeof(pll->hw_state)) == 0) { | 3459 | sizeof(pll->hw_state)) == 0) { |
3460 | DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n", | 3460 | DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n", |
3461 | crtc->base.base.id, | 3461 | crtc->base.base.id, |
3462 | pll->name, pll->refcount, pll->active); | 3462 | pll->name, pll->refcount, pll->active); |
3463 | 3463 | ||
3464 | goto found; | 3464 | goto found; |
3465 | } | 3465 | } |
3466 | } | 3466 | } |
3467 | 3467 | ||
3468 | /* Ok no matching timings, maybe there's a free one? */ | 3468 | /* Ok no matching timings, maybe there's a free one? */ |
3469 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | 3469 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { |
3470 | pll = &dev_priv->shared_dplls[i]; | 3470 | pll = &dev_priv->shared_dplls[i]; |
3471 | if (pll->refcount == 0) { | 3471 | if (pll->refcount == 0) { |
3472 | DRM_DEBUG_KMS("CRTC:%d allocated %s\n", | 3472 | DRM_DEBUG_KMS("CRTC:%d allocated %s\n", |
3473 | crtc->base.base.id, pll->name); | 3473 | crtc->base.base.id, pll->name); |
3474 | goto found; | 3474 | goto found; |
3475 | } | 3475 | } |
3476 | } | 3476 | } |
3477 | 3477 | ||
3478 | return NULL; | 3478 | return NULL; |
3479 | 3479 | ||
3480 | found: | 3480 | found: |
3481 | crtc->config.shared_dpll = i; | 3481 | crtc->config.shared_dpll = i; |
3482 | DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, | 3482 | DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, |
3483 | pipe_name(crtc->pipe)); | 3483 | pipe_name(crtc->pipe)); |
3484 | 3484 | ||
3485 | if (pll->active == 0) { | 3485 | if (pll->active == 0) { |
3486 | memcpy(&pll->hw_state, &crtc->config.dpll_hw_state, | 3486 | memcpy(&pll->hw_state, &crtc->config.dpll_hw_state, |
3487 | sizeof(pll->hw_state)); | 3487 | sizeof(pll->hw_state)); |
3488 | 3488 | ||
3489 | DRM_DEBUG_DRIVER("setting up %s\n", pll->name); | 3489 | DRM_DEBUG_DRIVER("setting up %s\n", pll->name); |
3490 | WARN_ON(pll->on); | 3490 | WARN_ON(pll->on); |
3491 | assert_shared_dpll_disabled(dev_priv, pll); | 3491 | assert_shared_dpll_disabled(dev_priv, pll); |
3492 | 3492 | ||
3493 | pll->mode_set(dev_priv, pll); | 3493 | pll->mode_set(dev_priv, pll); |
3494 | } | 3494 | } |
3495 | pll->refcount++; | 3495 | pll->refcount++; |
3496 | 3496 | ||
3497 | return pll; | 3497 | return pll; |
3498 | } | 3498 | } |
3499 | 3499 | ||
/*
 * Sanity-check that pipe @pipe is actually running after a CPT mode
 * set by sampling the pipe's display scan line register (PIPEDSL) and
 * waiting for it to move.  Two 5 ms polling windows are allowed before
 * declaring the pipe stuck.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Retry once; only error out if the scanline never moves. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
3513 | 3513 | ||
3514 | static void ironlake_pfit_enable(struct intel_crtc *crtc) | 3514 | static void ironlake_pfit_enable(struct intel_crtc *crtc) |
3515 | { | 3515 | { |
3516 | struct drm_device *dev = crtc->base.dev; | 3516 | struct drm_device *dev = crtc->base.dev; |
3517 | struct drm_i915_private *dev_priv = dev->dev_private; | 3517 | struct drm_i915_private *dev_priv = dev->dev_private; |
3518 | int pipe = crtc->pipe; | 3518 | int pipe = crtc->pipe; |
3519 | 3519 | ||
3520 | if (crtc->config.pch_pfit.enabled) { | 3520 | if (crtc->config.pch_pfit.enabled) { |
3521 | /* Force use of hard-coded filter coefficients | 3521 | /* Force use of hard-coded filter coefficients |
3522 | * as some pre-programmed values are broken, | 3522 | * as some pre-programmed values are broken, |
3523 | * e.g. x201. | 3523 | * e.g. x201. |
3524 | */ | 3524 | */ |
3525 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | 3525 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
3526 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | | 3526 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | |
3527 | PF_PIPE_SEL_IVB(pipe)); | 3527 | PF_PIPE_SEL_IVB(pipe)); |
3528 | else | 3528 | else |
3529 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); | 3529 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); |
3530 | I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos); | 3530 | I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos); |
3531 | I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size); | 3531 | I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size); |
3532 | } | 3532 | } |
3533 | } | 3533 | } |
3534 | 3534 | ||
3535 | static void intel_enable_planes(struct drm_crtc *crtc) | 3535 | static void intel_enable_planes(struct drm_crtc *crtc) |
3536 | { | 3536 | { |
3537 | struct drm_device *dev = crtc->dev; | 3537 | struct drm_device *dev = crtc->dev; |
3538 | enum pipe pipe = to_intel_crtc(crtc)->pipe; | 3538 | enum pipe pipe = to_intel_crtc(crtc)->pipe; |
3539 | struct drm_plane *plane; | 3539 | struct drm_plane *plane; |
3540 | struct intel_plane *intel_plane; | 3540 | struct intel_plane *intel_plane; |
3541 | 3541 | ||
3542 | drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { | 3542 | drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { |
3543 | intel_plane = to_intel_plane(plane); | 3543 | intel_plane = to_intel_plane(plane); |
3544 | if (intel_plane->pipe == pipe) | 3544 | if (intel_plane->pipe == pipe) |
3545 | intel_plane_restore(&intel_plane->base); | 3545 | intel_plane_restore(&intel_plane->base); |
3546 | } | 3546 | } |
3547 | } | 3547 | } |
3548 | 3548 | ||
3549 | static void intel_disable_planes(struct drm_crtc *crtc) | 3549 | static void intel_disable_planes(struct drm_crtc *crtc) |
3550 | { | 3550 | { |
3551 | struct drm_device *dev = crtc->dev; | 3551 | struct drm_device *dev = crtc->dev; |
3552 | enum pipe pipe = to_intel_crtc(crtc)->pipe; | 3552 | enum pipe pipe = to_intel_crtc(crtc)->pipe; |
3553 | struct drm_plane *plane; | 3553 | struct drm_plane *plane; |
3554 | struct intel_plane *intel_plane; | 3554 | struct intel_plane *intel_plane; |
3555 | 3555 | ||
3556 | drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { | 3556 | drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { |
3557 | intel_plane = to_intel_plane(plane); | 3557 | intel_plane = to_intel_plane(plane); |
3558 | if (intel_plane->pipe == pipe) | 3558 | if (intel_plane->pipe == pipe) |
3559 | intel_plane_disable(&intel_plane->base); | 3559 | intel_plane_disable(&intel_plane->base); |
3560 | } | 3560 | } |
3561 | } | 3561 | } |
3562 | 3562 | ||
/*
 * Enable IPS (Intermediate Pixel Storage) on @crtc, if the config asks
 * for it.  On Broadwell this goes through the pcode mailbox; on other
 * platforms (Haswell) the IPS_CTL register is written directly and we
 * wait for the hardware to latch the enable bit.
 */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (!crtc->config.ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank.
	 * We guarantee that the plane is enabled by calling intel_enable_ips
	 * only after intel_enable_plane. And intel_enable_plane already waits
	 * for a vblank, so all we need to do here is to enable the IPS bit. */
	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(crtc->base.dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
3595 | 3595 | ||
/*
 * Disable IPS on @crtc (mirror of hsw_enable_ips): pcode mailbox on
 * Broadwell, direct IPS_CTL write elsewhere, then wait one vblank so
 * the plane can be disabled safely afterwards.
 */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config.ips_enabled)
		return;

	/* IPS is only valid while the plane is enabled. */
	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(crtc->base.dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
3617 | 3617 | ||
/**
 * Loads the palette/gamma unit for the CRTC with the prepared values.
 *
 * Writes the 256-entry R/G/B LUT cached in intel_crtc->lut_{r,g,b}
 * into the pipe's palette registers.  Requires the pipe clocks to be
 * running; on Haswell with split gamma + IPS the hardware forbids
 * palette access, so IPS is temporarily disabled around the writes.
 */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int palreg = PALETTE(pipe);
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled || !intel_crtc->active)
		return;

	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
		/* Pre-PCH-split hw: the relevant PLL depends on output type. */
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* use legacy palette for Ironlake */
	if (HAS_PCH_SPLIT(dev))
		palreg = LGC_PALETTE(pipe);

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	/* Pack each entry as 0x00RRGGBB at palreg + 4*i. */
	for (i = 0; i < 256; i++) {
		I915_WRITE(palreg + 4 * i,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}
3664 | 3664 | ||
/*
 * CRTC enable path for Ironlake-class (PCH split) hardware.
 *
 * The step ordering here tracks the documented mode set sequence:
 * underrun reporting, encoder pre_enable hooks, FDI PLL (only for PCH
 * outputs), panel fitter, LUT (must precede pipe enable), pipe,
 * planes/cursor, PCH transcoder, FBC, and finally the encoders.
 * Do not reorder without consulting the mode set sequence docs.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	WARN_ON(!crtc->enabled);

	/* Already up: nothing to do. */
	if (intel_crtc->active)
		return;

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);
	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	intel_crtc_update_cursor(crtc, true);

	if (intel_crtc->config.has_pch_encoder)
		ironlake_pch_enable(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/*
	 * There seems to be a race in PCH platform hw (at least on some
	 * outputs) where an enabled pipe still completes any pageflip right
	 * away (as if the pipe is off) instead of waiting for vblank. As soon
	 * as the first vblank happend, everything works as expected. Hence just
	 * wait for one vblank before returning to avoid strange things
	 * happening.
	 */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
}
3735 | 3735 | ||
3736 | /* IPS only exists on ULT machines and is tied to pipe A. */ | 3736 | /* IPS only exists on ULT machines and is tied to pipe A. */ |
3737 | static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) | 3737 | static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) |
3738 | { | 3738 | { |
3739 | return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A; | 3739 | return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A; |
3740 | } | 3740 | } |
3741 | 3741 | ||
/*
 * Enable primary plane, sprites and cursor for a Haswell CRTC, then
 * IPS and FBC.  IPS is enabled only after the planes (see the comment
 * in hsw_enable_ips for why that ordering is required).
 */
static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	intel_crtc_update_cursor(crtc, true);

	hsw_enable_ips(intel_crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}
3760 | 3760 | ||
/*
 * Disable all display planes for a Haswell CRTC: drain pending flips,
 * turn off vblank delivery, disable FBC (required before the plane on
 * HSW) and IPS, then cursor, sprites and the primary plane — the
 * reverse of haswell_crtc_enable_planes.
 */
static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);

	/* FBC must be disabled before disabling the plane on HSW. */
	if (dev_priv->fbc.plane == plane)
		intel_disable_fbc(dev);

	hsw_disable_ips(intel_crtc);

	intel_crtc_update_cursor(crtc, false);
	intel_disable_planes(crtc);
	intel_disable_primary_hw_plane(dev_priv, plane, pipe);
}
3782 | 3782 | ||
3783 | /* | 3783 | /* |
3784 | * This implements the workaround described in the "notes" section of the mode | 3784 | * This implements the workaround described in the "notes" section of the mode |
3785 | * set sequence documentation. When going from no pipes or single pipe to | 3785 | * set sequence documentation. When going from no pipes or single pipe to |
3786 | * multiple pipes, and planes are enabled after the pipe, we need to wait at | 3786 | * multiple pipes, and planes are enabled after the pipe, we need to wait at |
3787 | * least 2 vblanks on the first pipe before enabling planes on the second pipe. | 3787 | * least 2 vblanks on the first pipe before enabling planes on the second pipe. |
3788 | */ | 3788 | */ |
3789 | static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc) | 3789 | static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc) |
3790 | { | 3790 | { |
3791 | struct drm_device *dev = crtc->base.dev; | 3791 | struct drm_device *dev = crtc->base.dev; |
3792 | struct intel_crtc *crtc_it, *other_active_crtc = NULL; | 3792 | struct intel_crtc *crtc_it, *other_active_crtc = NULL; |
3793 | 3793 | ||
3794 | /* We want to get the other_active_crtc only if there's only 1 other | 3794 | /* We want to get the other_active_crtc only if there's only 1 other |
3795 | * active crtc. */ | 3795 | * active crtc. */ |
3796 | list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) { | 3796 | list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) { |
3797 | if (!crtc_it->active || crtc_it == crtc) | 3797 | if (!crtc_it->active || crtc_it == crtc) |
3798 | continue; | 3798 | continue; |
3799 | 3799 | ||
3800 | if (other_active_crtc) | 3800 | if (other_active_crtc) |
3801 | return; | 3801 | return; |
3802 | 3802 | ||
3803 | other_active_crtc = crtc_it; | 3803 | other_active_crtc = crtc_it; |
3804 | } | 3804 | } |
3805 | if (!other_active_crtc) | 3805 | if (!other_active_crtc) |
3806 | return; | 3806 | return; |
3807 | 3807 | ||
3808 | intel_wait_for_vblank(dev, other_active_crtc->pipe); | 3808 | intel_wait_for_vblank(dev, other_active_crtc->pipe); |
3809 | intel_wait_for_vblank(dev, other_active_crtc->pipe); | 3809 | intel_wait_for_vblank(dev, other_active_crtc->pipe); |
3810 | } | 3810 | } |
3811 | 3811 | ||
/*
 * Modeset enable sequence for Haswell: train the FDI link when a PCH
 * encoder is present, run encoder pre-enable hooks, bring up the pipe
 * clock, panel fitter, LUT, transcoder and pipe, then the encoders and
 * finally the planes.  The step ordering below is significant; do not
 * reorder without also revisiting haswell_mode_set_planes_workaround().
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	WARN_ON(!crtc->enabled);

	/* Nothing to do if the crtc is already running. */
	if (intel_crtc->active)
		return;

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	/* On Haswell all PCH output goes through transcoder A. */
	if (intel_crtc->config.has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);

	if (intel_crtc->config.has_pch_encoder)
		dev_priv->display.fdi_link_train(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	intel_ddi_enable_pipe_clock(intel_crtc);

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config.has_pch_encoder)
		lpt_pch_enable(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	haswell_mode_set_planes_workaround(intel_crtc);
	haswell_crtc_enable_planes(crtc);
}
3867 | 3867 | ||
3868 | static void ironlake_pfit_disable(struct intel_crtc *crtc) | 3868 | static void ironlake_pfit_disable(struct intel_crtc *crtc) |
3869 | { | 3869 | { |
3870 | struct drm_device *dev = crtc->base.dev; | 3870 | struct drm_device *dev = crtc->base.dev; |
3871 | struct drm_i915_private *dev_priv = dev->dev_private; | 3871 | struct drm_i915_private *dev_priv = dev->dev_private; |
3872 | int pipe = crtc->pipe; | 3872 | int pipe = crtc->pipe; |
3873 | 3873 | ||
3874 | /* To avoid upsetting the power well on haswell only disable the pfit if | 3874 | /* To avoid upsetting the power well on haswell only disable the pfit if |
3875 | * it's in use. The hw state code will make sure we get this right. */ | 3875 | * it's in use. The hw state code will make sure we get this right. */ |
3876 | if (crtc->config.pch_pfit.enabled) { | 3876 | if (crtc->config.pch_pfit.enabled) { |
3877 | I915_WRITE(PF_CTL(pipe), 0); | 3877 | I915_WRITE(PF_CTL(pipe), 0); |
3878 | I915_WRITE(PF_WIN_POS(pipe), 0); | 3878 | I915_WRITE(PF_WIN_POS(pipe), 0); |
3879 | I915_WRITE(PF_WIN_SZ(pipe), 0); | 3879 | I915_WRITE(PF_WIN_SZ(pipe), 0); |
3880 | } | 3880 | } |
3881 | } | 3881 | } |
3882 | 3882 | ||
/*
 * Modeset disable sequence for Ironlake: tear down in roughly the
 * reverse order of the enable path — planes, pipe, pfit, encoders,
 * FDI, PCH transcoder and finally the PLLs.  The ordering is
 * significant; do not reorder.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp;

	/* Already off, nothing to do. */
	if (!intel_crtc->active)
		return;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	/* Don't pull the pipe out from under a pending page flip. */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);

	/* FBC must be shut down before its plane goes away. */
	if (dev_priv->fbc.plane == plane)
		intel_disable_fbc(dev);

	intel_crtc_update_cursor(crtc, false);
	intel_disable_planes(crtc);
	intel_disable_primary_hw_plane(dev_priv, plane, pipe);

	/* Underruns are expected while the PCH side is being torn down,
	 * so suppress the reporting until we're done. */
	if (intel_crtc->config.has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev, pipe, false);

	intel_disable_pipe(dev_priv, pipe);

	ironlake_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		ironlake_fdi_disable(crtc);

		ironlake_disable_pch_transcoder(dev_priv, pipe);
		/* Re-arm underrun reporting now that the transcoder is off. */
		intel_set_pch_fifo_underrun_reporting(dev, pipe, true);

		if (HAS_PCH_CPT(dev)) {
			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		/* disable PCH DPLL */
		intel_disable_shared_dpll(intel_crtc);

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}
3955 | 3955 | ||
/*
 * Modeset disable sequence for Haswell; mirrors haswell_crtc_enable()
 * in reverse order.  The ordering is significant; do not reorder.
 */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

	/* Already off, nothing to do. */
	if (!intel_crtc->active)
		return;

	haswell_crtc_disable_planes(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	/* Underruns are expected while the PCH side is being torn down;
	 * on Haswell all PCH output goes through transcoder A. */
	if (intel_crtc->config.has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
	intel_disable_pipe(dev_priv, pipe);

	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	ironlake_pfit_disable(intel_crtc);

	intel_ddi_disable_pipe_clock(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		/* Re-arm underrun reporting now that the transcoder is off. */
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
		intel_ddi_fdi_disable(crtc);
	}

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}
4002 | 4002 | ||
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
	/* Drop the shared DPLL reference held by this crtc. */
	intel_put_shared_dpll(to_intel_crtc(crtc));
}
4008 | 4008 | ||
/* Release the DDI PLL used by this crtc once it has been disabled. */
static void haswell_crtc_off(struct drm_crtc *crtc)
{
	intel_ddi_put_crtc_pll(crtc);
}
4013 | 4013 | ||
4014 | static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) | 4014 | static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) |
4015 | { | 4015 | { |
4016 | if (!enable && intel_crtc->overlay) { | 4016 | if (!enable && intel_crtc->overlay) { |
4017 | struct drm_device *dev = intel_crtc->base.dev; | 4017 | struct drm_device *dev = intel_crtc->base.dev; |
4018 | struct drm_i915_private *dev_priv = dev->dev_private; | 4018 | struct drm_i915_private *dev_priv = dev->dev_private; |
4019 | 4019 | ||
4020 | mutex_lock(&dev->struct_mutex); | 4020 | mutex_lock(&dev->struct_mutex); |
4021 | dev_priv->mm.interruptible = false; | 4021 | dev_priv->mm.interruptible = false; |
4022 | (void) intel_overlay_switch_off(intel_crtc->overlay); | 4022 | (void) intel_overlay_switch_off(intel_crtc->overlay); |
4023 | dev_priv->mm.interruptible = true; | 4023 | dev_priv->mm.interruptible = true; |
4024 | mutex_unlock(&dev->struct_mutex); | 4024 | mutex_unlock(&dev->struct_mutex); |
4025 | } | 4025 | } |
4026 | 4026 | ||
4027 | /* Let userspace switch the overlay on again. In most cases userspace | 4027 | /* Let userspace switch the overlay on again. In most cases userspace |
4028 | * has to recompute where to put it anyway. | 4028 | * has to recompute where to put it anyway. |
4029 | */ | 4029 | */ |
4030 | } | 4030 | } |
4031 | 4031 | ||
/**
 * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
 * cursor plane briefly if not already running after enabling the display
 * plane.
 * This workaround avoids occasional blank screens when self refresh is
 * enabled.
 */
static void
g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 cntl = I915_READ(CURCNTR(pipe));

	/* Only kick the cursor if it is currently off. */
	if ((cntl & CURSOR_MODE) == 0) {
		u32 fw_bcl_self = I915_READ(FW_BLC_SELF);

		/* Temporarily disable self refresh, flash the cursor on for
		 * one vblank, then restore both cursor and self refresh
		 * state. */
		I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
		I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
		intel_wait_for_vblank(dev_priv->dev, pipe);
		I915_WRITE(CURCNTR(pipe), cntl);
		/* Rewrite CURBASE to latch the restored cursor control. */
		I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
		I915_WRITE(FW_BLC_SELF, fw_bcl_self);
	}
}
4055 | 4055 | ||
/* Program and enable the gmch panel fitter for this crtc. */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_config *pipe_config = &crtc->config;

	/* Nothing to program if the pfit isn't used by this config. */
	if (!crtc->config.gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* Ratios must be written before the control register enables it. */
	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
4079 | 4079 | ||
/* Iterate @domain over every power domain whose bit is set in @mask. */
#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))
4083 | 4083 | ||
/* Map an encoder to the power domain of the port it drives. */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fallthrough - handled like the other digital port types */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		switch (intel_dig_port->port) {
		case PORT_A:
			return POWER_DOMAIN_PORT_DDI_A_4_LANES;
		case PORT_B:
			return POWER_DOMAIN_PORT_DDI_B_4_LANES;
		case PORT_C:
			return POWER_DOMAIN_PORT_DDI_C_4_LANES;
		case PORT_D:
			return POWER_DOMAIN_PORT_DDI_D_4_LANES;
		default:
			WARN_ON_ONCE(1);
			return POWER_DOMAIN_PORT_OTHER;
		}
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}
4119 | 4119 | ||
4120 | static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) | 4120 | static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) |
4121 | { | 4121 | { |
4122 | struct drm_device *dev = crtc->dev; | 4122 | struct drm_device *dev = crtc->dev; |
4123 | struct intel_encoder *intel_encoder; | 4123 | struct intel_encoder *intel_encoder; |
4124 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4124 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4125 | enum pipe pipe = intel_crtc->pipe; | 4125 | enum pipe pipe = intel_crtc->pipe; |
4126 | bool pfit_enabled = intel_crtc->config.pch_pfit.enabled; | 4126 | bool pfit_enabled = intel_crtc->config.pch_pfit.enabled; |
4127 | unsigned long mask; | 4127 | unsigned long mask; |
4128 | enum transcoder transcoder; | 4128 | enum transcoder transcoder; |
4129 | 4129 | ||
4130 | transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe); | 4130 | transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe); |
4131 | 4131 | ||
4132 | mask = BIT(POWER_DOMAIN_PIPE(pipe)); | 4132 | mask = BIT(POWER_DOMAIN_PIPE(pipe)); |
4133 | mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); | 4133 | mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); |
4134 | if (pfit_enabled) | 4134 | if (pfit_enabled) |
4135 | mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); | 4135 | mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); |
4136 | 4136 | ||
4137 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) | 4137 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) |
4138 | mask |= BIT(intel_display_port_power_domain(intel_encoder)); | 4138 | mask |= BIT(intel_display_port_power_domain(intel_encoder)); |
4139 | 4139 | ||
4140 | return mask; | 4140 | return mask; |
4141 | } | 4141 | } |
4142 | 4142 | ||
/*
 * Grab or release a reference on POWER_DOMAIN_INIT, tracking the
 * current state in init_power_on so the get/put stays balanced.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	/* Already in the requested state; avoid an unbalanced get/put. */
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}
4156 | 4156 | ||
/*
 * Update per-crtc power domain references after a modeset: acquire the
 * domains needed by the new state, release the ones held for the old
 * state, then drop the init power reference.
 */
static void modeset_update_crtc_power_domains(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
	struct intel_crtc *crtc;

	/*
	 * First get all needed power domains, then put all unneeded, to avoid
	 * any unnecessary toggling of the power wells.
	 */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		enum intel_display_power_domain domain;

		if (!crtc->base.enabled)
			continue;

		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);

		for_each_power_domain(domain, pipe_domains[crtc->pipe])
			intel_display_power_get(dev_priv, domain);
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		enum intel_display_power_domain domain;

		/* Drop the references held for the previous configuration. */
		for_each_power_domain(domain, crtc->enabled_power_domains)
			intel_display_power_put(dev_priv, domain);

		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
	}

	/* The crtcs now hold their own references; drop the init power. */
	intel_display_set_init_power(dev_priv, false);
}
4190 | 4190 | ||
4191 | int valleyview_get_vco(struct drm_i915_private *dev_priv) | 4191 | int valleyview_get_vco(struct drm_i915_private *dev_priv) |
4192 | { | 4192 | { |
4193 | int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; | 4193 | int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; |
4194 | 4194 | ||
4195 | /* Obtain SKU information */ | 4195 | /* Obtain SKU information */ |
4196 | mutex_lock(&dev_priv->dpio_lock); | 4196 | mutex_lock(&dev_priv->dpio_lock); |
4197 | hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & | 4197 | hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & |
4198 | CCK_FUSE_HPLL_FREQ_MASK; | 4198 | CCK_FUSE_HPLL_FREQ_MASK; |
4199 | mutex_unlock(&dev_priv->dpio_lock); | 4199 | mutex_unlock(&dev_priv->dpio_lock); |
4200 | 4200 | ||
4201 | return vco_freq[hpll_freq]; | 4201 | return vco_freq[hpll_freq]; |
4202 | } | 4202 | } |
4203 | 4203 | ||
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* Pick the Punit voltage level for the requested frequency. */
	if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266)
		cmd = 1;
	else
		cmd = 0;

	/* Request the new level from the Punit and wait for it to ack. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* 400MHz needs a non-default divider derived from the VCO. */
	if (cdclk == 400) {
		u32 divider, vco;

		vco = valleyview_get_vco(dev_priv);
		divider = ((vco << 1) / cdclk) - 1;

		mutex_lock(&dev_priv->dpio_lock);
		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~0xf;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
		mutex_unlock(&dev_priv->dpio_lock);
	}

	mutex_lock(&dev_priv->dpio_lock);
	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
	mutex_unlock(&dev_priv->dpio_lock);

	/* Since we changed the CDclk, we need to update the GMBUSFREQ too */
	intel_i2c_reset(dev);
}
4263 | 4263 | ||
4264 | static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv) | 4264 | static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv) |
4265 | { | 4265 | { |
4266 | int cur_cdclk, vco; | 4266 | int cur_cdclk, vco; |
4267 | int divider; | 4267 | int divider; |
4268 | 4268 | ||
4269 | vco = valleyview_get_vco(dev_priv); | 4269 | vco = valleyview_get_vco(dev_priv); |
4270 | 4270 | ||
4271 | mutex_lock(&dev_priv->dpio_lock); | 4271 | mutex_lock(&dev_priv->dpio_lock); |
4272 | divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); | 4272 | divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); |
4273 | mutex_unlock(&dev_priv->dpio_lock); | 4273 | mutex_unlock(&dev_priv->dpio_lock); |
4274 | 4274 | ||
4275 | divider &= 0xf; | 4275 | divider &= 0xf; |
4276 | 4276 | ||
4277 | cur_cdclk = (vco << 1) / (divider + 1); | 4277 | cur_cdclk = (vco << 1) / (divider + 1); |
4278 | 4278 | ||
4279 | return cur_cdclk; | 4279 | return cur_cdclk; |
4280 | } | 4280 | } |
4281 | 4281 | ||
/*
 * Pick the smallest CDclk (in MHz) that can feed the given maximum
 * pixel clock (in kHz).  Only a few CDclks are usable here
 * (266/320/400 MHz); the 200 MHz bin is avoided since it doesn't
 * work on some configs (see the original in-code note).
 *
 * We step up to the next bin once the pixel clock exceeds 90% of
 * the lower bin (240000 -> 320, 288000 -> 400).
 *
 * Fix: the original computed cur_cdclk via valleyview_cur_cdclk()
 * (a hardware register read under dpio_lock) and then never used
 * the result — dead work with real side effects, removed here.
 */
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
	if (max_pixclk > 288000)
		return 400;
	else if (max_pixclk > 240000)
		return 320;
	else
		return 266;
	/* Looks like the 200MHz CDclk freq doesn't work on some configs */
}
4306 | 4306 | ||
4307 | /* compute the max pixel clock for new configuration */ | 4307 | /* compute the max pixel clock for new configuration */ |
4308 | static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv) | 4308 | static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv) |
4309 | { | 4309 | { |
4310 | struct drm_device *dev = dev_priv->dev; | 4310 | struct drm_device *dev = dev_priv->dev; |
4311 | struct intel_crtc *intel_crtc; | 4311 | struct intel_crtc *intel_crtc; |
4312 | int max_pixclk = 0; | 4312 | int max_pixclk = 0; |
4313 | 4313 | ||
4314 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, | 4314 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, |
4315 | base.head) { | 4315 | base.head) { |
4316 | if (intel_crtc->new_enabled) | 4316 | if (intel_crtc->new_enabled) |
4317 | max_pixclk = max(max_pixclk, | 4317 | max_pixclk = max(max_pixclk, |
4318 | intel_crtc->new_config->adjusted_mode.crtc_clock); | 4318 | intel_crtc->new_config->adjusted_mode.crtc_clock); |
4319 | } | 4319 | } |
4320 | 4320 | ||
4321 | return max_pixclk; | 4321 | return max_pixclk; |
4322 | } | 4322 | } |
4323 | 4323 | ||
4324 | static void valleyview_modeset_global_pipes(struct drm_device *dev, | 4324 | static void valleyview_modeset_global_pipes(struct drm_device *dev, |
4325 | unsigned *prepare_pipes) | 4325 | unsigned *prepare_pipes) |
4326 | { | 4326 | { |
4327 | struct drm_i915_private *dev_priv = dev->dev_private; | 4327 | struct drm_i915_private *dev_priv = dev->dev_private; |
4328 | struct intel_crtc *intel_crtc; | 4328 | struct intel_crtc *intel_crtc; |
4329 | int max_pixclk = intel_mode_max_pixclk(dev_priv); | 4329 | int max_pixclk = intel_mode_max_pixclk(dev_priv); |
4330 | int cur_cdclk = valleyview_cur_cdclk(dev_priv); | 4330 | int cur_cdclk = valleyview_cur_cdclk(dev_priv); |
4331 | 4331 | ||
4332 | if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk) | 4332 | if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk) |
4333 | return; | 4333 | return; |
4334 | 4334 | ||
4335 | /* disable/enable all currently active pipes while we change cdclk */ | 4335 | /* disable/enable all currently active pipes while we change cdclk */ |
4336 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, | 4336 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, |
4337 | base.head) | 4337 | base.head) |
4338 | if (intel_crtc->base.enabled) | 4338 | if (intel_crtc->base.enabled) |
4339 | *prepare_pipes |= (1 << intel_crtc->pipe); | 4339 | *prepare_pipes |= (1 << intel_crtc->pipe); |
4340 | } | 4340 | } |
4341 | 4341 | ||
4342 | static void valleyview_modeset_global_resources(struct drm_device *dev) | 4342 | static void valleyview_modeset_global_resources(struct drm_device *dev) |
4343 | { | 4343 | { |
4344 | struct drm_i915_private *dev_priv = dev->dev_private; | 4344 | struct drm_i915_private *dev_priv = dev->dev_private; |
4345 | int max_pixclk = intel_mode_max_pixclk(dev_priv); | 4345 | int max_pixclk = intel_mode_max_pixclk(dev_priv); |
4346 | int cur_cdclk = valleyview_cur_cdclk(dev_priv); | 4346 | int cur_cdclk = valleyview_cur_cdclk(dev_priv); |
4347 | int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); | 4347 | int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); |
4348 | 4348 | ||
4349 | if (req_cdclk != cur_cdclk) | 4349 | if (req_cdclk != cur_cdclk) |
4350 | valleyview_set_cdclk(dev, req_cdclk); | 4350 | valleyview_set_cdclk(dev, req_cdclk); |
4351 | modeset_update_crtc_power_domains(dev); | 4351 | modeset_update_crtc_power_domains(dev); |
4352 | } | 4352 | } |
4353 | 4353 | ||
/*
 * Full enable sequence for a pipe on Valleyview: encoder pre-PLL
 * hooks, display PLL (unless the output is DSI, which supplies its
 * own clock), encoder pre-enable hooks, panel fitter, gamma LUT,
 * watermarks, pipe, planes/cursor and finally the encoders' output
 * stage.  The ordering follows hardware requirements; do not
 * reorder.
 */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	bool is_dsi;

	WARN_ON(!crtc->enabled);

	/* Already running: nothing to do. */
	if (intel_crtc->active)
		return;

	intel_crtc->active = true;

	/* Hooks that must run before the PLL comes up. */
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);

	/* DSI generates its own clock, so the display PLL stays off. */
	if (!is_dsi)
		vlv_enable_pll(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	/* Watermarks must be valid before the pipe starts fetching. */
	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	intel_crtc_update_cursor(crtc, true);

	intel_update_fbc(dev);

	/* Finally light up the actual outputs. */
	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
4400 | 4400 | ||
/*
 * Full enable sequence for a pipe on non-VLV i9xx platforms:
 * encoder pre-enable hooks, DPLL, panel fitter, gamma LUT,
 * watermarks, pipe, planes/cursor, overlay and finally the
 * encoders' output stage.  The ordering follows hardware
 * requirements; do not reorder.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	WARN_ON(!crtc->enabled);

	/* Already running: nothing to do. */
	if (intel_crtc->active)
		return;

	intel_crtc->active = true;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	/* Watermarks must be valid before the pipe starts fetching. */
	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	/* The fixup needs to happen before cursor is enabled */
	if (IS_G4X(dev))
		g4x_fixup_plane(dev_priv, pipe);
	intel_crtc_update_cursor(crtc, true);

	/* Give the overlay scaler a chance to enable if it's on this pipe */
	intel_crtc_dpms_overlay(intel_crtc, true);

	intel_update_fbc(dev);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
4445 | 4445 | ||
4446 | static void i9xx_pfit_disable(struct intel_crtc *crtc) | 4446 | static void i9xx_pfit_disable(struct intel_crtc *crtc) |
4447 | { | 4447 | { |
4448 | struct drm_device *dev = crtc->base.dev; | 4448 | struct drm_device *dev = crtc->base.dev; |
4449 | struct drm_i915_private *dev_priv = dev->dev_private; | 4449 | struct drm_i915_private *dev_priv = dev->dev_private; |
4450 | 4450 | ||
4451 | if (!crtc->config.gmch_pfit.control) | 4451 | if (!crtc->config.gmch_pfit.control) |
4452 | return; | 4452 | return; |
4453 | 4453 | ||
4454 | assert_pipe_disabled(dev_priv, crtc->pipe); | 4454 | assert_pipe_disabled(dev_priv, crtc->pipe); |
4455 | 4455 | ||
4456 | DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", | 4456 | DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", |
4457 | I915_READ(PFIT_CONTROL)); | 4457 | I915_READ(PFIT_CONTROL)); |
4458 | I915_WRITE(PFIT_CONTROL, 0); | 4458 | I915_WRITE(PFIT_CONTROL, 0); |
4459 | } | 4459 | } |
4460 | 4460 | ||
/*
 * Disable sequence, mirroring i9xx_crtc_enable in reverse: encoders
 * first, then overlay/cursor/planes, pipe, panel fitter, encoder
 * post-disable hooks and finally the PLL.  Ordering matters for the
 * hardware; do not reorder.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	/* Already off: nothing to do. */
	if (!intel_crtc->active)
		return;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	/* Give the overlay scaler a chance to disable if it's on this pipe */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);

	/* FBC must be torn down before its plane goes away. */
	if (dev_priv->fbc.plane == plane)
		intel_disable_fbc(dev);

	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);
	intel_disable_planes(crtc);
	intel_disable_primary_hw_plane(dev_priv, plane, pipe);

	/* Quiesce underrun reporting before shutting the pipe down. */
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
	intel_disable_pipe(dev_priv, pipe);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/* DSI supplies its own clock, so leave the PLL alone for VLV+DSI. */
	if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
		vlv_disable_pll(dev_priv, pipe);
	else if (!IS_VALLEYVIEW(dev))
		i9xx_disable_pll(dev_priv, pipe);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	intel_update_fbc(dev);
}
4507 | 4507 | ||
/*
 * display.off() hook for i9xx: intentionally does nothing — the
 * crtc_disable hook already performs all teardown on these
 * platforms, but the hook must exist for callers of display.off().
 */
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
4511 | 4511 | ||
4512 | static void intel_crtc_update_sarea(struct drm_crtc *crtc, | 4512 | static void intel_crtc_update_sarea(struct drm_crtc *crtc, |
4513 | bool enabled) | 4513 | bool enabled) |
4514 | { | 4514 | { |
4515 | struct drm_device *dev = crtc->dev; | 4515 | struct drm_device *dev = crtc->dev; |
4516 | struct drm_i915_master_private *master_priv; | 4516 | struct drm_i915_master_private *master_priv; |
4517 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4517 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4518 | int pipe = intel_crtc->pipe; | 4518 | int pipe = intel_crtc->pipe; |
4519 | 4519 | ||
4520 | if (!dev->primary->master) | 4520 | if (!dev->primary->master) |
4521 | return; | 4521 | return; |
4522 | 4522 | ||
4523 | master_priv = dev->primary->master->driver_priv; | 4523 | master_priv = dev->primary->master->driver_priv; |
4524 | if (!master_priv->sarea_priv) | 4524 | if (!master_priv->sarea_priv) |
4525 | return; | 4525 | return; |
4526 | 4526 | ||
4527 | switch (pipe) { | 4527 | switch (pipe) { |
4528 | case 0: | 4528 | case 0: |
4529 | master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; | 4529 | master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; |
4530 | master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; | 4530 | master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; |
4531 | break; | 4531 | break; |
4532 | case 1: | 4532 | case 1: |
4533 | master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; | 4533 | master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; |
4534 | master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; | 4534 | master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; |
4535 | break; | 4535 | break; |
4536 | default: | 4536 | default: |
4537 | DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); | 4537 | DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); |
4538 | break; | 4538 | break; |
4539 | } | 4539 | } |
4540 | } | 4540 | } |
4541 | 4541 | ||
4542 | /** | 4542 | /** |
4543 | * Sets the power management mode of the pipe and plane. | 4543 | * Sets the power management mode of the pipe and plane. |
4544 | */ | 4544 | */ |
4545 | void intel_crtc_update_dpms(struct drm_crtc *crtc) | 4545 | void intel_crtc_update_dpms(struct drm_crtc *crtc) |
4546 | { | 4546 | { |
4547 | struct drm_device *dev = crtc->dev; | 4547 | struct drm_device *dev = crtc->dev; |
4548 | struct drm_i915_private *dev_priv = dev->dev_private; | 4548 | struct drm_i915_private *dev_priv = dev->dev_private; |
4549 | struct intel_encoder *intel_encoder; | 4549 | struct intel_encoder *intel_encoder; |
4550 | bool enable = false; | 4550 | bool enable = false; |
4551 | 4551 | ||
4552 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) | 4552 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) |
4553 | enable |= intel_encoder->connectors_active; | 4553 | enable |= intel_encoder->connectors_active; |
4554 | 4554 | ||
4555 | if (enable) | 4555 | if (enable) |
4556 | dev_priv->display.crtc_enable(crtc); | 4556 | dev_priv->display.crtc_enable(crtc); |
4557 | else | 4557 | else |
4558 | dev_priv->display.crtc_disable(crtc); | 4558 | dev_priv->display.crtc_disable(crtc); |
4559 | 4559 | ||
4560 | intel_crtc_update_sarea(crtc, enable); | 4560 | intel_crtc_update_sarea(crtc, enable); |
4561 | } | 4561 | } |
4562 | 4562 | ||
/*
 * Disable a crtc completely (hardware and software state): run the
 * platform disable/off hooks, unpin the scanout framebuffer and
 * mark every connector routed to this crtc as off.
 */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector *connector;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* crtc should still be enabled when we disable it. */
	WARN_ON(!crtc->enabled);

	dev_priv->display.crtc_disable(crtc);
	intel_crtc->eld_vld = false;
	intel_crtc_update_sarea(crtc, false);
	dev_priv->display.off(crtc);

	/* Sanity-check that the hardware really is off now. */
	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
	assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);

	/* Release the scanout buffer now that nothing scans it out;
	 * unpinning requires struct_mutex. */
	if (crtc->primary->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		crtc->primary->fb = NULL;
	}

	/* Update computed state. */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		if (connector->encoder->crtc != crtc)
			continue;

		connector->dpms = DRM_MODE_DPMS_OFF;
		to_intel_encoder(connector->encoder)->connectors_active = false;
	}
}
4601 | 4601 | ||
/* Generic destroy hook: tear down the drm encoder and free the
 * containing intel_encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
4609 | 4609 | ||
4610 | /* Simple dpms helper for encoders with just one connector, no cloning and only | 4610 | /* Simple dpms helper for encoders with just one connector, no cloning and only |
4611 | * one kind of off state. It clamps all !ON modes to fully OFF and changes the | 4611 | * one kind of off state. It clamps all !ON modes to fully OFF and changes the |
4612 | * state of the entire output pipe. */ | 4612 | * state of the entire output pipe. */ |
4613 | static void intel_encoder_dpms(struct intel_encoder *encoder, int mode) | 4613 | static void intel_encoder_dpms(struct intel_encoder *encoder, int mode) |
4614 | { | 4614 | { |
4615 | if (mode == DRM_MODE_DPMS_ON) { | 4615 | if (mode == DRM_MODE_DPMS_ON) { |
4616 | encoder->connectors_active = true; | 4616 | encoder->connectors_active = true; |
4617 | 4617 | ||
4618 | intel_crtc_update_dpms(encoder->base.crtc); | 4618 | intel_crtc_update_dpms(encoder->base.crtc); |
4619 | } else { | 4619 | } else { |
4620 | encoder->connectors_active = false; | 4620 | encoder->connectors_active = false; |
4621 | 4621 | ||
4622 | intel_crtc_update_dpms(encoder->base.crtc); | 4622 | intel_crtc_update_dpms(encoder->base.crtc); |
4623 | } | 4623 | } |
4624 | } | 4624 | } |
4625 | 4625 | ||
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
	/* Only active connectors are checked; an off connector is
	 * trivially consistent. */
	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_crtc *crtc;
		bool encoder_enabled;
		enum pipe pipe;

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
			      connector->base.base.id,
			      drm_get_connector_name(&connector->base));

		/* Software tracking must agree the connector is on... */
		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
		     "wrong connector dpms state\n");
		WARN(connector->base.encoder != &encoder->base,
		     "active connector not linked to encoder\n");
		WARN(!encoder->connectors_active,
		     "encoder->connectors_active not set\n");

		/* ... and so must the encoder's hardware state ... */
		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
		WARN(!encoder_enabled, "encoder not enabled\n");
		if (WARN_ON(!encoder->base.crtc))
			return;

		crtc = encoder->base.crtc;

		/* ... and the crtc the encoder is routed to. */
		WARN(!crtc->enabled, "crtc not enabled\n");
		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
		WARN(pipe != to_intel_crtc(crtc)->pipe,
		     "encoder active on the wrong pipe\n");
	}
}
4660 | 4660 | ||
4661 | /* Even simpler default implementation, if there's really no special case to | 4661 | /* Even simpler default implementation, if there's really no special case to |
4662 | * consider. */ | 4662 | * consider. */ |
4663 | void intel_connector_dpms(struct drm_connector *connector, int mode) | 4663 | void intel_connector_dpms(struct drm_connector *connector, int mode) |
4664 | { | 4664 | { |
4665 | /* All the simple cases only support two dpms states. */ | 4665 | /* All the simple cases only support two dpms states. */ |
4666 | if (mode != DRM_MODE_DPMS_ON) | 4666 | if (mode != DRM_MODE_DPMS_ON) |
4667 | mode = DRM_MODE_DPMS_OFF; | 4667 | mode = DRM_MODE_DPMS_OFF; |
4668 | 4668 | ||
4669 | if (mode == connector->dpms) | 4669 | if (mode == connector->dpms) |
4670 | return; | 4670 | return; |
4671 | 4671 | ||
4672 | connector->dpms = mode; | 4672 | connector->dpms = mode; |
4673 | 4673 | ||
4674 | /* Only need to change hw state when actually enabled */ | 4674 | /* Only need to change hw state when actually enabled */ |
4675 | if (connector->encoder) | 4675 | if (connector->encoder) |
4676 | intel_encoder_dpms(to_intel_encoder(connector->encoder), mode); | 4676 | intel_encoder_dpms(to_intel_encoder(connector->encoder), mode); |
4677 | 4677 | ||
4678 | intel_modeset_check_state(connector->dev); | 4678 | intel_modeset_check_state(connector->dev); |
4679 | } | 4679 | } |
4680 | 4680 | ||
4681 | /* Simple connector->get_hw_state implementation for encoders that support only | 4681 | /* Simple connector->get_hw_state implementation for encoders that support only |
4682 | * one connector and no cloning and hence the encoder state determines the state | 4682 | * one connector and no cloning and hence the encoder state determines the state |
4683 | * of the connector. */ | 4683 | * of the connector. */ |
4684 | bool intel_connector_get_hw_state(struct intel_connector *connector) | 4684 | bool intel_connector_get_hw_state(struct intel_connector *connector) |
4685 | { | 4685 | { |
4686 | enum pipe pipe = 0; | 4686 | enum pipe pipe = 0; |
4687 | struct intel_encoder *encoder = connector->encoder; | 4687 | struct intel_encoder *encoder = connector->encoder; |
4688 | 4688 | ||
4689 | return encoder->get_hw_state(encoder, &pipe); | 4689 | return encoder->get_hw_state(encoder, &pipe); |
4690 | } | 4690 | } |
4691 | 4691 | ||
/*
 * Validate the requested FDI lane count for @pipe against the
 * per-platform limits and against lanes already claimed by other
 * pipes (pipes B and C share FDI lanes on three-pipe parts).
 * Returns false when the configuration cannot work.
 */
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute maximum accepted here. */
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return false;
	}

	/* HSW/BDW are capped at 2 FDI lanes. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return false;
		} else {
			return true;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_INFO(dev)->num_pipes == 2)
		return true;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return true;
	case PIPE_B:
		/* Pipe B may only use more than 2 lanes while pipe C is off. */
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
		    pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return false;
		}
		return true;
	case PIPE_C:
		/* Pipe C shares lanes with pipe B: C is capped at 2 lanes,
		 * and is unusable at all if B already uses more than 2. */
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
		    pipe_B_crtc->config.fdi_lanes <= 2) {
			if (pipe_config->fdi_lanes > 2) {
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
					      pipe_name(pipe), pipe_config->fdi_lanes);
				return false;
			}
		} else {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return false;
		}
		return true;
	default:
		BUG();
	}
}
4749 | 4749 | ||
#define RETRY 1
/*
 * Compute the FDI link configuration (lane count and M/N values) for a
 * pipe that drives a PCH encoder.
 *
 * When the resulting lane count is not supportable, the pipe bpp is
 * reduced by one bit per component (2*3) down to a floor of 6 bpc and
 * the computation is retried.
 *
 * Returns 0 on success, RETRY when the caller must recompute the mode
 * with the reduced bpp, or -EINVAL if no workable setup was found.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	int lane, link_bw, fdi_dotclock;
	bool setup_ok, needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	/* Validate the lane count against the per-platform FDI limits;
	 * if it fails, drop one bit per component and try again. */
	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
					    intel_crtc->pipe, pipe_config);
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return setup_ok ? 0 : -EINVAL;
}
4796 | 4796 | ||
4797 | static void hsw_compute_ips_config(struct intel_crtc *crtc, | 4797 | static void hsw_compute_ips_config(struct intel_crtc *crtc, |
4798 | struct intel_crtc_config *pipe_config) | 4798 | struct intel_crtc_config *pipe_config) |
4799 | { | 4799 | { |
4800 | pipe_config->ips_enabled = i915.enable_ips && | 4800 | pipe_config->ips_enabled = i915.enable_ips && |
4801 | hsw_crtc_supports_ips(crtc) && | 4801 | hsw_crtc_supports_ips(crtc) && |
4802 | pipe_config->pipe_bpp <= 24; | 4802 | pipe_config->pipe_bpp <= 24; |
4803 | } | 4803 | } |
4804 | 4804 | ||
/*
 * Fix up and validate a candidate pipe configuration before modeset:
 * applies pre-gen4 dotclock/double-wide limits, horizontal size
 * constraints, hsync front porch restrictions, per-platform bpp caps,
 * the IPS decision, and finally the FDI computation for PCH encoders.
 *
 * Returns 0 on success, a negative error code if the mode cannot be
 * supported, or RETRY (via ironlake_fdi_compute_config) when the caller
 * must recompute with a reduced bpp.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;

	/* FIXME should check pixel clock limits on all platforms */
	if (INTEL_INFO(dev)->gen < 4) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		int clock_limit =
			dev_priv->display.get_display_clock_speed(dev);

		/*
		 * Enable pixel doubling when the dot clock
		 * is > 90% of the (display) core speed.
		 *
		 * GDG double wide on either pipe,
		 * otherwise pipe A only.
		 */
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
			clock_limit *= 2;
			pipe_config->double_wide = true;
		}

		/* Even with doubling (which raised clock_limit above),
		 * the dotclock may still be out of range. */
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
			return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
		return -EINVAL;

	/* Clamp the pipe bpp to what the platform supports. */
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
		 * for lvds. */
		pipe_config->pipe_bpp = 8*3;
	}

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	/* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
	 * clock survives for now. */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		pipe_config->shared_dpll = crtc->config.shared_dpll;

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
4872 | 4872 | ||
/* Valleyview display core clock, in kHz. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	/* FIXME: hardcoded; should be derived from the actual hardware state */
	return 400000;
}
4877 | 4877 | ||
/* 945: fixed 400 MHz display core clock, returned in kHz. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
4882 | 4882 | ||
/* 915: fixed 333 MHz display core clock, returned in kHz. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}
4887 | 4887 | ||
/* Remaining i9xx variants: fixed 200 MHz display core clock, in kHz. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
4892 | 4892 | ||
/*
 * Read the Pineview display core clock from the GCFGC PCI config word.
 * Returns the clock in kHz.
 */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 267000;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333000;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444000;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* deliberate fallthrough: treat unknown values as 133 MHz */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133000;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 167000;
	}
}
4916 | 4916 | ||
4917 | static int i915gm_get_display_clock_speed(struct drm_device *dev) | 4917 | static int i915gm_get_display_clock_speed(struct drm_device *dev) |
4918 | { | 4918 | { |
4919 | u16 gcfgc = 0; | 4919 | u16 gcfgc = 0; |
4920 | 4920 | ||
4921 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | 4921 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); |
4922 | 4922 | ||
4923 | if (gcfgc & GC_LOW_FREQUENCY_ENABLE) | 4923 | if (gcfgc & GC_LOW_FREQUENCY_ENABLE) |
4924 | return 133000; | 4924 | return 133000; |
4925 | else { | 4925 | else { |
4926 | switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { | 4926 | switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { |
4927 | case GC_DISPLAY_CLOCK_333_MHZ: | 4927 | case GC_DISPLAY_CLOCK_333_MHZ: |
4928 | return 333000; | 4928 | return 333000; |
4929 | default: | 4929 | default: |
4930 | case GC_DISPLAY_CLOCK_190_200_MHZ: | 4930 | case GC_DISPLAY_CLOCK_190_200_MHZ: |
4931 | return 190000; | 4931 | return 190000; |
4932 | } | 4932 | } |
4933 | } | 4933 | } |
4934 | } | 4934 | } |
4935 | 4935 | ||
/* 865: fixed 266 MHz display core clock, returned in kHz. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}
4940 | 4940 | ||
/*
 * Return the 855 display core clock in kHz.
 *
 * NOTE(review): hpllcc is initialized to 0 and never read back from the
 * hardware, so the switch always takes whichever GC_CLOCK_* encoding
 * equals 0 (the "high speed state" assumed below). If the real HPLLCC
 * value matters here, it would have to be read from config space first
 * -- confirm against the chipset documentation.
 */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
4960 | 4960 | ||
/* 830: fixed 133 MHz display core clock, returned in kHz. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
4965 | 4965 | ||
4966 | static void | 4966 | static void |
4967 | intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) | 4967 | intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) |
4968 | { | 4968 | { |
4969 | while (*num > DATA_LINK_M_N_MASK || | 4969 | while (*num > DATA_LINK_M_N_MASK || |
4970 | *den > DATA_LINK_M_N_MASK) { | 4970 | *den > DATA_LINK_M_N_MASK) { |
4971 | *num >>= 1; | 4971 | *num >>= 1; |
4972 | *den >>= 1; | 4972 | *den >>= 1; |
4973 | } | 4973 | } |
4974 | } | 4974 | } |
4975 | 4975 | ||
4976 | static void compute_m_n(unsigned int m, unsigned int n, | 4976 | static void compute_m_n(unsigned int m, unsigned int n, |
4977 | uint32_t *ret_m, uint32_t *ret_n) | 4977 | uint32_t *ret_m, uint32_t *ret_n) |
4978 | { | 4978 | { |
4979 | *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); | 4979 | *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); |
4980 | *ret_m = div_u64((uint64_t) m * *ret_n, n); | 4980 | *ret_m = div_u64((uint64_t) m * *ret_n, n); |
4981 | intel_reduce_m_n_ratio(ret_m, ret_n); | 4981 | intel_reduce_m_n_ratio(ret_m, ret_n); |
4982 | } | 4982 | } |
4983 | 4983 | ||
4984 | void | 4984 | void |
4985 | intel_link_compute_m_n(int bits_per_pixel, int nlanes, | 4985 | intel_link_compute_m_n(int bits_per_pixel, int nlanes, |
4986 | int pixel_clock, int link_clock, | 4986 | int pixel_clock, int link_clock, |
4987 | struct intel_link_m_n *m_n) | 4987 | struct intel_link_m_n *m_n) |
4988 | { | 4988 | { |
4989 | m_n->tu = 64; | 4989 | m_n->tu = 64; |
4990 | 4990 | ||
4991 | compute_m_n(bits_per_pixel * pixel_clock, | 4991 | compute_m_n(bits_per_pixel * pixel_clock, |
4992 | link_clock * nlanes * 8, | 4992 | link_clock * nlanes * 8, |
4993 | &m_n->gmch_m, &m_n->gmch_n); | 4993 | &m_n->gmch_m, &m_n->gmch_n); |
4994 | 4994 | ||
4995 | compute_m_n(pixel_clock, link_clock, | 4995 | compute_m_n(pixel_clock, link_clock, |
4996 | &m_n->link_m, &m_n->link_n); | 4996 | &m_n->link_m, &m_n->link_n); |
4997 | } | 4997 | } |
4998 | 4998 | ||
4999 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) | 4999 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) |
5000 | { | 5000 | { |
5001 | if (i915.panel_use_ssc >= 0) | 5001 | if (i915.panel_use_ssc >= 0) |
5002 | return i915.panel_use_ssc != 0; | 5002 | return i915.panel_use_ssc != 0; |
5003 | return dev_priv->vbt.lvds_use_ssc | 5003 | return dev_priv->vbt.lvds_use_ssc |
5004 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); | 5004 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); |
5005 | } | 5005 | } |
5006 | 5006 | ||
5007 | static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) | 5007 | static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) |
5008 | { | 5008 | { |
5009 | struct drm_device *dev = crtc->dev; | 5009 | struct drm_device *dev = crtc->dev; |
5010 | struct drm_i915_private *dev_priv = dev->dev_private; | 5010 | struct drm_i915_private *dev_priv = dev->dev_private; |
5011 | int refclk; | 5011 | int refclk; |
5012 | 5012 | ||
5013 | if (IS_VALLEYVIEW(dev)) { | 5013 | if (IS_VALLEYVIEW(dev)) { |
5014 | refclk = 100000; | 5014 | refclk = 100000; |
5015 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && | 5015 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && |
5016 | intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | 5016 | intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
5017 | refclk = dev_priv->vbt.lvds_ssc_freq; | 5017 | refclk = dev_priv->vbt.lvds_ssc_freq; |
5018 | DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); | 5018 | DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); |
5019 | } else if (!IS_GEN2(dev)) { | 5019 | } else if (!IS_GEN2(dev)) { |
5020 | refclk = 96000; | 5020 | refclk = 96000; |
5021 | } else { | 5021 | } else { |
5022 | refclk = 48000; | 5022 | refclk = 48000; |
5023 | } | 5023 | } |
5024 | 5024 | ||
5025 | return refclk; | 5025 | return refclk; |
5026 | } | 5026 | } |
5027 | 5027 | ||
5028 | static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) | 5028 | static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) |
5029 | { | 5029 | { |
5030 | return (1 << dpll->n) << 16 | dpll->m2; | 5030 | return (1 << dpll->n) << 16 | dpll->m2; |
5031 | } | 5031 | } |
5032 | 5032 | ||
5033 | static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) | 5033 | static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) |
5034 | { | 5034 | { |
5035 | return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; | 5035 | return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; |
5036 | } | 5036 | } |
5037 | 5037 | ||
/*
 * Program the FP0/FP1 DPLL divider registers for a pipe and mirror the
 * values into the crtc's cached dpll_hw_state.
 *
 * FP1 receives the reduced (low-frequency) dividers only when an LVDS
 * output is active, a reduced clock was computed and i915.powersave is
 * set; crtc->lowfreq_avail records whether that path was taken.
 */
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 fp, fp2 = 0;

	/* Pineview packs the divider fields differently from other gens. */
	if (IS_PINEVIEW(dev)) {
		fp = pnv_dpll_compute_fp(&crtc->config.dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	I915_WRITE(FP0(pipe), fp);
	crtc->config.dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
	    reduced_clock && i915.powersave) {
		I915_WRITE(FP1(pipe), fp2);
		crtc->config.dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	} else {
		/* No low-frequency clock: FP1 just mirrors FP0. */
		I915_WRITE(FP1(pipe), fp);
		crtc->config.dpll_hw_state.fp1 = fp;
	}
}
5070 | 5070 | ||
/*
 * Work around the PLLB opamp always calibrating to its maximum value
 * (0x3f) on Valleyview by force-enabling it and programming a sane
 * value through the DPIO sideband registers. The exact register/bit
 * values come from hardware programming notes.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	/* NOTE(review): this assignment discards the value just read and
	 * masked above; confirm against the DPIO programming notes whether
	 * "|=" was intended here. */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
5099 | 5099 | ||
/*
 * Write the data and link M/N values into the PCH transcoder registers
 * for this crtc's pipe. TU size is packed into the DATA_M1 register.
 */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
5112 | 5112 | ||
/*
 * Write the data and link M/N values into the CPU transcoder registers.
 * Gen5+ uses per-transcoder registers; older (G4X-style) hardware uses
 * per-pipe registers instead. TU size is packed into the DATA_M register.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config.cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
5133 | 5133 | ||
5134 | static void intel_dp_set_m_n(struct intel_crtc *crtc) | 5134 | static void intel_dp_set_m_n(struct intel_crtc *crtc) |
5135 | { | 5135 | { |
5136 | if (crtc->config.has_pch_encoder) | 5136 | if (crtc->config.has_pch_encoder) |
5137 | intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n); | 5137 | intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n); |
5138 | else | 5138 | else |
5139 | intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n); | 5139 | intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n); |
5140 | } | 5140 | } |
5141 | 5141 | ||
5142 | static void vlv_update_pll(struct intel_crtc *crtc) | 5142 | static void vlv_update_pll(struct intel_crtc *crtc) |
5143 | { | 5143 | { |
5144 | struct drm_device *dev = crtc->base.dev; | 5144 | struct drm_device *dev = crtc->base.dev; |
5145 | struct drm_i915_private *dev_priv = dev->dev_private; | 5145 | struct drm_i915_private *dev_priv = dev->dev_private; |
5146 | int pipe = crtc->pipe; | 5146 | int pipe = crtc->pipe; |
5147 | u32 dpll, mdiv; | 5147 | u32 dpll, mdiv; |
5148 | u32 bestn, bestm1, bestm2, bestp1, bestp2; | 5148 | u32 bestn, bestm1, bestm2, bestp1, bestp2; |
5149 | u32 coreclk, reg_val, dpll_md; | 5149 | u32 coreclk, reg_val, dpll_md; |
5150 | 5150 | ||
5151 | mutex_lock(&dev_priv->dpio_lock); | 5151 | mutex_lock(&dev_priv->dpio_lock); |
5152 | 5152 | ||
5153 | bestn = crtc->config.dpll.n; | 5153 | bestn = crtc->config.dpll.n; |
5154 | bestm1 = crtc->config.dpll.m1; | 5154 | bestm1 = crtc->config.dpll.m1; |
5155 | bestm2 = crtc->config.dpll.m2; | 5155 | bestm2 = crtc->config.dpll.m2; |
5156 | bestp1 = crtc->config.dpll.p1; | 5156 | bestp1 = crtc->config.dpll.p1; |
5157 | bestp2 = crtc->config.dpll.p2; | 5157 | bestp2 = crtc->config.dpll.p2; |
5158 | 5158 | ||
5159 | /* See eDP HDMI DPIO driver vbios notes doc */ | 5159 | /* See eDP HDMI DPIO driver vbios notes doc */ |
5160 | 5160 | ||
5161 | /* PLL B needs special handling */ | 5161 | /* PLL B needs special handling */ |
5162 | if (pipe) | 5162 | if (pipe) |
5163 | vlv_pllb_recal_opamp(dev_priv, pipe); | 5163 | vlv_pllb_recal_opamp(dev_priv, pipe); |
5164 | 5164 | ||
5165 | /* Set up Tx target for periodic Rcomp update */ | 5165 | /* Set up Tx target for periodic Rcomp update */ |
5166 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); | 5166 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); |
5167 | 5167 | ||
5168 | /* Disable target IRef on PLL */ | 5168 | /* Disable target IRef on PLL */ |
5169 | reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); | 5169 | reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); |
5170 | reg_val &= 0x00ffffff; | 5170 | reg_val &= 0x00ffffff; |
5171 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); | 5171 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); |
5172 | 5172 | ||
5173 | /* Disable fast lock */ | 5173 | /* Disable fast lock */ |
5174 | vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); | 5174 | vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); |
5175 | 5175 | ||
5176 | /* Set idtafcrecal before PLL is enabled */ | 5176 | /* Set idtafcrecal before PLL is enabled */ |
5177 | mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); | 5177 | mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); |
5178 | mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); | 5178 | mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); |
5179 | mdiv |= ((bestn << DPIO_N_SHIFT)); | 5179 | mdiv |= ((bestn << DPIO_N_SHIFT)); |
5180 | mdiv |= (1 << DPIO_K_SHIFT); | 5180 | mdiv |= (1 << DPIO_K_SHIFT); |
5181 | 5181 | ||
5182 | /* | 5182 | /* |
5183 | * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, | 5183 | * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, |
5184 | * but we don't support that). | 5184 | * but we don't support that). |
5185 | * Note: don't use the DAC post divider as it seems unstable. | 5185 | * Note: don't use the DAC post divider as it seems unstable. |
5186 | */ | 5186 | */ |
5187 | mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); | 5187 | mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); |
5188 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); | 5188 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); |
5189 | 5189 | ||
5190 | mdiv |= DPIO_ENABLE_CALIBRATION; | 5190 | mdiv |= DPIO_ENABLE_CALIBRATION; |
5191 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); | 5191 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); |
5192 | 5192 | ||
5193 | /* Set HBR and RBR LPF coefficients */ | 5193 | /* Set HBR and RBR LPF coefficients */ |
5194 | if (crtc->config.port_clock == 162000 || | 5194 | if (crtc->config.port_clock == 162000 || |
5195 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || | 5195 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || |
5196 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) | 5196 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) |
5197 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), | 5197 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), |
5198 | 0x009f0003); | 5198 | 0x009f0003); |
5199 | else | 5199 | else |
5200 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), | 5200 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), |
5201 | 0x00d0000f); | 5201 | 0x00d0000f); |
5202 | 5202 | ||
5203 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || | 5203 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || |
5204 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) { | 5204 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) { |
5205 | /* Use SSC source */ | 5205 | /* Use SSC source */ |
5206 | if (!pipe) | 5206 | if (!pipe) |
5207 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), | 5207 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), |
5208 | 0x0df40000); | 5208 | 0x0df40000); |
5209 | else | 5209 | else |
5210 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), | 5210 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), |
5211 | 0x0df70000); | 5211 | 0x0df70000); |
5212 | } else { /* HDMI or VGA */ | 5212 | } else { /* HDMI or VGA */ |
5213 | /* Use bend source */ | 5213 | /* Use bend source */ |
5214 | if (!pipe) | 5214 | if (!pipe) |
5215 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), | 5215 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), |
5216 | 0x0df70000); | 5216 | 0x0df70000); |
5217 | else | 5217 | else |
5218 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), | 5218 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), |
5219 | 0x0df40000); | 5219 | 0x0df40000); |
5220 | } | 5220 | } |
5221 | 5221 | ||
5222 | coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); | 5222 | coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); |
5223 | coreclk = (coreclk & 0x0000ff00) | 0x01c00000; | 5223 | coreclk = (coreclk & 0x0000ff00) | 0x01c00000; |
5224 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) || | 5224 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) || |
5225 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) | 5225 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) |
5226 | coreclk |= 0x01000000; | 5226 | coreclk |= 0x01000000; |
5227 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); | 5227 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); |
5228 | 5228 | ||
5229 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); | 5229 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); |
5230 | 5230 | ||
5231 | /* | 5231 | /* |
5232 | * Enable DPIO clock input. We should never disable the reference | 5232 | * Enable DPIO clock input. We should never disable the reference |
5233 | * clock for pipe B, since VGA hotplug / manual detection depends | 5233 | * clock for pipe B, since VGA hotplug / manual detection depends |
5234 | * on it. | 5234 | * on it. |
5235 | */ | 5235 | */ |
5236 | dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | | 5236 | dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | |
5237 | DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; | 5237 | DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; |
5238 | /* We should never disable this, set it here for state tracking */ | 5238 | /* We should never disable this, set it here for state tracking */ |
5239 | if (pipe == PIPE_B) | 5239 | if (pipe == PIPE_B) |
5240 | dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; | 5240 | dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; |
5241 | dpll |= DPLL_VCO_ENABLE; | 5241 | dpll |= DPLL_VCO_ENABLE; |
5242 | crtc->config.dpll_hw_state.dpll = dpll; | 5242 | crtc->config.dpll_hw_state.dpll = dpll; |
5243 | 5243 | ||
5244 | dpll_md = (crtc->config.pixel_multiplier - 1) | 5244 | dpll_md = (crtc->config.pixel_multiplier - 1) |
5245 | << DPLL_MD_UDI_MULTIPLIER_SHIFT; | 5245 | << DPLL_MD_UDI_MULTIPLIER_SHIFT; |
5246 | crtc->config.dpll_hw_state.dpll_md = dpll_md; | 5246 | crtc->config.dpll_hw_state.dpll_md = dpll_md; |
5247 | 5247 | ||
5248 | if (crtc->config.has_dp_encoder) | 5248 | if (crtc->config.has_dp_encoder) |
5249 | intel_dp_set_m_n(crtc); | 5249 | intel_dp_set_m_n(crtc); |
5250 | 5250 | ||
5251 | mutex_unlock(&dev_priv->dpio_lock); | 5251 | mutex_unlock(&dev_priv->dpio_lock); |
5252 | } | 5252 | } |
5253 | 5253 | ||
5254 | static void i9xx_update_pll(struct intel_crtc *crtc, | 5254 | static void i9xx_update_pll(struct intel_crtc *crtc, |
5255 | intel_clock_t *reduced_clock, | 5255 | intel_clock_t *reduced_clock, |
5256 | int num_connectors) | 5256 | int num_connectors) |
5257 | { | 5257 | { |
5258 | struct drm_device *dev = crtc->base.dev; | 5258 | struct drm_device *dev = crtc->base.dev; |
5259 | struct drm_i915_private *dev_priv = dev->dev_private; | 5259 | struct drm_i915_private *dev_priv = dev->dev_private; |
5260 | u32 dpll; | 5260 | u32 dpll; |
5261 | bool is_sdvo; | 5261 | bool is_sdvo; |
5262 | struct dpll *clock = &crtc->config.dpll; | 5262 | struct dpll *clock = &crtc->config.dpll; |
5263 | 5263 | ||
5264 | i9xx_update_pll_dividers(crtc, reduced_clock); | 5264 | i9xx_update_pll_dividers(crtc, reduced_clock); |
5265 | 5265 | ||
5266 | is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) || | 5266 | is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) || |
5267 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI); | 5267 | intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI); |
5268 | 5268 | ||
5269 | dpll = DPLL_VGA_MODE_DIS; | 5269 | dpll = DPLL_VGA_MODE_DIS; |
5270 | 5270 | ||
5271 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) | 5271 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) |
5272 | dpll |= DPLLB_MODE_LVDS; | 5272 | dpll |= DPLLB_MODE_LVDS; |
5273 | else | 5273 | else |
5274 | dpll |= DPLLB_MODE_DAC_SERIAL; | 5274 | dpll |= DPLLB_MODE_DAC_SERIAL; |
5275 | 5275 | ||
5276 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { | 5276 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { |
5277 | dpll |= (crtc->config.pixel_multiplier - 1) | 5277 | dpll |= (crtc->config.pixel_multiplier - 1) |
5278 | << SDVO_MULTIPLIER_SHIFT_HIRES; | 5278 | << SDVO_MULTIPLIER_SHIFT_HIRES; |
5279 | } | 5279 | } |
5280 | 5280 | ||
5281 | if (is_sdvo) | 5281 | if (is_sdvo) |
5282 | dpll |= DPLL_SDVO_HIGH_SPEED; | 5282 | dpll |= DPLL_SDVO_HIGH_SPEED; |
5283 | 5283 | ||
5284 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) | 5284 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) |
5285 | dpll |= DPLL_SDVO_HIGH_SPEED; | 5285 | dpll |= DPLL_SDVO_HIGH_SPEED; |
5286 | 5286 | ||
5287 | /* compute bitmask from p1 value */ | 5287 | /* compute bitmask from p1 value */ |
5288 | if (IS_PINEVIEW(dev)) | 5288 | if (IS_PINEVIEW(dev)) |
5289 | dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; | 5289 | dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; |
5290 | else { | 5290 | else { |
5291 | dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | 5291 | dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
5292 | if (IS_G4X(dev) && reduced_clock) | 5292 | if (IS_G4X(dev) && reduced_clock) |
5293 | dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | 5293 | dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; |
5294 | } | 5294 | } |
5295 | switch (clock->p2) { | 5295 | switch (clock->p2) { |
5296 | case 5: | 5296 | case 5: |
5297 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; | 5297 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; |
5298 | break; | 5298 | break; |
5299 | case 7: | 5299 | case 7: |
5300 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; | 5300 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; |
5301 | break; | 5301 | break; |
5302 | case 10: | 5302 | case 10: |
5303 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; | 5303 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; |
5304 | break; | 5304 | break; |
5305 | case 14: | 5305 | case 14: |
5306 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; | 5306 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; |
5307 | break; | 5307 | break; |
5308 | } | 5308 | } |
5309 | if (INTEL_INFO(dev)->gen >= 4) | 5309 | if (INTEL_INFO(dev)->gen >= 4) |
5310 | dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); | 5310 | dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); |
5311 | 5311 | ||
5312 | if (crtc->config.sdvo_tv_clock) | 5312 | if (crtc->config.sdvo_tv_clock) |
5313 | dpll |= PLL_REF_INPUT_TVCLKINBC; | 5313 | dpll |= PLL_REF_INPUT_TVCLKINBC; |
5314 | else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && | 5314 | else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && |
5315 | intel_panel_use_ssc(dev_priv) && num_connectors < 2) | 5315 | intel_panel_use_ssc(dev_priv) && num_connectors < 2) |
5316 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | 5316 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
5317 | else | 5317 | else |
5318 | dpll |= PLL_REF_INPUT_DREFCLK; | 5318 | dpll |= PLL_REF_INPUT_DREFCLK; |
5319 | 5319 | ||
5320 | dpll |= DPLL_VCO_ENABLE; | 5320 | dpll |= DPLL_VCO_ENABLE; |
5321 | crtc->config.dpll_hw_state.dpll = dpll; | 5321 | crtc->config.dpll_hw_state.dpll = dpll; |
5322 | 5322 | ||
5323 | if (INTEL_INFO(dev)->gen >= 4) { | 5323 | if (INTEL_INFO(dev)->gen >= 4) { |
5324 | u32 dpll_md = (crtc->config.pixel_multiplier - 1) | 5324 | u32 dpll_md = (crtc->config.pixel_multiplier - 1) |
5325 | << DPLL_MD_UDI_MULTIPLIER_SHIFT; | 5325 | << DPLL_MD_UDI_MULTIPLIER_SHIFT; |
5326 | crtc->config.dpll_hw_state.dpll_md = dpll_md; | 5326 | crtc->config.dpll_hw_state.dpll_md = dpll_md; |
5327 | } | 5327 | } |
5328 | 5328 | ||
5329 | if (crtc->config.has_dp_encoder) | 5329 | if (crtc->config.has_dp_encoder) |
5330 | intel_dp_set_m_n(crtc); | 5330 | intel_dp_set_m_n(crtc); |
5331 | } | 5331 | } |
5332 | 5332 | ||
5333 | static void i8xx_update_pll(struct intel_crtc *crtc, | 5333 | static void i8xx_update_pll(struct intel_crtc *crtc, |
5334 | intel_clock_t *reduced_clock, | 5334 | intel_clock_t *reduced_clock, |
5335 | int num_connectors) | 5335 | int num_connectors) |
5336 | { | 5336 | { |
5337 | struct drm_device *dev = crtc->base.dev; | 5337 | struct drm_device *dev = crtc->base.dev; |
5338 | struct drm_i915_private *dev_priv = dev->dev_private; | 5338 | struct drm_i915_private *dev_priv = dev->dev_private; |
5339 | u32 dpll; | 5339 | u32 dpll; |
5340 | struct dpll *clock = &crtc->config.dpll; | 5340 | struct dpll *clock = &crtc->config.dpll; |
5341 | 5341 | ||
5342 | i9xx_update_pll_dividers(crtc, reduced_clock); | 5342 | i9xx_update_pll_dividers(crtc, reduced_clock); |
5343 | 5343 | ||
5344 | dpll = DPLL_VGA_MODE_DIS; | 5344 | dpll = DPLL_VGA_MODE_DIS; |
5345 | 5345 | ||
5346 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) { | 5346 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) { |
5347 | dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | 5347 | dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
5348 | } else { | 5348 | } else { |
5349 | if (clock->p1 == 2) | 5349 | if (clock->p1 == 2) |
5350 | dpll |= PLL_P1_DIVIDE_BY_TWO; | 5350 | dpll |= PLL_P1_DIVIDE_BY_TWO; |
5351 | else | 5351 | else |
5352 | dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; | 5352 | dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
5353 | if (clock->p2 == 4) | 5353 | if (clock->p2 == 4) |
5354 | dpll |= PLL_P2_DIVIDE_BY_4; | 5354 | dpll |= PLL_P2_DIVIDE_BY_4; |
5355 | } | 5355 | } |
5356 | 5356 | ||
5357 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO)) | 5357 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO)) |
5358 | dpll |= DPLL_DVO_2X_MODE; | 5358 | dpll |= DPLL_DVO_2X_MODE; |
5359 | 5359 | ||
5360 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && | 5360 | if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && |
5361 | intel_panel_use_ssc(dev_priv) && num_connectors < 2) | 5361 | intel_panel_use_ssc(dev_priv) && num_connectors < 2) |
5362 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | 5362 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
5363 | else | 5363 | else |
5364 | dpll |= PLL_REF_INPUT_DREFCLK; | 5364 | dpll |= PLL_REF_INPUT_DREFCLK; |
5365 | 5365 | ||
5366 | dpll |= DPLL_VCO_ENABLE; | 5366 | dpll |= DPLL_VCO_ENABLE; |
5367 | crtc->config.dpll_hw_state.dpll = dpll; | 5367 | crtc->config.dpll_hw_state.dpll = dpll; |
5368 | } | 5368 | } |
5369 | 5369 | ||
5370 | static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) | 5370 | static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) |
5371 | { | 5371 | { |
5372 | struct drm_device *dev = intel_crtc->base.dev; | 5372 | struct drm_device *dev = intel_crtc->base.dev; |
5373 | struct drm_i915_private *dev_priv = dev->dev_private; | 5373 | struct drm_i915_private *dev_priv = dev->dev_private; |
5374 | enum pipe pipe = intel_crtc->pipe; | 5374 | enum pipe pipe = intel_crtc->pipe; |
5375 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; | 5375 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; |
5376 | struct drm_display_mode *adjusted_mode = | 5376 | struct drm_display_mode *adjusted_mode = |
5377 | &intel_crtc->config.adjusted_mode; | 5377 | &intel_crtc->config.adjusted_mode; |
5378 | uint32_t crtc_vtotal, crtc_vblank_end; | 5378 | uint32_t crtc_vtotal, crtc_vblank_end; |
5379 | int vsyncshift = 0; | 5379 | int vsyncshift = 0; |
5380 | 5380 | ||
5381 | /* We need to be careful not to changed the adjusted mode, for otherwise | 5381 | /* We need to be careful not to changed the adjusted mode, for otherwise |
5382 | * the hw state checker will get angry at the mismatch. */ | 5382 | * the hw state checker will get angry at the mismatch. */ |
5383 | crtc_vtotal = adjusted_mode->crtc_vtotal; | 5383 | crtc_vtotal = adjusted_mode->crtc_vtotal; |
5384 | crtc_vblank_end = adjusted_mode->crtc_vblank_end; | 5384 | crtc_vblank_end = adjusted_mode->crtc_vblank_end; |
5385 | 5385 | ||
5386 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { | 5386 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { |
5387 | /* the chip adds 2 halflines automatically */ | 5387 | /* the chip adds 2 halflines automatically */ |
5388 | crtc_vtotal -= 1; | 5388 | crtc_vtotal -= 1; |
5389 | crtc_vblank_end -= 1; | 5389 | crtc_vblank_end -= 1; |
5390 | 5390 | ||
5391 | if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) | 5391 | if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) |
5392 | vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; | 5392 | vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; |
5393 | else | 5393 | else |
5394 | vsyncshift = adjusted_mode->crtc_hsync_start - | 5394 | vsyncshift = adjusted_mode->crtc_hsync_start - |
5395 | adjusted_mode->crtc_htotal / 2; | 5395 | adjusted_mode->crtc_htotal / 2; |
5396 | if (vsyncshift < 0) | 5396 | if (vsyncshift < 0) |
5397 | vsyncshift += adjusted_mode->crtc_htotal; | 5397 | vsyncshift += adjusted_mode->crtc_htotal; |
5398 | } | 5398 | } |
5399 | 5399 | ||
5400 | if (INTEL_INFO(dev)->gen > 3) | 5400 | if (INTEL_INFO(dev)->gen > 3) |
5401 | I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); | 5401 | I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); |
5402 | 5402 | ||
5403 | I915_WRITE(HTOTAL(cpu_transcoder), | 5403 | I915_WRITE(HTOTAL(cpu_transcoder), |
5404 | (adjusted_mode->crtc_hdisplay - 1) | | 5404 | (adjusted_mode->crtc_hdisplay - 1) | |
5405 | ((adjusted_mode->crtc_htotal - 1) << 16)); | 5405 | ((adjusted_mode->crtc_htotal - 1) << 16)); |
5406 | I915_WRITE(HBLANK(cpu_transcoder), | 5406 | I915_WRITE(HBLANK(cpu_transcoder), |
5407 | (adjusted_mode->crtc_hblank_start - 1) | | 5407 | (adjusted_mode->crtc_hblank_start - 1) | |
5408 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); | 5408 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); |
5409 | I915_WRITE(HSYNC(cpu_transcoder), | 5409 | I915_WRITE(HSYNC(cpu_transcoder), |
5410 | (adjusted_mode->crtc_hsync_start - 1) | | 5410 | (adjusted_mode->crtc_hsync_start - 1) | |
5411 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); | 5411 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); |
5412 | 5412 | ||
5413 | I915_WRITE(VTOTAL(cpu_transcoder), | 5413 | I915_WRITE(VTOTAL(cpu_transcoder), |
5414 | (adjusted_mode->crtc_vdisplay - 1) | | 5414 | (adjusted_mode->crtc_vdisplay - 1) | |
5415 | ((crtc_vtotal - 1) << 16)); | 5415 | ((crtc_vtotal - 1) << 16)); |
5416 | I915_WRITE(VBLANK(cpu_transcoder), | 5416 | I915_WRITE(VBLANK(cpu_transcoder), |
5417 | (adjusted_mode->crtc_vblank_start - 1) | | 5417 | (adjusted_mode->crtc_vblank_start - 1) | |
5418 | ((crtc_vblank_end - 1) << 16)); | 5418 | ((crtc_vblank_end - 1) << 16)); |
5419 | I915_WRITE(VSYNC(cpu_transcoder), | 5419 | I915_WRITE(VSYNC(cpu_transcoder), |
5420 | (adjusted_mode->crtc_vsync_start - 1) | | 5420 | (adjusted_mode->crtc_vsync_start - 1) | |
5421 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); | 5421 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); |
5422 | 5422 | ||
5423 | /* Workaround: when the EDP input selection is B, the VTOTAL_B must be | 5423 | /* Workaround: when the EDP input selection is B, the VTOTAL_B must be |
5424 | * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is | 5424 | * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is |
5425 | * documented on the DDI_FUNC_CTL register description, EDP Input Select | 5425 | * documented on the DDI_FUNC_CTL register description, EDP Input Select |
5426 | * bits. */ | 5426 | * bits. */ |
5427 | if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP && | 5427 | if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP && |
5428 | (pipe == PIPE_B || pipe == PIPE_C)) | 5428 | (pipe == PIPE_B || pipe == PIPE_C)) |
5429 | I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); | 5429 | I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); |
5430 | 5430 | ||
5431 | /* pipesrc controls the size that is scaled from, which should | 5431 | /* pipesrc controls the size that is scaled from, which should |
5432 | * always be the user's requested size. | 5432 | * always be the user's requested size. |
5433 | */ | 5433 | */ |
5434 | I915_WRITE(PIPESRC(pipe), | 5434 | I915_WRITE(PIPESRC(pipe), |
5435 | ((intel_crtc->config.pipe_src_w - 1) << 16) | | 5435 | ((intel_crtc->config.pipe_src_w - 1) << 16) | |
5436 | (intel_crtc->config.pipe_src_h - 1)); | 5436 | (intel_crtc->config.pipe_src_h - 1)); |
5437 | } | 5437 | } |
5438 | 5438 | ||
5439 | static void intel_get_pipe_timings(struct intel_crtc *crtc, | 5439 | static void intel_get_pipe_timings(struct intel_crtc *crtc, |
5440 | struct intel_crtc_config *pipe_config) | 5440 | struct intel_crtc_config *pipe_config) |
5441 | { | 5441 | { |
5442 | struct drm_device *dev = crtc->base.dev; | 5442 | struct drm_device *dev = crtc->base.dev; |
5443 | struct drm_i915_private *dev_priv = dev->dev_private; | 5443 | struct drm_i915_private *dev_priv = dev->dev_private; |
5444 | enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; | 5444 | enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; |
5445 | uint32_t tmp; | 5445 | uint32_t tmp; |
5446 | 5446 | ||
5447 | tmp = I915_READ(HTOTAL(cpu_transcoder)); | 5447 | tmp = I915_READ(HTOTAL(cpu_transcoder)); |
5448 | pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; | 5448 | pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; |
5449 | pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; | 5449 | pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; |
5450 | tmp = I915_READ(HBLANK(cpu_transcoder)); | 5450 | tmp = I915_READ(HBLANK(cpu_transcoder)); |
5451 | pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1; | 5451 | pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1; |
5452 | pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1; | 5452 | pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1; |
5453 | tmp = I915_READ(HSYNC(cpu_transcoder)); | 5453 | tmp = I915_READ(HSYNC(cpu_transcoder)); |
5454 | pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; | 5454 | pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; |
5455 | pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; | 5455 | pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; |
5456 | 5456 | ||
5457 | tmp = I915_READ(VTOTAL(cpu_transcoder)); | 5457 | tmp = I915_READ(VTOTAL(cpu_transcoder)); |
5458 | pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; | 5458 | pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; |
5459 | pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; | 5459 | pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; |
5460 | tmp = I915_READ(VBLANK(cpu_transcoder)); | 5460 | tmp = I915_READ(VBLANK(cpu_transcoder)); |
5461 | pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1; | 5461 | pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1; |
5462 | pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1; | 5462 | pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1; |
5463 | tmp = I915_READ(VSYNC(cpu_transcoder)); | 5463 | tmp = I915_READ(VSYNC(cpu_transcoder)); |
5464 | pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; | 5464 | pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; |
5465 | pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; | 5465 | pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; |
5466 | 5466 | ||
5467 | if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) { | 5467 | if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) { |
5468 | pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; | 5468 | pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; |
5469 | pipe_config->adjusted_mode.crtc_vtotal += 1; | 5469 | pipe_config->adjusted_mode.crtc_vtotal += 1; |
5470 | pipe_config->adjusted_mode.crtc_vblank_end += 1; | 5470 | pipe_config->adjusted_mode.crtc_vblank_end += 1; |
5471 | } | 5471 | } |
5472 | 5472 | ||
5473 | tmp = I915_READ(PIPESRC(crtc->pipe)); | 5473 | tmp = I915_READ(PIPESRC(crtc->pipe)); |
5474 | pipe_config->pipe_src_h = (tmp & 0xffff) + 1; | 5474 | pipe_config->pipe_src_h = (tmp & 0xffff) + 1; |
5475 | pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; | 5475 | pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; |
5476 | 5476 | ||
5477 | pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h; | 5477 | pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h; |
5478 | pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w; | 5478 | pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w; |
5479 | } | 5479 | } |
5480 | 5480 | ||
5481 | void intel_mode_from_pipe_config(struct drm_display_mode *mode, | 5481 | void intel_mode_from_pipe_config(struct drm_display_mode *mode, |
5482 | struct intel_crtc_config *pipe_config) | 5482 | struct intel_crtc_config *pipe_config) |
5483 | { | 5483 | { |
5484 | mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay; | 5484 | mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay; |
5485 | mode->htotal = pipe_config->adjusted_mode.crtc_htotal; | 5485 | mode->htotal = pipe_config->adjusted_mode.crtc_htotal; |
5486 | mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start; | 5486 | mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start; |
5487 | mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end; | 5487 | mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end; |
5488 | 5488 | ||
5489 | mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay; | 5489 | mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay; |
5490 | mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal; | 5490 | mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal; |
5491 | mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start; | 5491 | mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start; |
5492 | mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end; | 5492 | mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end; |
5493 | 5493 | ||
5494 | mode->flags = pipe_config->adjusted_mode.flags; | 5494 | mode->flags = pipe_config->adjusted_mode.flags; |
5495 | 5495 | ||
5496 | mode->clock = pipe_config->adjusted_mode.crtc_clock; | 5496 | mode->clock = pipe_config->adjusted_mode.crtc_clock; |
5497 | mode->flags |= pipe_config->adjusted_mode.flags; | 5497 | mode->flags |= pipe_config->adjusted_mode.flags; |
5498 | } | 5498 | } |
5499 | 5499 | ||
5500 | static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) | 5500 | static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) |
5501 | { | 5501 | { |
5502 | struct drm_device *dev = intel_crtc->base.dev; | 5502 | struct drm_device *dev = intel_crtc->base.dev; |
5503 | struct drm_i915_private *dev_priv = dev->dev_private; | 5503 | struct drm_i915_private *dev_priv = dev->dev_private; |
5504 | uint32_t pipeconf; | 5504 | uint32_t pipeconf; |
5505 | 5505 | ||
5506 | pipeconf = 0; | 5506 | pipeconf = 0; |
5507 | 5507 | ||
5508 | if (dev_priv->quirks & QUIRK_PIPEA_FORCE && | 5508 | if (dev_priv->quirks & QUIRK_PIPEA_FORCE && |
5509 | I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE) | 5509 | I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE) |
5510 | pipeconf |= PIPECONF_ENABLE; | 5510 | pipeconf |= PIPECONF_ENABLE; |
5511 | 5511 | ||
5512 | if (intel_crtc->config.double_wide) | 5512 | if (intel_crtc->config.double_wide) |
5513 | pipeconf |= PIPECONF_DOUBLE_WIDE; | 5513 | pipeconf |= PIPECONF_DOUBLE_WIDE; |
5514 | 5514 | ||
5515 | /* only g4x and later have fancy bpc/dither controls */ | 5515 | /* only g4x and later have fancy bpc/dither controls */ |
5516 | if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { | 5516 | if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { |
5517 | /* Bspec claims that we can't use dithering for 30bpp pipes. */ | 5517 | /* Bspec claims that we can't use dithering for 30bpp pipes. */ |
5518 | if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30) | 5518 | if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30) |
5519 | pipeconf |= PIPECONF_DITHER_EN | | 5519 | pipeconf |= PIPECONF_DITHER_EN | |
5520 | PIPECONF_DITHER_TYPE_SP; | 5520 | PIPECONF_DITHER_TYPE_SP; |
5521 | 5521 | ||
5522 | switch (intel_crtc->config.pipe_bpp) { | 5522 | switch (intel_crtc->config.pipe_bpp) { |
5523 | case 18: | 5523 | case 18: |
5524 | pipeconf |= PIPECONF_6BPC; | 5524 | pipeconf |= PIPECONF_6BPC; |
5525 | break; | 5525 | break; |
5526 | case 24: | 5526 | case 24: |
5527 | pipeconf |= PIPECONF_8BPC; | 5527 | pipeconf |= PIPECONF_8BPC; |
5528 | break; | 5528 | break; |
5529 | case 30: | 5529 | case 30: |
5530 | pipeconf |= PIPECONF_10BPC; | 5530 | pipeconf |= PIPECONF_10BPC; |
5531 | break; | 5531 | break; |
5532 | default: | 5532 | default: |
5533 | /* Case prevented by intel_choose_pipe_bpp_dither. */ | 5533 | /* Case prevented by intel_choose_pipe_bpp_dither. */ |
5534 | BUG(); | 5534 | BUG(); |
5535 | } | 5535 | } |
5536 | } | 5536 | } |
5537 | 5537 | ||
5538 | if (HAS_PIPE_CXSR(dev)) { | 5538 | if (HAS_PIPE_CXSR(dev)) { |
5539 | if (intel_crtc->lowfreq_avail) { | 5539 | if (intel_crtc->lowfreq_avail) { |
5540 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); | 5540 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); |
5541 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; | 5541 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; |
5542 | } else { | 5542 | } else { |
5543 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); | 5543 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); |
5544 | } | 5544 | } |
5545 | } | 5545 | } |
5546 | 5546 | ||
5547 | if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { | 5547 | if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { |
5548 | if (INTEL_INFO(dev)->gen < 4 || | 5548 | if (INTEL_INFO(dev)->gen < 4 || |
5549 | intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) | 5549 | intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) |
5550 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; | 5550 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; |
5551 | else | 5551 | else |
5552 | pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; | 5552 | pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; |
5553 | } else | 5553 | } else |
5554 | pipeconf |= PIPECONF_PROGRESSIVE; | 5554 | pipeconf |= PIPECONF_PROGRESSIVE; |
5555 | 5555 | ||
5556 | if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range) | 5556 | if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range) |
5557 | pipeconf |= PIPECONF_COLOR_RANGE_SELECT; | 5557 | pipeconf |= PIPECONF_COLOR_RANGE_SELECT; |
5558 | 5558 | ||
5559 | I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); | 5559 | I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); |
5560 | POSTING_READ(PIPECONF(intel_crtc->pipe)); | 5560 | POSTING_READ(PIPECONF(intel_crtc->pipe)); |
5561 | } | 5561 | } |
5562 | 5562 | ||
5563 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | 5563 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, |
5564 | int x, int y, | 5564 | int x, int y, |
5565 | struct drm_framebuffer *fb) | 5565 | struct drm_framebuffer *fb) |
5566 | { | 5566 | { |
5567 | struct drm_device *dev = crtc->dev; | 5567 | struct drm_device *dev = crtc->dev; |
5568 | struct drm_i915_private *dev_priv = dev->dev_private; | 5568 | struct drm_i915_private *dev_priv = dev->dev_private; |
5569 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5569 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5570 | int pipe = intel_crtc->pipe; | 5570 | int pipe = intel_crtc->pipe; |
5571 | int plane = intel_crtc->plane; | 5571 | int plane = intel_crtc->plane; |
5572 | int refclk, num_connectors = 0; | 5572 | int refclk, num_connectors = 0; |
5573 | intel_clock_t clock, reduced_clock; | 5573 | intel_clock_t clock, reduced_clock; |
5574 | u32 dspcntr; | 5574 | u32 dspcntr; |
5575 | bool ok, has_reduced_clock = false; | 5575 | bool ok, has_reduced_clock = false; |
5576 | bool is_lvds = false, is_dsi = false; | 5576 | bool is_lvds = false, is_dsi = false; |
5577 | struct intel_encoder *encoder; | 5577 | struct intel_encoder *encoder; |
5578 | const intel_limit_t *limit; | 5578 | const intel_limit_t *limit; |
5579 | int ret; | 5579 | int ret; |
5580 | 5580 | ||
5581 | for_each_encoder_on_crtc(dev, crtc, encoder) { | 5581 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
5582 | switch (encoder->type) { | 5582 | switch (encoder->type) { |
5583 | case INTEL_OUTPUT_LVDS: | 5583 | case INTEL_OUTPUT_LVDS: |
5584 | is_lvds = true; | 5584 | is_lvds = true; |
5585 | break; | 5585 | break; |
5586 | case INTEL_OUTPUT_DSI: | 5586 | case INTEL_OUTPUT_DSI: |
5587 | is_dsi = true; | 5587 | is_dsi = true; |
5588 | break; | 5588 | break; |
5589 | } | 5589 | } |
5590 | 5590 | ||
5591 | num_connectors++; | 5591 | num_connectors++; |
5592 | } | 5592 | } |
5593 | 5593 | ||
5594 | if (is_dsi) | 5594 | if (is_dsi) |
5595 | goto skip_dpll; | 5595 | goto skip_dpll; |
5596 | 5596 | ||
5597 | if (!intel_crtc->config.clock_set) { | 5597 | if (!intel_crtc->config.clock_set) { |
5598 | refclk = i9xx_get_refclk(crtc, num_connectors); | 5598 | refclk = i9xx_get_refclk(crtc, num_connectors); |
5599 | 5599 | ||
5600 | /* | 5600 | /* |
5601 | * Returns a set of divisors for the desired target clock with | 5601 | * Returns a set of divisors for the desired target clock with |
5602 | * the given refclk, or FALSE. The returned values represent | 5602 | * the given refclk, or FALSE. The returned values represent |
5603 | * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + | 5603 | * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + |
5604 | * 2) / p1 / p2. | 5604 | * 2) / p1 / p2. |
5605 | */ | 5605 | */ |
5606 | limit = intel_limit(crtc, refclk); | 5606 | limit = intel_limit(crtc, refclk); |
5607 | ok = dev_priv->display.find_dpll(limit, crtc, | 5607 | ok = dev_priv->display.find_dpll(limit, crtc, |
5608 | intel_crtc->config.port_clock, | 5608 | intel_crtc->config.port_clock, |
5609 | refclk, NULL, &clock); | 5609 | refclk, NULL, &clock); |
5610 | if (!ok) { | 5610 | if (!ok) { |
5611 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | 5611 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
5612 | return -EINVAL; | 5612 | return -EINVAL; |
5613 | } | 5613 | } |
5614 | 5614 | ||
5615 | if (is_lvds && dev_priv->lvds_downclock_avail) { | 5615 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
5616 | /* | 5616 | /* |
5617 | * Ensure we match the reduced clock's P to the target | 5617 | * Ensure we match the reduced clock's P to the target |
5618 | * clock. If the clocks don't match, we can't switch | 5618 | * clock. If the clocks don't match, we can't switch |
5619 | * the display clock by using the FP0/FP1. In such case | 5619 | * the display clock by using the FP0/FP1. In such case |
5620 | * we will disable the LVDS downclock feature. | 5620 | * we will disable the LVDS downclock feature. |
5621 | */ | 5621 | */ |
5622 | has_reduced_clock = | 5622 | has_reduced_clock = |
5623 | dev_priv->display.find_dpll(limit, crtc, | 5623 | dev_priv->display.find_dpll(limit, crtc, |
5624 | dev_priv->lvds_downclock, | 5624 | dev_priv->lvds_downclock, |
5625 | refclk, &clock, | 5625 | refclk, &clock, |
5626 | &reduced_clock); | 5626 | &reduced_clock); |
5627 | } | 5627 | } |
5628 | /* Compat-code for transition, will disappear. */ | 5628 | /* Compat-code for transition, will disappear. */ |
5629 | intel_crtc->config.dpll.n = clock.n; | 5629 | intel_crtc->config.dpll.n = clock.n; |
5630 | intel_crtc->config.dpll.m1 = clock.m1; | 5630 | intel_crtc->config.dpll.m1 = clock.m1; |
5631 | intel_crtc->config.dpll.m2 = clock.m2; | 5631 | intel_crtc->config.dpll.m2 = clock.m2; |
5632 | intel_crtc->config.dpll.p1 = clock.p1; | 5632 | intel_crtc->config.dpll.p1 = clock.p1; |
5633 | intel_crtc->config.dpll.p2 = clock.p2; | 5633 | intel_crtc->config.dpll.p2 = clock.p2; |
5634 | } | 5634 | } |
5635 | 5635 | ||
5636 | if (IS_GEN2(dev)) { | 5636 | if (IS_GEN2(dev)) { |
5637 | i8xx_update_pll(intel_crtc, | 5637 | i8xx_update_pll(intel_crtc, |
5638 | has_reduced_clock ? &reduced_clock : NULL, | 5638 | has_reduced_clock ? &reduced_clock : NULL, |
5639 | num_connectors); | 5639 | num_connectors); |
5640 | } else if (IS_VALLEYVIEW(dev)) { | 5640 | } else if (IS_VALLEYVIEW(dev)) { |
5641 | vlv_update_pll(intel_crtc); | 5641 | vlv_update_pll(intel_crtc); |
5642 | } else { | 5642 | } else { |
5643 | i9xx_update_pll(intel_crtc, | 5643 | i9xx_update_pll(intel_crtc, |
5644 | has_reduced_clock ? &reduced_clock : NULL, | 5644 | has_reduced_clock ? &reduced_clock : NULL, |
5645 | num_connectors); | 5645 | num_connectors); |
5646 | } | 5646 | } |
5647 | 5647 | ||
5648 | skip_dpll: | 5648 | skip_dpll: |
5649 | /* Set up the display plane register */ | 5649 | /* Set up the display plane register */ |
5650 | dspcntr = DISPPLANE_GAMMA_ENABLE; | 5650 | dspcntr = DISPPLANE_GAMMA_ENABLE; |
5651 | 5651 | ||
5652 | if (!IS_VALLEYVIEW(dev)) { | 5652 | if (!IS_VALLEYVIEW(dev)) { |
5653 | if (pipe == 0) | 5653 | if (pipe == 0) |
5654 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; | 5654 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; |
5655 | else | 5655 | else |
5656 | dspcntr |= DISPPLANE_SEL_PIPE_B; | 5656 | dspcntr |= DISPPLANE_SEL_PIPE_B; |
5657 | } | 5657 | } |
5658 | 5658 | ||
5659 | intel_set_pipe_timings(intel_crtc); | 5659 | intel_set_pipe_timings(intel_crtc); |
5660 | 5660 | ||
5661 | /* pipesrc and dspsize control the size that is scaled from, | 5661 | /* pipesrc and dspsize control the size that is scaled from, |
5662 | * which should always be the user's requested size. | 5662 | * which should always be the user's requested size. |
5663 | */ | 5663 | */ |
5664 | I915_WRITE(DSPSIZE(plane), | 5664 | I915_WRITE(DSPSIZE(plane), |
5665 | ((intel_crtc->config.pipe_src_h - 1) << 16) | | 5665 | ((intel_crtc->config.pipe_src_h - 1) << 16) | |
5666 | (intel_crtc->config.pipe_src_w - 1)); | 5666 | (intel_crtc->config.pipe_src_w - 1)); |
5667 | I915_WRITE(DSPPOS(plane), 0); | 5667 | I915_WRITE(DSPPOS(plane), 0); |
5668 | 5668 | ||
5669 | i9xx_set_pipeconf(intel_crtc); | 5669 | i9xx_set_pipeconf(intel_crtc); |
5670 | 5670 | ||
5671 | I915_WRITE(DSPCNTR(plane), dspcntr); | 5671 | I915_WRITE(DSPCNTR(plane), dspcntr); |
5672 | POSTING_READ(DSPCNTR(plane)); | 5672 | POSTING_READ(DSPCNTR(plane)); |
5673 | 5673 | ||
5674 | ret = intel_pipe_set_base(crtc, x, y, fb); | 5674 | ret = intel_pipe_set_base(crtc, x, y, fb); |
5675 | 5675 | ||
5676 | return ret; | 5676 | return ret; |
5677 | } | 5677 | } |
5678 | 5678 | ||
5679 | static void i9xx_get_pfit_config(struct intel_crtc *crtc, | 5679 | static void i9xx_get_pfit_config(struct intel_crtc *crtc, |
5680 | struct intel_crtc_config *pipe_config) | 5680 | struct intel_crtc_config *pipe_config) |
5681 | { | 5681 | { |
5682 | struct drm_device *dev = crtc->base.dev; | 5682 | struct drm_device *dev = crtc->base.dev; |
5683 | struct drm_i915_private *dev_priv = dev->dev_private; | 5683 | struct drm_i915_private *dev_priv = dev->dev_private; |
5684 | uint32_t tmp; | 5684 | uint32_t tmp; |
5685 | 5685 | ||
5686 | if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) | 5686 | if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) |
5687 | return; | 5687 | return; |
5688 | 5688 | ||
5689 | tmp = I915_READ(PFIT_CONTROL); | 5689 | tmp = I915_READ(PFIT_CONTROL); |
5690 | if (!(tmp & PFIT_ENABLE)) | 5690 | if (!(tmp & PFIT_ENABLE)) |
5691 | return; | 5691 | return; |
5692 | 5692 | ||
5693 | /* Check whether the pfit is attached to our pipe. */ | 5693 | /* Check whether the pfit is attached to our pipe. */ |
5694 | if (INTEL_INFO(dev)->gen < 4) { | 5694 | if (INTEL_INFO(dev)->gen < 4) { |
5695 | if (crtc->pipe != PIPE_B) | 5695 | if (crtc->pipe != PIPE_B) |
5696 | return; | 5696 | return; |
5697 | } else { | 5697 | } else { |
5698 | if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) | 5698 | if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) |
5699 | return; | 5699 | return; |
5700 | } | 5700 | } |
5701 | 5701 | ||
5702 | pipe_config->gmch_pfit.control = tmp; | 5702 | pipe_config->gmch_pfit.control = tmp; |
5703 | pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); | 5703 | pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); |
5704 | if (INTEL_INFO(dev)->gen < 5) | 5704 | if (INTEL_INFO(dev)->gen < 5) |
5705 | pipe_config->gmch_pfit.lvds_border_bits = | 5705 | pipe_config->gmch_pfit.lvds_border_bits = |
5706 | I915_READ(LVDS) & LVDS_BORDER_ENABLE; | 5706 | I915_READ(LVDS) & LVDS_BORDER_ENABLE; |
5707 | } | 5707 | } |
5708 | 5708 | ||
/*
 * Read back the currently programmed ValleyView DPIO PLL dividers for
 * this pipe and derive the port clock into @pipe_config->port_clock.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	/* fixed reference clock value fed to vlv_clock() below */
	int refclk = 100000;

	/* DPIO sideband accesses must be serialized via dpio_lock. */
	mutex_lock(&dev_priv->dpio_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->dpio_lock);

	/* Unpack the divider bitfields from the PLL dword. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	/* Compute clock.dot from the dividers and refclk. */
	vlv_clock(refclk, &clock);

	/* clock.dot is the fast clock */
	pipe_config->port_clock = clock.dot / 5;
}
5734 | 5734 | ||
/*
 * Read back the primary plane state programmed by the firmware/BIOS and
 * describe it in a freshly allocated framebuffer (attached to the
 * primary plane) plus @plane_config (tiling, GTT base, total size).
 * On allocation failure it logs and returns without filling anything.
 */
static void i9xx_get_plane_config(struct intel_crtc *crtc,
				  struct intel_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	int aligned_height;

	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
	if (!crtc->base.primary->fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	val = I915_READ(DSPCNTR(plane));

	/* The tiling bit only exists on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4)
		if (val & DISPPLANE_TILED)
			plane_config->tiled = true;

	/* Translate the hardware pixel format field into a DRM fourcc. */
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = intel_format_to_fourcc(pixel_format);
	crtc->base.primary->fb->pixel_format = fourcc;
	crtc->base.primary->fb->bits_per_pixel =
		drm_format_plane_cpp(fourcc, 0) * 8;

	/* gen4+ uses DSPSURF (page-aligned) plus a tile/linear offset
	 * register; older parts have a single DSPADDR. */
	if (INTEL_INFO(dev)->gen >= 4) {
		/* NOTE(review): 'offset' is read but never used below —
		 * confirm whether it was meant to be stored somewhere. */
		if (plane_config->tiled)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	/* Recover the fb dimensions from the pipe source size register. */
	val = I915_READ(PIPESRC(pipe));
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	crtc->base.primary->fb->pitches[0] = val & 0xffffff80;

	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
					    plane_config->tiled);

	/* Total size of the firmware fb, rounded up to whole pages. */
	plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
				   aligned_height, PAGE_SIZE);

	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe, plane, crtc->base.primary->fb->width,
		      crtc->base.primary->fb->height,
		      crtc->base.primary->fb->bits_per_pixel, base,
		      crtc->base.primary->fb->pitches[0],
		      plane_config->size);

}
5795 | 5795 | ||
/*
 * Read back the hardware state of a gen2-4/VLV pipe into @pipe_config.
 * Returns false when the pipe's power domain is off or the pipe is
 * disabled; returns true once the config has been filled from the
 * registers.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* Don't touch the registers if the pipe's power well is down. */
	if (!intel_display_power_enabled(dev_priv,
					 POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	/* On these platforms the cpu transcoder index equals the pipe. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	/* PIPECONF carries a readable bpc field on G4X/VLV. */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Recover the pixel multiplier; which register holds it depends
	 * on the generation. */
	if (INTEL_INFO(dev)->gen >= 4) {
		tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev)) {
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Finally recover the dot/port clock from the PLL dividers. */
	if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	return true;
}
5872 | 5872 | ||
/*
 * Program the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake according to the outputs present: enable the SSC source for
 * LVDS/eDP panels when SSC is usable, route the CPU eDP source
 * appropriately, or disable both when no panel exists.  The register is
 * changed in the careful multi-step sequence the hardware requires —
 * each write is posted and followed by a 200us delay.
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
			    base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		}
	}

	/* On IBX, CK505 usage comes from the VBT and SSC is only usable
	 * together with it; on other PCHs CK505 is never used and SSC is
	 * always usable. */
	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
		      has_panel, has_lvds, has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else {
		final |= DREF_SSC_SOURCE_DISABLE;
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	}

	/* Nothing to do if the hardware already matches the wanted state. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			}
			else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		val &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}

	/* The stepwise updates above must converge on the precomputed
	 * final state. */
	BUG_ON(val != final);
}
6021 | 6021 | ||
/*
 * Pulse the FDI mPHY reset line via SOUTH_CHICKEN2: assert the reset
 * control bit, wait (up to 100us) for the status bit to confirm, then
 * de-assert and wait for the status bit to clear.  Timeouts are logged
 * but not treated as fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	/* Assert the reset. */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	/* De-assert the reset. */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
6042 | 6042 | ||
6043 | /* WaMPhyProgramming:hsw */ | 6043 | /* WaMPhyProgramming:hsw */ |
6044 | static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) | 6044 | static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) |
6045 | { | 6045 | { |
6046 | uint32_t tmp; | 6046 | uint32_t tmp; |
6047 | 6047 | ||
6048 | tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); | 6048 | tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); |
6049 | tmp &= ~(0xFF << 24); | 6049 | tmp &= ~(0xFF << 24); |
6050 | tmp |= (0x12 << 24); | 6050 | tmp |= (0x12 << 24); |
6051 | intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); | 6051 | intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); |
6052 | 6052 | ||
6053 | tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); | 6053 | tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); |
6054 | tmp |= (1 << 11); | 6054 | tmp |= (1 << 11); |
6055 | intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); | 6055 | intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); |
6056 | 6056 | ||
6057 | tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); | 6057 | tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); |
6058 | tmp |= (1 << 11); | 6058 | tmp |= (1 << 11); |
6059 | intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); | 6059 | intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); |
6060 | 6060 | ||
6061 | tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); | 6061 | tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); |
6062 | tmp |= (1 << 24) | (1 << 21) | (1 << 18); | 6062 | tmp |= (1 << 24) | (1 << 21) | (1 << 18); |
6063 | intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); | 6063 | intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); |
6064 | 6064 | ||
6065 | tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); | 6065 | tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); |
6066 | tmp |= (1 << 24) | (1 << 21) | (1 << 18); | 6066 | tmp |= (1 << 24) | (1 << 21) | (1 << 18); |
6067 | intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); | 6067 | intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); |
6068 | 6068 | ||
6069 | tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); | 6069 | tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); |
6070 | tmp &= ~(7 << 13); | 6070 | tmp &= ~(7 << 13); |
6071 | tmp |= (5 << 13); | 6071 | tmp |= (5 << 13); |
6072 | intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); | 6072 | intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); |
6073 | 6073 | ||
6074 | tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); | 6074 | tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); |
6075 | tmp &= ~(7 << 13); | 6075 | tmp &= ~(7 << 13); |
6076 | tmp |= (5 << 13); | 6076 | tmp |= (5 << 13); |
6077 | intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); | 6077 | intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); |
6078 | 6078 | ||
6079 | tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); | 6079 | tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); |
6080 | tmp &= ~0xFF; | 6080 | tmp &= ~0xFF; |
6081 | tmp |= 0x1C; | 6081 | tmp |= 0x1C; |
6082 | intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); | 6082 | intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); |
6083 | 6083 | ||
6084 | tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); | 6084 | tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); |
6085 | tmp &= ~0xFF; | 6085 | tmp &= ~0xFF; |
6086 | tmp |= 0x1C; | 6086 | tmp |= 0x1C; |
6087 | intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); | 6087 | intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); |
6088 | 6088 | ||
6089 | tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); | 6089 | tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); |
6090 | tmp &= ~(0xFF << 16); | 6090 | tmp &= ~(0xFF << 16); |
6091 | tmp |= (0x1C << 16); | 6091 | tmp |= (0x1C << 16); |
6092 | intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); | 6092 | intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); |
6093 | 6093 | ||
6094 | tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); | 6094 | tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); |
6095 | tmp &= ~(0xFF << 16); | 6095 | tmp &= ~(0xFF << 16); |
6096 | tmp |= (0x1C << 16); | 6096 | tmp |= (0x1C << 16); |
6097 | intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); | 6097 | intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); |
6098 | 6098 | ||
6099 | tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); | 6099 | tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); |
6100 | tmp |= (1 << 27); | 6100 | tmp |= (1 << 27); |
6101 | intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); | 6101 | intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); |
6102 | 6102 | ||
6103 | tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); | 6103 | tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); |
6104 | tmp |= (1 << 27); | 6104 | tmp |= (1 << 27); |
6105 | intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); | 6105 | intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); |
6106 | 6106 | ||
6107 | tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); | 6107 | tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); |
6108 | tmp &= ~(0xF << 28); | 6108 | tmp &= ~(0xF << 28); |
6109 | tmp |= (4 << 28); | 6109 | tmp |= (4 << 28); |
6110 | intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); | 6110 | intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); |
6111 | 6111 | ||
6112 | tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); | 6112 | tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); |
6113 | tmp &= ~(0xF << 28); | 6113 | tmp &= ~(0xF << 28); |
6114 | tmp |= (4 << 28); | 6114 | tmp |= (4 << 28); |
6115 | intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); | 6115 | intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); |
6116 | } | 6116 | } |
6117 | 6117 | ||
6118 | /* Implements 3 different sequences from BSpec chapter "Display iCLK | 6118 | /* Implements 3 different sequences from BSpec chapter "Display iCLK |
6119 | * Programming" based on the parameters passed: | 6119 | * Programming" based on the parameters passed: |
6120 | * - Sequence to enable CLKOUT_DP | 6120 | * - Sequence to enable CLKOUT_DP |
6121 | * - Sequence to enable CLKOUT_DP without spread | 6121 | * - Sequence to enable CLKOUT_DP without spread |
6122 | * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O | 6122 | * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O |
6123 | */ | 6123 | */ |
6124 | static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, | 6124 | static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, |
6125 | bool with_fdi) | 6125 | bool with_fdi) |
6126 | { | 6126 | { |
6127 | struct drm_i915_private *dev_priv = dev->dev_private; | 6127 | struct drm_i915_private *dev_priv = dev->dev_private; |
6128 | uint32_t reg, tmp; | 6128 | uint32_t reg, tmp; |
6129 | 6129 | ||
6130 | if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) | 6130 | if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) |
6131 | with_spread = true; | 6131 | with_spread = true; |
6132 | if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE && | 6132 | if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE && |
6133 | with_fdi, "LP PCH doesn't have FDI\n")) | 6133 | with_fdi, "LP PCH doesn't have FDI\n")) |
6134 | with_fdi = false; | 6134 | with_fdi = false; |
6135 | 6135 | ||
6136 | mutex_lock(&dev_priv->dpio_lock); | 6136 | mutex_lock(&dev_priv->dpio_lock); |
6137 | 6137 | ||
6138 | tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); | 6138 | tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); |
6139 | tmp &= ~SBI_SSCCTL_DISABLE; | 6139 | tmp &= ~SBI_SSCCTL_DISABLE; |
6140 | tmp |= SBI_SSCCTL_PATHALT; | 6140 | tmp |= SBI_SSCCTL_PATHALT; |
6141 | intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); | 6141 | intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); |
6142 | 6142 | ||
6143 | udelay(24); | 6143 | udelay(24); |
6144 | 6144 | ||
6145 | if (with_spread) { | 6145 | if (with_spread) { |
6146 | tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); | 6146 | tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); |
6147 | tmp &= ~SBI_SSCCTL_PATHALT; | 6147 | tmp &= ~SBI_SSCCTL_PATHALT; |
6148 | intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); | 6148 | intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); |
6149 | 6149 | ||
6150 | if (with_fdi) { | 6150 | if (with_fdi) { |
6151 | lpt_reset_fdi_mphy(dev_priv); | 6151 | lpt_reset_fdi_mphy(dev_priv); |
6152 | lpt_program_fdi_mphy(dev_priv); | 6152 | lpt_program_fdi_mphy(dev_priv); |
6153 | } | 6153 | } |
6154 | } | 6154 | } |
6155 | 6155 | ||
6156 | reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? | 6156 | reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? |
6157 | SBI_GEN0 : SBI_DBUFF0; | 6157 | SBI_GEN0 : SBI_DBUFF0; |
6158 | tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); | 6158 | tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); |
6159 | tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; | 6159 | tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; |
6160 | intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); | 6160 | intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); |
6161 | 6161 | ||
6162 | mutex_unlock(&dev_priv->dpio_lock); | 6162 | mutex_unlock(&dev_priv->dpio_lock); |
6163 | } | 6163 | } |
6164 | 6164 | ||
6165 | /* Sequence to disable CLKOUT_DP */ | 6165 | /* Sequence to disable CLKOUT_DP */ |
6166 | static void lpt_disable_clkout_dp(struct drm_device *dev) | 6166 | static void lpt_disable_clkout_dp(struct drm_device *dev) |
6167 | { | 6167 | { |
6168 | struct drm_i915_private *dev_priv = dev->dev_private; | 6168 | struct drm_i915_private *dev_priv = dev->dev_private; |
6169 | uint32_t reg, tmp; | 6169 | uint32_t reg, tmp; |
6170 | 6170 | ||
6171 | mutex_lock(&dev_priv->dpio_lock); | 6171 | mutex_lock(&dev_priv->dpio_lock); |
6172 | 6172 | ||
6173 | reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? | 6173 | reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? |
6174 | SBI_GEN0 : SBI_DBUFF0; | 6174 | SBI_GEN0 : SBI_DBUFF0; |
6175 | tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); | 6175 | tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); |
6176 | tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; | 6176 | tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; |
6177 | intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); | 6177 | intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); |
6178 | 6178 | ||
6179 | tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); | 6179 | tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); |
6180 | if (!(tmp & SBI_SSCCTL_DISABLE)) { | 6180 | if (!(tmp & SBI_SSCCTL_DISABLE)) { |
6181 | if (!(tmp & SBI_SSCCTL_PATHALT)) { | 6181 | if (!(tmp & SBI_SSCCTL_PATHALT)) { |
6182 | tmp |= SBI_SSCCTL_PATHALT; | 6182 | tmp |= SBI_SSCCTL_PATHALT; |
6183 | intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); | 6183 | intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); |
6184 | udelay(32); | 6184 | udelay(32); |
6185 | } | 6185 | } |
6186 | tmp |= SBI_SSCCTL_DISABLE; | 6186 | tmp |= SBI_SSCCTL_DISABLE; |
6187 | intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); | 6187 | intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); |
6188 | } | 6188 | } |
6189 | 6189 | ||
6190 | mutex_unlock(&dev_priv->dpio_lock); | 6190 | mutex_unlock(&dev_priv->dpio_lock); |
6191 | } | 6191 | } |
6192 | 6192 | ||
6193 | static void lpt_init_pch_refclk(struct drm_device *dev) | 6193 | static void lpt_init_pch_refclk(struct drm_device *dev) |
6194 | { | 6194 | { |
6195 | struct drm_mode_config *mode_config = &dev->mode_config; | 6195 | struct drm_mode_config *mode_config = &dev->mode_config; |
6196 | struct intel_encoder *encoder; | 6196 | struct intel_encoder *encoder; |
6197 | bool has_vga = false; | 6197 | bool has_vga = false; |
6198 | 6198 | ||
6199 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | 6199 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { |
6200 | switch (encoder->type) { | 6200 | switch (encoder->type) { |
6201 | case INTEL_OUTPUT_ANALOG: | 6201 | case INTEL_OUTPUT_ANALOG: |
6202 | has_vga = true; | 6202 | has_vga = true; |
6203 | break; | 6203 | break; |
6204 | } | 6204 | } |
6205 | } | 6205 | } |
6206 | 6206 | ||
6207 | if (has_vga) | 6207 | if (has_vga) |
6208 | lpt_enable_clkout_dp(dev, true, true); | 6208 | lpt_enable_clkout_dp(dev, true, true); |
6209 | else | 6209 | else |
6210 | lpt_disable_clkout_dp(dev); | 6210 | lpt_disable_clkout_dp(dev); |
6211 | } | 6211 | } |
6212 | 6212 | ||
6213 | /* | 6213 | /* |
6214 | * Initialize reference clocks when the driver loads | 6214 | * Initialize reference clocks when the driver loads |
6215 | */ | 6215 | */ |
6216 | void intel_init_pch_refclk(struct drm_device *dev) | 6216 | void intel_init_pch_refclk(struct drm_device *dev) |
6217 | { | 6217 | { |
6218 | if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) | 6218 | if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) |
6219 | ironlake_init_pch_refclk(dev); | 6219 | ironlake_init_pch_refclk(dev); |
6220 | else if (HAS_PCH_LPT(dev)) | 6220 | else if (HAS_PCH_LPT(dev)) |
6221 | lpt_init_pch_refclk(dev); | 6221 | lpt_init_pch_refclk(dev); |
6222 | } | 6222 | } |
6223 | 6223 | ||
6224 | static int ironlake_get_refclk(struct drm_crtc *crtc) | 6224 | static int ironlake_get_refclk(struct drm_crtc *crtc) |
6225 | { | 6225 | { |
6226 | struct drm_device *dev = crtc->dev; | 6226 | struct drm_device *dev = crtc->dev; |
6227 | struct drm_i915_private *dev_priv = dev->dev_private; | 6227 | struct drm_i915_private *dev_priv = dev->dev_private; |
6228 | struct intel_encoder *encoder; | 6228 | struct intel_encoder *encoder; |
6229 | int num_connectors = 0; | 6229 | int num_connectors = 0; |
6230 | bool is_lvds = false; | 6230 | bool is_lvds = false; |
6231 | 6231 | ||
6232 | for_each_encoder_on_crtc(dev, crtc, encoder) { | 6232 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
6233 | switch (encoder->type) { | 6233 | switch (encoder->type) { |
6234 | case INTEL_OUTPUT_LVDS: | 6234 | case INTEL_OUTPUT_LVDS: |
6235 | is_lvds = true; | 6235 | is_lvds = true; |
6236 | break; | 6236 | break; |
6237 | } | 6237 | } |
6238 | num_connectors++; | 6238 | num_connectors++; |
6239 | } | 6239 | } |
6240 | 6240 | ||
6241 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | 6241 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
6242 | DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", | 6242 | DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", |
6243 | dev_priv->vbt.lvds_ssc_freq); | 6243 | dev_priv->vbt.lvds_ssc_freq); |
6244 | return dev_priv->vbt.lvds_ssc_freq; | 6244 | return dev_priv->vbt.lvds_ssc_freq; |
6245 | } | 6245 | } |
6246 | 6246 | ||
6247 | return 120000; | 6247 | return 120000; |
6248 | } | 6248 | } |
6249 | 6249 | ||
6250 | static void ironlake_set_pipeconf(struct drm_crtc *crtc) | 6250 | static void ironlake_set_pipeconf(struct drm_crtc *crtc) |
6251 | { | 6251 | { |
6252 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | 6252 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
6253 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6253 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6254 | int pipe = intel_crtc->pipe; | 6254 | int pipe = intel_crtc->pipe; |
6255 | uint32_t val; | 6255 | uint32_t val; |
6256 | 6256 | ||
6257 | val = 0; | 6257 | val = 0; |
6258 | 6258 | ||
6259 | switch (intel_crtc->config.pipe_bpp) { | 6259 | switch (intel_crtc->config.pipe_bpp) { |
6260 | case 18: | 6260 | case 18: |
6261 | val |= PIPECONF_6BPC; | 6261 | val |= PIPECONF_6BPC; |
6262 | break; | 6262 | break; |
6263 | case 24: | 6263 | case 24: |
6264 | val |= PIPECONF_8BPC; | 6264 | val |= PIPECONF_8BPC; |
6265 | break; | 6265 | break; |
6266 | case 30: | 6266 | case 30: |
6267 | val |= PIPECONF_10BPC; | 6267 | val |= PIPECONF_10BPC; |
6268 | break; | 6268 | break; |
6269 | case 36: | 6269 | case 36: |
6270 | val |= PIPECONF_12BPC; | 6270 | val |= PIPECONF_12BPC; |
6271 | break; | 6271 | break; |
6272 | default: | 6272 | default: |
6273 | /* Case prevented by intel_choose_pipe_bpp_dither. */ | 6273 | /* Case prevented by intel_choose_pipe_bpp_dither. */ |
6274 | BUG(); | 6274 | BUG(); |
6275 | } | 6275 | } |
6276 | 6276 | ||
6277 | if (intel_crtc->config.dither) | 6277 | if (intel_crtc->config.dither) |
6278 | val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); | 6278 | val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); |
6279 | 6279 | ||
6280 | if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) | 6280 | if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) |
6281 | val |= PIPECONF_INTERLACED_ILK; | 6281 | val |= PIPECONF_INTERLACED_ILK; |
6282 | else | 6282 | else |
6283 | val |= PIPECONF_PROGRESSIVE; | 6283 | val |= PIPECONF_PROGRESSIVE; |
6284 | 6284 | ||
6285 | if (intel_crtc->config.limited_color_range) | 6285 | if (intel_crtc->config.limited_color_range) |
6286 | val |= PIPECONF_COLOR_RANGE_SELECT; | 6286 | val |= PIPECONF_COLOR_RANGE_SELECT; |
6287 | 6287 | ||
6288 | I915_WRITE(PIPECONF(pipe), val); | 6288 | I915_WRITE(PIPECONF(pipe), val); |
6289 | POSTING_READ(PIPECONF(pipe)); | 6289 | POSTING_READ(PIPECONF(pipe)); |
6290 | } | 6290 | } |
6291 | 6291 | ||
6292 | /* | 6292 | /* |
6293 | * Set up the pipe CSC unit. | 6293 | * Set up the pipe CSC unit. |
6294 | * | 6294 | * |
6295 | * Currently only full range RGB to limited range RGB conversion | 6295 | * Currently only full range RGB to limited range RGB conversion |
6296 | * is supported, but eventually this should handle various | 6296 | * is supported, but eventually this should handle various |
6297 | * RGB<->YCbCr scenarios as well. | 6297 | * RGB<->YCbCr scenarios as well. |
6298 | */ | 6298 | */ |
6299 | static void intel_set_pipe_csc(struct drm_crtc *crtc) | 6299 | static void intel_set_pipe_csc(struct drm_crtc *crtc) |
6300 | { | 6300 | { |
6301 | struct drm_device *dev = crtc->dev; | 6301 | struct drm_device *dev = crtc->dev; |
6302 | struct drm_i915_private *dev_priv = dev->dev_private; | 6302 | struct drm_i915_private *dev_priv = dev->dev_private; |
6303 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6303 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6304 | int pipe = intel_crtc->pipe; | 6304 | int pipe = intel_crtc->pipe; |
6305 | uint16_t coeff = 0x7800; /* 1.0 */ | 6305 | uint16_t coeff = 0x7800; /* 1.0 */ |
6306 | 6306 | ||
6307 | /* | 6307 | /* |
6308 | * TODO: Check what kind of values actually come out of the pipe | 6308 | * TODO: Check what kind of values actually come out of the pipe |
6309 | * with these coeff/postoff values and adjust to get the best | 6309 | * with these coeff/postoff values and adjust to get the best |
6310 | * accuracy. Perhaps we even need to take the bpc value into | 6310 | * accuracy. Perhaps we even need to take the bpc value into |
6311 | * consideration. | 6311 | * consideration. |
6312 | */ | 6312 | */ |
6313 | 6313 | ||
6314 | if (intel_crtc->config.limited_color_range) | 6314 | if (intel_crtc->config.limited_color_range) |
6315 | coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */ | 6315 | coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */ |
6316 | 6316 | ||
6317 | /* | 6317 | /* |
6318 | * GY/GU and RY/RU should be the other way around according | 6318 | * GY/GU and RY/RU should be the other way around according |
6319 | * to BSpec, but reality doesn't agree. Just set them up in | 6319 | * to BSpec, but reality doesn't agree. Just set them up in |
6320 | * a way that results in the correct picture. | 6320 | * a way that results in the correct picture. |
6321 | */ | 6321 | */ |
6322 | I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16); | 6322 | I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16); |
6323 | I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0); | 6323 | I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0); |
6324 | 6324 | ||
6325 | I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff); | 6325 | I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff); |
6326 | I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0); | 6326 | I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0); |
6327 | 6327 | ||
6328 | I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0); | 6328 | I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0); |
6329 | I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16); | 6329 | I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16); |
6330 | 6330 | ||
6331 | I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0); | 6331 | I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0); |
6332 | I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0); | 6332 | I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0); |
6333 | I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); | 6333 | I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); |
6334 | 6334 | ||
6335 | if (INTEL_INFO(dev)->gen > 6) { | 6335 | if (INTEL_INFO(dev)->gen > 6) { |
6336 | uint16_t postoff = 0; | 6336 | uint16_t postoff = 0; |
6337 | 6337 | ||
6338 | if (intel_crtc->config.limited_color_range) | 6338 | if (intel_crtc->config.limited_color_range) |
6339 | postoff = (16 * (1 << 12) / 255) & 0x1fff; | 6339 | postoff = (16 * (1 << 12) / 255) & 0x1fff; |
6340 | 6340 | ||
6341 | I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); | 6341 | I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); |
6342 | I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); | 6342 | I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); |
6343 | I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); | 6343 | I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); |
6344 | 6344 | ||
6345 | I915_WRITE(PIPE_CSC_MODE(pipe), 0); | 6345 | I915_WRITE(PIPE_CSC_MODE(pipe), 0); |
6346 | } else { | 6346 | } else { |
6347 | uint32_t mode = CSC_MODE_YUV_TO_RGB; | 6347 | uint32_t mode = CSC_MODE_YUV_TO_RGB; |
6348 | 6348 | ||
6349 | if (intel_crtc->config.limited_color_range) | 6349 | if (intel_crtc->config.limited_color_range) |
6350 | mode |= CSC_BLACK_SCREEN_OFFSET; | 6350 | mode |= CSC_BLACK_SCREEN_OFFSET; |
6351 | 6351 | ||
6352 | I915_WRITE(PIPE_CSC_MODE(pipe), mode); | 6352 | I915_WRITE(PIPE_CSC_MODE(pipe), mode); |
6353 | } | 6353 | } |
6354 | } | 6354 | } |
6355 | 6355 | ||
6356 | static void haswell_set_pipeconf(struct drm_crtc *crtc) | 6356 | static void haswell_set_pipeconf(struct drm_crtc *crtc) |
6357 | { | 6357 | { |
6358 | struct drm_device *dev = crtc->dev; | 6358 | struct drm_device *dev = crtc->dev; |
6359 | struct drm_i915_private *dev_priv = dev->dev_private; | 6359 | struct drm_i915_private *dev_priv = dev->dev_private; |
6360 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6360 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6361 | enum pipe pipe = intel_crtc->pipe; | 6361 | enum pipe pipe = intel_crtc->pipe; |
6362 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; | 6362 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; |
6363 | uint32_t val; | 6363 | uint32_t val; |
6364 | 6364 | ||
6365 | val = 0; | 6365 | val = 0; |
6366 | 6366 | ||
6367 | if (IS_HASWELL(dev) && intel_crtc->config.dither) | 6367 | if (IS_HASWELL(dev) && intel_crtc->config.dither) |
6368 | val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); | 6368 | val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); |
6369 | 6369 | ||
6370 | if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) | 6370 | if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) |
6371 | val |= PIPECONF_INTERLACED_ILK; | 6371 | val |= PIPECONF_INTERLACED_ILK; |
6372 | else | 6372 | else |
6373 | val |= PIPECONF_PROGRESSIVE; | 6373 | val |= PIPECONF_PROGRESSIVE; |
6374 | 6374 | ||
6375 | I915_WRITE(PIPECONF(cpu_transcoder), val); | 6375 | I915_WRITE(PIPECONF(cpu_transcoder), val); |
6376 | POSTING_READ(PIPECONF(cpu_transcoder)); | 6376 | POSTING_READ(PIPECONF(cpu_transcoder)); |
6377 | 6377 | ||
6378 | I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); | 6378 | I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); |
6379 | POSTING_READ(GAMMA_MODE(intel_crtc->pipe)); | 6379 | POSTING_READ(GAMMA_MODE(intel_crtc->pipe)); |
6380 | 6380 | ||
6381 | if (IS_BROADWELL(dev)) { | 6381 | if (IS_BROADWELL(dev)) { |
6382 | val = 0; | 6382 | val = 0; |
6383 | 6383 | ||
6384 | switch (intel_crtc->config.pipe_bpp) { | 6384 | switch (intel_crtc->config.pipe_bpp) { |
6385 | case 18: | 6385 | case 18: |
6386 | val |= PIPEMISC_DITHER_6_BPC; | 6386 | val |= PIPEMISC_DITHER_6_BPC; |
6387 | break; | 6387 | break; |
6388 | case 24: | 6388 | case 24: |
6389 | val |= PIPEMISC_DITHER_8_BPC; | 6389 | val |= PIPEMISC_DITHER_8_BPC; |
6390 | break; | 6390 | break; |
6391 | case 30: | 6391 | case 30: |
6392 | val |= PIPEMISC_DITHER_10_BPC; | 6392 | val |= PIPEMISC_DITHER_10_BPC; |
6393 | break; | 6393 | break; |
6394 | case 36: | 6394 | case 36: |
6395 | val |= PIPEMISC_DITHER_12_BPC; | 6395 | val |= PIPEMISC_DITHER_12_BPC; |
6396 | break; | 6396 | break; |
6397 | default: | 6397 | default: |
6398 | /* Case prevented by pipe_config_set_bpp. */ | 6398 | /* Case prevented by pipe_config_set_bpp. */ |
6399 | BUG(); | 6399 | BUG(); |
6400 | } | 6400 | } |
6401 | 6401 | ||
6402 | if (intel_crtc->config.dither) | 6402 | if (intel_crtc->config.dither) |
6403 | val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; | 6403 | val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; |
6404 | 6404 | ||
6405 | I915_WRITE(PIPEMISC(pipe), val); | 6405 | I915_WRITE(PIPEMISC(pipe), val); |
6406 | } | 6406 | } |
6407 | } | 6407 | } |
6408 | 6408 | ||
6409 | static bool ironlake_compute_clocks(struct drm_crtc *crtc, | 6409 | static bool ironlake_compute_clocks(struct drm_crtc *crtc, |
6410 | intel_clock_t *clock, | 6410 | intel_clock_t *clock, |
6411 | bool *has_reduced_clock, | 6411 | bool *has_reduced_clock, |
6412 | intel_clock_t *reduced_clock) | 6412 | intel_clock_t *reduced_clock) |
6413 | { | 6413 | { |
6414 | struct drm_device *dev = crtc->dev; | 6414 | struct drm_device *dev = crtc->dev; |
6415 | struct drm_i915_private *dev_priv = dev->dev_private; | 6415 | struct drm_i915_private *dev_priv = dev->dev_private; |
6416 | struct intel_encoder *intel_encoder; | 6416 | struct intel_encoder *intel_encoder; |
6417 | int refclk; | 6417 | int refclk; |
6418 | const intel_limit_t *limit; | 6418 | const intel_limit_t *limit; |
6419 | bool ret, is_lvds = false; | 6419 | bool ret, is_lvds = false; |
6420 | 6420 | ||
6421 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { | 6421 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
6422 | switch (intel_encoder->type) { | 6422 | switch (intel_encoder->type) { |
6423 | case INTEL_OUTPUT_LVDS: | 6423 | case INTEL_OUTPUT_LVDS: |
6424 | is_lvds = true; | 6424 | is_lvds = true; |
6425 | break; | 6425 | break; |
6426 | } | 6426 | } |
6427 | } | 6427 | } |
6428 | 6428 | ||
6429 | refclk = ironlake_get_refclk(crtc); | 6429 | refclk = ironlake_get_refclk(crtc); |
6430 | 6430 | ||
6431 | /* | 6431 | /* |
6432 | * Returns a set of divisors for the desired target clock with the given | 6432 | * Returns a set of divisors for the desired target clock with the given |
6433 | * refclk, or FALSE. The returned values represent the clock equation: | 6433 | * refclk, or FALSE. The returned values represent the clock equation: |
6434 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | 6434 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. |
6435 | */ | 6435 | */ |
6436 | limit = intel_limit(crtc, refclk); | 6436 | limit = intel_limit(crtc, refclk); |
6437 | ret = dev_priv->display.find_dpll(limit, crtc, | 6437 | ret = dev_priv->display.find_dpll(limit, crtc, |
6438 | to_intel_crtc(crtc)->config.port_clock, | 6438 | to_intel_crtc(crtc)->config.port_clock, |
6439 | refclk, NULL, clock); | 6439 | refclk, NULL, clock); |
6440 | if (!ret) | 6440 | if (!ret) |
6441 | return false; | 6441 | return false; |
6442 | 6442 | ||
6443 | if (is_lvds && dev_priv->lvds_downclock_avail) { | 6443 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
6444 | /* | 6444 | /* |
6445 | * Ensure we match the reduced clock's P to the target clock. | 6445 | * Ensure we match the reduced clock's P to the target clock. |
6446 | * If the clocks don't match, we can't switch the display clock | 6446 | * If the clocks don't match, we can't switch the display clock |
6447 | * by using the FP0/FP1. In such case we will disable the LVDS | 6447 | * by using the FP0/FP1. In such case we will disable the LVDS |
6448 | * downclock feature. | 6448 | * downclock feature. |
6449 | */ | 6449 | */ |
6450 | *has_reduced_clock = | 6450 | *has_reduced_clock = |
6451 | dev_priv->display.find_dpll(limit, crtc, | 6451 | dev_priv->display.find_dpll(limit, crtc, |
6452 | dev_priv->lvds_downclock, | 6452 | dev_priv->lvds_downclock, |
6453 | refclk, clock, | 6453 | refclk, clock, |
6454 | reduced_clock); | 6454 | reduced_clock); |
6455 | } | 6455 | } |
6456 | 6456 | ||
6457 | return true; | 6457 | return true; |
6458 | } | 6458 | } |
6459 | 6459 | ||
6460 | int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) | 6460 | int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) |
6461 | { | 6461 | { |
6462 | /* | 6462 | /* |
6463 | * Account for spread spectrum to avoid | 6463 | * Account for spread spectrum to avoid |
6464 | * oversubscribing the link. Max center spread | 6464 | * oversubscribing the link. Max center spread |
6465 | * is 2.5%; use 5% for safety's sake. | 6465 | * is 2.5%; use 5% for safety's sake. |
6466 | */ | 6466 | */ |
6467 | u32 bps = target_clock * bpp * 21 / 20; | 6467 | u32 bps = target_clock * bpp * 21 / 20; |
6468 | return DIV_ROUND_UP(bps, link_bw * 8); | 6468 | return DIV_ROUND_UP(bps, link_bw * 8); |
6469 | } | 6469 | } |
6470 | 6470 | ||
6471 | static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) | 6471 | static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) |
6472 | { | 6472 | { |
6473 | return i9xx_dpll_compute_m(dpll) < factor * dpll->n; | 6473 | return i9xx_dpll_compute_m(dpll) < factor * dpll->n; |
6474 | } | 6474 | } |
6475 | 6475 | ||
6476 | static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, | 6476 | static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, |
6477 | u32 *fp, | 6477 | u32 *fp, |
6478 | intel_clock_t *reduced_clock, u32 *fp2) | 6478 | intel_clock_t *reduced_clock, u32 *fp2) |
6479 | { | 6479 | { |
6480 | struct drm_crtc *crtc = &intel_crtc->base; | 6480 | struct drm_crtc *crtc = &intel_crtc->base; |
6481 | struct drm_device *dev = crtc->dev; | 6481 | struct drm_device *dev = crtc->dev; |
6482 | struct drm_i915_private *dev_priv = dev->dev_private; | 6482 | struct drm_i915_private *dev_priv = dev->dev_private; |
6483 | struct intel_encoder *intel_encoder; | 6483 | struct intel_encoder *intel_encoder; |
6484 | uint32_t dpll; | 6484 | uint32_t dpll; |
6485 | int factor, num_connectors = 0; | 6485 | int factor, num_connectors = 0; |
6486 | bool is_lvds = false, is_sdvo = false; | 6486 | bool is_lvds = false, is_sdvo = false; |
6487 | 6487 | ||
6488 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { | 6488 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
6489 | switch (intel_encoder->type) { | 6489 | switch (intel_encoder->type) { |
6490 | case INTEL_OUTPUT_LVDS: | 6490 | case INTEL_OUTPUT_LVDS: |
6491 | is_lvds = true; | 6491 | is_lvds = true; |
6492 | break; | 6492 | break; |
6493 | case INTEL_OUTPUT_SDVO: | 6493 | case INTEL_OUTPUT_SDVO: |
6494 | case INTEL_OUTPUT_HDMI: | 6494 | case INTEL_OUTPUT_HDMI: |
6495 | is_sdvo = true; | 6495 | is_sdvo = true; |
6496 | break; | 6496 | break; |
6497 | } | 6497 | } |
6498 | 6498 | ||
6499 | num_connectors++; | 6499 | num_connectors++; |
6500 | } | 6500 | } |
6501 | 6501 | ||
6502 | /* Enable autotuning of the PLL clock (if permissible) */ | 6502 | /* Enable autotuning of the PLL clock (if permissible) */ |
6503 | factor = 21; | 6503 | factor = 21; |
6504 | if (is_lvds) { | 6504 | if (is_lvds) { |
6505 | if ((intel_panel_use_ssc(dev_priv) && | 6505 | if ((intel_panel_use_ssc(dev_priv) && |
6506 | dev_priv->vbt.lvds_ssc_freq == 100000) || | 6506 | dev_priv->vbt.lvds_ssc_freq == 100000) || |
6507 | (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) | 6507 | (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) |
6508 | factor = 25; | 6508 | factor = 25; |
6509 | } else if (intel_crtc->config.sdvo_tv_clock) | 6509 | } else if (intel_crtc->config.sdvo_tv_clock) |
6510 | factor = 20; | 6510 | factor = 20; |
6511 | 6511 | ||
6512 | if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor)) | 6512 | if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor)) |
6513 | *fp |= FP_CB_TUNE; | 6513 | *fp |= FP_CB_TUNE; |
6514 | 6514 | ||
6515 | if (fp2 && (reduced_clock->m < factor * reduced_clock->n)) | 6515 | if (fp2 && (reduced_clock->m < factor * reduced_clock->n)) |
6516 | *fp2 |= FP_CB_TUNE; | 6516 | *fp2 |= FP_CB_TUNE; |
6517 | 6517 | ||
6518 | dpll = 0; | 6518 | dpll = 0; |
6519 | 6519 | ||
6520 | if (is_lvds) | 6520 | if (is_lvds) |
6521 | dpll |= DPLLB_MODE_LVDS; | 6521 | dpll |= DPLLB_MODE_LVDS; |
6522 | else | 6522 | else |
6523 | dpll |= DPLLB_MODE_DAC_SERIAL; | 6523 | dpll |= DPLLB_MODE_DAC_SERIAL; |
6524 | 6524 | ||
6525 | dpll |= (intel_crtc->config.pixel_multiplier - 1) | 6525 | dpll |= (intel_crtc->config.pixel_multiplier - 1) |
6526 | << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; | 6526 | << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; |
6527 | 6527 | ||
6528 | if (is_sdvo) | 6528 | if (is_sdvo) |
6529 | dpll |= DPLL_SDVO_HIGH_SPEED; | 6529 | dpll |= DPLL_SDVO_HIGH_SPEED; |
6530 | if (intel_crtc->config.has_dp_encoder) | 6530 | if (intel_crtc->config.has_dp_encoder) |
6531 | dpll |= DPLL_SDVO_HIGH_SPEED; | 6531 | dpll |= DPLL_SDVO_HIGH_SPEED; |
6532 | 6532 | ||
6533 | /* compute bitmask from p1 value */ | 6533 | /* compute bitmask from p1 value */ |
6534 | dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | 6534 | dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
6535 | /* also FPA1 */ | 6535 | /* also FPA1 */ |
6536 | dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | 6536 | dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; |
6537 | 6537 | ||
6538 | switch (intel_crtc->config.dpll.p2) { | 6538 | switch (intel_crtc->config.dpll.p2) { |
6539 | case 5: | 6539 | case 5: |
6540 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; | 6540 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; |
6541 | break; | 6541 | break; |
6542 | case 7: | 6542 | case 7: |
6543 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; | 6543 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; |
6544 | break; | 6544 | break; |
6545 | case 10: | 6545 | case 10: |
6546 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; | 6546 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; |
6547 | break; | 6547 | break; |
6548 | case 14: | 6548 | case 14: |
6549 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; | 6549 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; |
6550 | break; | 6550 | break; |
6551 | } | 6551 | } |
6552 | 6552 | ||
6553 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) | 6553 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) |
6554 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | 6554 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
6555 | else | 6555 | else |
6556 | dpll |= PLL_REF_INPUT_DREFCLK; | 6556 | dpll |= PLL_REF_INPUT_DREFCLK; |
6557 | 6557 | ||
6558 | return dpll | DPLL_VCO_ENABLE; | 6558 | return dpll | DPLL_VCO_ENABLE; |
6559 | } | 6559 | } |
6560 | 6560 | ||
6561 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | 6561 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, |
6562 | int x, int y, | 6562 | int x, int y, |
6563 | struct drm_framebuffer *fb) | 6563 | struct drm_framebuffer *fb) |
6564 | { | 6564 | { |
6565 | struct drm_device *dev = crtc->dev; | 6565 | struct drm_device *dev = crtc->dev; |
6566 | struct drm_i915_private *dev_priv = dev->dev_private; | 6566 | struct drm_i915_private *dev_priv = dev->dev_private; |
6567 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6567 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6568 | int pipe = intel_crtc->pipe; | 6568 | int pipe = intel_crtc->pipe; |
6569 | int plane = intel_crtc->plane; | 6569 | int plane = intel_crtc->plane; |
6570 | int num_connectors = 0; | 6570 | int num_connectors = 0; |
6571 | intel_clock_t clock, reduced_clock; | 6571 | intel_clock_t clock, reduced_clock; |
6572 | u32 dpll = 0, fp = 0, fp2 = 0; | 6572 | u32 dpll = 0, fp = 0, fp2 = 0; |
6573 | bool ok, has_reduced_clock = false; | 6573 | bool ok, has_reduced_clock = false; |
6574 | bool is_lvds = false; | 6574 | bool is_lvds = false; |
6575 | struct intel_encoder *encoder; | 6575 | struct intel_encoder *encoder; |
6576 | struct intel_shared_dpll *pll; | 6576 | struct intel_shared_dpll *pll; |
6577 | int ret; | 6577 | int ret; |
6578 | 6578 | ||
6579 | for_each_encoder_on_crtc(dev, crtc, encoder) { | 6579 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
6580 | switch (encoder->type) { | 6580 | switch (encoder->type) { |
6581 | case INTEL_OUTPUT_LVDS: | 6581 | case INTEL_OUTPUT_LVDS: |
6582 | is_lvds = true; | 6582 | is_lvds = true; |
6583 | break; | 6583 | break; |
6584 | } | 6584 | } |
6585 | 6585 | ||
6586 | num_connectors++; | 6586 | num_connectors++; |
6587 | } | 6587 | } |
6588 | 6588 | ||
6589 | WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), | 6589 | WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), |
6590 | "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); | 6590 | "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); |
6591 | 6591 | ||
6592 | ok = ironlake_compute_clocks(crtc, &clock, | 6592 | ok = ironlake_compute_clocks(crtc, &clock, |
6593 | &has_reduced_clock, &reduced_clock); | 6593 | &has_reduced_clock, &reduced_clock); |
6594 | if (!ok && !intel_crtc->config.clock_set) { | 6594 | if (!ok && !intel_crtc->config.clock_set) { |
6595 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | 6595 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
6596 | return -EINVAL; | 6596 | return -EINVAL; |
6597 | } | 6597 | } |
6598 | /* Compat-code for transition, will disappear. */ | 6598 | /* Compat-code for transition, will disappear. */ |
6599 | if (!intel_crtc->config.clock_set) { | 6599 | if (!intel_crtc->config.clock_set) { |
6600 | intel_crtc->config.dpll.n = clock.n; | 6600 | intel_crtc->config.dpll.n = clock.n; |
6601 | intel_crtc->config.dpll.m1 = clock.m1; | 6601 | intel_crtc->config.dpll.m1 = clock.m1; |
6602 | intel_crtc->config.dpll.m2 = clock.m2; | 6602 | intel_crtc->config.dpll.m2 = clock.m2; |
6603 | intel_crtc->config.dpll.p1 = clock.p1; | 6603 | intel_crtc->config.dpll.p1 = clock.p1; |
6604 | intel_crtc->config.dpll.p2 = clock.p2; | 6604 | intel_crtc->config.dpll.p2 = clock.p2; |
6605 | } | 6605 | } |
6606 | 6606 | ||
6607 | /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ | 6607 | /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ |
6608 | if (intel_crtc->config.has_pch_encoder) { | 6608 | if (intel_crtc->config.has_pch_encoder) { |
6609 | fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll); | 6609 | fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll); |
6610 | if (has_reduced_clock) | 6610 | if (has_reduced_clock) |
6611 | fp2 = i9xx_dpll_compute_fp(&reduced_clock); | 6611 | fp2 = i9xx_dpll_compute_fp(&reduced_clock); |
6612 | 6612 | ||
6613 | dpll = ironlake_compute_dpll(intel_crtc, | 6613 | dpll = ironlake_compute_dpll(intel_crtc, |
6614 | &fp, &reduced_clock, | 6614 | &fp, &reduced_clock, |
6615 | has_reduced_clock ? &fp2 : NULL); | 6615 | has_reduced_clock ? &fp2 : NULL); |
6616 | 6616 | ||
6617 | intel_crtc->config.dpll_hw_state.dpll = dpll; | 6617 | intel_crtc->config.dpll_hw_state.dpll = dpll; |
6618 | intel_crtc->config.dpll_hw_state.fp0 = fp; | 6618 | intel_crtc->config.dpll_hw_state.fp0 = fp; |
6619 | if (has_reduced_clock) | 6619 | if (has_reduced_clock) |
6620 | intel_crtc->config.dpll_hw_state.fp1 = fp2; | 6620 | intel_crtc->config.dpll_hw_state.fp1 = fp2; |
6621 | else | 6621 | else |
6622 | intel_crtc->config.dpll_hw_state.fp1 = fp; | 6622 | intel_crtc->config.dpll_hw_state.fp1 = fp; |
6623 | 6623 | ||
6624 | pll = intel_get_shared_dpll(intel_crtc); | 6624 | pll = intel_get_shared_dpll(intel_crtc); |
6625 | if (pll == NULL) { | 6625 | if (pll == NULL) { |
6626 | DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", | 6626 | DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", |
6627 | pipe_name(pipe)); | 6627 | pipe_name(pipe)); |
6628 | return -EINVAL; | 6628 | return -EINVAL; |
6629 | } | 6629 | } |
6630 | } else | 6630 | } else |
6631 | intel_put_shared_dpll(intel_crtc); | 6631 | intel_put_shared_dpll(intel_crtc); |
6632 | 6632 | ||
6633 | if (intel_crtc->config.has_dp_encoder) | 6633 | if (intel_crtc->config.has_dp_encoder) |
6634 | intel_dp_set_m_n(intel_crtc); | 6634 | intel_dp_set_m_n(intel_crtc); |
6635 | 6635 | ||
6636 | if (is_lvds && has_reduced_clock && i915.powersave) | 6636 | if (is_lvds && has_reduced_clock && i915.powersave) |
6637 | intel_crtc->lowfreq_avail = true; | 6637 | intel_crtc->lowfreq_avail = true; |
6638 | else | 6638 | else |
6639 | intel_crtc->lowfreq_avail = false; | 6639 | intel_crtc->lowfreq_avail = false; |
6640 | 6640 | ||
6641 | intel_set_pipe_timings(intel_crtc); | 6641 | intel_set_pipe_timings(intel_crtc); |
6642 | 6642 | ||
6643 | if (intel_crtc->config.has_pch_encoder) { | 6643 | if (intel_crtc->config.has_pch_encoder) { |
6644 | intel_cpu_transcoder_set_m_n(intel_crtc, | 6644 | intel_cpu_transcoder_set_m_n(intel_crtc, |
6645 | &intel_crtc->config.fdi_m_n); | 6645 | &intel_crtc->config.fdi_m_n); |
6646 | } | 6646 | } |
6647 | 6647 | ||
6648 | ironlake_set_pipeconf(crtc); | 6648 | ironlake_set_pipeconf(crtc); |
6649 | 6649 | ||
6650 | /* Set up the display plane register */ | 6650 | /* Set up the display plane register */ |
6651 | I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); | 6651 | I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); |
6652 | POSTING_READ(DSPCNTR(plane)); | 6652 | POSTING_READ(DSPCNTR(plane)); |
6653 | 6653 | ||
6654 | ret = intel_pipe_set_base(crtc, x, y, fb); | 6654 | ret = intel_pipe_set_base(crtc, x, y, fb); |
6655 | 6655 | ||
6656 | return ret; | 6656 | return ret; |
6657 | } | 6657 | } |
6658 | 6658 | ||
6659 | static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, | 6659 | static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, |
6660 | struct intel_link_m_n *m_n) | 6660 | struct intel_link_m_n *m_n) |
6661 | { | 6661 | { |
6662 | struct drm_device *dev = crtc->base.dev; | 6662 | struct drm_device *dev = crtc->base.dev; |
6663 | struct drm_i915_private *dev_priv = dev->dev_private; | 6663 | struct drm_i915_private *dev_priv = dev->dev_private; |
6664 | enum pipe pipe = crtc->pipe; | 6664 | enum pipe pipe = crtc->pipe; |
6665 | 6665 | ||
6666 | m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); | 6666 | m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); |
6667 | m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); | 6667 | m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); |
6668 | m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) | 6668 | m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) |
6669 | & ~TU_SIZE_MASK; | 6669 | & ~TU_SIZE_MASK; |
6670 | m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); | 6670 | m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); |
6671 | m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) | 6671 | m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) |
6672 | & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; | 6672 | & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; |
6673 | } | 6673 | } |
6674 | 6674 | ||
6675 | static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, | 6675 | static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, |
6676 | enum transcoder transcoder, | 6676 | enum transcoder transcoder, |
6677 | struct intel_link_m_n *m_n) | 6677 | struct intel_link_m_n *m_n) |
6678 | { | 6678 | { |
6679 | struct drm_device *dev = crtc->base.dev; | 6679 | struct drm_device *dev = crtc->base.dev; |
6680 | struct drm_i915_private *dev_priv = dev->dev_private; | 6680 | struct drm_i915_private *dev_priv = dev->dev_private; |
6681 | enum pipe pipe = crtc->pipe; | 6681 | enum pipe pipe = crtc->pipe; |
6682 | 6682 | ||
6683 | if (INTEL_INFO(dev)->gen >= 5) { | 6683 | if (INTEL_INFO(dev)->gen >= 5) { |
6684 | m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); | 6684 | m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); |
6685 | m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); | 6685 | m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); |
6686 | m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) | 6686 | m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) |
6687 | & ~TU_SIZE_MASK; | 6687 | & ~TU_SIZE_MASK; |
6688 | m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); | 6688 | m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); |
6689 | m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) | 6689 | m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) |
6690 | & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; | 6690 | & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; |
6691 | } else { | 6691 | } else { |
6692 | m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); | 6692 | m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); |
6693 | m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); | 6693 | m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); |
6694 | m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) | 6694 | m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) |
6695 | & ~TU_SIZE_MASK; | 6695 | & ~TU_SIZE_MASK; |
6696 | m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); | 6696 | m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); |
6697 | m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) | 6697 | m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) |
6698 | & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; | 6698 | & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; |
6699 | } | 6699 | } |
6700 | } | 6700 | } |
6701 | 6701 | ||
6702 | void intel_dp_get_m_n(struct intel_crtc *crtc, | 6702 | void intel_dp_get_m_n(struct intel_crtc *crtc, |
6703 | struct intel_crtc_config *pipe_config) | 6703 | struct intel_crtc_config *pipe_config) |
6704 | { | 6704 | { |
6705 | if (crtc->config.has_pch_encoder) | 6705 | if (crtc->config.has_pch_encoder) |
6706 | intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); | 6706 | intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); |
6707 | else | 6707 | else |
6708 | intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, | 6708 | intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, |
6709 | &pipe_config->dp_m_n); | 6709 | &pipe_config->dp_m_n); |
6710 | } | 6710 | } |
6711 | 6711 | ||
6712 | static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, | 6712 | static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, |
6713 | struct intel_crtc_config *pipe_config) | 6713 | struct intel_crtc_config *pipe_config) |
6714 | { | 6714 | { |
6715 | intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, | 6715 | intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, |
6716 | &pipe_config->fdi_m_n); | 6716 | &pipe_config->fdi_m_n); |
6717 | } | 6717 | } |
6718 | 6718 | ||
6719 | static void ironlake_get_pfit_config(struct intel_crtc *crtc, | 6719 | static void ironlake_get_pfit_config(struct intel_crtc *crtc, |
6720 | struct intel_crtc_config *pipe_config) | 6720 | struct intel_crtc_config *pipe_config) |
6721 | { | 6721 | { |
6722 | struct drm_device *dev = crtc->base.dev; | 6722 | struct drm_device *dev = crtc->base.dev; |
6723 | struct drm_i915_private *dev_priv = dev->dev_private; | 6723 | struct drm_i915_private *dev_priv = dev->dev_private; |
6724 | uint32_t tmp; | 6724 | uint32_t tmp; |
6725 | 6725 | ||
6726 | tmp = I915_READ(PF_CTL(crtc->pipe)); | 6726 | tmp = I915_READ(PF_CTL(crtc->pipe)); |
6727 | 6727 | ||
6728 | if (tmp & PF_ENABLE) { | 6728 | if (tmp & PF_ENABLE) { |
6729 | pipe_config->pch_pfit.enabled = true; | 6729 | pipe_config->pch_pfit.enabled = true; |
6730 | pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); | 6730 | pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); |
6731 | pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); | 6731 | pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); |
6732 | 6732 | ||
6733 | /* We currently do not free assignements of panel fitters on | 6733 | /* We currently do not free assignements of panel fitters on |
6734 | * ivb/hsw (since we don't use the higher upscaling modes which | 6734 | * ivb/hsw (since we don't use the higher upscaling modes which |
6735 | * differentiates them) so just WARN about this case for now. */ | 6735 | * differentiates them) so just WARN about this case for now. */ |
6736 | if (IS_GEN7(dev)) { | 6736 | if (IS_GEN7(dev)) { |
6737 | WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != | 6737 | WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != |
6738 | PF_PIPE_SEL_IVB(crtc->pipe)); | 6738 | PF_PIPE_SEL_IVB(crtc->pipe)); |
6739 | } | 6739 | } |
6740 | } | 6740 | } |
6741 | } | 6741 | } |
6742 | 6742 | ||
6743 | static void ironlake_get_plane_config(struct intel_crtc *crtc, | 6743 | static void ironlake_get_plane_config(struct intel_crtc *crtc, |
6744 | struct intel_plane_config *plane_config) | 6744 | struct intel_plane_config *plane_config) |
6745 | { | 6745 | { |
6746 | struct drm_device *dev = crtc->base.dev; | 6746 | struct drm_device *dev = crtc->base.dev; |
6747 | struct drm_i915_private *dev_priv = dev->dev_private; | 6747 | struct drm_i915_private *dev_priv = dev->dev_private; |
6748 | u32 val, base, offset; | 6748 | u32 val, base, offset; |
6749 | int pipe = crtc->pipe, plane = crtc->plane; | 6749 | int pipe = crtc->pipe, plane = crtc->plane; |
6750 | int fourcc, pixel_format; | 6750 | int fourcc, pixel_format; |
6751 | int aligned_height; | 6751 | int aligned_height; |
6752 | 6752 | ||
6753 | crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL); | 6753 | crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL); |
6754 | if (!crtc->base.primary->fb) { | 6754 | if (!crtc->base.primary->fb) { |
6755 | DRM_DEBUG_KMS("failed to alloc fb\n"); | 6755 | DRM_DEBUG_KMS("failed to alloc fb\n"); |
6756 | return; | 6756 | return; |
6757 | } | 6757 | } |
6758 | 6758 | ||
6759 | val = I915_READ(DSPCNTR(plane)); | 6759 | val = I915_READ(DSPCNTR(plane)); |
6760 | 6760 | ||
6761 | if (INTEL_INFO(dev)->gen >= 4) | 6761 | if (INTEL_INFO(dev)->gen >= 4) |
6762 | if (val & DISPPLANE_TILED) | 6762 | if (val & DISPPLANE_TILED) |
6763 | plane_config->tiled = true; | 6763 | plane_config->tiled = true; |
6764 | 6764 | ||
6765 | pixel_format = val & DISPPLANE_PIXFORMAT_MASK; | 6765 | pixel_format = val & DISPPLANE_PIXFORMAT_MASK; |
6766 | fourcc = intel_format_to_fourcc(pixel_format); | 6766 | fourcc = intel_format_to_fourcc(pixel_format); |
6767 | crtc->base.primary->fb->pixel_format = fourcc; | 6767 | crtc->base.primary->fb->pixel_format = fourcc; |
6768 | crtc->base.primary->fb->bits_per_pixel = | 6768 | crtc->base.primary->fb->bits_per_pixel = |
6769 | drm_format_plane_cpp(fourcc, 0) * 8; | 6769 | drm_format_plane_cpp(fourcc, 0) * 8; |
6770 | 6770 | ||
6771 | base = I915_READ(DSPSURF(plane)) & 0xfffff000; | 6771 | base = I915_READ(DSPSURF(plane)) & 0xfffff000; |
6772 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { | 6772 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { |
6773 | offset = I915_READ(DSPOFFSET(plane)); | 6773 | offset = I915_READ(DSPOFFSET(plane)); |
6774 | } else { | 6774 | } else { |
6775 | if (plane_config->tiled) | 6775 | if (plane_config->tiled) |
6776 | offset = I915_READ(DSPTILEOFF(plane)); | 6776 | offset = I915_READ(DSPTILEOFF(plane)); |
6777 | else | 6777 | else |
6778 | offset = I915_READ(DSPLINOFF(plane)); | 6778 | offset = I915_READ(DSPLINOFF(plane)); |
6779 | } | 6779 | } |
6780 | plane_config->base = base; | 6780 | plane_config->base = base; |
6781 | 6781 | ||
6782 | val = I915_READ(PIPESRC(pipe)); | 6782 | val = I915_READ(PIPESRC(pipe)); |
6783 | crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1; | 6783 | crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1; |
6784 | crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1; | 6784 | crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1; |
6785 | 6785 | ||
6786 | val = I915_READ(DSPSTRIDE(pipe)); | 6786 | val = I915_READ(DSPSTRIDE(pipe)); |
6787 | crtc->base.primary->fb->pitches[0] = val & 0xffffff80; | 6787 | crtc->base.primary->fb->pitches[0] = val & 0xffffff80; |
6788 | 6788 | ||
6789 | aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, | 6789 | aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, |
6790 | plane_config->tiled); | 6790 | plane_config->tiled); |
6791 | 6791 | ||
6792 | plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] * | 6792 | plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] * |
6793 | aligned_height, PAGE_SIZE); | 6793 | aligned_height, PAGE_SIZE); |
6794 | 6794 | ||
6795 | DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", | 6795 | DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", |
6796 | pipe, plane, crtc->base.primary->fb->width, | 6796 | pipe, plane, crtc->base.primary->fb->width, |
6797 | crtc->base.primary->fb->height, | 6797 | crtc->base.primary->fb->height, |
6798 | crtc->base.primary->fb->bits_per_pixel, base, | 6798 | crtc->base.primary->fb->bits_per_pixel, base, |
6799 | crtc->base.primary->fb->pitches[0], | 6799 | crtc->base.primary->fb->pitches[0], |
6800 | plane_config->size); | 6800 | plane_config->size); |
6801 | } | 6801 | } |
6802 | 6802 | ||
6803 | static bool ironlake_get_pipe_config(struct intel_crtc *crtc, | 6803 | static bool ironlake_get_pipe_config(struct intel_crtc *crtc, |
6804 | struct intel_crtc_config *pipe_config) | 6804 | struct intel_crtc_config *pipe_config) |
6805 | { | 6805 | { |
6806 | struct drm_device *dev = crtc->base.dev; | 6806 | struct drm_device *dev = crtc->base.dev; |
6807 | struct drm_i915_private *dev_priv = dev->dev_private; | 6807 | struct drm_i915_private *dev_priv = dev->dev_private; |
6808 | uint32_t tmp; | 6808 | uint32_t tmp; |
6809 | 6809 | ||
6810 | pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; | 6810 | pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; |
6811 | pipe_config->shared_dpll = DPLL_ID_PRIVATE; | 6811 | pipe_config->shared_dpll = DPLL_ID_PRIVATE; |
6812 | 6812 | ||
6813 | tmp = I915_READ(PIPECONF(crtc->pipe)); | 6813 | tmp = I915_READ(PIPECONF(crtc->pipe)); |
6814 | if (!(tmp & PIPECONF_ENABLE)) | 6814 | if (!(tmp & PIPECONF_ENABLE)) |
6815 | return false; | 6815 | return false; |
6816 | 6816 | ||
6817 | switch (tmp & PIPECONF_BPC_MASK) { | 6817 | switch (tmp & PIPECONF_BPC_MASK) { |
6818 | case PIPECONF_6BPC: | 6818 | case PIPECONF_6BPC: |
6819 | pipe_config->pipe_bpp = 18; | 6819 | pipe_config->pipe_bpp = 18; |
6820 | break; | 6820 | break; |
6821 | case PIPECONF_8BPC: | 6821 | case PIPECONF_8BPC: |
6822 | pipe_config->pipe_bpp = 24; | 6822 | pipe_config->pipe_bpp = 24; |
6823 | break; | 6823 | break; |
6824 | case PIPECONF_10BPC: | 6824 | case PIPECONF_10BPC: |
6825 | pipe_config->pipe_bpp = 30; | 6825 | pipe_config->pipe_bpp = 30; |
6826 | break; | 6826 | break; |
6827 | case PIPECONF_12BPC: | 6827 | case PIPECONF_12BPC: |
6828 | pipe_config->pipe_bpp = 36; | 6828 | pipe_config->pipe_bpp = 36; |
6829 | break; | 6829 | break; |
6830 | default: | 6830 | default: |
6831 | break; | 6831 | break; |
6832 | } | 6832 | } |
6833 | 6833 | ||
6834 | if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { | 6834 | if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { |
6835 | struct intel_shared_dpll *pll; | 6835 | struct intel_shared_dpll *pll; |
6836 | 6836 | ||
6837 | pipe_config->has_pch_encoder = true; | 6837 | pipe_config->has_pch_encoder = true; |
6838 | 6838 | ||
6839 | tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); | 6839 | tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); |
6840 | pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> | 6840 | pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> |
6841 | FDI_DP_PORT_WIDTH_SHIFT) + 1; | 6841 | FDI_DP_PORT_WIDTH_SHIFT) + 1; |
6842 | 6842 | ||
6843 | ironlake_get_fdi_m_n_config(crtc, pipe_config); | 6843 | ironlake_get_fdi_m_n_config(crtc, pipe_config); |
6844 | 6844 | ||
6845 | if (HAS_PCH_IBX(dev_priv->dev)) { | 6845 | if (HAS_PCH_IBX(dev_priv->dev)) { |
6846 | pipe_config->shared_dpll = | 6846 | pipe_config->shared_dpll = |
6847 | (enum intel_dpll_id) crtc->pipe; | 6847 | (enum intel_dpll_id) crtc->pipe; |
6848 | } else { | 6848 | } else { |
6849 | tmp = I915_READ(PCH_DPLL_SEL); | 6849 | tmp = I915_READ(PCH_DPLL_SEL); |
6850 | if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) | 6850 | if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) |
6851 | pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B; | 6851 | pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B; |
6852 | else | 6852 | else |
6853 | pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A; | 6853 | pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A; |
6854 | } | 6854 | } |
6855 | 6855 | ||
6856 | pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; | 6856 | pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; |
6857 | 6857 | ||
6858 | WARN_ON(!pll->get_hw_state(dev_priv, pll, | 6858 | WARN_ON(!pll->get_hw_state(dev_priv, pll, |
6859 | &pipe_config->dpll_hw_state)); | 6859 | &pipe_config->dpll_hw_state)); |
6860 | 6860 | ||
6861 | tmp = pipe_config->dpll_hw_state.dpll; | 6861 | tmp = pipe_config->dpll_hw_state.dpll; |
6862 | pipe_config->pixel_multiplier = | 6862 | pipe_config->pixel_multiplier = |
6863 | ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) | 6863 | ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) |
6864 | >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; | 6864 | >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; |
6865 | 6865 | ||
6866 | ironlake_pch_clock_get(crtc, pipe_config); | 6866 | ironlake_pch_clock_get(crtc, pipe_config); |
6867 | } else { | 6867 | } else { |
6868 | pipe_config->pixel_multiplier = 1; | 6868 | pipe_config->pixel_multiplier = 1; |
6869 | } | 6869 | } |
6870 | 6870 | ||
6871 | intel_get_pipe_timings(crtc, pipe_config); | 6871 | intel_get_pipe_timings(crtc, pipe_config); |
6872 | 6872 | ||
6873 | ironlake_get_pfit_config(crtc, pipe_config); | 6873 | ironlake_get_pfit_config(crtc, pipe_config); |
6874 | 6874 | ||
6875 | return true; | 6875 | return true; |
6876 | } | 6876 | } |
6877 | 6877 | ||
6878 | static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) | 6878 | static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) |
6879 | { | 6879 | { |
6880 | struct drm_device *dev = dev_priv->dev; | 6880 | struct drm_device *dev = dev_priv->dev; |
6881 | struct intel_ddi_plls *plls = &dev_priv->ddi_plls; | 6881 | struct intel_ddi_plls *plls = &dev_priv->ddi_plls; |
6882 | struct intel_crtc *crtc; | 6882 | struct intel_crtc *crtc; |
6883 | unsigned long irqflags; | 6883 | unsigned long irqflags; |
6884 | uint32_t val; | 6884 | uint32_t val; |
6885 | 6885 | ||
6886 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) | 6886 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) |
6887 | WARN(crtc->active, "CRTC for pipe %c enabled\n", | 6887 | WARN(crtc->active, "CRTC for pipe %c enabled\n", |
6888 | pipe_name(crtc->pipe)); | 6888 | pipe_name(crtc->pipe)); |
6889 | 6889 | ||
6890 | WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); | 6890 | WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); |
6891 | WARN(plls->spll_refcount, "SPLL enabled\n"); | 6891 | WARN(plls->spll_refcount, "SPLL enabled\n"); |
6892 | WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n"); | 6892 | WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n"); |
6893 | WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n"); | 6893 | WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n"); |
6894 | WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); | 6894 | WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); |
6895 | WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, | 6895 | WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, |
6896 | "CPU PWM1 enabled\n"); | 6896 | "CPU PWM1 enabled\n"); |
6897 | WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, | 6897 | WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, |
6898 | "CPU PWM2 enabled\n"); | 6898 | "CPU PWM2 enabled\n"); |
6899 | WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, | 6899 | WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, |
6900 | "PCH PWM1 enabled\n"); | 6900 | "PCH PWM1 enabled\n"); |
6901 | WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, | 6901 | WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, |
6902 | "Utility pin enabled\n"); | 6902 | "Utility pin enabled\n"); |
6903 | WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); | 6903 | WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); |
6904 | 6904 | ||
6905 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 6905 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
6906 | val = I915_READ(DEIMR); | 6906 | val = I915_READ(DEIMR); |
6907 | WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff, | 6907 | WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff, |
6908 | "Unexpected DEIMR bits enabled: 0x%x\n", val); | 6908 | "Unexpected DEIMR bits enabled: 0x%x\n", val); |
6909 | val = I915_READ(SDEIMR); | 6909 | val = I915_READ(SDEIMR); |
6910 | WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff, | 6910 | WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff, |
6911 | "Unexpected SDEIMR bits enabled: 0x%x\n", val); | 6911 | "Unexpected SDEIMR bits enabled: 0x%x\n", val); |
6912 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 6912 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
6913 | } | 6913 | } |
6914 | 6914 | ||
6915 | /* | 6915 | /* |
6916 | * This function implements pieces of two sequences from BSpec: | 6916 | * This function implements pieces of two sequences from BSpec: |
6917 | * - Sequence for display software to disable LCPLL | 6917 | * - Sequence for display software to disable LCPLL |
6918 | * - Sequence for display software to allow package C8+ | 6918 | * - Sequence for display software to allow package C8+ |
6919 | * The steps implemented here are just the steps that actually touch the LCPLL | 6919 | * The steps implemented here are just the steps that actually touch the LCPLL |
6920 | * register. Callers should take care of disabling all the display engine | 6920 | * register. Callers should take care of disabling all the display engine |
6921 | * functions, doing the mode unset, fixing interrupts, etc. | 6921 | * functions, doing the mode unset, fixing interrupts, etc. |
6922 | */ | 6922 | */ |
6923 | static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, | 6923 | static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, |
6924 | bool switch_to_fclk, bool allow_power_down) | 6924 | bool switch_to_fclk, bool allow_power_down) |
6925 | { | 6925 | { |
6926 | uint32_t val; | 6926 | uint32_t val; |
6927 | 6927 | ||
6928 | assert_can_disable_lcpll(dev_priv); | 6928 | assert_can_disable_lcpll(dev_priv); |
6929 | 6929 | ||
6930 | val = I915_READ(LCPLL_CTL); | 6930 | val = I915_READ(LCPLL_CTL); |
6931 | 6931 | ||
6932 | if (switch_to_fclk) { | 6932 | if (switch_to_fclk) { |
6933 | val |= LCPLL_CD_SOURCE_FCLK; | 6933 | val |= LCPLL_CD_SOURCE_FCLK; |
6934 | I915_WRITE(LCPLL_CTL, val); | 6934 | I915_WRITE(LCPLL_CTL, val); |
6935 | 6935 | ||
6936 | if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & | 6936 | if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & |
6937 | LCPLL_CD_SOURCE_FCLK_DONE, 1)) | 6937 | LCPLL_CD_SOURCE_FCLK_DONE, 1)) |
6938 | DRM_ERROR("Switching to FCLK failed\n"); | 6938 | DRM_ERROR("Switching to FCLK failed\n"); |
6939 | 6939 | ||
6940 | val = I915_READ(LCPLL_CTL); | 6940 | val = I915_READ(LCPLL_CTL); |
6941 | } | 6941 | } |
6942 | 6942 | ||
6943 | val |= LCPLL_PLL_DISABLE; | 6943 | val |= LCPLL_PLL_DISABLE; |
6944 | I915_WRITE(LCPLL_CTL, val); | 6944 | I915_WRITE(LCPLL_CTL, val); |
6945 | POSTING_READ(LCPLL_CTL); | 6945 | POSTING_READ(LCPLL_CTL); |
6946 | 6946 | ||
6947 | if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1)) | 6947 | if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1)) |
6948 | DRM_ERROR("LCPLL still locked\n"); | 6948 | DRM_ERROR("LCPLL still locked\n"); |
6949 | 6949 | ||
6950 | val = I915_READ(D_COMP); | 6950 | val = I915_READ(D_COMP); |
6951 | val |= D_COMP_COMP_DISABLE; | 6951 | val |= D_COMP_COMP_DISABLE; |
6952 | mutex_lock(&dev_priv->rps.hw_lock); | 6952 | mutex_lock(&dev_priv->rps.hw_lock); |
6953 | if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) | 6953 | if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) |
6954 | DRM_ERROR("Failed to disable D_COMP\n"); | 6954 | DRM_ERROR("Failed to disable D_COMP\n"); |
6955 | mutex_unlock(&dev_priv->rps.hw_lock); | 6955 | mutex_unlock(&dev_priv->rps.hw_lock); |
6956 | POSTING_READ(D_COMP); | 6956 | POSTING_READ(D_COMP); |
6957 | ndelay(100); | 6957 | ndelay(100); |
6958 | 6958 | ||
6959 | if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) | 6959 | if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) |
6960 | DRM_ERROR("D_COMP RCOMP still in progress\n"); | 6960 | DRM_ERROR("D_COMP RCOMP still in progress\n"); |
6961 | 6961 | ||
6962 | if (allow_power_down) { | 6962 | if (allow_power_down) { |
6963 | val = I915_READ(LCPLL_CTL); | 6963 | val = I915_READ(LCPLL_CTL); |
6964 | val |= LCPLL_POWER_DOWN_ALLOW; | 6964 | val |= LCPLL_POWER_DOWN_ALLOW; |
6965 | I915_WRITE(LCPLL_CTL, val); | 6965 | I915_WRITE(LCPLL_CTL, val); |
6966 | POSTING_READ(LCPLL_CTL); | 6966 | POSTING_READ(LCPLL_CTL); |
6967 | } | 6967 | } |
6968 | } | 6968 | } |
6969 | 6969 | ||
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;
	unsigned long irqflags;

	val = I915_READ(LCPLL_CTL);

	/* Already fully restored: PLL locked and enabled, CD clock sourced
	 * from LCPLL (not Fclk) and power down disallowed — nothing to do. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 *
	 * The other problem is that hsw_restore_lcpll() is called as part of
	 * the runtime PM resume sequence, so we can't just call
	 * gen6_gt_force_wake_get() because that function calls
	 * intel_runtime_pm_get(), and we can't change the runtime PM refcount
	 * while we are on the resume sequence. So to solve this problem we have
	 * to call special forcewake code that doesn't touch runtime PM and
	 * doesn't enable the forcewake delayed work.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/* Disallow power down before touching the PLL itself. */
	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP (force on, clear the disable bit) via pcode;
	 * rps.hw_lock serializes pcode mailbox access. */
	val = I915_READ(D_COMP);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	mutex_lock(&dev_priv->rps.hw_lock);
	if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
		DRM_ERROR("Failed to enable D_COMP\n");
	mutex_unlock(&dev_priv->rps.hw_lock);
	POSTING_READ(D_COMP);

	/* Re-enable the PLL and wait (up to 5 ms) for it to lock. */
	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* If the CD clock was switched to Fclk, switch it back to LCPLL and
	 * wait for the hardware to acknowledge the change. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	/* See the big comment above. */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
7040 | 7040 | ||
/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	WARN_ON(!HAS_PC8(dev));

	DRM_DEBUG_KMS("Enabling package C8+\n");

	/* On LPT-LP, clear the partition-level clock gating disable so the
	 * PCH clocks can be gated while in PC8+. */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	/* Disable clocks and interrupts, then park LCPLL (power down
	 * allowed, CD clock switched away) — reversed by hsw_disable_pc8(). */
	lpt_disable_clkout_dp(dev);
	hsw_runtime_pm_disable_interrupts(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}
7083 | 7083 | ||
/*
 * Undo hsw_enable_pc8(): restore LCPLL, interrupts and PCH refclk, then
 * re-initialize the state that may have been lost while in PC8+.
 */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	WARN_ON(!HAS_PC8(dev));

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	hsw_runtime_pm_restore_interrupts(dev);
	lpt_init_pch_refclk(dev);

	/* On LPT-LP, disable partition-level clock gating again (mirror of
	 * the enable path). */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	/* Registers may have been lost in deep PC states: re-program DDI
	 * buffers, swizzling and the ring frequency table (the latter under
	 * rps.hw_lock, which serializes pcode access). */
	intel_prepare_ddi(dev);
	i915_gem_init_swizzling(dev);
	mutex_lock(&dev_priv->rps.hw_lock);
	gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
}
7109 | 7109 | ||
/* Global-resources hook for HSW modesets: just refresh the per-CRTC power
 * domain references. */
static void haswell_modeset_global_resources(struct drm_device *dev)
{
	modeset_update_crtc_power_domains(dev);
}
7114 | 7114 | ||
/*
 * Program the pipe/PLL/plane state for a mode set on Haswell.
 * Returns 0 on success or a negative errno (-EINVAL if no suitable DDI PLL
 * could be selected, or whatever intel_pipe_set_base() returns).
 */
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
				 int x, int y,
				 struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane;
	int ret;

	/* A shared DDI PLL must be available before anything else. */
	if (!intel_ddi_pll_select(intel_crtc))
		return -EINVAL;
	intel_ddi_pll_enable(intel_crtc);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_crtc->lowfreq_avail = false;

	intel_set_pipe_timings(intel_crtc);

	/* FDI M/N values only apply when driving a PCH-attached output. */
	if (intel_crtc->config.has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config.fdi_m_n);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	/* Set up the display plane register */
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, fb);

	return ret;
}
7153 | 7153 | ||
/*
 * Read back the current hardware state of @crtc into @pipe_config.
 * Returns false if the pipe (or the power domains needed to read it) is
 * not active, true when @pipe_config was filled in from the hardware.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain pfit_domain;
	uint32_t tmp;

	/* Can't read registers behind a powered-down domain. */
	if (!intel_display_power_enabled(dev_priv,
					 POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/* If the eDP transcoder is enabled and feeds this pipe, it is the
	 * CPU transcoder instead of the per-pipe one. */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through - treat unknown input as pipe A */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	if (!intel_display_power_enabled(dev_priv,
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
	if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
	    I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		/* FDI lane count is encoded as (field value + 1). */
		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}

	intel_get_pipe_timings(crtc, pipe_config);

	/* Only read the panel fitter config if its power domain is up. */
	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_enabled(dev_priv, pfit_domain))
		ironlake_get_pfit_config(crtc, pipe_config);

	if (IS_HASWELL(dev))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	pipe_config->pixel_multiplier = 1;

	return true;
}
7230 | 7230 | ||
/*
 * Common mode-set entry point: dispatch to the platform-specific
 * crtc_mode_set hook (bracketed by the vblank pre/post-modeset calls),
 * then run each encoder's ->mode_set() on success.
 * Returns 0 on success or the hook's error code.
 */
static int intel_crtc_mode_set(struct drm_crtc *crtc,
			       int x, int y,
			       struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
	int pipe = intel_crtc->pipe;
	int ret;

	drm_vblank_pre_modeset(dev, pipe);

	ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);

	drm_vblank_post_modeset(dev, pipe);

	if (ret != 0)
		return ret;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
			encoder->base.base.id,
			drm_get_encoder_name(&encoder->base),
			mode->base.id, mode->name);
		encoder->mode_set(encoder);
	}

	return 0;
}
7262 | 7262 | ||
/*
 * Map from pixel clock (mode->clock units; presumably kHz per the usual
 * drm_display_mode convention — confirm against callers) to the matching
 * AUD_CONFIG_PIXEL_CLOCK_HDMI_* register value. Entry index 1 (25200) is
 * the fallback chosen by audio_config_hdmi_pixel_clock() when no exact
 * match is found.
 */
static struct {
	int clock;
	u32 config;
} hdmi_audio_clock[] = {
	{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
	{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
	{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
	{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
	{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
	{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
	{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
	{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
	{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
};
7278 | 7278 | ||
7279 | /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ | 7279 | /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ |
7280 | static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode) | 7280 | static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode) |
7281 | { | 7281 | { |
7282 | int i; | 7282 | int i; |
7283 | 7283 | ||
7284 | for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) { | 7284 | for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) { |
7285 | if (mode->clock == hdmi_audio_clock[i].clock) | 7285 | if (mode->clock == hdmi_audio_clock[i].clock) |
7286 | break; | 7286 | break; |
7287 | } | 7287 | } |
7288 | 7288 | ||
7289 | if (i == ARRAY_SIZE(hdmi_audio_clock)) { | 7289 | if (i == ARRAY_SIZE(hdmi_audio_clock)) { |
7290 | DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock); | 7290 | DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock); |
7291 | i = 1; | 7291 | i = 1; |
7292 | } | 7292 | } |
7293 | 7293 | ||
7294 | DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n", | 7294 | DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n", |
7295 | hdmi_audio_clock[i].clock, | 7295 | hdmi_audio_clock[i].clock, |
7296 | hdmi_audio_clock[i].config); | 7296 | hdmi_audio_clock[i].config); |
7297 | 7297 | ||
7298 | return hdmi_audio_clock[i].config; | 7298 | return hdmi_audio_clock[i].config; |
7299 | } | 7299 | } |
7300 | 7300 | ||
7301 | static bool intel_eld_uptodate(struct drm_connector *connector, | 7301 | static bool intel_eld_uptodate(struct drm_connector *connector, |
7302 | int reg_eldv, uint32_t bits_eldv, | 7302 | int reg_eldv, uint32_t bits_eldv, |
7303 | int reg_elda, uint32_t bits_elda, | 7303 | int reg_elda, uint32_t bits_elda, |
7304 | int reg_edid) | 7304 | int reg_edid) |
7305 | { | 7305 | { |
7306 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 7306 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
7307 | uint8_t *eld = connector->eld; | 7307 | uint8_t *eld = connector->eld; |
7308 | uint32_t i; | 7308 | uint32_t i; |
7309 | 7309 | ||
7310 | i = I915_READ(reg_eldv); | 7310 | i = I915_READ(reg_eldv); |
7311 | i &= bits_eldv; | 7311 | i &= bits_eldv; |
7312 | 7312 | ||
7313 | if (!eld[0]) | 7313 | if (!eld[0]) |
7314 | return !i; | 7314 | return !i; |
7315 | 7315 | ||
7316 | if (!i) | 7316 | if (!i) |
7317 | return false; | 7317 | return false; |
7318 | 7318 | ||
7319 | i = I915_READ(reg_elda); | 7319 | i = I915_READ(reg_elda); |
7320 | i &= ~bits_elda; | 7320 | i &= ~bits_elda; |
7321 | I915_WRITE(reg_elda, i); | 7321 | I915_WRITE(reg_elda, i); |
7322 | 7322 | ||
7323 | for (i = 0; i < eld[2]; i++) | 7323 | for (i = 0; i < eld[2]; i++) |
7324 | if (I915_READ(reg_edid) != *((uint32_t *)eld + i)) | 7324 | if (I915_READ(reg_edid) != *((uint32_t *)eld + i)) |
7325 | return false; | 7325 | return false; |
7326 | 7326 | ||
7327 | return true; | 7327 | return true; |
7328 | } | 7328 | } |
7329 | 7329 | ||
/*
 * Write the connector's ELD into the G4x audio registers, skipping the
 * write if the hardware copy is already up to date.
 */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc,
			  struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	/* Pick the ELD-valid bit matching the audio device variant. */
	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Invalidate the current ELD and reset the write address; bits 9-13
	 * report the hardware ELD buffer size (in dwords). */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f; /* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	if (!eld[0])
		return;

	/* Clamp to the hardware buffer size, then stream the ELD dwords. */
	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Mark the new ELD valid. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}
7370 | 7370 | ||
7371 | static void haswell_write_eld(struct drm_connector *connector, | 7371 | static void haswell_write_eld(struct drm_connector *connector, |
7372 | struct drm_crtc *crtc, | 7372 | struct drm_crtc *crtc, |
7373 | struct drm_display_mode *mode) | 7373 | struct drm_display_mode *mode) |
7374 | { | 7374 | { |
7375 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 7375 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
7376 | uint8_t *eld = connector->eld; | 7376 | uint8_t *eld = connector->eld; |
7377 | struct drm_device *dev = crtc->dev; | 7377 | struct drm_device *dev = crtc->dev; |
7378 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 7378 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
7379 | uint32_t eldv; | 7379 | uint32_t eldv; |
7380 | uint32_t i; | 7380 | uint32_t i; |
7381 | int len; | 7381 | int len; |
7382 | int pipe = to_intel_crtc(crtc)->pipe; | 7382 | int pipe = to_intel_crtc(crtc)->pipe; |
7383 | int tmp; | 7383 | int tmp; |
7384 | 7384 | ||
7385 | int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe); | 7385 | int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe); |
7386 | int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe); | 7386 | int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe); |
7387 | int aud_config = HSW_AUD_CFG(pipe); | 7387 | int aud_config = HSW_AUD_CFG(pipe); |
7388 | int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD; | 7388 | int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD; |
7389 | 7389 | ||
7390 | 7390 | ||
7391 | DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n"); | 7391 | DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n"); |
7392 | 7392 | ||
7393 | /* Audio output enable */ | 7393 | /* Audio output enable */ |
7394 | DRM_DEBUG_DRIVER("HDMI audio: enable codec\n"); | 7394 | DRM_DEBUG_DRIVER("HDMI audio: enable codec\n"); |
7395 | tmp = I915_READ(aud_cntrl_st2); | 7395 | tmp = I915_READ(aud_cntrl_st2); |
7396 | tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4)); | 7396 | tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4)); |
7397 | I915_WRITE(aud_cntrl_st2, tmp); | 7397 | I915_WRITE(aud_cntrl_st2, tmp); |
7398 | 7398 | ||
7399 | /* Wait for 1 vertical blank */ | 7399 | /* Wait for 1 vertical blank */ |
7400 | intel_wait_for_vblank(dev, pipe); | 7400 | intel_wait_for_vblank(dev, pipe); |
7401 | 7401 | ||
7402 | /* Set ELD valid state */ | 7402 | /* Set ELD valid state */ |
7403 | tmp = I915_READ(aud_cntrl_st2); | 7403 | tmp = I915_READ(aud_cntrl_st2); |
7404 | DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp); | 7404 | DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp); |
7405 | tmp |= (AUDIO_ELD_VALID_A << (pipe * 4)); | 7405 | tmp |= (AUDIO_ELD_VALID_A << (pipe * 4)); |
7406 | I915_WRITE(aud_cntrl_st2, tmp); | 7406 | I915_WRITE(aud_cntrl_st2, tmp); |
7407 | tmp = I915_READ(aud_cntrl_st2); | 7407 | tmp = I915_READ(aud_cntrl_st2); |
7408 | DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp); | 7408 | DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp); |
7409 | 7409 | ||
7410 | /* Enable HDMI mode */ | 7410 | /* Enable HDMI mode */ |
7411 | tmp = I915_READ(aud_config); | 7411 | tmp = I915_READ(aud_config); |
7412 | DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp); | 7412 | DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp); |
7413 | /* clear N_programing_enable and N_value_index */ | 7413 | /* clear N_programing_enable and N_value_index */ |
7414 | tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE); | 7414 | tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE); |
7415 | I915_WRITE(aud_config, tmp); | 7415 | I915_WRITE(aud_config, tmp); |
7416 | 7416 | ||
7417 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); | 7417 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); |
7418 | 7418 | ||
7419 | eldv = AUDIO_ELD_VALID_A << (pipe * 4); | 7419 | eldv = AUDIO_ELD_VALID_A << (pipe * 4); |
7420 | intel_crtc->eld_vld = true; | 7420 | intel_crtc->eld_vld = true; |
7421 | 7421 | ||
7422 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | 7422 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
7423 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); | 7423 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); |
7424 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ | 7424 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ |
7425 | I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ | 7425 | I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ |
7426 | } else { | 7426 | } else { |
7427 | I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode)); | 7427 | I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode)); |
7428 | } | 7428 | } |
7429 | 7429 | ||
7430 | if (intel_eld_uptodate(connector, | 7430 | if (intel_eld_uptodate(connector, |
7431 | aud_cntrl_st2, eldv, | 7431 | aud_cntrl_st2, eldv, |
7432 | aud_cntl_st, IBX_ELD_ADDRESS, | 7432 | aud_cntl_st, IBX_ELD_ADDRESS, |
7433 | hdmiw_hdmiedid)) | 7433 | hdmiw_hdmiedid)) |
7434 | return; | 7434 | return; |
7435 | 7435 | ||
7436 | i = I915_READ(aud_cntrl_st2); | 7436 | i = I915_READ(aud_cntrl_st2); |
7437 | i &= ~eldv; | 7437 | i &= ~eldv; |
7438 | I915_WRITE(aud_cntrl_st2, i); | 7438 | I915_WRITE(aud_cntrl_st2, i); |
7439 | 7439 | ||
7440 | if (!eld[0]) | 7440 | if (!eld[0]) |
7441 | return; | 7441 | return; |
7442 | 7442 | ||
7443 | i = I915_READ(aud_cntl_st); | 7443 | i = I915_READ(aud_cntl_st); |
7444 | i &= ~IBX_ELD_ADDRESS; | 7444 | i &= ~IBX_ELD_ADDRESS; |
7445 | I915_WRITE(aud_cntl_st, i); | 7445 | I915_WRITE(aud_cntl_st, i); |
7446 | i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ | 7446 | i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ |
7447 | DRM_DEBUG_DRIVER("port num:%d\n", i); | 7447 | DRM_DEBUG_DRIVER("port num:%d\n", i); |
7448 | 7448 | ||
7449 | len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ | 7449 | len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ |
7450 | DRM_DEBUG_DRIVER("ELD size %d\n", len); | 7450 | DRM_DEBUG_DRIVER("ELD size %d\n", len); |
7451 | for (i = 0; i < len; i++) | 7451 | for (i = 0; i < len; i++) |
7452 | I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); | 7452 | I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); |
7453 | 7453 | ||
7454 | i = I915_READ(aud_cntrl_st2); | 7454 | i = I915_READ(aud_cntrl_st2); |
7455 | i |= eldv; | 7455 | i |= eldv; |
7456 | I915_WRITE(aud_cntrl_st2, i); | 7456 | I915_WRITE(aud_cntrl_st2, i); |
7457 | 7457 | ||
7458 | } | 7458 | } |
7459 | 7459 | ||
/*
 * ironlake_write_eld - program the hardware ELD (EDID-Like Data) buffer so
 * the HD-audio codec can discover the monitor's audio capabilities.
 *
 * Selects the per-PCH register set (IBX, CPT or VLV), figures out which
 * digital port the audio is routed to, optionally flags the connection as
 * DisplayPort in the ELD, and then writes the ELD bytes into the hardware
 * buffer, toggling the per-port ELD-valid bit around the update.
 *
 * @connector: connector whose cached connector->eld[] is written out
 * @crtc:      CRTC driving the display; only its pipe number is used
 * @mode:      current mode; used to pick the HDMI audio pixel clock config
 */
static void ironlake_write_eld(struct drm_connector *connector,
			       struct drm_crtc *crtc,
			       struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;
	int pipe = to_intel_crtc(crtc)->pipe;

	/* Pick the register block matching the PCH/platform generation. */
	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
		aud_config = IBX_AUD_CFG(pipe);
		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else if (IS_VALLEYVIEW(connector->dev)) {
		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
		aud_config = VLV_AUD_CFG(pipe);
		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
		aud_config = CPT_AUD_CFG(pipe);
		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));

	/*
	 * Determine the digital port carrying the audio.  On VLV the port is
	 * taken from the attached encoder; elsewhere it is read back from the
	 * DIP port-select field of the audio control/status register.
	 */
	if (IS_VALLEYVIEW(connector->dev))  {
		struct intel_encoder *intel_encoder;
		struct intel_digital_port *intel_dig_port;

		intel_encoder = intel_attached_encoder(connector);
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		i = intel_dig_port->port;
	} else {
		i = I915_READ(aud_cntl_st);
		i = (i >> 29) & DIP_PORT_SEL_MASK;
		/* DIP_Port_Select, 0x1 = PortB */
	}

	if (!i) {
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
		/* Each port's ELD-valid bit sits 4 bits apart (B at bit 0). */
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else {
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
	}

	/* Nothing to do if the hardware already holds this exact ELD. */
	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the ELD while we rewrite it. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	/* No ELD to program (e.g. monitor without audio): leave invalid. */
	if (!eld[0])
		return;

	/* Reset the ELD write address before streaming the bytes in. */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	/* eld[2] counts 4-byte dwords; write the buffer one dword at a time. */
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Mark the freshly-written ELD as valid for this port. */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}
7552 | 7552 | ||
7553 | void intel_write_eld(struct drm_encoder *encoder, | 7553 | void intel_write_eld(struct drm_encoder *encoder, |
7554 | struct drm_display_mode *mode) | 7554 | struct drm_display_mode *mode) |
7555 | { | 7555 | { |
7556 | struct drm_crtc *crtc = encoder->crtc; | 7556 | struct drm_crtc *crtc = encoder->crtc; |
7557 | struct drm_connector *connector; | 7557 | struct drm_connector *connector; |
7558 | struct drm_device *dev = encoder->dev; | 7558 | struct drm_device *dev = encoder->dev; |
7559 | struct drm_i915_private *dev_priv = dev->dev_private; | 7559 | struct drm_i915_private *dev_priv = dev->dev_private; |
7560 | 7560 | ||
7561 | connector = drm_select_eld(encoder, mode); | 7561 | connector = drm_select_eld(encoder, mode); |
7562 | if (!connector) | 7562 | if (!connector) |
7563 | return; | 7563 | return; |
7564 | 7564 | ||
7565 | DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", | 7565 | DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", |
7566 | connector->base.id, | 7566 | connector->base.id, |
7567 | drm_get_connector_name(connector), | 7567 | drm_get_connector_name(connector), |
7568 | connector->encoder->base.id, | 7568 | connector->encoder->base.id, |
7569 | drm_get_encoder_name(connector->encoder)); | 7569 | drm_get_encoder_name(connector->encoder)); |
7570 | 7570 | ||
7571 | connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; | 7571 | connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; |
7572 | 7572 | ||
7573 | if (dev_priv->display.write_eld) | 7573 | if (dev_priv->display.write_eld) |
7574 | dev_priv->display.write_eld(connector, crtc, mode); | 7574 | dev_priv->display.write_eld(connector, crtc, mode); |
7575 | } | 7575 | } |
7576 | 7576 | ||
/*
 * i845_update_cursor - show or hide the hardware cursor on 845G/865G.
 *
 * These old chipsets have a single cursor (register pair _CURACNTR /
 * _CURABASE) and only support position updates elsewhere; this helper only
 * flips visibility.  @base is the cursor surface address; base == 0 means
 * "hide".  A no-op if the visibility state is already correct.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	/* Visibility unchanged: avoid the register dance entirely. */
	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	/* Remember the new state so the next call can short-circuit. */
	intel_crtc->cursor_visible = visible;
}
7606 | 7606 | ||
/*
 * i9xx_update_cursor - program the per-pipe cursor control/base registers
 * on gen3+ (non-IVB-style) hardware.
 *
 * @base: GTT/phys address of the cursor surface; 0 hides the cursor.
 *
 * Only rewrites CURCNTR when visibility changes (selecting the ARGB mode
 * matching the current cursor width), but always rewrites CURBASE — the
 * base write is what latches the new state on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		int16_t width = intel_crtc->cursor_width;
		uint32_t cntl = I915_READ(CURCNTR(pipe));
		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= MCURSOR_GAMMA_ENABLE;

			/* Only square 64/128/256 ARGB cursors are supported. */
			switch (width) {
			case 64:
				cntl |= CURSOR_MODE_64_ARGB_AX;
				break;
			case 128:
				cntl |= CURSOR_MODE_128_ARGB_AX;
				break;
			case 256:
				cntl |= CURSOR_MODE_256_ARGB_AX;
				break;
			default:
				/* cursor_set should have rejected this size */
				WARN_ON(1);
				return;
			}
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	POSTING_READ(CURCNTR(pipe));
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));
}
7650 | 7650 | ||
/*
 * ivb_update_cursor - program the IVB-layout cursor registers (IVB, HSW,
 * BDW use the CURCNTR_IVB/CURBASE_IVB register layout).
 *
 * @base: address of the cursor surface; 0 hides the cursor.
 *
 * Same structure as i9xx_update_cursor(): the control register is only
 * touched on visibility changes, while the base write + posting read at
 * the end commit whatever changed on the next vblank.  HSW/BDW get the
 * pipe CSC enabled and trickle-feed cleared on top.
 */
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		int16_t width = intel_crtc->cursor_width;
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
		if (base) {
			cntl &= ~CURSOR_MODE;
			cntl |= MCURSOR_GAMMA_ENABLE;
			/* Only square 64/128/256 ARGB cursors are supported. */
			switch (width) {
			case 64:
				cntl |= CURSOR_MODE_64_ARGB_AX;
				break;
			case 128:
				cntl |= CURSOR_MODE_128_ARGB_AX;
				break;
			case 256:
				cntl |= CURSOR_MODE_256_ARGB_AX;
				break;
			default:
				/* cursor_set should have rejected this size */
				WARN_ON(1);
				return;
			}
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			cntl |= CURSOR_PIPE_CSC_ENABLE;
			cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
		}
		I915_WRITE(CURCNTR_IVB(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	POSTING_READ(CURCNTR_IVB(pipe));
	I915_WRITE(CURBASE_IVB(pipe), base);
	POSTING_READ(CURBASE_IVB(pipe));
}
7696 | 7696 | ||
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * intel_crtc_update_cursor - recompute cursor visibility/position and push
 * it to the hardware via the per-platform update helper.
 *
 * @on: false forces the cursor hidden regardless of position.
 *
 * The cursor is also forced off (base = 0) whenever no part of it overlaps
 * the pipe source rectangle.  Positions are encoded sign/magnitude: a
 * CURSOR_POS_SIGN flag plus the absolute value, per axis.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base = 0, pos = 0;
	bool visible;

	if (on)
		base = intel_crtc->cursor_addr;

	/* Entirely off the right/bottom edge: hide. */
	if (x >= intel_crtc->config.pipe_src_w)
		base = 0;

	if (y >= intel_crtc->config.pipe_src_h)
		base = 0;

	if (x < 0) {
		/* Entirely off the left edge: hide. */
		if (x + intel_crtc->cursor_width <= 0)
			base = 0;

		/* Negative coordinate: set sign bit, store magnitude. */
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		/* Entirely off the top edge: hide. */
		if (y + intel_crtc->cursor_height <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	visible = base != 0;
	/* Hidden and already hidden: nothing to write. */
	if (!visible && !intel_crtc->cursor_visible)
		return;

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(CURPOS_IVB(pipe), pos);
		ivb_update_cursor(crtc, base);
	} else {
		I915_WRITE(CURPOS(pipe), pos);
		if (IS_845G(dev) || IS_I865G(dev))
			i845_update_cursor(crtc, base);
		else
			i9xx_update_cursor(crtc, base);
	}
}
7752 | 7752 | ||
7753 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, | 7753 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, |
7754 | struct drm_file *file, | 7754 | struct drm_file *file, |
7755 | uint32_t handle, | 7755 | uint32_t handle, |
7756 | uint32_t width, uint32_t height) | 7756 | uint32_t width, uint32_t height) |
7757 | { | 7757 | { |
7758 | struct drm_device *dev = crtc->dev; | 7758 | struct drm_device *dev = crtc->dev; |
7759 | struct drm_i915_private *dev_priv = dev->dev_private; | 7759 | struct drm_i915_private *dev_priv = dev->dev_private; |
7760 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 7760 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
7761 | struct drm_i915_gem_object *obj; | 7761 | struct drm_i915_gem_object *obj; |
7762 | unsigned old_width; | 7762 | unsigned old_width; |
7763 | uint32_t addr; | 7763 | uint32_t addr; |
7764 | int ret; | 7764 | int ret; |
7765 | 7765 | ||
7766 | /* if we want to turn off the cursor ignore width and height */ | 7766 | /* if we want to turn off the cursor ignore width and height */ |
7767 | if (!handle) { | 7767 | if (!handle) { |
7768 | DRM_DEBUG_KMS("cursor off\n"); | 7768 | DRM_DEBUG_KMS("cursor off\n"); |
7769 | addr = 0; | 7769 | addr = 0; |
7770 | obj = NULL; | 7770 | obj = NULL; |
7771 | mutex_lock(&dev->struct_mutex); | 7771 | mutex_lock(&dev->struct_mutex); |
7772 | goto finish; | 7772 | goto finish; |
7773 | } | 7773 | } |
7774 | 7774 | ||
7775 | /* Check for which cursor types we support */ | 7775 | /* Check for which cursor types we support */ |
7776 | if (!((width == 64 && height == 64) || | 7776 | if (!((width == 64 && height == 64) || |
7777 | (width == 128 && height == 128 && !IS_GEN2(dev)) || | 7777 | (width == 128 && height == 128 && !IS_GEN2(dev)) || |
7778 | (width == 256 && height == 256 && !IS_GEN2(dev)))) { | 7778 | (width == 256 && height == 256 && !IS_GEN2(dev)))) { |
7779 | DRM_DEBUG("Cursor dimension not supported\n"); | 7779 | DRM_DEBUG("Cursor dimension not supported\n"); |
7780 | return -EINVAL; | 7780 | return -EINVAL; |
7781 | } | 7781 | } |
7782 | 7782 | ||
7783 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); | 7783 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); |
7784 | if (&obj->base == NULL) | 7784 | if (&obj->base == NULL) |
7785 | return -ENOENT; | 7785 | return -ENOENT; |
7786 | 7786 | ||
7787 | if (obj->base.size < width * height * 4) { | 7787 | if (obj->base.size < width * height * 4) { |
7788 | DRM_DEBUG_KMS("buffer is to small\n"); | 7788 | DRM_DEBUG_KMS("buffer is to small\n"); |
7789 | ret = -ENOMEM; | 7789 | ret = -ENOMEM; |
7790 | goto fail; | 7790 | goto fail; |
7791 | } | 7791 | } |
7792 | 7792 | ||
7793 | /* we only need to pin inside GTT if cursor is non-phy */ | 7793 | /* we only need to pin inside GTT if cursor is non-phy */ |
7794 | mutex_lock(&dev->struct_mutex); | 7794 | mutex_lock(&dev->struct_mutex); |
7795 | if (!INTEL_INFO(dev)->cursor_needs_physical) { | 7795 | if (!INTEL_INFO(dev)->cursor_needs_physical) { |
7796 | unsigned alignment; | 7796 | unsigned alignment; |
7797 | 7797 | ||
7798 | if (obj->tiling_mode) { | 7798 | if (obj->tiling_mode) { |
7799 | DRM_DEBUG_KMS("cursor cannot be tiled\n"); | 7799 | DRM_DEBUG_KMS("cursor cannot be tiled\n"); |
7800 | ret = -EINVAL; | 7800 | ret = -EINVAL; |
7801 | goto fail_locked; | 7801 | goto fail_locked; |
7802 | } | 7802 | } |
7803 | 7803 | ||
7804 | /* Note that the w/a also requires 2 PTE of padding following | 7804 | /* Note that the w/a also requires 2 PTE of padding following |
7805 | * the bo. We currently fill all unused PTE with the shadow | 7805 | * the bo. We currently fill all unused PTE with the shadow |
7806 | * page and so we should always have valid PTE following the | 7806 | * page and so we should always have valid PTE following the |
7807 | * cursor preventing the VT-d warning. | 7807 | * cursor preventing the VT-d warning. |
7808 | */ | 7808 | */ |
7809 | alignment = 0; | 7809 | alignment = 0; |
7810 | if (need_vtd_wa(dev)) | 7810 | if (need_vtd_wa(dev)) |
7811 | alignment = 64*1024; | 7811 | alignment = 64*1024; |
7812 | 7812 | ||
7813 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL); | 7813 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL); |
7814 | if (ret) { | 7814 | if (ret) { |
7815 | DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n"); | 7815 | DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n"); |
7816 | goto fail_locked; | 7816 | goto fail_locked; |
7817 | } | 7817 | } |
7818 | 7818 | ||
7819 | ret = i915_gem_object_put_fence(obj); | 7819 | ret = i915_gem_object_put_fence(obj); |
7820 | if (ret) { | 7820 | if (ret) { |
7821 | DRM_DEBUG_KMS("failed to release fence for cursor"); | 7821 | DRM_DEBUG_KMS("failed to release fence for cursor"); |
7822 | goto fail_unpin; | 7822 | goto fail_unpin; |
7823 | } | 7823 | } |
7824 | 7824 | ||
7825 | addr = i915_gem_obj_ggtt_offset(obj); | 7825 | addr = i915_gem_obj_ggtt_offset(obj); |
7826 | } else { | 7826 | } else { |
7827 | int align = IS_I830(dev) ? 16 * 1024 : 256; | 7827 | int align = IS_I830(dev) ? 16 * 1024 : 256; |
7828 | ret = i915_gem_attach_phys_object(dev, obj, | 7828 | ret = i915_gem_attach_phys_object(dev, obj, |
7829 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, | 7829 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, |
7830 | align); | 7830 | align); |
7831 | if (ret) { | 7831 | if (ret) { |
7832 | DRM_DEBUG_KMS("failed to attach phys object\n"); | 7832 | DRM_DEBUG_KMS("failed to attach phys object\n"); |
7833 | goto fail_locked; | 7833 | goto fail_locked; |
7834 | } | 7834 | } |
7835 | addr = obj->phys_obj->handle->busaddr; | 7835 | addr = obj->phys_obj->handle->busaddr; |
7836 | } | 7836 | } |
7837 | 7837 | ||
7838 | if (IS_GEN2(dev)) | 7838 | if (IS_GEN2(dev)) |
7839 | I915_WRITE(CURSIZE, (height << 12) | width); | 7839 | I915_WRITE(CURSIZE, (height << 12) | width); |
7840 | 7840 | ||
7841 | finish: | 7841 | finish: |
7842 | if (intel_crtc->cursor_bo) { | 7842 | if (intel_crtc->cursor_bo) { |
7843 | if (INTEL_INFO(dev)->cursor_needs_physical) { | 7843 | if (INTEL_INFO(dev)->cursor_needs_physical) { |
7844 | if (intel_crtc->cursor_bo != obj) | 7844 | if (intel_crtc->cursor_bo != obj) |
7845 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | 7845 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); |
7846 | } else | 7846 | } else |
7847 | i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); | 7847 | i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); |
7848 | drm_gem_object_unreference(&intel_crtc->cursor_bo->base); | 7848 | drm_gem_object_unreference(&intel_crtc->cursor_bo->base); |
7849 | } | 7849 | } |
7850 | 7850 | ||
7851 | mutex_unlock(&dev->struct_mutex); | 7851 | mutex_unlock(&dev->struct_mutex); |
7852 | 7852 | ||
7853 | old_width = intel_crtc->cursor_width; | 7853 | old_width = intel_crtc->cursor_width; |
7854 | 7854 | ||
7855 | intel_crtc->cursor_addr = addr; | 7855 | intel_crtc->cursor_addr = addr; |
7856 | intel_crtc->cursor_bo = obj; | 7856 | intel_crtc->cursor_bo = obj; |
7857 | intel_crtc->cursor_width = width; | 7857 | intel_crtc->cursor_width = width; |
7858 | intel_crtc->cursor_height = height; | 7858 | intel_crtc->cursor_height = height; |
7859 | 7859 | ||
7860 | if (intel_crtc->active) { | 7860 | if (intel_crtc->active) { |
7861 | if (old_width != width) | 7861 | if (old_width != width) |
7862 | intel_update_watermarks(crtc); | 7862 | intel_update_watermarks(crtc); |
7863 | intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); | 7863 | intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); |
7864 | } | 7864 | } |
7865 | 7865 | ||
7866 | return 0; | 7866 | return 0; |
7867 | fail_unpin: | 7867 | fail_unpin: |
7868 | i915_gem_object_unpin_from_display_plane(obj); | 7868 | i915_gem_object_unpin_from_display_plane(obj); |
7869 | fail_locked: | 7869 | fail_locked: |
7870 | mutex_unlock(&dev->struct_mutex); | 7870 | mutex_unlock(&dev->struct_mutex); |
7871 | fail: | 7871 | fail: |
7872 | drm_gem_object_unreference_unlocked(&obj->base); | 7872 | drm_gem_object_unreference_unlocked(&obj->base); |
7873 | return ret; | 7873 | return ret; |
7874 | } | 7874 | } |
7875 | 7875 | ||
7876 | static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | 7876 | static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) |
7877 | { | 7877 | { |
7878 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 7878 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
7879 | 7879 | ||
7880 | intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX); | 7880 | intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX); |
7881 | intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX); | 7881 | intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX); |
7882 | 7882 | ||
7883 | if (intel_crtc->active) | 7883 | if (intel_crtc->active) |
7884 | intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); | 7884 | intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); |
7885 | 7885 | ||
7886 | return 0; | 7886 | return 0; |
7887 | } | 7887 | } |
7888 | 7888 | ||
7889 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | 7889 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
7890 | u16 *blue, uint32_t start, uint32_t size) | 7890 | u16 *blue, uint32_t start, uint32_t size) |
7891 | { | 7891 | { |
7892 | int end = (start + size > 256) ? 256 : start + size, i; | 7892 | int end = (start + size > 256) ? 256 : start + size, i; |
7893 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 7893 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
7894 | 7894 | ||
7895 | for (i = start; i < end; i++) { | 7895 | for (i = start; i < end; i++) { |
7896 | intel_crtc->lut_r[i] = red[i] >> 8; | 7896 | intel_crtc->lut_r[i] = red[i] >> 8; |
7897 | intel_crtc->lut_g[i] = green[i] >> 8; | 7897 | intel_crtc->lut_g[i] = green[i] >> 8; |
7898 | intel_crtc->lut_b[i] = blue[i] >> 8; | 7898 | intel_crtc->lut_b[i] = blue[i] >> 8; |
7899 | } | 7899 | } |
7900 | 7900 | ||
7901 | intel_crtc_load_lut(crtc); | 7901 | intel_crtc_load_lut(crtc); |
7902 | } | 7902 | } |
7903 | 7903 | ||
/*
 * VESA 640x480@72Hz mode to set on the pipe while doing load-based
 * output detection (31.5 MHz dot clock, negative h/v sync), used by
 * intel_get_load_detect_pipe() when the caller supplies no mode.
 */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
7909 | 7909 | ||
7910 | struct drm_framebuffer * | 7910 | struct drm_framebuffer * |
7911 | __intel_framebuffer_create(struct drm_device *dev, | 7911 | __intel_framebuffer_create(struct drm_device *dev, |
7912 | struct drm_mode_fb_cmd2 *mode_cmd, | 7912 | struct drm_mode_fb_cmd2 *mode_cmd, |
7913 | struct drm_i915_gem_object *obj) | 7913 | struct drm_i915_gem_object *obj) |
7914 | { | 7914 | { |
7915 | struct intel_framebuffer *intel_fb; | 7915 | struct intel_framebuffer *intel_fb; |
7916 | int ret; | 7916 | int ret; |
7917 | 7917 | ||
7918 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | 7918 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); |
7919 | if (!intel_fb) { | 7919 | if (!intel_fb) { |
7920 | drm_gem_object_unreference_unlocked(&obj->base); | 7920 | drm_gem_object_unreference_unlocked(&obj->base); |
7921 | return ERR_PTR(-ENOMEM); | 7921 | return ERR_PTR(-ENOMEM); |
7922 | } | 7922 | } |
7923 | 7923 | ||
7924 | ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); | 7924 | ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); |
7925 | if (ret) | 7925 | if (ret) |
7926 | goto err; | 7926 | goto err; |
7927 | 7927 | ||
7928 | return &intel_fb->base; | 7928 | return &intel_fb->base; |
7929 | err: | 7929 | err: |
7930 | drm_gem_object_unreference_unlocked(&obj->base); | 7930 | drm_gem_object_unreference_unlocked(&obj->base); |
7931 | kfree(intel_fb); | 7931 | kfree(intel_fb); |
7932 | 7932 | ||
7933 | return ERR_PTR(ret); | 7933 | return ERR_PTR(ret); |
7934 | } | 7934 | } |
7935 | 7935 | ||
7936 | static struct drm_framebuffer * | 7936 | static struct drm_framebuffer * |
7937 | intel_framebuffer_create(struct drm_device *dev, | 7937 | intel_framebuffer_create(struct drm_device *dev, |
7938 | struct drm_mode_fb_cmd2 *mode_cmd, | 7938 | struct drm_mode_fb_cmd2 *mode_cmd, |
7939 | struct drm_i915_gem_object *obj) | 7939 | struct drm_i915_gem_object *obj) |
7940 | { | 7940 | { |
7941 | struct drm_framebuffer *fb; | 7941 | struct drm_framebuffer *fb; |
7942 | int ret; | 7942 | int ret; |
7943 | 7943 | ||
7944 | ret = i915_mutex_lock_interruptible(dev); | 7944 | ret = i915_mutex_lock_interruptible(dev); |
7945 | if (ret) | 7945 | if (ret) |
7946 | return ERR_PTR(ret); | 7946 | return ERR_PTR(ret); |
7947 | fb = __intel_framebuffer_create(dev, mode_cmd, obj); | 7947 | fb = __intel_framebuffer_create(dev, mode_cmd, obj); |
7948 | mutex_unlock(&dev->struct_mutex); | 7948 | mutex_unlock(&dev->struct_mutex); |
7949 | 7949 | ||
7950 | return fb; | 7950 | return fb; |
7951 | } | 7951 | } |
7952 | 7952 | ||
7953 | static u32 | 7953 | static u32 |
7954 | intel_framebuffer_pitch_for_width(int width, int bpp) | 7954 | intel_framebuffer_pitch_for_width(int width, int bpp) |
7955 | { | 7955 | { |
7956 | u32 pitch = DIV_ROUND_UP(width * bpp, 8); | 7956 | u32 pitch = DIV_ROUND_UP(width * bpp, 8); |
7957 | return ALIGN(pitch, 64); | 7957 | return ALIGN(pitch, 64); |
7958 | } | 7958 | } |
7959 | 7959 | ||
7960 | static u32 | 7960 | static u32 |
7961 | intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) | 7961 | intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) |
7962 | { | 7962 | { |
7963 | u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); | 7963 | u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); |
7964 | return ALIGN(pitch * mode->vdisplay, PAGE_SIZE); | 7964 | return ALIGN(pitch * mode->vdisplay, PAGE_SIZE); |
7965 | } | 7965 | } |
7966 | 7966 | ||
7967 | static struct drm_framebuffer * | 7967 | static struct drm_framebuffer * |
7968 | intel_framebuffer_create_for_mode(struct drm_device *dev, | 7968 | intel_framebuffer_create_for_mode(struct drm_device *dev, |
7969 | struct drm_display_mode *mode, | 7969 | struct drm_display_mode *mode, |
7970 | int depth, int bpp) | 7970 | int depth, int bpp) |
7971 | { | 7971 | { |
7972 | struct drm_i915_gem_object *obj; | 7972 | struct drm_i915_gem_object *obj; |
7973 | struct drm_mode_fb_cmd2 mode_cmd = { 0 }; | 7973 | struct drm_mode_fb_cmd2 mode_cmd = { 0 }; |
7974 | 7974 | ||
7975 | obj = i915_gem_alloc_object(dev, | 7975 | obj = i915_gem_alloc_object(dev, |
7976 | intel_framebuffer_size_for_mode(mode, bpp)); | 7976 | intel_framebuffer_size_for_mode(mode, bpp)); |
7977 | if (obj == NULL) | 7977 | if (obj == NULL) |
7978 | return ERR_PTR(-ENOMEM); | 7978 | return ERR_PTR(-ENOMEM); |
7979 | 7979 | ||
7980 | mode_cmd.width = mode->hdisplay; | 7980 | mode_cmd.width = mode->hdisplay; |
7981 | mode_cmd.height = mode->vdisplay; | 7981 | mode_cmd.height = mode->vdisplay; |
7982 | mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width, | 7982 | mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width, |
7983 | bpp); | 7983 | bpp); |
7984 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); | 7984 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); |
7985 | 7985 | ||
7986 | return intel_framebuffer_create(dev, &mode_cmd, obj); | 7986 | return intel_framebuffer_create(dev, &mode_cmd, obj); |
7987 | } | 7987 | } |
7988 | 7988 | ||
7989 | static struct drm_framebuffer * | 7989 | static struct drm_framebuffer * |
7990 | mode_fits_in_fbdev(struct drm_device *dev, | 7990 | mode_fits_in_fbdev(struct drm_device *dev, |
7991 | struct drm_display_mode *mode) | 7991 | struct drm_display_mode *mode) |
7992 | { | 7992 | { |
7993 | #ifdef CONFIG_DRM_I915_FBDEV | 7993 | #ifdef CONFIG_DRM_I915_FBDEV |
7994 | struct drm_i915_private *dev_priv = dev->dev_private; | 7994 | struct drm_i915_private *dev_priv = dev->dev_private; |
7995 | struct drm_i915_gem_object *obj; | 7995 | struct drm_i915_gem_object *obj; |
7996 | struct drm_framebuffer *fb; | 7996 | struct drm_framebuffer *fb; |
7997 | 7997 | ||
7998 | if (!dev_priv->fbdev) | 7998 | if (!dev_priv->fbdev) |
7999 | return NULL; | 7999 | return NULL; |
8000 | 8000 | ||
8001 | if (!dev_priv->fbdev->fb) | 8001 | if (!dev_priv->fbdev->fb) |
8002 | return NULL; | 8002 | return NULL; |
8003 | 8003 | ||
8004 | obj = dev_priv->fbdev->fb->obj; | 8004 | obj = dev_priv->fbdev->fb->obj; |
8005 | BUG_ON(!obj); | 8005 | BUG_ON(!obj); |
8006 | 8006 | ||
8007 | fb = &dev_priv->fbdev->fb->base; | 8007 | fb = &dev_priv->fbdev->fb->base; |
8008 | if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, | 8008 | if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, |
8009 | fb->bits_per_pixel)) | 8009 | fb->bits_per_pixel)) |
8010 | return NULL; | 8010 | return NULL; |
8011 | 8011 | ||
8012 | if (obj->base.size < mode->vdisplay * fb->pitches[0]) | 8012 | if (obj->base.size < mode->vdisplay * fb->pitches[0]) |
8013 | return NULL; | 8013 | return NULL; |
8014 | 8014 | ||
8015 | return fb; | 8015 | return fb; |
8016 | #else | 8016 | #else |
8017 | return NULL; | 8017 | return NULL; |
8018 | #endif | 8018 | #endif |
8019 | } | 8019 | } |
8020 | 8020 | ||
/*
 * intel_get_load_detect_pipe - borrow a pipe to drive @connector for
 * load-based output detection.
 * @connector: connector being probed
 * @mode: mode to program, or NULL to use the default 640x480 load_detect_mode
 * @old: state saved here for intel_release_load_detect_pipe() to restore
 *
 * Returns true with crtc->mutex HELD on success; the matching
 * intel_release_load_detect_pipe() call drops it.  Returns false with no
 * lock held on failure.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	int i = -1;	/* crtc index; pre-decremented so first iteration is 0 */

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		mutex_lock(&crtc->mutex);

		/* load_detect_temp = false: no temporary state to undo later */
		old->dpms_mode = connector->dpms;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (connector->dpms != DRM_MODE_DPMS_ON)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

		/* crtc->mutex intentionally left held for the release call */
		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	/* Stage the temporary connector->encoder->crtc routing. */
	mutex_lock(&crtc->mutex);
	intel_encoder->new_crtc = to_intel_crtc(crtc);
	to_intel_connector(connector)->new_encoder = intel_encoder;

	intel_crtc = to_intel_crtc(crtc);
	intel_crtc->new_enabled = true;
	intel_crtc->new_config = &intel_crtc->config;
	old->dpms_mode = connector->dpms;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		/*
		 * NOTE(review): on allocation failure release_fb is left as
		 * an ERR_PTR here; the fail path below never dereferences it
		 * but callers presumably must not either — verify.
		 */
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		goto fail;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	/* crtc->mutex intentionally left held for the release call */
	return true;

fail:
	/* Roll the staged new_* state back to match the current hw state. */
	intel_crtc->new_enabled = crtc->enabled;
	if (intel_crtc->new_enabled)
		intel_crtc->new_config = &intel_crtc->config;
	else
		intel_crtc->new_config = NULL;
	mutex_unlock(&crtc->mutex);
	return false;
}
8137 | 8137 | ||
/*
 * intel_release_load_detect_pipe - undo intel_get_load_detect_pipe().
 * @connector: connector that was probed
 * @old: state saved by the matching intel_get_load_detect_pipe() call
 *
 * Tears down the temporary pipe/framebuffer if one was created, or
 * restores the connector's previous DPMS mode if an existing crtc was
 * reused.  Always drops the crtc->mutex acquired by the get call.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		/* Unstage the temporary routing and shut the pipe down. */
		to_intel_connector(connector)->new_encoder = NULL;
		intel_encoder->new_crtc = NULL;
		intel_crtc->new_enabled = false;
		intel_crtc->new_config = NULL;
		intel_set_mode(crtc, NULL, 0, 0, NULL);

		/* Drop the temporary framebuffer, if we allocated one. */
		if (old->release_fb) {
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		mutex_unlock(&crtc->mutex);
		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);

	mutex_unlock(&crtc->mutex);
}
8173 | 8173 | ||
8174 | static int i9xx_pll_refclk(struct drm_device *dev, | 8174 | static int i9xx_pll_refclk(struct drm_device *dev, |
8175 | const struct intel_crtc_config *pipe_config) | 8175 | const struct intel_crtc_config *pipe_config) |
8176 | { | 8176 | { |
8177 | struct drm_i915_private *dev_priv = dev->dev_private; | 8177 | struct drm_i915_private *dev_priv = dev->dev_private; |
8178 | u32 dpll = pipe_config->dpll_hw_state.dpll; | 8178 | u32 dpll = pipe_config->dpll_hw_state.dpll; |
8179 | 8179 | ||
8180 | if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) | 8180 | if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) |
8181 | return dev_priv->vbt.lvds_ssc_freq; | 8181 | return dev_priv->vbt.lvds_ssc_freq; |
8182 | else if (HAS_PCH_SPLIT(dev)) | 8182 | else if (HAS_PCH_SPLIT(dev)) |
8183 | return 120000; | 8183 | return 120000; |
8184 | else if (!IS_GEN2(dev)) | 8184 | else if (!IS_GEN2(dev)) |
8185 | return 96000; | 8185 | return 96000; |
8186 | else | 8186 | else |
8187 | return 48000; | 8187 | return 48000; |
8188 | } | 8188 | } |
8189 | 8189 | ||
/*
 * Returns the clock of the currently programmed mode of the given pipe,
 * reconstructed from the saved DPLL/FP register values in
 * @pipe_config->dpll_hw_state.  The result lands in
 * @pipe_config->port_clock; on a bad DPLL mode the function bails out
 * without writing it.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	intel_clock_t clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* The DPLL selects which of the two FP divisor registers is live. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode the M/N divisors; Pineview encodes N differently. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* gen3+: P1 is a one-hot bitfield, hence the ffs(). */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL output mode (DAC vs LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			pineview_clock(refclk, &clock);
		else
			i9xx_clock(refclk, &clock);
	} else {
		/* gen2: I830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		i9xx_clock(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = clock.dot;
}
8278 | 8278 | ||
8279 | int intel_dotclock_calculate(int link_freq, | 8279 | int intel_dotclock_calculate(int link_freq, |
8280 | const struct intel_link_m_n *m_n) | 8280 | const struct intel_link_m_n *m_n) |
8281 | { | 8281 | { |
8282 | /* | 8282 | /* |
8283 | * The calculation for the data clock is: | 8283 | * The calculation for the data clock is: |
8284 | * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp | 8284 | * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp |
8285 | * But we want to avoid losing precison if possible, so: | 8285 | * But we want to avoid losing precison if possible, so: |
8286 | * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) | 8286 | * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) |
8287 | * | 8287 | * |
8288 | * and the link clock is simpler: | 8288 | * and the link clock is simpler: |
8289 | * link_clock = (m * link_clock) / n | 8289 | * link_clock = (m * link_clock) / n |
8290 | */ | 8290 | */ |
8291 | 8291 | ||
8292 | if (!m_n->link_n) | 8292 | if (!m_n->link_n) |
8293 | return 0; | 8293 | return 0; |
8294 | 8294 | ||
8295 | return div_u64((u64)m_n->link_m * link_freq, m_n->link_n); | 8295 | return div_u64((u64)m_n->link_m * link_freq, m_n->link_n); |
8296 | } | 8296 | } |
8297 | 8297 | ||
8298 | static void ironlake_pch_clock_get(struct intel_crtc *crtc, | 8298 | static void ironlake_pch_clock_get(struct intel_crtc *crtc, |
8299 | struct intel_crtc_config *pipe_config) | 8299 | struct intel_crtc_config *pipe_config) |
8300 | { | 8300 | { |
8301 | struct drm_device *dev = crtc->base.dev; | 8301 | struct drm_device *dev = crtc->base.dev; |
8302 | 8302 | ||
8303 | /* read out port_clock from the DPLL */ | 8303 | /* read out port_clock from the DPLL */ |
8304 | i9xx_crtc_clock_get(crtc, pipe_config); | 8304 | i9xx_crtc_clock_get(crtc, pipe_config); |
8305 | 8305 | ||
8306 | /* | 8306 | /* |
8307 | * This value does not include pixel_multiplier. | 8307 | * This value does not include pixel_multiplier. |
8308 | * We will check that port_clock and adjusted_mode.crtc_clock | 8308 | * We will check that port_clock and adjusted_mode.crtc_clock |
8309 | * agree once we know their relationship in the encoder's | 8309 | * agree once we know their relationship in the encoder's |
8310 | * get_config() function. | 8310 | * get_config() function. |
8311 | */ | 8311 | */ |
8312 | pipe_config->adjusted_mode.crtc_clock = | 8312 | pipe_config->adjusted_mode.crtc_clock = |
8313 | intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000, | 8313 | intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000, |
8314 | &pipe_config->fdi_m_n); | 8314 | &pipe_config->fdi_m_n); |
8315 | } | 8315 | } |
8316 | 8316 | ||
/*
 * Returns the currently programmed mode of the given pipe, read back
 * from the transcoder timing registers and the pipe's DPLL/FP
 * registers.  All timing fields are stored N-1 encoded in hardware,
 * hence the "+ 1" on each.  Returns a kzalloc'd mode the caller must
 * free, or NULL on allocation failure.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_config pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
	pipe_config.pixel_multiplier = 1;
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);

	/* Low 16 bits hold the active size, high 16 bits the total. */
	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	return mode;
}
8364 | 8364 | ||
/*
 * Restore the full LVDS pixel clock by clearing the FPA1 (downclocked)
 * rate select in the pipe's DPLL.
 *
 * No-op on PCH-split platforms and when no LVDS downclock is available.
 * Called when the framebuffer becomes busy again (see intel_mark_fb_busy).
 */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* The panel registers must be unlocked before the rate
		 * select bit may be rewritten. */
		assert_panel_unlocked(dev_priv, pipe);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		/* Wait one vblank for the new rate to latch, then read
		 * back to verify the hardware accepted it. */
		intel_wait_for_vblank(dev, pipe);

		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}
}
8395 | 8395 | ||
/*
 * Drop the LVDS pixel clock to the downclocked rate by setting the FPA1
 * rate select in the pipe's DPLL, saving power while the display is idle.
 *
 * No-op on PCH-split platforms and when no LVDS downclock is available.
 * Invoked from intel_mark_idle() when powersave is enabled.
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		int dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		/* Rate select may only be changed with the panel
		 * registers unlocked. */
		assert_panel_unlocked(dev_priv, pipe);

		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		/* Wait a vblank, then read back to confirm the switch. */
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}

}
8431 | 8431 | ||
/*
 * Mark the device busy: take a runtime-PM reference (released again by
 * intel_mark_idle()) and refresh the gfx power tracking value.
 * Idempotent while mm.busy is already set.
 */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.busy)
		return;

	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	dev_priv->mm.busy = true;
}
8443 | 8443 | ||
/*
 * Mark the device idle: the counterpart of intel_mark_busy().
 *
 * When powersaving is enabled, downclock the LVDS PLL on every crtc that
 * still has a primary framebuffer and let RPS (gen6+) drop to its idle
 * frequency.  Always releases the runtime-PM reference taken by
 * intel_mark_busy(), even when powersave is disabled.
 */
void intel_mark_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (!dev_priv->mm.busy)
		return;

	dev_priv->mm.busy = false;

	if (!i915.powersave)
		goto out;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->primary->fb)
			continue;

		intel_decrease_pllclock(crtc);
	}

	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_idle(dev->dev_private);

out:
	/* Drop the reference acquired in intel_mark_busy(). */
	intel_runtime_pm_put(dev_priv);
}
8470 | 8470 | ||
/*
 * Note that @obj is being rendered to: for every crtc currently scanning
 * out this object, restore the full PLL clock (undoing any LVDS
 * downclock) and, if a ring was given and FBC is enabled, flag the ring
 * so FBC state is refreshed after the rendering completes.
 *
 * No-op when the powersave modparam is disabled.
 */
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
			struct intel_ring_buffer *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_crtc *crtc;

	if (!i915.powersave)
		return;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->primary->fb)
			continue;

		/* Only crtcs whose primary plane shows this object. */
		if (to_intel_framebuffer(crtc->primary->fb)->obj != obj)
			continue;

		intel_increase_pllclock(crtc);
		if (ring && intel_fbc_enabled(dev))
			ring->fbc_dirty = true;
	}
}
8492 | 8492 | ||
/*
 * Destroy an intel_crtc: cancel any pending unpin work, drop the cursor,
 * and free the crtc.
 *
 * The unpin work pointer is detached under event_lock so the flip-complete
 * irq path cannot race with us; cancel_work_sync() is then safe to call
 * outside the spinlock (it may sleep).
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	/* Drop any cursor bo reference held by this crtc. */
	intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
8516 | 8516 | ||
/*
 * Deferred completion of a page flip, run from the driver workqueue:
 * unpin the old framebuffer object, drop the references taken when the
 * flip was queued, refresh FBC, and release the work item.
 *
 * Runs under struct_mutex because unpin and FBC update require it.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct drm_device *dev = work->crtc->dev;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	/* The crtc must still account for this flip; going to zero here
	 * would mean a completion we never queued. */
	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);

	kfree(work);
}
8536 | 8536 | ||
/*
 * Flip-complete irq handler core: if a pending flip on @crtc has reached
 * INTEL_FLIP_COMPLETE, detach it, send the vblank event to userspace,
 * wake flip waiters, and queue the deferred unpin work.
 *
 * Early or spurious interrupts (no work queued, or the flip command not
 * yet emitted) are ignored; the barriers pair with those in
 * intel_mark_page_flip_active().
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* and that the unpin work is consistent wrt ->pending. */
	smp_rmb();

	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);

	drm_vblank_put(dev, intel_crtc->pipe);

	spin_unlock_irqrestore(&dev->event_lock, flags);

	/* Anyone blocked in intel_crtc_wait_for_pending_flips() etc. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/* Unpinning must sleep, so defer it to the workqueue. */
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
8578 | 8578 | ||
/*
 * Flip-complete entry point for platforms whose irq reports the pipe:
 * map pipe -> crtc and hand off to do_intel_finish_page_flip().
 */
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}
8586 | 8586 | ||
/*
 * Flip-complete entry point for platforms whose irq reports the plane:
 * map plane -> crtc and hand off to do_intel_finish_page_flip().
 */
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}
8594 | 8594 | ||
/*
 * Flip-pending irq handler: advance the pending flip's state from
 * INTEL_FLIP_PENDING towards INTEL_FLIP_COMPLETE.  The inc_not_zero
 * ensures a flip that has not yet been marked active (pending == 0)
 * is not bumped by a spurious interrupt.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;

	/* NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work)
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
8611 | 8611 | ||
8612 | inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) | 8612 | inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) |
8613 | { | 8613 | { |
8614 | /* Ensure that the work item is consistent when activating it ... */ | 8614 | /* Ensure that the work item is consistent when activating it ... */ |
8615 | smp_wmb(); | 8615 | smp_wmb(); |
8616 | atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING); | 8616 | atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING); |
8617 | /* and that it is marked active as soon as the irq could fire. */ | 8617 | /* and that it is marked active as soon as the irq could fire. */ |
8618 | smp_wmb(); | 8618 | smp_wmb(); |
8619 | } | 8619 | } |
8620 | 8620 | ||
/*
 * Queue a page flip on gen2: pin+fence the new fb object, then emit
 * MI_WAIT_FOR_EVENT (serialising against a still-pending flip on the
 * same plane) followed by MI_DISPLAY_FLIP on the render ring.
 *
 * Returns 0 on success or a negative errno; on failure the object is
 * left unpinned.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		goto err_unpin;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	/* Mark active before the commands can be executed by the GPU. */
	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
8665 | 8665 | ||
/*
 * Queue a page flip on gen3.  Identical to the gen2 path except that
 * the i915 variant of the flip opcode (MI_DISPLAY_FLIP_I915) is used
 * and the trailing dword is MI_NOOP instead of an aux base address.
 *
 * Returns 0 on success or a negative errno; on failure the object is
 * left unpinned.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		goto err_unpin;

	/* Serialise against a still-pending flip on the same plane. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, MI_NOOP);

	/* Mark active before the commands can be executed by the GPU. */
	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
8707 | 8707 | ||
/*
 * Queue a page flip on gen4 (i965+): no wait-for-event is needed, the
 * flip packet carries the base address (with the tiling mode in the low
 * bits) plus a panel-fitter/pipesrc dword.
 *
 * Returns 0 on success or a negative errno; on failure the object is
 * left unpinned.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto err_unpin;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring,
			(i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	/* Mark active before the commands can be executed by the GPU. */
	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
8756 | 8756 | ||
/*
 * Queue a page flip on gen6.  Like gen4 but the tiling mode is OR'ed
 * into the pitch dword rather than the base address.
 *
 * Returns 0 on success or a negative errno; on failure the object is
 * left unpinned.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto err_unpin;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	/* Mark active before the commands can be executed by the GPU. */
	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
8801 | 8801 | ||
/*
 * intel_gen7_queue_flip - queue an MI_DISPLAY_FLIP on IVB-class hardware
 * @dev:   drm device
 * @crtc:  crtc whose primary plane is being flipped
 * @fb:    new framebuffer to display
 * @obj:   backing GEM object of @fb
 * @flags: page-flip flags from userspace (unused here)
 *
 * Pins and fences @obj for scanout, then emits an MI_DISPLAY_FLIP packet
 * on a ring.  The blitter ring (BCS) is used on Valleyview, or whenever
 * the object is not already tracked by the render ring, so the flip does
 * not have to wait behind unrelated render work.
 *
 * Returns 0 on success or a negative errno; on failure the object is
 * unpinned again before returning.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring;
	uint32_t plane_bit = 0;
	int len, ret;

	/* Prefer the ring that already owns the object, but fall back to
	 * the blitter on VLV or when the object isn't on the render ring. */
	ring = obj->ring;
	if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
		ring = &dev_priv->ring[BCS];

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	/* Encode the target plane into the MI_DISPLAY_FLIP command. */
	switch(intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		ret = -ENODEV;
		goto err_unpin;
	}

	/* 4 dwords for the flip packet itself, plus 6 for the DERRMR
	 * LRI+SRM dance emitted below when running on the render ring. */
	len = 4;
	if (ring->id == RCS)
		len += 6;

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(ring);
	if (ret)
		goto err_unpin;

	ret = intel_ring_begin(ring, len);
	if (ret)
		goto err_unpin;

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (ring->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		/* Save DERRMR back to the scratch page so the value can be
		 * restored later; SRM must target the global GTT address. */
		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
				      MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, (MI_NOOP));

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
8895 | 8895 | ||
/*
 * intel_default_queue_flip - fallback queue_flip hook
 *
 * Installed for platforms with no MI_DISPLAY_FLIP implementation; always
 * fails with -ENODEV so callers fall back to a full modeset-based flip.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    uint32_t flags)
{
	return -ENODEV;
}
8904 | 8904 | ||
/*
 * intel_crtc_page_flip - implement drm_crtc_funcs.page_flip
 * @crtc:  crtc to flip
 * @fb:    framebuffer to flip to
 * @event: optional vblank event to send on completion
 * @page_flip_flags: flags from the page-flip ioctl
 *
 * Validates that the flip can be done with an MI_DISPLAY_FLIP (same pixel
 * format, and on gen4+ same offsets/pitch), allocates the unpin work item,
 * takes a vblank reference, and hands off to the per-platform queue_flip
 * hook.  dev->event_lock is borrowed to serialise access to
 * intel_crtc->unpin_work; only one flip may be pending per crtc (-EBUSY
 * otherwise).
 *
 * If the GPU is terminally wedged (or queue_flip returns -EIO), falls back
 * to a CPU-driven base update via intel_pipe_set_base() and sends the
 * vblank event directly.
 *
 * Returns 0 on success or a negative errno.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;
	int ret;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	/* GPU is gone for good: go straight to the set_base fallback. */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_vblank_get(dev, intel_crtc->pipe);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work) {
		/* A flip is already pending on this crtc. */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		kfree(work);
		drm_vblank_put(dev, intel_crtc->pipe);

		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		return -EBUSY;
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/* Throttle: don't let unpin work items pile up unboundedly. */
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	/* Reference the objects for the scheduled work. */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->primary->fb = fb;

	work->pending_flip_obj = obj;

	work->enable_stall_check = true;

	atomic_inc(&intel_crtc->unpin_work_count);
	/* Snapshot the reset counter so a GPU reset during the flip can be
	 * detected by the completion code. */
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
	if (ret)
		goto cleanup_pending;

	intel_disable_fbc(dev);
	intel_mark_fb_busy(obj, NULL);
	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_pending:
	/* Unwind in the reverse order of the setup above. */
	atomic_dec(&intel_crtc->unpin_work_count);
	crtc->primary->fb = old_fb;
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

cleanup:
	spin_lock_irqsave(&dev->event_lock, flags);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_vblank_put(dev, intel_crtc->pipe);
free_work:
	kfree(work);

	if (ret == -EIO) {
		/* NOTE: out_hang is also entered by goto from the wedged
		 * check above, skipping all of the allocation/queueing. */
out_hang:
		intel_crtc_wait_for_pending_flips(crtc);
		ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
		if (ret == 0 && event)
			drm_send_vblank_event(dev, intel_crtc->pipe, event);
	}
	return ret;
}
9018 | 9018 | ||
/* CRTC helper vtable: only the atomic base update (used by kgdb/panic
 * paths) and gamma LUT loading are routed through the helpers. */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
};
9023 | 9023 | ||
9024 | /** | 9024 | /** |
9025 | * intel_modeset_update_staged_output_state | 9025 | * intel_modeset_update_staged_output_state |
9026 | * | 9026 | * |
9027 | * Updates the staged output configuration state, e.g. after we've read out the | 9027 | * Updates the staged output configuration state, e.g. after we've read out the |
9028 | * current hw state. | 9028 | * current hw state. |
9029 | */ | 9029 | */ |
9030 | static void intel_modeset_update_staged_output_state(struct drm_device *dev) | 9030 | static void intel_modeset_update_staged_output_state(struct drm_device *dev) |
9031 | { | 9031 | { |
9032 | struct intel_crtc *crtc; | 9032 | struct intel_crtc *crtc; |
9033 | struct intel_encoder *encoder; | 9033 | struct intel_encoder *encoder; |
9034 | struct intel_connector *connector; | 9034 | struct intel_connector *connector; |
9035 | 9035 | ||
9036 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 9036 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
9037 | base.head) { | 9037 | base.head) { |
9038 | connector->new_encoder = | 9038 | connector->new_encoder = |
9039 | to_intel_encoder(connector->base.encoder); | 9039 | to_intel_encoder(connector->base.encoder); |
9040 | } | 9040 | } |
9041 | 9041 | ||
9042 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 9042 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
9043 | base.head) { | 9043 | base.head) { |
9044 | encoder->new_crtc = | 9044 | encoder->new_crtc = |
9045 | to_intel_crtc(encoder->base.crtc); | 9045 | to_intel_crtc(encoder->base.crtc); |
9046 | } | 9046 | } |
9047 | 9047 | ||
9048 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, | 9048 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, |
9049 | base.head) { | 9049 | base.head) { |
9050 | crtc->new_enabled = crtc->base.enabled; | 9050 | crtc->new_enabled = crtc->base.enabled; |
9051 | 9051 | ||
9052 | if (crtc->new_enabled) | 9052 | if (crtc->new_enabled) |
9053 | crtc->new_config = &crtc->config; | 9053 | crtc->new_config = &crtc->config; |
9054 | else | 9054 | else |
9055 | crtc->new_config = NULL; | 9055 | crtc->new_config = NULL; |
9056 | } | 9056 | } |
9057 | } | 9057 | } |
9058 | 9058 | ||
9059 | /** | 9059 | /** |
9060 | * intel_modeset_commit_output_state | 9060 | * intel_modeset_commit_output_state |
9061 | * | 9061 | * |
9062 | * This function copies the stage display pipe configuration to the real one. | 9062 | * This function copies the stage display pipe configuration to the real one. |
9063 | */ | 9063 | */ |
9064 | static void intel_modeset_commit_output_state(struct drm_device *dev) | 9064 | static void intel_modeset_commit_output_state(struct drm_device *dev) |
9065 | { | 9065 | { |
9066 | struct intel_crtc *crtc; | 9066 | struct intel_crtc *crtc; |
9067 | struct intel_encoder *encoder; | 9067 | struct intel_encoder *encoder; |
9068 | struct intel_connector *connector; | 9068 | struct intel_connector *connector; |
9069 | 9069 | ||
9070 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 9070 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
9071 | base.head) { | 9071 | base.head) { |
9072 | connector->base.encoder = &connector->new_encoder->base; | 9072 | connector->base.encoder = &connector->new_encoder->base; |
9073 | } | 9073 | } |
9074 | 9074 | ||
9075 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 9075 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
9076 | base.head) { | 9076 | base.head) { |
9077 | encoder->base.crtc = &encoder->new_crtc->base; | 9077 | encoder->base.crtc = &encoder->new_crtc->base; |
9078 | } | 9078 | } |
9079 | 9079 | ||
9080 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, | 9080 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, |
9081 | base.head) { | 9081 | base.head) { |
9082 | crtc->base.enabled = crtc->new_enabled; | 9082 | crtc->base.enabled = crtc->new_enabled; |
9083 | } | 9083 | } |
9084 | } | 9084 | } |
9085 | 9085 | ||
9086 | static void | 9086 | static void |
9087 | connected_sink_compute_bpp(struct intel_connector * connector, | 9087 | connected_sink_compute_bpp(struct intel_connector * connector, |
9088 | struct intel_crtc_config *pipe_config) | 9088 | struct intel_crtc_config *pipe_config) |
9089 | { | 9089 | { |
9090 | int bpp = pipe_config->pipe_bpp; | 9090 | int bpp = pipe_config->pipe_bpp; |
9091 | 9091 | ||
9092 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n", | 9092 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n", |
9093 | connector->base.base.id, | 9093 | connector->base.base.id, |
9094 | drm_get_connector_name(&connector->base)); | 9094 | drm_get_connector_name(&connector->base)); |
9095 | 9095 | ||
9096 | /* Don't use an invalid EDID bpc value */ | 9096 | /* Don't use an invalid EDID bpc value */ |
9097 | if (connector->base.display_info.bpc && | 9097 | if (connector->base.display_info.bpc && |
9098 | connector->base.display_info.bpc * 3 < bpp) { | 9098 | connector->base.display_info.bpc * 3 < bpp) { |
9099 | DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n", | 9099 | DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n", |
9100 | bpp, connector->base.display_info.bpc*3); | 9100 | bpp, connector->base.display_info.bpc*3); |
9101 | pipe_config->pipe_bpp = connector->base.display_info.bpc*3; | 9101 | pipe_config->pipe_bpp = connector->base.display_info.bpc*3; |
9102 | } | 9102 | } |
9103 | 9103 | ||
9104 | /* Clamp bpp to 8 on screens without EDID 1.4 */ | 9104 | /* Clamp bpp to 8 on screens without EDID 1.4 */ |
9105 | if (connector->base.display_info.bpc == 0 && bpp > 24) { | 9105 | if (connector->base.display_info.bpc == 0 && bpp > 24) { |
9106 | DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", | 9106 | DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", |
9107 | bpp); | 9107 | bpp); |
9108 | pipe_config->pipe_bpp = 24; | 9108 | pipe_config->pipe_bpp = 24; |
9109 | } | 9109 | } |
9110 | } | 9110 | } |
9111 | 9111 | ||
/*
 * compute_baseline_pipe_bpp - derive the starting pipe bpp from @fb's format
 * @crtc:        crtc being configured
 * @fb:          framebuffer whose pixel format drives the baseline
 * @pipe_config: config to store the result in
 *
 * Maps the framebuffer pixel format to a pipe bpp, then clamps it against
 * the EDID limits of every connector staged onto @crtc.  Stores the
 * (possibly clamped) value in pipe_config->pipe_bpp.
 *
 * Returns the pre-clamp baseline bpp, or -EINVAL for unsupported formats.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_connector *connector;
	int bpp;

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		bpp = 8*3; /* since we go through a colormap */
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
			return -EINVAL;
		/* fallthrough */
	case DRM_FORMAT_RGB565:
		bpp = 6*3; /* min is 18bpp */
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
			return -EINVAL;
		/* fallthrough */
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		bpp = 8*3;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
			return -EINVAL;
		bpp = 10*3;
		break;
	/* TODO: gen4+ supports 16 bpc floating point, too. */
	default:
		DRM_DEBUG_KMS("unsupported depth\n");
		return -EINVAL;
	}

	pipe_config->pipe_bpp = bpp;

	/* Clamp display bpp to EDID value */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* Only connectors staged onto this crtc constrain the bpp. */
		if (!connector->new_encoder ||
		    connector->new_encoder->new_crtc != crtc)
			continue;

		connected_sink_compute_bpp(connector, pipe_config);
	}

	return bpp;
}
9171 | 9171 | ||
/* Dump the hardware (crtc_*) timing fields of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
9182 | 9182 | ||
/*
 * intel_dump_pipe_config - dump a pipe configuration to the KMS debug log
 * @crtc:        crtc the config belongs to
 * @pipe_config: configuration to dump
 * @context:     short tag describing where the dump comes from
 *
 * Debug-only helper; prints every field of @pipe_config (transcoder, bpp,
 * FDI/DP m_n values, modes, pfit state, IPS, double-wide) so mismatched
 * configs can be diagnosed from dmesg.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config,
				   const char *context)
{
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
		      context, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);
	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
}
9223 | 9223 | ||
9224 | static bool encoders_cloneable(const struct intel_encoder *a, | 9224 | static bool encoders_cloneable(const struct intel_encoder *a, |
9225 | const struct intel_encoder *b) | 9225 | const struct intel_encoder *b) |
9226 | { | 9226 | { |
9227 | /* masks could be asymmetric, so check both ways */ | 9227 | /* masks could be asymmetric, so check both ways */ |
9228 | return a == b || (a->cloneable & (1 << b->type) && | 9228 | return a == b || (a->cloneable & (1 << b->type) && |
9229 | b->cloneable & (1 << a->type)); | 9229 | b->cloneable & (1 << a->type)); |
9230 | } | 9230 | } |
9231 | 9231 | ||
9232 | static bool check_single_encoder_cloning(struct intel_crtc *crtc, | 9232 | static bool check_single_encoder_cloning(struct intel_crtc *crtc, |
9233 | struct intel_encoder *encoder) | 9233 | struct intel_encoder *encoder) |
9234 | { | 9234 | { |
9235 | struct drm_device *dev = crtc->base.dev; | 9235 | struct drm_device *dev = crtc->base.dev; |
9236 | struct intel_encoder *source_encoder; | 9236 | struct intel_encoder *source_encoder; |
9237 | 9237 | ||
9238 | list_for_each_entry(source_encoder, | 9238 | list_for_each_entry(source_encoder, |
9239 | &dev->mode_config.encoder_list, base.head) { | 9239 | &dev->mode_config.encoder_list, base.head) { |
9240 | if (source_encoder->new_crtc != crtc) | 9240 | if (source_encoder->new_crtc != crtc) |
9241 | continue; | 9241 | continue; |
9242 | 9242 | ||
9243 | if (!encoders_cloneable(encoder, source_encoder)) | 9243 | if (!encoders_cloneable(encoder, source_encoder)) |
9244 | return false; | 9244 | return false; |
9245 | } | 9245 | } |
9246 | 9246 | ||
9247 | return true; | 9247 | return true; |
9248 | } | 9248 | } |
9249 | 9249 | ||
9250 | static bool check_encoder_cloning(struct intel_crtc *crtc) | 9250 | static bool check_encoder_cloning(struct intel_crtc *crtc) |
9251 | { | 9251 | { |
9252 | struct drm_device *dev = crtc->base.dev; | 9252 | struct drm_device *dev = crtc->base.dev; |
9253 | struct intel_encoder *encoder; | 9253 | struct intel_encoder *encoder; |
9254 | 9254 | ||
9255 | list_for_each_entry(encoder, | 9255 | list_for_each_entry(encoder, |
9256 | &dev->mode_config.encoder_list, base.head) { | 9256 | &dev->mode_config.encoder_list, base.head) { |
9257 | if (encoder->new_crtc != crtc) | 9257 | if (encoder->new_crtc != crtc) |
9258 | continue; | 9258 | continue; |
9259 | 9259 | ||
9260 | if (!check_single_encoder_cloning(crtc, encoder)) | 9260 | if (!check_single_encoder_cloning(crtc, encoder)) |
9261 | return false; | 9261 | return false; |
9262 | } | 9262 | } |
9263 | 9263 | ||
9264 | return true; | 9264 | return true; |
9265 | } | 9265 | } |
9266 | 9266 | ||
9267 | static struct intel_crtc_config * | 9267 | static struct intel_crtc_config * |
9268 | intel_modeset_pipe_config(struct drm_crtc *crtc, | 9268 | intel_modeset_pipe_config(struct drm_crtc *crtc, |
9269 | struct drm_framebuffer *fb, | 9269 | struct drm_framebuffer *fb, |
9270 | struct drm_display_mode *mode) | 9270 | struct drm_display_mode *mode) |
9271 | { | 9271 | { |
9272 | struct drm_device *dev = crtc->dev; | 9272 | struct drm_device *dev = crtc->dev; |
9273 | struct intel_encoder *encoder; | 9273 | struct intel_encoder *encoder; |
9274 | struct intel_crtc_config *pipe_config; | 9274 | struct intel_crtc_config *pipe_config; |
9275 | int plane_bpp, ret = -EINVAL; | 9275 | int plane_bpp, ret = -EINVAL; |
9276 | bool retry = true; | 9276 | bool retry = true; |
9277 | 9277 | ||
9278 | if (!check_encoder_cloning(to_intel_crtc(crtc))) { | 9278 | if (!check_encoder_cloning(to_intel_crtc(crtc))) { |
9279 | DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); | 9279 | DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); |
9280 | return ERR_PTR(-EINVAL); | 9280 | return ERR_PTR(-EINVAL); |
9281 | } | 9281 | } |
9282 | 9282 | ||
9283 | pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); | 9283 | pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); |
9284 | if (!pipe_config) | 9284 | if (!pipe_config) |
9285 | return ERR_PTR(-ENOMEM); | 9285 | return ERR_PTR(-ENOMEM); |
9286 | 9286 | ||
9287 | drm_mode_copy(&pipe_config->adjusted_mode, mode); | 9287 | drm_mode_copy(&pipe_config->adjusted_mode, mode); |
9288 | drm_mode_copy(&pipe_config->requested_mode, mode); | 9288 | drm_mode_copy(&pipe_config->requested_mode, mode); |
9289 | 9289 | ||
9290 | pipe_config->cpu_transcoder = | 9290 | pipe_config->cpu_transcoder = |
9291 | (enum transcoder) to_intel_crtc(crtc)->pipe; | 9291 | (enum transcoder) to_intel_crtc(crtc)->pipe; |
9292 | pipe_config->shared_dpll = DPLL_ID_PRIVATE; | 9292 | pipe_config->shared_dpll = DPLL_ID_PRIVATE; |
9293 | 9293 | ||
9294 | /* | 9294 | /* |
9295 | * Sanitize sync polarity flags based on requested ones. If neither | 9295 | * Sanitize sync polarity flags based on requested ones. If neither |
9296 | * positive or negative polarity is requested, treat this as meaning | 9296 | * positive or negative polarity is requested, treat this as meaning |
9297 | * negative polarity. | 9297 | * negative polarity. |
9298 | */ | 9298 | */ |
9299 | if (!(pipe_config->adjusted_mode.flags & | 9299 | if (!(pipe_config->adjusted_mode.flags & |
9300 | (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) | 9300 | (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) |
9301 | pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; | 9301 | pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; |
9302 | 9302 | ||
9303 | if (!(pipe_config->adjusted_mode.flags & | 9303 | if (!(pipe_config->adjusted_mode.flags & |
9304 | (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) | 9304 | (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) |
9305 | pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; | 9305 | pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; |
9306 | 9306 | ||
9307 | /* Compute a starting value for pipe_config->pipe_bpp taking the source | 9307 | /* Compute a starting value for pipe_config->pipe_bpp taking the source |
9308 | * plane pixel format and any sink constraints into account. Returns the | 9308 | * plane pixel format and any sink constraints into account. Returns the |
9309 | * source plane bpp so that dithering can be selected on mismatches | 9309 | * source plane bpp so that dithering can be selected on mismatches |
9310 | * after encoders and crtc also have had their say. */ | 9310 | * after encoders and crtc also have had their say. */ |
9311 | plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc), | 9311 | plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc), |
9312 | fb, pipe_config); | 9312 | fb, pipe_config); |
9313 | if (plane_bpp < 0) | 9313 | if (plane_bpp < 0) |
9314 | goto fail; | 9314 | goto fail; |
9315 | 9315 | ||
9316 | /* | 9316 | /* |
9317 | * Determine the real pipe dimensions. Note that stereo modes can | 9317 | * Determine the real pipe dimensions. Note that stereo modes can |
9318 | * increase the actual pipe size due to the frame doubling and | 9318 | * increase the actual pipe size due to the frame doubling and |
9319 | * insertion of additional space for blanks between the frame. This | 9319 | * insertion of additional space for blanks between the frame. This |
9320 | * is stored in the crtc timings. We use the requested mode to do this | 9320 | * is stored in the crtc timings. We use the requested mode to do this |
9321 | * computation to clearly distinguish it from the adjusted mode, which | 9321 | * computation to clearly distinguish it from the adjusted mode, which |
9322 | * can be changed by the connectors in the below retry loop. | 9322 | * can be changed by the connectors in the below retry loop. |
9323 | */ | 9323 | */ |
9324 | drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE); | 9324 | drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE); |
9325 | pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay; | 9325 | pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay; |
9326 | pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay; | 9326 | pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay; |
9327 | 9327 | ||
9328 | encoder_retry: | 9328 | encoder_retry: |
9329 | /* Ensure the port clock defaults are reset when retrying. */ | 9329 | /* Ensure the port clock defaults are reset when retrying. */ |
9330 | pipe_config->port_clock = 0; | 9330 | pipe_config->port_clock = 0; |
9331 | pipe_config->pixel_multiplier = 1; | 9331 | pipe_config->pixel_multiplier = 1; |
9332 | 9332 | ||
9333 | /* Fill in default crtc timings, allow encoders to overwrite them. */ | 9333 | /* Fill in default crtc timings, allow encoders to overwrite them. */ |
9334 | drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE); | 9334 | drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE); |
9335 | 9335 | ||
9336 | /* Pass our mode to the connectors and the CRTC to give them a chance to | 9336 | /* Pass our mode to the connectors and the CRTC to give them a chance to |
9337 | * adjust it according to limitations or connector properties, and also | 9337 | * adjust it according to limitations or connector properties, and also |
9338 | * a chance to reject the mode entirely. | 9338 | * a chance to reject the mode entirely. |
9339 | */ | 9339 | */ |
9340 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 9340 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
9341 | base.head) { | 9341 | base.head) { |
9342 | 9342 | ||
9343 | if (&encoder->new_crtc->base != crtc) | 9343 | if (&encoder->new_crtc->base != crtc) |
9344 | continue; | 9344 | continue; |
9345 | 9345 | ||
9346 | if (!(encoder->compute_config(encoder, pipe_config))) { | 9346 | if (!(encoder->compute_config(encoder, pipe_config))) { |
9347 | DRM_DEBUG_KMS("Encoder config failure\n"); | 9347 | DRM_DEBUG_KMS("Encoder config failure\n"); |
9348 | goto fail; | 9348 | goto fail; |
9349 | } | 9349 | } |
9350 | } | 9350 | } |
9351 | 9351 | ||
9352 | /* Set default port clock if not overwritten by the encoder. Needs to be | 9352 | /* Set default port clock if not overwritten by the encoder. Needs to be |
9353 | * done afterwards in case the encoder adjusts the mode. */ | 9353 | * done afterwards in case the encoder adjusts the mode. */ |
9354 | if (!pipe_config->port_clock) | 9354 | if (!pipe_config->port_clock) |
9355 | pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock | 9355 | pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock |
9356 | * pipe_config->pixel_multiplier; | 9356 | * pipe_config->pixel_multiplier; |
9357 | 9357 | ||
9358 | ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); | 9358 | ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); |
9359 | if (ret < 0) { | 9359 | if (ret < 0) { |
9360 | DRM_DEBUG_KMS("CRTC fixup failed\n"); | 9360 | DRM_DEBUG_KMS("CRTC fixup failed\n"); |
9361 | goto fail; | 9361 | goto fail; |
9362 | } | 9362 | } |
9363 | 9363 | ||
9364 | if (ret == RETRY) { | 9364 | if (ret == RETRY) { |
9365 | if (WARN(!retry, "loop in pipe configuration computation\n")) { | 9365 | if (WARN(!retry, "loop in pipe configuration computation\n")) { |
9366 | ret = -EINVAL; | 9366 | ret = -EINVAL; |
9367 | goto fail; | 9367 | goto fail; |
9368 | } | 9368 | } |
9369 | 9369 | ||
9370 | DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); | 9370 | DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); |
9371 | retry = false; | 9371 | retry = false; |
9372 | goto encoder_retry; | 9372 | goto encoder_retry; |
9373 | } | 9373 | } |
9374 | 9374 | ||
9375 | pipe_config->dither = pipe_config->pipe_bpp != plane_bpp; | 9375 | pipe_config->dither = pipe_config->pipe_bpp != plane_bpp; |
9376 | DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n", | 9376 | DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n", |
9377 | plane_bpp, pipe_config->pipe_bpp, pipe_config->dither); | 9377 | plane_bpp, pipe_config->pipe_bpp, pipe_config->dither); |
9378 | 9378 | ||
9379 | return pipe_config; | 9379 | return pipe_config; |
9380 | fail: | 9380 | fail: |
9381 | kfree(pipe_config); | 9381 | kfree(pipe_config); |
9382 | return ERR_PTR(ret); | 9382 | return ERR_PTR(ret); |
9383 | } | 9383 | } |
9384 | 9384 | ||
9385 | /* Computes which crtcs are affected and sets the relevant bits in the mask. For | 9385 | /* Computes which crtcs are affected and sets the relevant bits in the mask. For |
9386 | * simplicity we use the crtc's pipe number (because it's easier to obtain). */ | 9386 | * simplicity we use the crtc's pipe number (because it's easier to obtain). */ |
9387 | static void | 9387 | static void |
9388 | intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes, | 9388 | intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes, |
9389 | unsigned *prepare_pipes, unsigned *disable_pipes) | 9389 | unsigned *prepare_pipes, unsigned *disable_pipes) |
9390 | { | 9390 | { |
9391 | struct intel_crtc *intel_crtc; | 9391 | struct intel_crtc *intel_crtc; |
9392 | struct drm_device *dev = crtc->dev; | 9392 | struct drm_device *dev = crtc->dev; |
9393 | struct intel_encoder *encoder; | 9393 | struct intel_encoder *encoder; |
9394 | struct intel_connector *connector; | 9394 | struct intel_connector *connector; |
9395 | struct drm_crtc *tmp_crtc; | 9395 | struct drm_crtc *tmp_crtc; |
9396 | 9396 | ||
9397 | *disable_pipes = *modeset_pipes = *prepare_pipes = 0; | 9397 | *disable_pipes = *modeset_pipes = *prepare_pipes = 0; |
9398 | 9398 | ||
9399 | /* Check which crtcs have changed outputs connected to them, these need | 9399 | /* Check which crtcs have changed outputs connected to them, these need |
9400 | * to be part of the prepare_pipes mask. We don't (yet) support global | 9400 | * to be part of the prepare_pipes mask. We don't (yet) support global |
9401 | * modeset across multiple crtcs, so modeset_pipes will only have one | 9401 | * modeset across multiple crtcs, so modeset_pipes will only have one |
9402 | * bit set at most. */ | 9402 | * bit set at most. */ |
9403 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 9403 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
9404 | base.head) { | 9404 | base.head) { |
9405 | if (connector->base.encoder == &connector->new_encoder->base) | 9405 | if (connector->base.encoder == &connector->new_encoder->base) |
9406 | continue; | 9406 | continue; |
9407 | 9407 | ||
9408 | if (connector->base.encoder) { | 9408 | if (connector->base.encoder) { |
9409 | tmp_crtc = connector->base.encoder->crtc; | 9409 | tmp_crtc = connector->base.encoder->crtc; |
9410 | 9410 | ||
9411 | *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe; | 9411 | *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe; |
9412 | } | 9412 | } |
9413 | 9413 | ||
9414 | if (connector->new_encoder) | 9414 | if (connector->new_encoder) |
9415 | *prepare_pipes |= | 9415 | *prepare_pipes |= |
9416 | 1 << connector->new_encoder->new_crtc->pipe; | 9416 | 1 << connector->new_encoder->new_crtc->pipe; |
9417 | } | 9417 | } |
9418 | 9418 | ||
9419 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 9419 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
9420 | base.head) { | 9420 | base.head) { |
9421 | if (encoder->base.crtc == &encoder->new_crtc->base) | 9421 | if (encoder->base.crtc == &encoder->new_crtc->base) |
9422 | continue; | 9422 | continue; |
9423 | 9423 | ||
9424 | if (encoder->base.crtc) { | 9424 | if (encoder->base.crtc) { |
9425 | tmp_crtc = encoder->base.crtc; | 9425 | tmp_crtc = encoder->base.crtc; |
9426 | 9426 | ||
9427 | *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe; | 9427 | *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe; |
9428 | } | 9428 | } |
9429 | 9429 | ||
9430 | if (encoder->new_crtc) | 9430 | if (encoder->new_crtc) |
9431 | *prepare_pipes |= 1 << encoder->new_crtc->pipe; | 9431 | *prepare_pipes |= 1 << encoder->new_crtc->pipe; |
9432 | } | 9432 | } |
9433 | 9433 | ||
9434 | /* Check for pipes that will be enabled/disabled ... */ | 9434 | /* Check for pipes that will be enabled/disabled ... */ |
9435 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, | 9435 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, |
9436 | base.head) { | 9436 | base.head) { |
9437 | if (intel_crtc->base.enabled == intel_crtc->new_enabled) | 9437 | if (intel_crtc->base.enabled == intel_crtc->new_enabled) |
9438 | continue; | 9438 | continue; |
9439 | 9439 | ||
9440 | if (!intel_crtc->new_enabled) | 9440 | if (!intel_crtc->new_enabled) |
9441 | *disable_pipes |= 1 << intel_crtc->pipe; | 9441 | *disable_pipes |= 1 << intel_crtc->pipe; |
9442 | else | 9442 | else |
9443 | *prepare_pipes |= 1 << intel_crtc->pipe; | 9443 | *prepare_pipes |= 1 << intel_crtc->pipe; |
9444 | } | 9444 | } |
9445 | 9445 | ||
9446 | 9446 | ||
9447 | /* set_mode is also used to update properties on life display pipes. */ | 9447 | /* set_mode is also used to update properties on life display pipes. */ |
9448 | intel_crtc = to_intel_crtc(crtc); | 9448 | intel_crtc = to_intel_crtc(crtc); |
9449 | if (intel_crtc->new_enabled) | 9449 | if (intel_crtc->new_enabled) |
9450 | *prepare_pipes |= 1 << intel_crtc->pipe; | 9450 | *prepare_pipes |= 1 << intel_crtc->pipe; |
9451 | 9451 | ||
9452 | /* | 9452 | /* |
9453 | * For simplicity do a full modeset on any pipe where the output routing | 9453 | * For simplicity do a full modeset on any pipe where the output routing |
9454 | * changed. We could be more clever, but that would require us to be | 9454 | * changed. We could be more clever, but that would require us to be |
9455 | * more careful with calling the relevant encoder->mode_set functions. | 9455 | * more careful with calling the relevant encoder->mode_set functions. |
9456 | */ | 9456 | */ |
9457 | if (*prepare_pipes) | 9457 | if (*prepare_pipes) |
9458 | *modeset_pipes = *prepare_pipes; | 9458 | *modeset_pipes = *prepare_pipes; |
9459 | 9459 | ||
9460 | /* ... and mask these out. */ | 9460 | /* ... and mask these out. */ |
9461 | *modeset_pipes &= ~(*disable_pipes); | 9461 | *modeset_pipes &= ~(*disable_pipes); |
9462 | *prepare_pipes &= ~(*disable_pipes); | 9462 | *prepare_pipes &= ~(*disable_pipes); |
9463 | 9463 | ||
9464 | /* | 9464 | /* |
9465 | * HACK: We don't (yet) fully support global modesets. intel_set_config | 9465 | * HACK: We don't (yet) fully support global modesets. intel_set_config |
9466 | * obies this rule, but the modeset restore mode of | 9466 | * obies this rule, but the modeset restore mode of |
9467 | * intel_modeset_setup_hw_state does not. | 9467 | * intel_modeset_setup_hw_state does not. |
9468 | */ | 9468 | */ |
9469 | *modeset_pipes &= 1 << intel_crtc->pipe; | 9469 | *modeset_pipes &= 1 << intel_crtc->pipe; |
9470 | *prepare_pipes &= 1 << intel_crtc->pipe; | 9470 | *prepare_pipes &= 1 << intel_crtc->pipe; |
9471 | 9471 | ||
9472 | DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n", | 9472 | DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n", |
9473 | *modeset_pipes, *prepare_pipes, *disable_pipes); | 9473 | *modeset_pipes, *prepare_pipes, *disable_pipes); |
9474 | } | 9474 | } |
9475 | 9475 | ||
9476 | static bool intel_crtc_in_use(struct drm_crtc *crtc) | 9476 | static bool intel_crtc_in_use(struct drm_crtc *crtc) |
9477 | { | 9477 | { |
9478 | struct drm_encoder *encoder; | 9478 | struct drm_encoder *encoder; |
9479 | struct drm_device *dev = crtc->dev; | 9479 | struct drm_device *dev = crtc->dev; |
9480 | 9480 | ||
9481 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) | 9481 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) |
9482 | if (encoder->crtc == crtc) | 9482 | if (encoder->crtc == crtc) |
9483 | return true; | 9483 | return true; |
9484 | 9484 | ||
9485 | return false; | 9485 | return false; |
9486 | } | 9486 | } |
9487 | 9487 | ||
9488 | static void | 9488 | static void |
9489 | intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) | 9489 | intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) |
9490 | { | 9490 | { |
9491 | struct intel_encoder *intel_encoder; | 9491 | struct intel_encoder *intel_encoder; |
9492 | struct intel_crtc *intel_crtc; | 9492 | struct intel_crtc *intel_crtc; |
9493 | struct drm_connector *connector; | 9493 | struct drm_connector *connector; |
9494 | 9494 | ||
9495 | list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list, | 9495 | list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list, |
9496 | base.head) { | 9496 | base.head) { |
9497 | if (!intel_encoder->base.crtc) | 9497 | if (!intel_encoder->base.crtc) |
9498 | continue; | 9498 | continue; |
9499 | 9499 | ||
9500 | intel_crtc = to_intel_crtc(intel_encoder->base.crtc); | 9500 | intel_crtc = to_intel_crtc(intel_encoder->base.crtc); |
9501 | 9501 | ||
9502 | if (prepare_pipes & (1 << intel_crtc->pipe)) | 9502 | if (prepare_pipes & (1 << intel_crtc->pipe)) |
9503 | intel_encoder->connectors_active = false; | 9503 | intel_encoder->connectors_active = false; |
9504 | } | 9504 | } |
9505 | 9505 | ||
9506 | intel_modeset_commit_output_state(dev); | 9506 | intel_modeset_commit_output_state(dev); |
9507 | 9507 | ||
9508 | /* Double check state. */ | 9508 | /* Double check state. */ |
9509 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, | 9509 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, |
9510 | base.head) { | 9510 | base.head) { |
9511 | WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base)); | 9511 | WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base)); |
9512 | WARN_ON(intel_crtc->new_config && | 9512 | WARN_ON(intel_crtc->new_config && |
9513 | intel_crtc->new_config != &intel_crtc->config); | 9513 | intel_crtc->new_config != &intel_crtc->config); |
9514 | WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config); | 9514 | WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config); |
9515 | } | 9515 | } |
9516 | 9516 | ||
9517 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 9517 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
9518 | if (!connector->encoder || !connector->encoder->crtc) | 9518 | if (!connector->encoder || !connector->encoder->crtc) |
9519 | continue; | 9519 | continue; |
9520 | 9520 | ||
9521 | intel_crtc = to_intel_crtc(connector->encoder->crtc); | 9521 | intel_crtc = to_intel_crtc(connector->encoder->crtc); |
9522 | 9522 | ||
9523 | if (prepare_pipes & (1 << intel_crtc->pipe)) { | 9523 | if (prepare_pipes & (1 << intel_crtc->pipe)) { |
9524 | struct drm_property *dpms_property = | 9524 | struct drm_property *dpms_property = |
9525 | dev->mode_config.dpms_property; | 9525 | dev->mode_config.dpms_property; |
9526 | 9526 | ||
9527 | connector->dpms = DRM_MODE_DPMS_ON; | 9527 | connector->dpms = DRM_MODE_DPMS_ON; |
9528 | drm_object_property_set_value(&connector->base, | 9528 | drm_object_property_set_value(&connector->base, |
9529 | dpms_property, | 9529 | dpms_property, |
9530 | DRM_MODE_DPMS_ON); | 9530 | DRM_MODE_DPMS_ON); |
9531 | 9531 | ||
9532 | intel_encoder = to_intel_encoder(connector->encoder); | 9532 | intel_encoder = to_intel_encoder(connector->encoder); |
9533 | intel_encoder->connectors_active = true; | 9533 | intel_encoder->connectors_active = true; |
9534 | } | 9534 | } |
9535 | } | 9535 | } |
9536 | 9536 | ||
9537 | } | 9537 | } |
9538 | 9538 | ||
/*
 * Compare two clocks with a small tolerance. Returns true when the clocks
 * are equal, false when either is zero, and otherwise accepts them if the
 * relative difference is below roughly 5% (integer arithmetic).
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/* Equivalent to checking delta against ~5% of the average clock. */
	return (((delta + sum) * 100) / sum) < 105;
}
9556 | 9556 | ||
/*
 * Iterate over every intel_crtc on @dev whose pipe bit is set in @mask.
 * Fix: parenthesize the @mask argument in the expansion — previously a
 * caller passing a compound expression (e.g. `a | b`) would mis-bind
 * against the `&` due to operator precedence.
 * Note: expands to a loop whose body is guarded by a trailing `if`, so an
 * `else` placed directly after the loop would bind to that `if`.
 */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		if ((mask) & (1 << (intel_crtc)->pipe))
9562 | 9562 | ||
9563 | static bool | 9563 | static bool |
9564 | intel_pipe_config_compare(struct drm_device *dev, | 9564 | intel_pipe_config_compare(struct drm_device *dev, |
9565 | struct intel_crtc_config *current_config, | 9565 | struct intel_crtc_config *current_config, |
9566 | struct intel_crtc_config *pipe_config) | 9566 | struct intel_crtc_config *pipe_config) |
9567 | { | 9567 | { |
9568 | #define PIPE_CONF_CHECK_X(name) \ | 9568 | #define PIPE_CONF_CHECK_X(name) \ |
9569 | if (current_config->name != pipe_config->name) { \ | 9569 | if (current_config->name != pipe_config->name) { \ |
9570 | DRM_ERROR("mismatch in " #name " " \ | 9570 | DRM_ERROR("mismatch in " #name " " \ |
9571 | "(expected 0x%08x, found 0x%08x)\n", \ | 9571 | "(expected 0x%08x, found 0x%08x)\n", \ |
9572 | current_config->name, \ | 9572 | current_config->name, \ |
9573 | pipe_config->name); \ | 9573 | pipe_config->name); \ |
9574 | return false; \ | 9574 | return false; \ |
9575 | } | 9575 | } |
9576 | 9576 | ||
9577 | #define PIPE_CONF_CHECK_I(name) \ | 9577 | #define PIPE_CONF_CHECK_I(name) \ |
9578 | if (current_config->name != pipe_config->name) { \ | 9578 | if (current_config->name != pipe_config->name) { \ |
9579 | DRM_ERROR("mismatch in " #name " " \ | 9579 | DRM_ERROR("mismatch in " #name " " \ |
9580 | "(expected %i, found %i)\n", \ | 9580 | "(expected %i, found %i)\n", \ |
9581 | current_config->name, \ | 9581 | current_config->name, \ |
9582 | pipe_config->name); \ | 9582 | pipe_config->name); \ |
9583 | return false; \ | 9583 | return false; \ |
9584 | } | 9584 | } |
9585 | 9585 | ||
9586 | #define PIPE_CONF_CHECK_FLAGS(name, mask) \ | 9586 | #define PIPE_CONF_CHECK_FLAGS(name, mask) \ |
9587 | if ((current_config->name ^ pipe_config->name) & (mask)) { \ | 9587 | if ((current_config->name ^ pipe_config->name) & (mask)) { \ |
9588 | DRM_ERROR("mismatch in " #name "(" #mask ") " \ | 9588 | DRM_ERROR("mismatch in " #name "(" #mask ") " \ |
9589 | "(expected %i, found %i)\n", \ | 9589 | "(expected %i, found %i)\n", \ |
9590 | current_config->name & (mask), \ | 9590 | current_config->name & (mask), \ |
9591 | pipe_config->name & (mask)); \ | 9591 | pipe_config->name & (mask)); \ |
9592 | return false; \ | 9592 | return false; \ |
9593 | } | 9593 | } |
9594 | 9594 | ||
9595 | #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ | 9595 | #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ |
9596 | if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ | 9596 | if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ |
9597 | DRM_ERROR("mismatch in " #name " " \ | 9597 | DRM_ERROR("mismatch in " #name " " \ |
9598 | "(expected %i, found %i)\n", \ | 9598 | "(expected %i, found %i)\n", \ |
9599 | current_config->name, \ | 9599 | current_config->name, \ |
9600 | pipe_config->name); \ | 9600 | pipe_config->name); \ |
9601 | return false; \ | 9601 | return false; \ |
9602 | } | 9602 | } |
9603 | 9603 | ||
9604 | #define PIPE_CONF_QUIRK(quirk) \ | 9604 | #define PIPE_CONF_QUIRK(quirk) \ |
9605 | ((current_config->quirks | pipe_config->quirks) & (quirk)) | 9605 | ((current_config->quirks | pipe_config->quirks) & (quirk)) |
9606 | 9606 | ||
9607 | PIPE_CONF_CHECK_I(cpu_transcoder); | 9607 | PIPE_CONF_CHECK_I(cpu_transcoder); |
9608 | 9608 | ||
9609 | PIPE_CONF_CHECK_I(has_pch_encoder); | 9609 | PIPE_CONF_CHECK_I(has_pch_encoder); |
9610 | PIPE_CONF_CHECK_I(fdi_lanes); | 9610 | PIPE_CONF_CHECK_I(fdi_lanes); |
9611 | PIPE_CONF_CHECK_I(fdi_m_n.gmch_m); | 9611 | PIPE_CONF_CHECK_I(fdi_m_n.gmch_m); |
9612 | PIPE_CONF_CHECK_I(fdi_m_n.gmch_n); | 9612 | PIPE_CONF_CHECK_I(fdi_m_n.gmch_n); |
9613 | PIPE_CONF_CHECK_I(fdi_m_n.link_m); | 9613 | PIPE_CONF_CHECK_I(fdi_m_n.link_m); |
9614 | PIPE_CONF_CHECK_I(fdi_m_n.link_n); | 9614 | PIPE_CONF_CHECK_I(fdi_m_n.link_n); |
9615 | PIPE_CONF_CHECK_I(fdi_m_n.tu); | 9615 | PIPE_CONF_CHECK_I(fdi_m_n.tu); |
9616 | 9616 | ||
9617 | PIPE_CONF_CHECK_I(has_dp_encoder); | 9617 | PIPE_CONF_CHECK_I(has_dp_encoder); |
9618 | PIPE_CONF_CHECK_I(dp_m_n.gmch_m); | 9618 | PIPE_CONF_CHECK_I(dp_m_n.gmch_m); |
9619 | PIPE_CONF_CHECK_I(dp_m_n.gmch_n); | 9619 | PIPE_CONF_CHECK_I(dp_m_n.gmch_n); |
9620 | PIPE_CONF_CHECK_I(dp_m_n.link_m); | 9620 | PIPE_CONF_CHECK_I(dp_m_n.link_m); |
9621 | PIPE_CONF_CHECK_I(dp_m_n.link_n); | 9621 | PIPE_CONF_CHECK_I(dp_m_n.link_n); |
9622 | PIPE_CONF_CHECK_I(dp_m_n.tu); | 9622 | PIPE_CONF_CHECK_I(dp_m_n.tu); |
9623 | 9623 | ||
9624 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay); | 9624 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay); |
9625 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal); | 9625 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal); |
9626 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start); | 9626 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start); |
9627 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end); | 9627 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end); |
9628 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start); | 9628 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start); |
9629 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end); | 9629 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end); |
9630 | 9630 | ||
9631 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay); | 9631 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay); |
9632 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal); | 9632 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal); |
9633 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start); | 9633 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start); |
9634 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end); | 9634 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end); |
9635 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start); | 9635 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start); |
9636 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end); | 9636 | PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end); |
9637 | 9637 | ||
9638 | PIPE_CONF_CHECK_I(pixel_multiplier); | 9638 | PIPE_CONF_CHECK_I(pixel_multiplier); |
9639 | 9639 | ||
9640 | PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, | 9640 | PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, |
9641 | DRM_MODE_FLAG_INTERLACE); | 9641 | DRM_MODE_FLAG_INTERLACE); |
9642 | 9642 | ||
9643 | if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { | 9643 | if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { |
9644 | PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, | 9644 | PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, |
9645 | DRM_MODE_FLAG_PHSYNC); | 9645 | DRM_MODE_FLAG_PHSYNC); |
9646 | PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, | 9646 | PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, |
9647 | DRM_MODE_FLAG_NHSYNC); | 9647 | DRM_MODE_FLAG_NHSYNC); |
9648 | PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, | 9648 | PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, |
9649 | DRM_MODE_FLAG_PVSYNC); | 9649 | DRM_MODE_FLAG_PVSYNC); |
9650 | PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, | 9650 | PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, |
9651 | DRM_MODE_FLAG_NVSYNC); | 9651 | DRM_MODE_FLAG_NVSYNC); |
9652 | } | 9652 | } |
9653 | 9653 | ||
9654 | PIPE_CONF_CHECK_I(pipe_src_w); | 9654 | PIPE_CONF_CHECK_I(pipe_src_w); |
9655 | PIPE_CONF_CHECK_I(pipe_src_h); | 9655 | PIPE_CONF_CHECK_I(pipe_src_h); |
9656 | 9656 | ||
9657 | PIPE_CONF_CHECK_I(gmch_pfit.control); | 9657 | /* |
9658 | /* pfit ratios are autocomputed by the hw on gen4+ */ | 9658 | * FIXME: BIOS likes to set up a cloned config with lvds+external |
9659 | if (INTEL_INFO(dev)->gen < 4) | 9659 | * screen. Since we don't yet re-compute the pipe config when moving |
9660 | PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); | 9660 | * just the lvds port away to another pipe the sw tracking won't match. |
9661 | PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); | 9661 | * |
9662 | * Proper atomic modesets with recomputed global state will fix this. | ||
9663 | * Until then just don't check gmch state for inherited modes. | ||
9664 | */ | ||
9665 | if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) { | ||
9666 | PIPE_CONF_CHECK_I(gmch_pfit.control); | ||
9667 | /* pfit ratios are autocomputed by the hw on gen4+ */ | ||
9668 | if (INTEL_INFO(dev)->gen < 4) | ||
9669 | PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); | ||
9670 | PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); | ||
9671 | } | ||
9672 | |||
9662 | PIPE_CONF_CHECK_I(pch_pfit.enabled); | 9673 | PIPE_CONF_CHECK_I(pch_pfit.enabled); |
9663 | if (current_config->pch_pfit.enabled) { | 9674 | if (current_config->pch_pfit.enabled) { |
9664 | PIPE_CONF_CHECK_I(pch_pfit.pos); | 9675 | PIPE_CONF_CHECK_I(pch_pfit.pos); |
9665 | PIPE_CONF_CHECK_I(pch_pfit.size); | 9676 | PIPE_CONF_CHECK_I(pch_pfit.size); |
9666 | } | 9677 | } |
9667 | 9678 | ||
9668 | /* BDW+ don't expose a synchronous way to read the state */ | 9679 | /* BDW+ don't expose a synchronous way to read the state */ |
9669 | if (IS_HASWELL(dev)) | 9680 | if (IS_HASWELL(dev)) |
9670 | PIPE_CONF_CHECK_I(ips_enabled); | 9681 | PIPE_CONF_CHECK_I(ips_enabled); |
9671 | 9682 | ||
9672 | PIPE_CONF_CHECK_I(double_wide); | 9683 | PIPE_CONF_CHECK_I(double_wide); |
9673 | 9684 | ||
9674 | PIPE_CONF_CHECK_I(shared_dpll); | 9685 | PIPE_CONF_CHECK_I(shared_dpll); |
9675 | PIPE_CONF_CHECK_X(dpll_hw_state.dpll); | 9686 | PIPE_CONF_CHECK_X(dpll_hw_state.dpll); |
9676 | PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); | 9687 | PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); |
9677 | PIPE_CONF_CHECK_X(dpll_hw_state.fp0); | 9688 | PIPE_CONF_CHECK_X(dpll_hw_state.fp0); |
9678 | PIPE_CONF_CHECK_X(dpll_hw_state.fp1); | 9689 | PIPE_CONF_CHECK_X(dpll_hw_state.fp1); |
9679 | 9690 | ||
9680 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) | 9691 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) |
9681 | PIPE_CONF_CHECK_I(pipe_bpp); | 9692 | PIPE_CONF_CHECK_I(pipe_bpp); |
9682 | 9693 | ||
9683 | PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); | 9694 | PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); |
9684 | PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); | 9695 | PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); |
9685 | 9696 | ||
9686 | #undef PIPE_CONF_CHECK_X | 9697 | #undef PIPE_CONF_CHECK_X |
9687 | #undef PIPE_CONF_CHECK_I | 9698 | #undef PIPE_CONF_CHECK_I |
9688 | #undef PIPE_CONF_CHECK_FLAGS | 9699 | #undef PIPE_CONF_CHECK_FLAGS |
9689 | #undef PIPE_CONF_CHECK_CLOCK_FUZZY | 9700 | #undef PIPE_CONF_CHECK_CLOCK_FUZZY |
9690 | #undef PIPE_CONF_QUIRK | 9701 | #undef PIPE_CONF_QUIRK |
9691 | 9702 | ||
9692 | return true; | 9703 | return true; |
9693 | } | 9704 | } |
9694 | 9705 | ||
9695 | static void | 9706 | static void |
9696 | check_connector_state(struct drm_device *dev) | 9707 | check_connector_state(struct drm_device *dev) |
9697 | { | 9708 | { |
9698 | struct intel_connector *connector; | 9709 | struct intel_connector *connector; |
9699 | 9710 | ||
9700 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 9711 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
9701 | base.head) { | 9712 | base.head) { |
9702 | /* This also checks the encoder/connector hw state with the | 9713 | /* This also checks the encoder/connector hw state with the |
9703 | * ->get_hw_state callbacks. */ | 9714 | * ->get_hw_state callbacks. */ |
9704 | intel_connector_check_state(connector); | 9715 | intel_connector_check_state(connector); |
9705 | 9716 | ||
9706 | WARN(&connector->new_encoder->base != connector->base.encoder, | 9717 | WARN(&connector->new_encoder->base != connector->base.encoder, |
9707 | "connector's staged encoder doesn't match current encoder\n"); | 9718 | "connector's staged encoder doesn't match current encoder\n"); |
9708 | } | 9719 | } |
9709 | } | 9720 | } |
9710 | 9721 | ||
9711 | static void | 9722 | static void |
9712 | check_encoder_state(struct drm_device *dev) | 9723 | check_encoder_state(struct drm_device *dev) |
9713 | { | 9724 | { |
9714 | struct intel_encoder *encoder; | 9725 | struct intel_encoder *encoder; |
9715 | struct intel_connector *connector; | 9726 | struct intel_connector *connector; |
9716 | 9727 | ||
9717 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 9728 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
9718 | base.head) { | 9729 | base.head) { |
9719 | bool enabled = false; | 9730 | bool enabled = false; |
9720 | bool active = false; | 9731 | bool active = false; |
9721 | enum pipe pipe, tracked_pipe; | 9732 | enum pipe pipe, tracked_pipe; |
9722 | 9733 | ||
9723 | DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", | 9734 | DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", |
9724 | encoder->base.base.id, | 9735 | encoder->base.base.id, |
9725 | drm_get_encoder_name(&encoder->base)); | 9736 | drm_get_encoder_name(&encoder->base)); |
9726 | 9737 | ||
9727 | WARN(&encoder->new_crtc->base != encoder->base.crtc, | 9738 | WARN(&encoder->new_crtc->base != encoder->base.crtc, |
9728 | "encoder's stage crtc doesn't match current crtc\n"); | 9739 | "encoder's stage crtc doesn't match current crtc\n"); |
9729 | WARN(encoder->connectors_active && !encoder->base.crtc, | 9740 | WARN(encoder->connectors_active && !encoder->base.crtc, |
9730 | "encoder's active_connectors set, but no crtc\n"); | 9741 | "encoder's active_connectors set, but no crtc\n"); |
9731 | 9742 | ||
9732 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 9743 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
9733 | base.head) { | 9744 | base.head) { |
9734 | if (connector->base.encoder != &encoder->base) | 9745 | if (connector->base.encoder != &encoder->base) |
9735 | continue; | 9746 | continue; |
9736 | enabled = true; | 9747 | enabled = true; |
9737 | if (connector->base.dpms != DRM_MODE_DPMS_OFF) | 9748 | if (connector->base.dpms != DRM_MODE_DPMS_OFF) |
9738 | active = true; | 9749 | active = true; |
9739 | } | 9750 | } |
9740 | WARN(!!encoder->base.crtc != enabled, | 9751 | WARN(!!encoder->base.crtc != enabled, |
9741 | "encoder's enabled state mismatch " | 9752 | "encoder's enabled state mismatch " |
9742 | "(expected %i, found %i)\n", | 9753 | "(expected %i, found %i)\n", |
9743 | !!encoder->base.crtc, enabled); | 9754 | !!encoder->base.crtc, enabled); |
9744 | WARN(active && !encoder->base.crtc, | 9755 | WARN(active && !encoder->base.crtc, |
9745 | "active encoder with no crtc\n"); | 9756 | "active encoder with no crtc\n"); |
9746 | 9757 | ||
9747 | WARN(encoder->connectors_active != active, | 9758 | WARN(encoder->connectors_active != active, |
9748 | "encoder's computed active state doesn't match tracked active state " | 9759 | "encoder's computed active state doesn't match tracked active state " |
9749 | "(expected %i, found %i)\n", active, encoder->connectors_active); | 9760 | "(expected %i, found %i)\n", active, encoder->connectors_active); |
9750 | 9761 | ||
9751 | active = encoder->get_hw_state(encoder, &pipe); | 9762 | active = encoder->get_hw_state(encoder, &pipe); |
9752 | WARN(active != encoder->connectors_active, | 9763 | WARN(active != encoder->connectors_active, |
9753 | "encoder's hw state doesn't match sw tracking " | 9764 | "encoder's hw state doesn't match sw tracking " |
9754 | "(expected %i, found %i)\n", | 9765 | "(expected %i, found %i)\n", |
9755 | encoder->connectors_active, active); | 9766 | encoder->connectors_active, active); |
9756 | 9767 | ||
9757 | if (!encoder->base.crtc) | 9768 | if (!encoder->base.crtc) |
9758 | continue; | 9769 | continue; |
9759 | 9770 | ||
9760 | tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe; | 9771 | tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe; |
9761 | WARN(active && pipe != tracked_pipe, | 9772 | WARN(active && pipe != tracked_pipe, |
9762 | "active encoder's pipe doesn't match" | 9773 | "active encoder's pipe doesn't match" |
9763 | "(expected %i, found %i)\n", | 9774 | "(expected %i, found %i)\n", |
9764 | tracked_pipe, pipe); | 9775 | tracked_pipe, pipe); |
9765 | 9776 | ||
9766 | } | 9777 | } |
9767 | } | 9778 | } |
9768 | 9779 | ||
9769 | static void | 9780 | static void |
9770 | check_crtc_state(struct drm_device *dev) | 9781 | check_crtc_state(struct drm_device *dev) |
9771 | { | 9782 | { |
9772 | struct drm_i915_private *dev_priv = dev->dev_private; | 9783 | struct drm_i915_private *dev_priv = dev->dev_private; |
9773 | struct intel_crtc *crtc; | 9784 | struct intel_crtc *crtc; |
9774 | struct intel_encoder *encoder; | 9785 | struct intel_encoder *encoder; |
9775 | struct intel_crtc_config pipe_config; | 9786 | struct intel_crtc_config pipe_config; |
9776 | 9787 | ||
9777 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, | 9788 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, |
9778 | base.head) { | 9789 | base.head) { |
9779 | bool enabled = false; | 9790 | bool enabled = false; |
9780 | bool active = false; | 9791 | bool active = false; |
9781 | 9792 | ||
9782 | memset(&pipe_config, 0, sizeof(pipe_config)); | 9793 | memset(&pipe_config, 0, sizeof(pipe_config)); |
9783 | 9794 | ||
9784 | DRM_DEBUG_KMS("[CRTC:%d]\n", | 9795 | DRM_DEBUG_KMS("[CRTC:%d]\n", |
9785 | crtc->base.base.id); | 9796 | crtc->base.base.id); |
9786 | 9797 | ||
9787 | WARN(crtc->active && !crtc->base.enabled, | 9798 | WARN(crtc->active && !crtc->base.enabled, |
9788 | "active crtc, but not enabled in sw tracking\n"); | 9799 | "active crtc, but not enabled in sw tracking\n"); |
9789 | 9800 | ||
9790 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 9801 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
9791 | base.head) { | 9802 | base.head) { |
9792 | if (encoder->base.crtc != &crtc->base) | 9803 | if (encoder->base.crtc != &crtc->base) |
9793 | continue; | 9804 | continue; |
9794 | enabled = true; | 9805 | enabled = true; |
9795 | if (encoder->connectors_active) | 9806 | if (encoder->connectors_active) |
9796 | active = true; | 9807 | active = true; |
9797 | } | 9808 | } |
9798 | 9809 | ||
9799 | WARN(active != crtc->active, | 9810 | WARN(active != crtc->active, |
9800 | "crtc's computed active state doesn't match tracked active state " | 9811 | "crtc's computed active state doesn't match tracked active state " |
9801 | "(expected %i, found %i)\n", active, crtc->active); | 9812 | "(expected %i, found %i)\n", active, crtc->active); |
9802 | WARN(enabled != crtc->base.enabled, | 9813 | WARN(enabled != crtc->base.enabled, |
9803 | "crtc's computed enabled state doesn't match tracked enabled state " | 9814 | "crtc's computed enabled state doesn't match tracked enabled state " |
9804 | "(expected %i, found %i)\n", enabled, crtc->base.enabled); | 9815 | "(expected %i, found %i)\n", enabled, crtc->base.enabled); |
9805 | 9816 | ||
9806 | active = dev_priv->display.get_pipe_config(crtc, | 9817 | active = dev_priv->display.get_pipe_config(crtc, |
9807 | &pipe_config); | 9818 | &pipe_config); |
9808 | 9819 | ||
9809 | /* hw state is inconsistent with the pipe A quirk */ | 9820 | /* hw state is inconsistent with the pipe A quirk */ |
9810 | if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) | 9821 | if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) |
9811 | active = crtc->active; | 9822 | active = crtc->active; |
9812 | 9823 | ||
9813 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 9824 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
9814 | base.head) { | 9825 | base.head) { |
9815 | enum pipe pipe; | 9826 | enum pipe pipe; |
9816 | if (encoder->base.crtc != &crtc->base) | 9827 | if (encoder->base.crtc != &crtc->base) |
9817 | continue; | 9828 | continue; |
9818 | if (encoder->get_hw_state(encoder, &pipe)) | 9829 | if (encoder->get_hw_state(encoder, &pipe)) |
9819 | encoder->get_config(encoder, &pipe_config); | 9830 | encoder->get_config(encoder, &pipe_config); |
9820 | } | 9831 | } |
9821 | 9832 | ||
9822 | WARN(crtc->active != active, | 9833 | WARN(crtc->active != active, |
9823 | "crtc active state doesn't match with hw state " | 9834 | "crtc active state doesn't match with hw state " |
9824 | "(expected %i, found %i)\n", crtc->active, active); | 9835 | "(expected %i, found %i)\n", crtc->active, active); |
9825 | 9836 | ||
9826 | if (active && | 9837 | if (active && |
9827 | !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) { | 9838 | !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) { |
9828 | WARN(1, "pipe state doesn't match!\n"); | 9839 | WARN(1, "pipe state doesn't match!\n"); |
9829 | intel_dump_pipe_config(crtc, &pipe_config, | 9840 | intel_dump_pipe_config(crtc, &pipe_config, |
9830 | "[hw state]"); | 9841 | "[hw state]"); |
9831 | intel_dump_pipe_config(crtc, &crtc->config, | 9842 | intel_dump_pipe_config(crtc, &crtc->config, |
9832 | "[sw state]"); | 9843 | "[sw state]"); |
9833 | } | 9844 | } |
9834 | } | 9845 | } |
9835 | } | 9846 | } |
9836 | 9847 | ||
9837 | static void | 9848 | static void |
9838 | check_shared_dpll_state(struct drm_device *dev) | 9849 | check_shared_dpll_state(struct drm_device *dev) |
9839 | { | 9850 | { |
9840 | struct drm_i915_private *dev_priv = dev->dev_private; | 9851 | struct drm_i915_private *dev_priv = dev->dev_private; |
9841 | struct intel_crtc *crtc; | 9852 | struct intel_crtc *crtc; |
9842 | struct intel_dpll_hw_state dpll_hw_state; | 9853 | struct intel_dpll_hw_state dpll_hw_state; |
9843 | int i; | 9854 | int i; |
9844 | 9855 | ||
9845 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | 9856 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { |
9846 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; | 9857 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; |
9847 | int enabled_crtcs = 0, active_crtcs = 0; | 9858 | int enabled_crtcs = 0, active_crtcs = 0; |
9848 | bool active; | 9859 | bool active; |
9849 | 9860 | ||
9850 | memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); | 9861 | memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); |
9851 | 9862 | ||
9852 | DRM_DEBUG_KMS("%s\n", pll->name); | 9863 | DRM_DEBUG_KMS("%s\n", pll->name); |
9853 | 9864 | ||
9854 | active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state); | 9865 | active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state); |
9855 | 9866 | ||
9856 | WARN(pll->active > pll->refcount, | 9867 | WARN(pll->active > pll->refcount, |
9857 | "more active pll users than references: %i vs %i\n", | 9868 | "more active pll users than references: %i vs %i\n", |
9858 | pll->active, pll->refcount); | 9869 | pll->active, pll->refcount); |
9859 | WARN(pll->active && !pll->on, | 9870 | WARN(pll->active && !pll->on, |
9860 | "pll in active use but not on in sw tracking\n"); | 9871 | "pll in active use but not on in sw tracking\n"); |
9861 | WARN(pll->on && !pll->active, | 9872 | WARN(pll->on && !pll->active, |
9862 | "pll in on but not on in use in sw tracking\n"); | 9873 | "pll in on but not on in use in sw tracking\n"); |
9863 | WARN(pll->on != active, | 9874 | WARN(pll->on != active, |
9864 | "pll on state mismatch (expected %i, found %i)\n", | 9875 | "pll on state mismatch (expected %i, found %i)\n", |
9865 | pll->on, active); | 9876 | pll->on, active); |
9866 | 9877 | ||
9867 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, | 9878 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, |
9868 | base.head) { | 9879 | base.head) { |
9869 | if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll) | 9880 | if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll) |
9870 | enabled_crtcs++; | 9881 | enabled_crtcs++; |
9871 | if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) | 9882 | if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) |
9872 | active_crtcs++; | 9883 | active_crtcs++; |
9873 | } | 9884 | } |
9874 | WARN(pll->active != active_crtcs, | 9885 | WARN(pll->active != active_crtcs, |
9875 | "pll active crtcs mismatch (expected %i, found %i)\n", | 9886 | "pll active crtcs mismatch (expected %i, found %i)\n", |
9876 | pll->active, active_crtcs); | 9887 | pll->active, active_crtcs); |
9877 | WARN(pll->refcount != enabled_crtcs, | 9888 | WARN(pll->refcount != enabled_crtcs, |
9878 | "pll enabled crtcs mismatch (expected %i, found %i)\n", | 9889 | "pll enabled crtcs mismatch (expected %i, found %i)\n", |
9879 | pll->refcount, enabled_crtcs); | 9890 | pll->refcount, enabled_crtcs); |
9880 | 9891 | ||
9881 | WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state, | 9892 | WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state, |
9882 | sizeof(dpll_hw_state)), | 9893 | sizeof(dpll_hw_state)), |
9883 | "pll hw state mismatch\n"); | 9894 | "pll hw state mismatch\n"); |
9884 | } | 9895 | } |
9885 | } | 9896 | } |
9886 | 9897 | ||
9887 | void | 9898 | void |
9888 | intel_modeset_check_state(struct drm_device *dev) | 9899 | intel_modeset_check_state(struct drm_device *dev) |
9889 | { | 9900 | { |
9890 | check_connector_state(dev); | 9901 | check_connector_state(dev); |
9891 | check_encoder_state(dev); | 9902 | check_encoder_state(dev); |
9892 | check_crtc_state(dev); | 9903 | check_crtc_state(dev); |
9893 | check_shared_dpll_state(dev); | 9904 | check_shared_dpll_state(dev); |
9894 | } | 9905 | } |
9895 | 9906 | ||
9896 | void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, | 9907 | void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, |
9897 | int dotclock) | 9908 | int dotclock) |
9898 | { | 9909 | { |
9899 | /* | 9910 | /* |
9900 | * FDI already provided one idea for the dotclock. | 9911 | * FDI already provided one idea for the dotclock. |
9901 | * Yell if the encoder disagrees. | 9912 | * Yell if the encoder disagrees. |
9902 | */ | 9913 | */ |
9903 | WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock), | 9914 | WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock), |
9904 | "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", | 9915 | "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", |
9905 | pipe_config->adjusted_mode.crtc_clock, dotclock); | 9916 | pipe_config->adjusted_mode.crtc_clock, dotclock); |
9906 | } | 9917 | } |
9907 | 9918 | ||
9908 | static int __intel_set_mode(struct drm_crtc *crtc, | 9919 | static int __intel_set_mode(struct drm_crtc *crtc, |
9909 | struct drm_display_mode *mode, | 9920 | struct drm_display_mode *mode, |
9910 | int x, int y, struct drm_framebuffer *fb) | 9921 | int x, int y, struct drm_framebuffer *fb) |
9911 | { | 9922 | { |
9912 | struct drm_device *dev = crtc->dev; | 9923 | struct drm_device *dev = crtc->dev; |
9913 | struct drm_i915_private *dev_priv = dev->dev_private; | 9924 | struct drm_i915_private *dev_priv = dev->dev_private; |
9914 | struct drm_display_mode *saved_mode; | 9925 | struct drm_display_mode *saved_mode; |
9915 | struct intel_crtc_config *pipe_config = NULL; | 9926 | struct intel_crtc_config *pipe_config = NULL; |
9916 | struct intel_crtc *intel_crtc; | 9927 | struct intel_crtc *intel_crtc; |
9917 | unsigned disable_pipes, prepare_pipes, modeset_pipes; | 9928 | unsigned disable_pipes, prepare_pipes, modeset_pipes; |
9918 | int ret = 0; | 9929 | int ret = 0; |
9919 | 9930 | ||
9920 | saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL); | 9931 | saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL); |
9921 | if (!saved_mode) | 9932 | if (!saved_mode) |
9922 | return -ENOMEM; | 9933 | return -ENOMEM; |
9923 | 9934 | ||
9924 | intel_modeset_affected_pipes(crtc, &modeset_pipes, | 9935 | intel_modeset_affected_pipes(crtc, &modeset_pipes, |
9925 | &prepare_pipes, &disable_pipes); | 9936 | &prepare_pipes, &disable_pipes); |
9926 | 9937 | ||
9927 | *saved_mode = crtc->mode; | 9938 | *saved_mode = crtc->mode; |
9928 | 9939 | ||
9929 | /* Hack: Because we don't (yet) support global modeset on multiple | 9940 | /* Hack: Because we don't (yet) support global modeset on multiple |
9930 | * crtcs, we don't keep track of the new mode for more than one crtc. | 9941 | * crtcs, we don't keep track of the new mode for more than one crtc. |
9931 | * Hence simply check whether any bit is set in modeset_pipes in all the | 9942 | * Hence simply check whether any bit is set in modeset_pipes in all the |
9932 | * pieces of code that are not yet converted to deal with mutliple crtcs | 9943 | * pieces of code that are not yet converted to deal with mutliple crtcs |
9933 | * changing their mode at the same time. */ | 9944 | * changing their mode at the same time. */ |
9934 | if (modeset_pipes) { | 9945 | if (modeset_pipes) { |
9935 | pipe_config = intel_modeset_pipe_config(crtc, fb, mode); | 9946 | pipe_config = intel_modeset_pipe_config(crtc, fb, mode); |
9936 | if (IS_ERR(pipe_config)) { | 9947 | if (IS_ERR(pipe_config)) { |
9937 | ret = PTR_ERR(pipe_config); | 9948 | ret = PTR_ERR(pipe_config); |
9938 | pipe_config = NULL; | 9949 | pipe_config = NULL; |
9939 | 9950 | ||
9940 | goto out; | 9951 | goto out; |
9941 | } | 9952 | } |
9942 | intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, | 9953 | intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, |
9943 | "[modeset]"); | 9954 | "[modeset]"); |
9944 | to_intel_crtc(crtc)->new_config = pipe_config; | 9955 | to_intel_crtc(crtc)->new_config = pipe_config; |
9945 | } | 9956 | } |
9946 | 9957 | ||
9947 | /* | 9958 | /* |
9948 | * See if the config requires any additional preparation, e.g. | 9959 | * See if the config requires any additional preparation, e.g. |
9949 | * to adjust global state with pipes off. We need to do this | 9960 | * to adjust global state with pipes off. We need to do this |
9950 | * here so we can get the modeset_pipe updated config for the new | 9961 | * here so we can get the modeset_pipe updated config for the new |
9951 | * mode set on this crtc. For other crtcs we need to use the | 9962 | * mode set on this crtc. For other crtcs we need to use the |
9952 | * adjusted_mode bits in the crtc directly. | 9963 | * adjusted_mode bits in the crtc directly. |
9953 | */ | 9964 | */ |
9954 | if (IS_VALLEYVIEW(dev)) { | 9965 | if (IS_VALLEYVIEW(dev)) { |
9955 | valleyview_modeset_global_pipes(dev, &prepare_pipes); | 9966 | valleyview_modeset_global_pipes(dev, &prepare_pipes); |
9956 | 9967 | ||
9957 | /* may have added more to prepare_pipes than we should */ | 9968 | /* may have added more to prepare_pipes than we should */ |
9958 | prepare_pipes &= ~disable_pipes; | 9969 | prepare_pipes &= ~disable_pipes; |
9959 | } | 9970 | } |
9960 | 9971 | ||
9961 | for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) | 9972 | for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) |
9962 | intel_crtc_disable(&intel_crtc->base); | 9973 | intel_crtc_disable(&intel_crtc->base); |
9963 | 9974 | ||
9964 | for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) { | 9975 | for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) { |
9965 | if (intel_crtc->base.enabled) | 9976 | if (intel_crtc->base.enabled) |
9966 | dev_priv->display.crtc_disable(&intel_crtc->base); | 9977 | dev_priv->display.crtc_disable(&intel_crtc->base); |
9967 | } | 9978 | } |
9968 | 9979 | ||
9969 | /* crtc->mode is already used by the ->mode_set callbacks, hence we need | 9980 | /* crtc->mode is already used by the ->mode_set callbacks, hence we need |
9970 | * to set it here already despite that we pass it down the callchain. | 9981 | * to set it here already despite that we pass it down the callchain. |
9971 | */ | 9982 | */ |
9972 | if (modeset_pipes) { | 9983 | if (modeset_pipes) { |
9973 | crtc->mode = *mode; | 9984 | crtc->mode = *mode; |
9974 | /* mode_set/enable/disable functions rely on a correct pipe | 9985 | /* mode_set/enable/disable functions rely on a correct pipe |
9975 | * config. */ | 9986 | * config. */ |
9976 | to_intel_crtc(crtc)->config = *pipe_config; | 9987 | to_intel_crtc(crtc)->config = *pipe_config; |
9977 | to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config; | 9988 | to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config; |
9978 | 9989 | ||
9979 | /* | 9990 | /* |
9980 | * Calculate and store various constants which | 9991 | * Calculate and store various constants which |
9981 | * are later needed by vblank and swap-completion | 9992 | * are later needed by vblank and swap-completion |
9982 | * timestamping. They are derived from true hwmode. | 9993 | * timestamping. They are derived from true hwmode. |
9983 | */ | 9994 | */ |
9984 | drm_calc_timestamping_constants(crtc, | 9995 | drm_calc_timestamping_constants(crtc, |
9985 | &pipe_config->adjusted_mode); | 9996 | &pipe_config->adjusted_mode); |
9986 | } | 9997 | } |
9987 | 9998 | ||
9988 | /* Only after disabling all output pipelines that will be changed can we | 9999 | /* Only after disabling all output pipelines that will be changed can we |
9989 | * update the the output configuration. */ | 10000 | * update the the output configuration. */ |
9990 | intel_modeset_update_state(dev, prepare_pipes); | 10001 | intel_modeset_update_state(dev, prepare_pipes); |
9991 | 10002 | ||
9992 | if (dev_priv->display.modeset_global_resources) | 10003 | if (dev_priv->display.modeset_global_resources) |
9993 | dev_priv->display.modeset_global_resources(dev); | 10004 | dev_priv->display.modeset_global_resources(dev); |
9994 | 10005 | ||
9995 | /* Set up the DPLL and any encoders state that needs to adjust or depend | 10006 | /* Set up the DPLL and any encoders state that needs to adjust or depend |
9996 | * on the DPLL. | 10007 | * on the DPLL. |
9997 | */ | 10008 | */ |
9998 | for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { | 10009 | for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { |
9999 | ret = intel_crtc_mode_set(&intel_crtc->base, | 10010 | ret = intel_crtc_mode_set(&intel_crtc->base, |
10000 | x, y, fb); | 10011 | x, y, fb); |
10001 | if (ret) | 10012 | if (ret) |
10002 | goto done; | 10013 | goto done; |
10003 | } | 10014 | } |
10004 | 10015 | ||
10005 | /* Now enable the clocks, plane, pipe, and connectors that we set up. */ | 10016 | /* Now enable the clocks, plane, pipe, and connectors that we set up. */ |
10006 | for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) | 10017 | for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) |
10007 | dev_priv->display.crtc_enable(&intel_crtc->base); | 10018 | dev_priv->display.crtc_enable(&intel_crtc->base); |
10008 | 10019 | ||
10009 | /* FIXME: add subpixel order */ | 10020 | /* FIXME: add subpixel order */ |
10010 | done: | 10021 | done: |
10011 | if (ret && crtc->enabled) | 10022 | if (ret && crtc->enabled) |
10012 | crtc->mode = *saved_mode; | 10023 | crtc->mode = *saved_mode; |
10013 | 10024 | ||
10014 | out: | 10025 | out: |
10015 | kfree(pipe_config); | 10026 | kfree(pipe_config); |
10016 | kfree(saved_mode); | 10027 | kfree(saved_mode); |
10017 | return ret; | 10028 | return ret; |
10018 | } | 10029 | } |
10019 | 10030 | ||
10020 | static int intel_set_mode(struct drm_crtc *crtc, | 10031 | static int intel_set_mode(struct drm_crtc *crtc, |
10021 | struct drm_display_mode *mode, | 10032 | struct drm_display_mode *mode, |
10022 | int x, int y, struct drm_framebuffer *fb) | 10033 | int x, int y, struct drm_framebuffer *fb) |
10023 | { | 10034 | { |
10024 | int ret; | 10035 | int ret; |
10025 | 10036 | ||
10026 | ret = __intel_set_mode(crtc, mode, x, y, fb); | 10037 | ret = __intel_set_mode(crtc, mode, x, y, fb); |
10027 | 10038 | ||
10028 | if (ret == 0) | 10039 | if (ret == 0) |
10029 | intel_modeset_check_state(crtc->dev); | 10040 | intel_modeset_check_state(crtc->dev); |
10030 | 10041 | ||
10031 | return ret; | 10042 | return ret; |
10032 | } | 10043 | } |
10033 | 10044 | ||
10034 | void intel_crtc_restore_mode(struct drm_crtc *crtc) | 10045 | void intel_crtc_restore_mode(struct drm_crtc *crtc) |
10035 | { | 10046 | { |
10036 | intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb); | 10047 | intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb); |
10037 | } | 10048 | } |
10038 | 10049 | ||
10039 | #undef for_each_intel_crtc_masked | 10050 | #undef for_each_intel_crtc_masked |
10040 | 10051 | ||
10041 | static void intel_set_config_free(struct intel_set_config *config) | 10052 | static void intel_set_config_free(struct intel_set_config *config) |
10042 | { | 10053 | { |
10043 | if (!config) | 10054 | if (!config) |
10044 | return; | 10055 | return; |
10045 | 10056 | ||
10046 | kfree(config->save_connector_encoders); | 10057 | kfree(config->save_connector_encoders); |
10047 | kfree(config->save_encoder_crtcs); | 10058 | kfree(config->save_encoder_crtcs); |
10048 | kfree(config->save_crtc_enabled); | 10059 | kfree(config->save_crtc_enabled); |
10049 | kfree(config); | 10060 | kfree(config); |
10050 | } | 10061 | } |
10051 | 10062 | ||
10052 | static int intel_set_config_save_state(struct drm_device *dev, | 10063 | static int intel_set_config_save_state(struct drm_device *dev, |
10053 | struct intel_set_config *config) | 10064 | struct intel_set_config *config) |
10054 | { | 10065 | { |
10055 | struct drm_crtc *crtc; | 10066 | struct drm_crtc *crtc; |
10056 | struct drm_encoder *encoder; | 10067 | struct drm_encoder *encoder; |
10057 | struct drm_connector *connector; | 10068 | struct drm_connector *connector; |
10058 | int count; | 10069 | int count; |
10059 | 10070 | ||
10060 | config->save_crtc_enabled = | 10071 | config->save_crtc_enabled = |
10061 | kcalloc(dev->mode_config.num_crtc, | 10072 | kcalloc(dev->mode_config.num_crtc, |
10062 | sizeof(bool), GFP_KERNEL); | 10073 | sizeof(bool), GFP_KERNEL); |
10063 | if (!config->save_crtc_enabled) | 10074 | if (!config->save_crtc_enabled) |
10064 | return -ENOMEM; | 10075 | return -ENOMEM; |
10065 | 10076 | ||
10066 | config->save_encoder_crtcs = | 10077 | config->save_encoder_crtcs = |
10067 | kcalloc(dev->mode_config.num_encoder, | 10078 | kcalloc(dev->mode_config.num_encoder, |
10068 | sizeof(struct drm_crtc *), GFP_KERNEL); | 10079 | sizeof(struct drm_crtc *), GFP_KERNEL); |
10069 | if (!config->save_encoder_crtcs) | 10080 | if (!config->save_encoder_crtcs) |
10070 | return -ENOMEM; | 10081 | return -ENOMEM; |
10071 | 10082 | ||
10072 | config->save_connector_encoders = | 10083 | config->save_connector_encoders = |
10073 | kcalloc(dev->mode_config.num_connector, | 10084 | kcalloc(dev->mode_config.num_connector, |
10074 | sizeof(struct drm_encoder *), GFP_KERNEL); | 10085 | sizeof(struct drm_encoder *), GFP_KERNEL); |
10075 | if (!config->save_connector_encoders) | 10086 | if (!config->save_connector_encoders) |
10076 | return -ENOMEM; | 10087 | return -ENOMEM; |
10077 | 10088 | ||
10078 | /* Copy data. Note that driver private data is not affected. | 10089 | /* Copy data. Note that driver private data is not affected. |
10079 | * Should anything bad happen only the expected state is | 10090 | * Should anything bad happen only the expected state is |
10080 | * restored, not the drivers personal bookkeeping. | 10091 | * restored, not the drivers personal bookkeeping. |
10081 | */ | 10092 | */ |
10082 | count = 0; | 10093 | count = 0; |
10083 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 10094 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
10084 | config->save_crtc_enabled[count++] = crtc->enabled; | 10095 | config->save_crtc_enabled[count++] = crtc->enabled; |
10085 | } | 10096 | } |
10086 | 10097 | ||
10087 | count = 0; | 10098 | count = 0; |
10088 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 10099 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
10089 | config->save_encoder_crtcs[count++] = encoder->crtc; | 10100 | config->save_encoder_crtcs[count++] = encoder->crtc; |
10090 | } | 10101 | } |
10091 | 10102 | ||
10092 | count = 0; | 10103 | count = 0; |
10093 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 10104 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
10094 | config->save_connector_encoders[count++] = connector->encoder; | 10105 | config->save_connector_encoders[count++] = connector->encoder; |
10095 | } | 10106 | } |
10096 | 10107 | ||
10097 | return 0; | 10108 | return 0; |
10098 | } | 10109 | } |
10099 | 10110 | ||
/*
 * Restore the staged modeset state (crtc->new_enabled/new_config,
 * encoder->new_crtc, connector->new_encoder) from the snapshot taken by
 * intel_set_config_save_state().  Each list is walked in the same order
 * as the save pass, so the count-indexed save arrays line up; do not
 * reorder these loops independently of the save function.
 */
static void intel_set_config_restore_state(struct drm_device *dev,
					   struct intel_set_config *config)
{
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int count;

	count = 0;
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		crtc->new_enabled = config->save_crtc_enabled[count++];

		/* new_config only points at valid state for enabled pipes. */
		if (crtc->new_enabled)
			crtc->new_config = &crtc->config;
		else
			crtc->new_config = NULL;
	}

	count = 0;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->new_crtc =
			to_intel_crtc(config->save_encoder_crtcs[count++]);
	}

	count = 0;
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
		connector->new_encoder =
			to_intel_encoder(config->save_connector_encoders[count++]);
	}
}
10130 | 10141 | ||
10131 | static bool | 10142 | static bool |
10132 | is_crtc_connector_off(struct drm_mode_set *set) | 10143 | is_crtc_connector_off(struct drm_mode_set *set) |
10133 | { | 10144 | { |
10134 | int i; | 10145 | int i; |
10135 | 10146 | ||
10136 | if (set->num_connectors == 0) | 10147 | if (set->num_connectors == 0) |
10137 | return false; | 10148 | return false; |
10138 | 10149 | ||
10139 | if (WARN_ON(set->connectors == NULL)) | 10150 | if (WARN_ON(set->connectors == NULL)) |
10140 | return false; | 10151 | return false; |
10141 | 10152 | ||
10142 | for (i = 0; i < set->num_connectors; i++) | 10153 | for (i = 0; i < set->num_connectors; i++) |
10143 | if (set->connectors[i]->encoder && | 10154 | if (set->connectors[i]->encoder && |
10144 | set->connectors[i]->encoder->crtc == set->crtc && | 10155 | set->connectors[i]->encoder->crtc == set->crtc && |
10145 | set->connectors[i]->dpms != DRM_MODE_DPMS_ON) | 10156 | set->connectors[i]->dpms != DRM_MODE_DPMS_ON) |
10146 | return true; | 10157 | return true; |
10147 | 10158 | ||
10148 | return false; | 10159 | return false; |
10149 | } | 10160 | } |
10150 | 10161 | ||
/*
 * Classify a set_config request into a full modeset (config->mode_changed),
 * a mere framebuffer/base update (config->fb_changed), or no change (neither
 * flag set).  Only ever sets the flags, never clears them, so callers may
 * pre-seed the struct (it is zeroed at allocation in intel_crtc_set_config).
 */
static void
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
				      struct intel_set_config *config)
{

	/* We should be able to check here if the fb has the same properties
	 * and then just flip_or_move it */
	if (is_crtc_connector_off(set)) {
		/* A connector on this crtc is DPMS off: force a full modeset. */
		config->mode_changed = true;
	} else if (set->crtc->primary->fb != set->fb) {
		/* If we have no fb then treat it as a full mode set */
		if (set->crtc->primary->fb == NULL) {
			struct intel_crtc *intel_crtc =
				to_intel_crtc(set->crtc);

			/* fastboot: the pipe is already live (inherited from
			 * the BIOS) and only the fb pointer is missing, so a
			 * flip is enough.  Otherwise do a full modeset. */
			if (intel_crtc->active && i915.fastboot) {
				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
				config->fb_changed = true;
			} else {
				DRM_DEBUG_KMS("inactive crtc, full mode set\n");
				config->mode_changed = true;
			}
		} else if (set->fb == NULL) {
			/* fb removed: the crtc is being disabled. */
			config->mode_changed = true;
		} else if (set->fb->pixel_format !=
			   set->crtc->primary->fb->pixel_format) {
			/* Pixel format changes need the pipe reprogrammed. */
			config->mode_changed = true;
		} else {
			config->fb_changed = true;
		}
	}

	/* A pure pan/scan offset change only needs a base update. */
	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
		config->fb_changed = true;

	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
		DRM_DEBUG_KMS("modes are different, full mode set\n");
		drm_mode_debug_printmodeline(&set->crtc->mode);
		drm_mode_debug_printmodeline(set->mode);
		config->mode_changed = true;
	}

	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
			set->crtc->base.id, config->mode_changed, config->fb_changed);
}
10196 | 10207 | ||
/*
 * Stage the requested output routing into the ->new_* fields of all
 * connectors, encoders and crtcs without touching hardware state:
 *
 *   1. connector->new_encoder is set for connectors in the request (or
 *      cleared for connectors falling off the target crtc),
 *   2. encoder->new_crtc is derived from the staged connectors,
 *   3. crtc->new_enabled/new_config are derived from the staged encoders.
 *
 * Sets config->mode_changed whenever any routing actually changes.
 * Returns 0 on success or -EINVAL for impossible routings (encoder/crtc
 * mismatch, or cloning — more than one connector per encoder).
 */
static int
intel_modeset_stage_output_state(struct drm_device *dev,
				 struct drm_mode_set *set,
				 struct intel_set_config *config)
{
	struct intel_connector *connector;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	int ro;

	/* The upper layers ensure that we either disable a crtc or have a list
	 * of connectors. For paranoia, double-check this. */
	WARN_ON(!set->fb && (set->num_connectors != 0));
	WARN_ON(set->fb && (set->num_connectors == 0));

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* Otherwise traverse passed in connector list and get encoders
		 * for them. */
		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base) {
				connector->new_encoder = connector->encoder;
				break;
			}
		}

		/* If we disable the crtc, disable all its connectors. Also, if
		 * the connector is on the changing crtc but not on the new
		 * connector list, disable it. */
		if ((!set->fb || ro == set->num_connectors) &&
		    connector->base.encoder &&
		    connector->base.encoder->crtc == set->crtc) {
			connector->new_encoder = NULL;

			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
				connector->base.base.id,
				drm_get_connector_name(&connector->base));
		}


		if (&connector->new_encoder->base != connector->base.encoder) {
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* connector->new_encoder is now updated for all connectors. */

	/* Update crtc of enabled connectors. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		struct drm_crtc *new_crtc;

		if (!connector->new_encoder)
			continue;

		/* Default to the encoder's current crtc, overridden by the
		 * requested crtc for connectors named in the request. */
		new_crtc = connector->new_encoder->base.crtc;

		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base)
				new_crtc = set->crtc;
		}

		/* Make sure the new CRTC will work with the encoder */
		if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
					 new_crtc)) {
			return -EINVAL;
		}
		connector->encoder->new_crtc = to_intel_crtc(new_crtc);

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
			connector->base.base.id,
			drm_get_connector_name(&connector->base),
			new_crtc->base.id);
	}

	/* Check for any encoders that needs to be disabled. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		int num_connectors = 0;
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list,
				    base.head) {
			if (connector->new_encoder == encoder) {
				WARN_ON(!connector->new_encoder->new_crtc);
				num_connectors++;
			}
		}

		/* No users left: disable the encoder.  More than one user
		 * (cloning) is not supported here. */
		if (num_connectors == 0)
			encoder->new_crtc = NULL;
		else if (num_connectors > 1)
			return -EINVAL;

		/* Only now check for crtc changes so we don't miss encoders
		 * that will be disabled. */
		if (&encoder->new_crtc->base != encoder->base.crtc) {
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* Now we've also updated encoder->new_crtc for all encoders. */

	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
			    base.head) {
		/* A crtc stays enabled iff at least one staged encoder still
		 * points at it. */
		crtc->new_enabled = false;

		list_for_each_entry(encoder,
				    &dev->mode_config.encoder_list,
				    base.head) {
			if (encoder->new_crtc == crtc) {
				crtc->new_enabled = true;
				break;
			}
		}

		if (crtc->new_enabled != crtc->base.enabled) {
			DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
				      crtc->new_enabled ? "en" : "dis");
			config->mode_changed = true;
		}

		if (crtc->new_enabled)
			crtc->new_config = &crtc->config;
		else
			crtc->new_config = NULL;
	}

	return 0;
}
10326 | 10337 | ||
10327 | static void disable_crtc_nofb(struct intel_crtc *crtc) | 10338 | static void disable_crtc_nofb(struct intel_crtc *crtc) |
10328 | { | 10339 | { |
10329 | struct drm_device *dev = crtc->base.dev; | 10340 | struct drm_device *dev = crtc->base.dev; |
10330 | struct intel_encoder *encoder; | 10341 | struct intel_encoder *encoder; |
10331 | struct intel_connector *connector; | 10342 | struct intel_connector *connector; |
10332 | 10343 | ||
10333 | DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n", | 10344 | DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n", |
10334 | pipe_name(crtc->pipe)); | 10345 | pipe_name(crtc->pipe)); |
10335 | 10346 | ||
10336 | list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { | 10347 | list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { |
10337 | if (connector->new_encoder && | 10348 | if (connector->new_encoder && |
10338 | connector->new_encoder->new_crtc == crtc) | 10349 | connector->new_encoder->new_crtc == crtc) |
10339 | connector->new_encoder = NULL; | 10350 | connector->new_encoder = NULL; |
10340 | } | 10351 | } |
10341 | 10352 | ||
10342 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { | 10353 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
10343 | if (encoder->new_crtc == crtc) | 10354 | if (encoder->new_crtc == crtc) |
10344 | encoder->new_crtc = NULL; | 10355 | encoder->new_crtc = NULL; |
10345 | } | 10356 | } |
10346 | 10357 | ||
10347 | crtc->new_enabled = false; | 10358 | crtc->new_enabled = false; |
10348 | crtc->new_config = NULL; | 10359 | crtc->new_config = NULL; |
10349 | } | 10360 | } |
10350 | 10361 | ||
/*
 * drm_crtc_funcs.set_config implementation.  Computes whether the request
 * needs a full modeset or just a framebuffer base update, stages the new
 * output routing, applies it, and — on failure — restores the previously
 * saved state (including forcing a fb-less pipe off, and re-running the
 * old mode if a full modeset had been attempted).
 *
 * Returns 0 on success or a negative errno.
 */
static int intel_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct drm_mode_set save_set;
	struct intel_set_config *config;
	int ret;

	BUG_ON(!set);
	BUG_ON(!set->crtc);
	BUG_ON(!set->crtc->helper_private);

	/* Enforce sane interface api - has been abused by the fb helper. */
	BUG_ON(!set->mode && set->fb);
	BUG_ON(set->fb && set->num_connectors == 0);

	if (set->fb) {
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
				set->crtc->base.id, set->fb->base.id,
				(int)set->num_connectors, set->x, set->y);
	} else {
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
	}

	dev = set->crtc->dev;

	/* Preset the error code so both allocation and save failures fall
	 * through to out_config with a meaningful ret. */
	ret = -ENOMEM;
	config = kzalloc(sizeof(*config), GFP_KERNEL);
	if (!config)
		goto out_config;

	ret = intel_set_config_save_state(dev, config);
	if (ret)
		goto out_config;

	/* Snapshot the current crtc setup so the failure path below can try
	 * to put it back. */
	save_set.crtc = set->crtc;
	save_set.mode = &set->crtc->mode;
	save_set.x = set->crtc->x;
	save_set.y = set->crtc->y;
	save_set.fb = set->crtc->primary->fb;

	/* Compute whether we need a full modeset, only an fb base update or no
	 * change at all. In the future we might also check whether only the
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
	 * such cases. */
	intel_set_config_compute_mode_changes(set, config);

	ret = intel_modeset_stage_output_state(dev, set, config);
	if (ret)
		goto fail;

	if (config->mode_changed) {
		ret = intel_set_mode(set->crtc, set->mode,
				     set->x, set->y, set->fb);
	} else if (config->fb_changed) {
		intel_crtc_wait_for_pending_flips(set->crtc);

		ret = intel_pipe_set_base(set->crtc,
					  set->x, set->y, set->fb);
		/*
		 * In the fastboot case this may be our only check of the
		 * state after boot.  It would be better to only do it on
		 * the first update, but we don't have a nice way of doing that
		 * (and really, set_config isn't used much for high freq page
		 * flipping, so increasing its cost here shouldn't be a big
		 * deal).
		 */
		if (i915.fastboot && ret == 0)
			intel_modeset_check_state(set->crtc->dev);
	}

	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
			      set->crtc->base.id, ret);
fail:
		intel_set_config_restore_state(dev, config);

		/*
		 * HACK: if the pipe was on, but we didn't have a framebuffer,
		 * force the pipe off to avoid oopsing in the modeset code
		 * due to fb==NULL. This should only happen during boot since
		 * we don't yet reconstruct the FB from the hardware state.
		 */
		if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
			disable_crtc_nofb(to_intel_crtc(save_set.crtc));

		/* Try to restore the config */
		if (config->mode_changed &&
		    intel_set_mode(save_set.crtc, save_set.mode,
				   save_set.x, save_set.y, save_set.fb))
			DRM_ERROR("failed to restore config after modeset failure\n");
	}

out_config:
	intel_set_config_free(config);
	return ret;
}
10447 | 10458 | ||
/* CRTC vtable registered with the DRM core for every intel crtc. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.cursor_set = intel_crtc_cursor_set,
	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = intel_crtc_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};
10456 | 10467 | ||
/* Set up the CPU-side display PLLs; only DDI (Haswell-class) hardware
 * has any. */
static void intel_cpu_pll_init(struct drm_device *dev)
{
	if (!HAS_DDI(dev))
		return;

	intel_ddi_pll_init(dev);
}
10462 | 10473 | ||
10463 | static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, | 10474 | static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, |
10464 | struct intel_shared_dpll *pll, | 10475 | struct intel_shared_dpll *pll, |
10465 | struct intel_dpll_hw_state *hw_state) | 10476 | struct intel_dpll_hw_state *hw_state) |
10466 | { | 10477 | { |
10467 | uint32_t val; | 10478 | uint32_t val; |
10468 | 10479 | ||
10469 | val = I915_READ(PCH_DPLL(pll->id)); | 10480 | val = I915_READ(PCH_DPLL(pll->id)); |
10470 | hw_state->dpll = val; | 10481 | hw_state->dpll = val; |
10471 | hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); | 10482 | hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); |
10472 | hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); | 10483 | hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); |
10473 | 10484 | ||
10474 | return val & DPLL_VCO_ENABLE; | 10485 | return val & DPLL_VCO_ENABLE; |
10475 | } | 10486 | } |
10476 | 10487 | ||
10477 | static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv, | 10488 | static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv, |
10478 | struct intel_shared_dpll *pll) | 10489 | struct intel_shared_dpll *pll) |
10479 | { | 10490 | { |
10480 | I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0); | 10491 | I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0); |
10481 | I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1); | 10492 | I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1); |
10482 | } | 10493 | } |
10483 | 10494 | ||
/*
 * Enable a PCH DPLL from the pre-computed pll->hw_state.  The
 * write / wait / write-again sequence is mandated by the hardware: the
 * pixel multiplier bits only latch once the VCO is up and the clocks are
 * stable, hence the second write of the same value.  Do not reorder.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
10505 | 10516 | ||
/*
 * Disable a PCH DPLL.  After asserting that no PCH transcoder still uses
 * this PLL, the DPLL register is cleared and we wait for the VCO to spin
 * down before returning.
 */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
10522 | 10533 | ||
/* Human-readable names, indexed by shared DPLL id (see ibx_pch_dpll_init). */
static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};
10527 | 10538 | ||
10528 | static void ibx_pch_dpll_init(struct drm_device *dev) | 10539 | static void ibx_pch_dpll_init(struct drm_device *dev) |
10529 | { | 10540 | { |
10530 | struct drm_i915_private *dev_priv = dev->dev_private; | 10541 | struct drm_i915_private *dev_priv = dev->dev_private; |
10531 | int i; | 10542 | int i; |
10532 | 10543 | ||
10533 | dev_priv->num_shared_dpll = 2; | 10544 | dev_priv->num_shared_dpll = 2; |
10534 | 10545 | ||
10535 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | 10546 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { |
10536 | dev_priv->shared_dplls[i].id = i; | 10547 | dev_priv->shared_dplls[i].id = i; |
10537 | dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i]; | 10548 | dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i]; |
10538 | dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set; | 10549 | dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set; |
10539 | dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable; | 10550 | dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable; |
10540 | dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable; | 10551 | dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable; |
10541 | dev_priv->shared_dplls[i].get_hw_state = | 10552 | dev_priv->shared_dplls[i].get_hw_state = |
10542 | ibx_pch_dpll_get_hw_state; | 10553 | ibx_pch_dpll_get_hw_state; |
10543 | } | 10554 | } |
10544 | } | 10555 | } |
10545 | 10556 | ||
10546 | static void intel_shared_dpll_init(struct drm_device *dev) | 10557 | static void intel_shared_dpll_init(struct drm_device *dev) |
10547 | { | 10558 | { |
10548 | struct drm_i915_private *dev_priv = dev->dev_private; | 10559 | struct drm_i915_private *dev_priv = dev->dev_private; |
10549 | 10560 | ||
10550 | if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) | 10561 | if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) |
10551 | ibx_pch_dpll_init(dev); | 10562 | ibx_pch_dpll_init(dev); |
10552 | else | 10563 | else |
10553 | dev_priv->num_shared_dpll = 0; | 10564 | dev_priv->num_shared_dpll = 0; |
10554 | 10565 | ||
10555 | BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); | 10566 | BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); |
10556 | } | 10567 | } |
10557 | 10568 | ||
/*
 * Allocate and register one CRTC for @pipe: set per-generation cursor
 * limits, a linear gamma LUT, the pipe->plane assignment (swapped on
 * gen2/3 with FBC, see below), and the pipe/plane-to-crtc lookup tables.
 * Silently returns on allocation failure.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	/* Gen2 has smaller maximum cursor dimensions than later hardware. */
	if (IS_GEN2(dev)) {
		intel_crtc->max_cursor_width = GEN2_CURSOR_WIDTH;
		intel_crtc->max_cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		intel_crtc->max_cursor_width = CURSOR_WIDTH;
		intel_crtc->max_cursor_height = CURSOR_HEIGHT;
	}
	dev->mode_config.cursor_width = intel_crtc->max_cursor_width;
	dev->mode_config.cursor_height = intel_crtc->max_cursor_height;

	/* Start with an identity (linear) gamma ramp. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to plane B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* Each plane/pipe slot must be claimed exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
}
10605 | 10616 | ||
10606 | enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) | 10617 | enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) |
10607 | { | 10618 | { |
10608 | struct drm_encoder *encoder = connector->base.encoder; | 10619 | struct drm_encoder *encoder = connector->base.encoder; |
10609 | 10620 | ||
10610 | WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex)); | 10621 | WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex)); |
10611 | 10622 | ||
10612 | if (!encoder) | 10623 | if (!encoder) |
10613 | return INVALID_PIPE; | 10624 | return INVALID_PIPE; |
10614 | 10625 | ||
10615 | return to_intel_crtc(encoder->crtc)->pipe; | 10626 | return to_intel_crtc(encoder->crtc)->pipe; |
10616 | } | 10627 | } |
10617 | 10628 | ||
10618 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 10629 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
10619 | struct drm_file *file) | 10630 | struct drm_file *file) |
10620 | { | 10631 | { |
10621 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; | 10632 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; |
10622 | struct drm_mode_object *drmmode_obj; | 10633 | struct drm_mode_object *drmmode_obj; |
10623 | struct intel_crtc *crtc; | 10634 | struct intel_crtc *crtc; |
10624 | 10635 | ||
10625 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 10636 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
10626 | return -ENODEV; | 10637 | return -ENODEV; |
10627 | 10638 | ||
10628 | drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, | 10639 | drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, |
10629 | DRM_MODE_OBJECT_CRTC); | 10640 | DRM_MODE_OBJECT_CRTC); |
10630 | 10641 | ||
10631 | if (!drmmode_obj) { | 10642 | if (!drmmode_obj) { |
10632 | DRM_ERROR("no such CRTC id\n"); | 10643 | DRM_ERROR("no such CRTC id\n"); |
10633 | return -ENOENT; | 10644 | return -ENOENT; |
10634 | } | 10645 | } |
10635 | 10646 | ||
10636 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); | 10647 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); |
10637 | pipe_from_crtc_id->pipe = crtc->pipe; | 10648 | pipe_from_crtc_id->pipe = crtc->pipe; |
10638 | 10649 | ||
10639 | return 0; | 10650 | return 0; |
10640 | } | 10651 | } |
10641 | 10652 | ||
10642 | static int intel_encoder_clones(struct intel_encoder *encoder) | 10653 | static int intel_encoder_clones(struct intel_encoder *encoder) |
10643 | { | 10654 | { |
10644 | struct drm_device *dev = encoder->base.dev; | 10655 | struct drm_device *dev = encoder->base.dev; |
10645 | struct intel_encoder *source_encoder; | 10656 | struct intel_encoder *source_encoder; |
10646 | int index_mask = 0; | 10657 | int index_mask = 0; |
10647 | int entry = 0; | 10658 | int entry = 0; |
10648 | 10659 | ||
10649 | list_for_each_entry(source_encoder, | 10660 | list_for_each_entry(source_encoder, |
10650 | &dev->mode_config.encoder_list, base.head) { | 10661 | &dev->mode_config.encoder_list, base.head) { |
10651 | if (encoders_cloneable(encoder, source_encoder)) | 10662 | if (encoders_cloneable(encoder, source_encoder)) |
10652 | index_mask |= (1 << entry); | 10663 | index_mask |= (1 << entry); |
10653 | 10664 | ||
10654 | entry++; | 10665 | entry++; |
10655 | } | 10666 | } |
10656 | 10667 | ||
10657 | return index_mask; | 10668 | return index_mask; |
10658 | } | 10669 | } |
10659 | 10670 | ||
10660 | static bool has_edp_a(struct drm_device *dev) | 10671 | static bool has_edp_a(struct drm_device *dev) |
10661 | { | 10672 | { |
10662 | struct drm_i915_private *dev_priv = dev->dev_private; | 10673 | struct drm_i915_private *dev_priv = dev->dev_private; |
10663 | 10674 | ||
10664 | if (!IS_MOBILE(dev)) | 10675 | if (!IS_MOBILE(dev)) |
10665 | return false; | 10676 | return false; |
10666 | 10677 | ||
10667 | if ((I915_READ(DP_A) & DP_DETECTED) == 0) | 10678 | if ((I915_READ(DP_A) & DP_DETECTED) == 0) |
10668 | return false; | 10679 | return false; |
10669 | 10680 | ||
10670 | if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) | 10681 | if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) |
10671 | return false; | 10682 | return false; |
10672 | 10683 | ||
10673 | return true; | 10684 | return true; |
10674 | } | 10685 | } |
10675 | 10686 | ||
10676 | const char *intel_output_name(int output) | 10687 | const char *intel_output_name(int output) |
10677 | { | 10688 | { |
10678 | static const char *names[] = { | 10689 | static const char *names[] = { |
10679 | [INTEL_OUTPUT_UNUSED] = "Unused", | 10690 | [INTEL_OUTPUT_UNUSED] = "Unused", |
10680 | [INTEL_OUTPUT_ANALOG] = "Analog", | 10691 | [INTEL_OUTPUT_ANALOG] = "Analog", |
10681 | [INTEL_OUTPUT_DVO] = "DVO", | 10692 | [INTEL_OUTPUT_DVO] = "DVO", |
10682 | [INTEL_OUTPUT_SDVO] = "SDVO", | 10693 | [INTEL_OUTPUT_SDVO] = "SDVO", |
10683 | [INTEL_OUTPUT_LVDS] = "LVDS", | 10694 | [INTEL_OUTPUT_LVDS] = "LVDS", |
10684 | [INTEL_OUTPUT_TVOUT] = "TV", | 10695 | [INTEL_OUTPUT_TVOUT] = "TV", |
10685 | [INTEL_OUTPUT_HDMI] = "HDMI", | 10696 | [INTEL_OUTPUT_HDMI] = "HDMI", |
10686 | [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort", | 10697 | [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort", |
10687 | [INTEL_OUTPUT_EDP] = "eDP", | 10698 | [INTEL_OUTPUT_EDP] = "eDP", |
10688 | [INTEL_OUTPUT_DSI] = "DSI", | 10699 | [INTEL_OUTPUT_DSI] = "DSI", |
10689 | [INTEL_OUTPUT_UNKNOWN] = "Unknown", | 10700 | [INTEL_OUTPUT_UNKNOWN] = "Unknown", |
10690 | }; | 10701 | }; |
10691 | 10702 | ||
10692 | if (output < 0 || output >= ARRAY_SIZE(names) || !names[output]) | 10703 | if (output < 0 || output >= ARRAY_SIZE(names) || !names[output]) |
10693 | return "Invalid"; | 10704 | return "Invalid"; |
10694 | 10705 | ||
10695 | return names[output]; | 10706 | return names[output]; |
10696 | } | 10707 | } |
10697 | 10708 | ||
/*
 * Probe and register all display outputs for this device.  The probe
 * order and the exact sequence of strap-register reads below follow the
 * hardware's multiplexing rules (e.g. SDVO/HDMI sharing a port), so do
 * not reorder the branches or reads.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	/* ULT parts have no CRT DAC. */
	if (!IS_ULT(dev))
		intel_crt_init(dev);

	if (HAS_DDI(dev)) {
		int found;

		/* Haswell uses DDI functions to detect digital outputs */
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
		/* DDI A only supports eDP */
		if (found)
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		/* Port D carries either eDP (handled above) or HDMI/DP. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev)) {
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
					PORT_B);
			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
		}

		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
					PORT_C);
			if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
		}

		intel_dsi_init(dev);
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	/* Fill in the crtc/clone masks now that all encoders exist. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
10822 | 10833 | ||
10823 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | 10834 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) |
10824 | { | 10835 | { |
10825 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 10836 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
10826 | 10837 | ||
10827 | drm_framebuffer_cleanup(fb); | 10838 | drm_framebuffer_cleanup(fb); |
10828 | WARN_ON(!intel_fb->obj->framebuffer_references--); | 10839 | WARN_ON(!intel_fb->obj->framebuffer_references--); |
10829 | drm_gem_object_unreference_unlocked(&intel_fb->obj->base); | 10840 | drm_gem_object_unreference_unlocked(&intel_fb->obj->base); |
10830 | kfree(intel_fb); | 10841 | kfree(intel_fb); |
10831 | } | 10842 | } |
10832 | 10843 | ||
10833 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, | 10844 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, |
10834 | struct drm_file *file, | 10845 | struct drm_file *file, |
10835 | unsigned int *handle) | 10846 | unsigned int *handle) |
10836 | { | 10847 | { |
10837 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 10848 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
10838 | struct drm_i915_gem_object *obj = intel_fb->obj; | 10849 | struct drm_i915_gem_object *obj = intel_fb->obj; |
10839 | 10850 | ||
10840 | return drm_gem_handle_create(file, &obj->base, handle); | 10851 | return drm_gem_handle_create(file, &obj->base, handle); |
10841 | } | 10852 | } |
10842 | 10853 | ||
/* Framebuffer vtable for GEM-backed framebuffers created via the two
 * hooks above. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};
10847 | 10858 | ||
10848 | static int intel_framebuffer_init(struct drm_device *dev, | 10859 | static int intel_framebuffer_init(struct drm_device *dev, |
10849 | struct intel_framebuffer *intel_fb, | 10860 | struct intel_framebuffer *intel_fb, |
10850 | struct drm_mode_fb_cmd2 *mode_cmd, | 10861 | struct drm_mode_fb_cmd2 *mode_cmd, |
10851 | struct drm_i915_gem_object *obj) | 10862 | struct drm_i915_gem_object *obj) |
10852 | { | 10863 | { |
10853 | int aligned_height; | 10864 | int aligned_height; |
10854 | int pitch_limit; | 10865 | int pitch_limit; |
10855 | int ret; | 10866 | int ret; |
10856 | 10867 | ||
10857 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 10868 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
10858 | 10869 | ||
10859 | if (obj->tiling_mode == I915_TILING_Y) { | 10870 | if (obj->tiling_mode == I915_TILING_Y) { |
10860 | DRM_DEBUG("hardware does not support tiling Y\n"); | 10871 | DRM_DEBUG("hardware does not support tiling Y\n"); |
10861 | return -EINVAL; | 10872 | return -EINVAL; |
10862 | } | 10873 | } |
10863 | 10874 | ||
10864 | if (mode_cmd->pitches[0] & 63) { | 10875 | if (mode_cmd->pitches[0] & 63) { |
10865 | DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n", | 10876 | DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n", |
10866 | mode_cmd->pitches[0]); | 10877 | mode_cmd->pitches[0]); |
10867 | return -EINVAL; | 10878 | return -EINVAL; |
10868 | } | 10879 | } |
10869 | 10880 | ||
10870 | if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) { | 10881 | if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) { |
10871 | pitch_limit = 32*1024; | 10882 | pitch_limit = 32*1024; |
10872 | } else if (INTEL_INFO(dev)->gen >= 4) { | 10883 | } else if (INTEL_INFO(dev)->gen >= 4) { |
10873 | if (obj->tiling_mode) | 10884 | if (obj->tiling_mode) |
10874 | pitch_limit = 16*1024; | 10885 | pitch_limit = 16*1024; |
10875 | else | 10886 | else |
10876 | pitch_limit = 32*1024; | 10887 | pitch_limit = 32*1024; |
10877 | } else if (INTEL_INFO(dev)->gen >= 3) { | 10888 | } else if (INTEL_INFO(dev)->gen >= 3) { |
10878 | if (obj->tiling_mode) | 10889 | if (obj->tiling_mode) |
10879 | pitch_limit = 8*1024; | 10890 | pitch_limit = 8*1024; |
10880 | else | 10891 | else |
10881 | pitch_limit = 16*1024; | 10892 | pitch_limit = 16*1024; |
10882 | } else | 10893 | } else |
10883 | /* XXX DSPC is limited to 4k tiled */ | 10894 | /* XXX DSPC is limited to 4k tiled */ |
10884 | pitch_limit = 8*1024; | 10895 | pitch_limit = 8*1024; |
10885 | 10896 | ||
10886 | if (mode_cmd->pitches[0] > pitch_limit) { | 10897 | if (mode_cmd->pitches[0] > pitch_limit) { |
10887 | DRM_DEBUG("%s pitch (%d) must be at less than %d\n", | 10898 | DRM_DEBUG("%s pitch (%d) must be at less than %d\n", |
10888 | obj->tiling_mode ? "tiled" : "linear", | 10899 | obj->tiling_mode ? "tiled" : "linear", |
10889 | mode_cmd->pitches[0], pitch_limit); | 10900 | mode_cmd->pitches[0], pitch_limit); |
10890 | return -EINVAL; | 10901 | return -EINVAL; |
10891 | } | 10902 | } |
10892 | 10903 | ||
10893 | if (obj->tiling_mode != I915_TILING_NONE && | 10904 | if (obj->tiling_mode != I915_TILING_NONE && |
10894 | mode_cmd->pitches[0] != obj->stride) { | 10905 | mode_cmd->pitches[0] != obj->stride) { |
10895 | DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", | 10906 | DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", |
10896 | mode_cmd->pitches[0], obj->stride); | 10907 | mode_cmd->pitches[0], obj->stride); |
10897 | return -EINVAL; | 10908 | return -EINVAL; |
10898 | } | 10909 | } |
10899 | 10910 | ||
10900 | /* Reject formats not supported by any plane early. */ | 10911 | /* Reject formats not supported by any plane early. */ |
10901 | switch (mode_cmd->pixel_format) { | 10912 | switch (mode_cmd->pixel_format) { |
10902 | case DRM_FORMAT_C8: | 10913 | case DRM_FORMAT_C8: |
10903 | case DRM_FORMAT_RGB565: | 10914 | case DRM_FORMAT_RGB565: |
10904 | case DRM_FORMAT_XRGB8888: | 10915 | case DRM_FORMAT_XRGB8888: |
10905 | case DRM_FORMAT_ARGB8888: | 10916 | case DRM_FORMAT_ARGB8888: |
10906 | break; | 10917 | break; |
10907 | case DRM_FORMAT_XRGB1555: | 10918 | case DRM_FORMAT_XRGB1555: |
10908 | case DRM_FORMAT_ARGB1555: | 10919 | case DRM_FORMAT_ARGB1555: |
10909 | if (INTEL_INFO(dev)->gen > 3) { | 10920 | if (INTEL_INFO(dev)->gen > 3) { |
10910 | DRM_DEBUG("unsupported pixel format: %s\n", | 10921 | DRM_DEBUG("unsupported pixel format: %s\n", |
10911 | drm_get_format_name(mode_cmd->pixel_format)); | 10922 | drm_get_format_name(mode_cmd->pixel_format)); |
10912 | return -EINVAL; | 10923 | return -EINVAL; |
10913 | } | 10924 | } |
10914 | break; | 10925 | break; |
10915 | case DRM_FORMAT_XBGR8888: | 10926 | case DRM_FORMAT_XBGR8888: |
10916 | case DRM_FORMAT_ABGR8888: | 10927 | case DRM_FORMAT_ABGR8888: |
10917 | case DRM_FORMAT_XRGB2101010: | 10928 | case DRM_FORMAT_XRGB2101010: |
10918 | case DRM_FORMAT_ARGB2101010: | 10929 | case DRM_FORMAT_ARGB2101010: |
10919 | case DRM_FORMAT_XBGR2101010: | 10930 | case DRM_FORMAT_XBGR2101010: |
10920 | case DRM_FORMAT_ABGR2101010: | 10931 | case DRM_FORMAT_ABGR2101010: |
10921 | if (INTEL_INFO(dev)->gen < 4) { | 10932 | if (INTEL_INFO(dev)->gen < 4) { |
10922 | DRM_DEBUG("unsupported pixel format: %s\n", | 10933 | DRM_DEBUG("unsupported pixel format: %s\n", |
10923 | drm_get_format_name(mode_cmd->pixel_format)); | 10934 | drm_get_format_name(mode_cmd->pixel_format)); |
10924 | return -EINVAL; | 10935 | return -EINVAL; |
10925 | } | 10936 | } |
10926 | break; | 10937 | break; |
10927 | case DRM_FORMAT_YUYV: | 10938 | case DRM_FORMAT_YUYV: |
10928 | case DRM_FORMAT_UYVY: | 10939 | case DRM_FORMAT_UYVY: |
10929 | case DRM_FORMAT_YVYU: | 10940 | case DRM_FORMAT_YVYU: |
10930 | case DRM_FORMAT_VYUY: | 10941 | case DRM_FORMAT_VYUY: |
10931 | if (INTEL_INFO(dev)->gen < 5) { | 10942 | if (INTEL_INFO(dev)->gen < 5) { |
10932 | DRM_DEBUG("unsupported pixel format: %s\n", | 10943 | DRM_DEBUG("unsupported pixel format: %s\n", |
10933 | drm_get_format_name(mode_cmd->pixel_format)); | 10944 | drm_get_format_name(mode_cmd->pixel_format)); |
10934 | return -EINVAL; | 10945 | return -EINVAL; |
10935 | } | 10946 | } |
10936 | break; | 10947 | break; |
10937 | default: | 10948 | default: |
10938 | DRM_DEBUG("unsupported pixel format: %s\n", | 10949 | DRM_DEBUG("unsupported pixel format: %s\n", |
10939 | drm_get_format_name(mode_cmd->pixel_format)); | 10950 | drm_get_format_name(mode_cmd->pixel_format)); |
10940 | return -EINVAL; | 10951 | return -EINVAL; |
10941 | } | 10952 | } |
10942 | 10953 | ||
10943 | /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ | 10954 | /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ |
10944 | if (mode_cmd->offsets[0] != 0) | 10955 | if (mode_cmd->offsets[0] != 0) |
10945 | return -EINVAL; | 10956 | return -EINVAL; |
10946 | 10957 | ||
10947 | aligned_height = intel_align_height(dev, mode_cmd->height, | 10958 | aligned_height = intel_align_height(dev, mode_cmd->height, |
10948 | obj->tiling_mode); | 10959 | obj->tiling_mode); |
10949 | /* FIXME drm helper for size checks (especially planar formats)? */ | 10960 | /* FIXME drm helper for size checks (especially planar formats)? */ |
10950 | if (obj->base.size < aligned_height * mode_cmd->pitches[0]) | 10961 | if (obj->base.size < aligned_height * mode_cmd->pitches[0]) |
10951 | return -EINVAL; | 10962 | return -EINVAL; |
10952 | 10963 | ||
10953 | drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); | 10964 | drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); |
10954 | intel_fb->obj = obj; | 10965 | intel_fb->obj = obj; |
10955 | intel_fb->obj->framebuffer_references++; | 10966 | intel_fb->obj->framebuffer_references++; |
10956 | 10967 | ||
10957 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); | 10968 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); |
10958 | if (ret) { | 10969 | if (ret) { |
10959 | DRM_ERROR("framebuffer init failed %d\n", ret); | 10970 | DRM_ERROR("framebuffer init failed %d\n", ret); |
10960 | return ret; | 10971 | return ret; |
10961 | } | 10972 | } |
10962 | 10973 | ||
10963 | return 0; | 10974 | return 0; |
10964 | } | 10975 | } |
10965 | 10976 | ||
/*
 * .fb_create hook: look up the userspace GEM handle and wrap the object
 * in an intel_framebuffer.  Returns an ERR_PTR on failure.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd->handles[0]));
	/* &obj->base == NULL detects a failed lookup: to_intel_bo() is a
	 * container_of(), and this works because base is presumably the
	 * first member of the object — NOTE(review): confirm layout. */
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	return intel_framebuffer_create(dev, mode_cmd, obj);
}
10980 | 10991 | ||
#ifndef CONFIG_DRM_I915_FBDEV
/* No-op stub used when fbdev emulation is compiled out, so the
 * intel_mode_funcs table below can reference it unconditionally. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
10986 | 10997 | ||
/* Device-wide mode-config vtable: framebuffer creation and fbdev
 * hotplug notification. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
};
10991 | 11002 | ||
/*
 * Set up chip specific display functions.
 *
 * Populates dev_priv->display with the per-platform vtable of modeset,
 * clock, audio (ELD), FDI link-training and page-flip hooks. Called once
 * from intel_modeset_init(). Ordering of the platform checks matters and
 * is noted inline.
 */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * DPLL search algorithm. G4X deliberately shares the PCH-split
	 * variant; VLV and Pineview have their own PLL layouts; everything
	 * else falls back to the generic i9xx algorithm.
	 */
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
		dev_priv->display.find_dpll = g4x_find_best_dpll;
	else if (IS_VALLEYVIEW(dev))
		dev_priv->display.find_dpll = vlv_find_best_dpll;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.find_dpll = pnv_find_best_dpll;
	else
		dev_priv->display.find_dpll = i9xx_find_best_dpll;

	/*
	 * Core CRTC/mode-set hooks. HAS_DDI must be tested before
	 * HAS_PCH_SPLIT: DDI platforms (HSW+) also have a PCH, so the
	 * order of these branches selects the correct implementation.
	 */
	if (HAS_DDI(dev)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.off = haswell_crtc_off;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (IS_VALLEYVIEW(dev)) {
		/* VLV uses the i9xx hooks except for its own crtc_enable. */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	}

	/* Returns the core display clock speed */
	if (IS_VALLEYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	/*
	 * FDI link training, HDMI/DP audio ELD writing and any
	 * global modeset resource hooks, per generation.
	 */
	if (HAS_PCH_SPLIT(dev)) {
		if (IS_GEN5(dev)) {
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_GEN6(dev)) {
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
			dev_priv->display.modeset_global_resources =
				ivb_modeset_global_resources;
		} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
			dev_priv->display.write_eld = haswell_write_eld;
			dev_priv->display.modeset_global_resources =
				haswell_modeset_global_resources;
		}
	} else if (IS_G4X(dev)) {
		dev_priv->display.write_eld = g4x_write_eld;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.modeset_global_resources =
			valleyview_modeset_global_resources;
		dev_priv->display.write_eld = ironlake_write_eld;
	}

	/* Default just returns -ENODEV to indicate unsupported */
	dev_priv->display.queue_flip = intel_default_queue_flip;

	/* Page-flip submission path, selected purely by GPU generation. */
	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	}

	intel_panel_init_backlight_funcs(dev);
}
11128 | 11139 | ||
11129 | /* | 11140 | /* |
11130 | * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, | 11141 | * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, |
11131 | * resume, or other times. This quirk makes sure that's the case for | 11142 | * resume, or other times. This quirk makes sure that's the case for |
11132 | * affected systems. | 11143 | * affected systems. |
11133 | */ | 11144 | */ |
11134 | static void quirk_pipea_force(struct drm_device *dev) | 11145 | static void quirk_pipea_force(struct drm_device *dev) |
11135 | { | 11146 | { |
11136 | struct drm_i915_private *dev_priv = dev->dev_private; | 11147 | struct drm_i915_private *dev_priv = dev->dev_private; |
11137 | 11148 | ||
11138 | dev_priv->quirks |= QUIRK_PIPEA_FORCE; | 11149 | dev_priv->quirks |= QUIRK_PIPEA_FORCE; |
11139 | DRM_INFO("applying pipe a force quirk\n"); | 11150 | DRM_INFO("applying pipe a force quirk\n"); |
11140 | } | 11151 | } |
11141 | 11152 | ||
11142 | /* | 11153 | /* |
11143 | * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason | 11154 | * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason |
11144 | */ | 11155 | */ |
11145 | static void quirk_ssc_force_disable(struct drm_device *dev) | 11156 | static void quirk_ssc_force_disable(struct drm_device *dev) |
11146 | { | 11157 | { |
11147 | struct drm_i915_private *dev_priv = dev->dev_private; | 11158 | struct drm_i915_private *dev_priv = dev->dev_private; |
11148 | dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; | 11159 | dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; |
11149 | DRM_INFO("applying lvds SSC disable quirk\n"); | 11160 | DRM_INFO("applying lvds SSC disable quirk\n"); |
11150 | } | 11161 | } |
11151 | 11162 | ||
11152 | /* | 11163 | /* |
11153 | * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight | 11164 | * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight |
11154 | * brightness value | 11165 | * brightness value |
11155 | */ | 11166 | */ |
11156 | static void quirk_invert_brightness(struct drm_device *dev) | 11167 | static void quirk_invert_brightness(struct drm_device *dev) |
11157 | { | 11168 | { |
11158 | struct drm_i915_private *dev_priv = dev->dev_private; | 11169 | struct drm_i915_private *dev_priv = dev->dev_private; |
11159 | dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; | 11170 | dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; |
11160 | DRM_INFO("applying inverted panel brightness quirk\n"); | 11171 | DRM_INFO("applying inverted panel brightness quirk\n"); |
11161 | } | 11172 | } |
11162 | 11173 | ||
/*
 * One PCI-ID-matched quirk entry: if the device's PCI IDs match,
 * @hook is invoked at init time to set the corresponding quirk flag.
 */
struct intel_quirk {
	int device;			/* PCI device ID */
	int subsystem_vendor;		/* PCI subsystem vendor, or PCI_ANY_ID */
	int subsystem_device;		/* PCI subsystem device, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applies the quirk */
};
11169 | 11180 | ||
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);	/* applies the quirk */
	/* NULL-terminated DMI match table identifying the affected machines */
	const struct dmi_system_id (*dmi_id_list)[];
};
11175 | 11186 | ||
/*
 * DMI match callback: just log which machine triggered the quirk.
 * Returns 1 so the match is counted by dmi_check_system() (the caller
 * treats any non-zero count as "apply the hook").
 */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
11181 | 11192 | ||
/* DMI-matched quirks for machines with no useful PCI subsystem IDs. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				/* Empty product name: matches all NCR systems */
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
11197 | 11208 | ||
/*
 * PCI-ID quirk table. Entry layout:
 * { device, subsystem_vendor, subsystem_device, hook }.
 * PCI_ANY_ID wildcards the subsystem fields.
 */
static struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
};
11235 | 11246 | ||
11236 | static void intel_init_quirks(struct drm_device *dev) | 11247 | static void intel_init_quirks(struct drm_device *dev) |
11237 | { | 11248 | { |
11238 | struct pci_dev *d = dev->pdev; | 11249 | struct pci_dev *d = dev->pdev; |
11239 | int i; | 11250 | int i; |
11240 | 11251 | ||
11241 | for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { | 11252 | for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { |
11242 | struct intel_quirk *q = &intel_quirks[i]; | 11253 | struct intel_quirk *q = &intel_quirks[i]; |
11243 | 11254 | ||
11244 | if (d->device == q->device && | 11255 | if (d->device == q->device && |
11245 | (d->subsystem_vendor == q->subsystem_vendor || | 11256 | (d->subsystem_vendor == q->subsystem_vendor || |
11246 | q->subsystem_vendor == PCI_ANY_ID) && | 11257 | q->subsystem_vendor == PCI_ANY_ID) && |
11247 | (d->subsystem_device == q->subsystem_device || | 11258 | (d->subsystem_device == q->subsystem_device || |
11248 | q->subsystem_device == PCI_ANY_ID)) | 11259 | q->subsystem_device == PCI_ANY_ID)) |
11249 | q->hook(dev); | 11260 | q->hook(dev); |
11250 | } | 11261 | } |
11251 | for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { | 11262 | for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { |
11252 | if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) | 11263 | if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) |
11253 | intel_dmi_quirks[i].hook(dev); | 11264 | intel_dmi_quirks[i].hook(dev); |
11254 | } | 11265 | } |
11255 | } | 11266 | } |
11256 | 11267 | ||
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	/*
	 * Read-modify-write VGA sequencer register SR01 via legacy I/O,
	 * setting bit 5 (screen off) before touching VGACNTRL.
	 */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	/* Let the screen-off take effect before disabling the VGA plane. */
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);	/* flush the write */
}
11275 | 11286 | ||
/*
 * Hardware-side modeset initialization: DDI buffer translations, clock
 * gating workarounds, DPIO reset and GT power-saving features. Called at
 * driver load and again on resume; the sequence below is order-sensitive.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	intel_prepare_ddi(dev);

	intel_init_clock_gating(dev);

	intel_reset_dpio(dev);

	/* struct_mutex protects the GT power-saving setup. */
	mutex_lock(&dev->struct_mutex);
	intel_enable_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);
}
11288 | 11299 | ||
/* Modeset-side hook of the suspend path; just forwards to the PM code. */
void intel_modeset_suspend_hw(struct drm_device *dev)
{
	intel_suspend_hw(dev);
}
11293 | 11304 | ||
/*
 * One-time modeset initialization at driver load: sets up the DRM mode
 * config, applies quirks, creates CRTCs/planes/outputs, disables the VGA
 * plane and FBC, and reads back the state the BIOS left behind. The call
 * order below is deliberate; reordering can break takeover from the BIOS.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite, ret;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Display-less chips (e.g. server SKUs): nothing more to set up. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	intel_init_display(dev);

	/* Per-generation framebuffer size limits. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* Create a CRTC per pipe and the sprite planes attached to it. */
	for_each_pipe(pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_init_dpio(dev);
	intel_reset_dpio(dev);

	intel_cpu_pll_init(dev);
	intel_shared_dpll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Just in case the BIOS is doing something questionable. */
	intel_disable_fbc(dev);

	/* Read out and sanitize the hw state the BIOS programmed. */
	mutex_lock(&dev->mode_config.mutex);
	intel_modeset_setup_hw_state(dev, false);
	mutex_unlock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
			    base.head) {
		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		if (dev_priv->display.get_plane_config) {
			dev_priv->display.get_plane_config(crtc,
							   &crtc->plane_config);
			/*
			 * If the fb is shared between multiple heads, we'll
			 * just get the first one.
			 */
			intel_find_plane_obj(crtc, &crtc->plane_config);
		}
	}
}
11386 | 11397 | ||
11387 | static void | 11398 | static void |
11388 | intel_connector_break_all_links(struct intel_connector *connector) | 11399 | intel_connector_break_all_links(struct intel_connector *connector) |
11389 | { | 11400 | { |
11390 | connector->base.dpms = DRM_MODE_DPMS_OFF; | 11401 | connector->base.dpms = DRM_MODE_DPMS_OFF; |
11391 | connector->base.encoder = NULL; | 11402 | connector->base.encoder = NULL; |
11392 | connector->encoder->connectors_active = false; | 11403 | connector->encoder->connectors_active = false; |
11393 | connector->encoder->base.crtc = NULL; | 11404 | connector->encoder->base.crtc = NULL; |
11394 | } | 11405 | } |
11395 | 11406 | ||
/*
 * Force pipe A on by briefly borrowing the CRT (analog) connector via the
 * load-detect path; used by the pipe-A-force quirk sanitization.
 */
static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;

	/* We can't just switch on the pipe A, we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	list_for_each_entry(connector,
			    &dev->mode_config.connector_list,
			    base.head) {
		/* NOTE(review): assumes connector->encoder is non-NULL for
		 * every connector on this list — confirm against setup code. */
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	/* No CRT connector on this platform: nothing we can do. */
	if (!crt)
		return;

	/* Enabling then releasing the load-detect pipe leaves pipe A running. */
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
		intel_release_load_detect_pipe(crt, &load_detect_temp);


}
11422 | 11433 | ||
/*
 * Check whether the other display plane is (wrongly) scanning out on this
 * CRTC's pipe. Returns false when a conflicting mapping is detected.
 * Only meaningful on gen < 4, where the plane -> pipe mapping is
 * programmable (callers guard on that).
 */
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	/* Single-pipe hardware cannot have a wrong mapping. */
	if (INTEL_INFO(dev)->num_pipes == 1)
		return true;

	/* Look at the control register of the *other* plane. */
	reg = DSPCNTR(!crtc->plane);
	val = I915_READ(reg);

	/*
	 * !! collapses the pipe-select field to 0/1; this relies on the
	 * affected platforms having only pipes A (0) and B (1) — NOTE
	 * (review): would not generalize to >2 pipes, verify callers.
	 */
	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}
11442 | 11453 | ||
11443 | static void intel_sanitize_crtc(struct intel_crtc *crtc) | 11454 | static void intel_sanitize_crtc(struct intel_crtc *crtc) |
11444 | { | 11455 | { |
11445 | struct drm_device *dev = crtc->base.dev; | 11456 | struct drm_device *dev = crtc->base.dev; |
11446 | struct drm_i915_private *dev_priv = dev->dev_private; | 11457 | struct drm_i915_private *dev_priv = dev->dev_private; |
11447 | u32 reg; | 11458 | u32 reg; |
11448 | 11459 | ||
11449 | /* Clear any frame start delays used for debugging left by the BIOS */ | 11460 | /* Clear any frame start delays used for debugging left by the BIOS */ |
11450 | reg = PIPECONF(crtc->config.cpu_transcoder); | 11461 | reg = PIPECONF(crtc->config.cpu_transcoder); |
11451 | I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); | 11462 | I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); |
11452 | 11463 | ||
11453 | /* We need to sanitize the plane -> pipe mapping first because this will | 11464 | /* We need to sanitize the plane -> pipe mapping first because this will |
11454 | * disable the crtc (and hence change the state) if it is wrong. Note | 11465 | * disable the crtc (and hence change the state) if it is wrong. Note |
11455 | * that gen4+ has a fixed plane -> pipe mapping. */ | 11466 | * that gen4+ has a fixed plane -> pipe mapping. */ |
11456 | if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { | 11467 | if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { |
11457 | struct intel_connector *connector; | 11468 | struct intel_connector *connector; |
11458 | bool plane; | 11469 | bool plane; |
11459 | 11470 | ||
11460 | DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", | 11471 | DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", |
11461 | crtc->base.base.id); | 11472 | crtc->base.base.id); |
11462 | 11473 | ||
11463 | /* Pipe has the wrong plane attached and the plane is active. | 11474 | /* Pipe has the wrong plane attached and the plane is active. |
11464 | * Temporarily change the plane mapping and disable everything | 11475 | * Temporarily change the plane mapping and disable everything |
11465 | * ... */ | 11476 | * ... */ |
11466 | plane = crtc->plane; | 11477 | plane = crtc->plane; |
11467 | crtc->plane = !plane; | 11478 | crtc->plane = !plane; |
11468 | dev_priv->display.crtc_disable(&crtc->base); | 11479 | dev_priv->display.crtc_disable(&crtc->base); |
11469 | crtc->plane = plane; | 11480 | crtc->plane = plane; |
11470 | 11481 | ||
11471 | /* ... and break all links. */ | 11482 | /* ... and break all links. */ |
11472 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 11483 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
11473 | base.head) { | 11484 | base.head) { |
11474 | if (connector->encoder->base.crtc != &crtc->base) | 11485 | if (connector->encoder->base.crtc != &crtc->base) |
11475 | continue; | 11486 | continue; |
11476 | 11487 | ||
11477 | intel_connector_break_all_links(connector); | 11488 | intel_connector_break_all_links(connector); |
11478 | } | 11489 | } |
11479 | 11490 | ||
11480 | WARN_ON(crtc->active); | 11491 | WARN_ON(crtc->active); |
11481 | crtc->base.enabled = false; | 11492 | crtc->base.enabled = false; |
11482 | } | 11493 | } |
11483 | 11494 | ||
11484 | if (dev_priv->quirks & QUIRK_PIPEA_FORCE && | 11495 | if (dev_priv->quirks & QUIRK_PIPEA_FORCE && |
11485 | crtc->pipe == PIPE_A && !crtc->active) { | 11496 | crtc->pipe == PIPE_A && !crtc->active) { |
11486 | /* BIOS forgot to enable pipe A, this mostly happens after | 11497 | /* BIOS forgot to enable pipe A, this mostly happens after |
11487 | * resume. Force-enable the pipe to fix this, the update_dpms | 11498 | * resume. Force-enable the pipe to fix this, the update_dpms |
11488 | * call below we restore the pipe to the right state, but leave | 11499 | * call below we restore the pipe to the right state, but leave |
11489 | * the required bits on. */ | 11500 | * the required bits on. */ |
11490 | intel_enable_pipe_a(dev); | 11501 | intel_enable_pipe_a(dev); |
11491 | } | 11502 | } |
11492 | 11503 | ||
11493 | /* Adjust the state of the output pipe according to whether we | 11504 | /* Adjust the state of the output pipe according to whether we |
11494 | * have active connectors/encoders. */ | 11505 | * have active connectors/encoders. */ |
11495 | intel_crtc_update_dpms(&crtc->base); | 11506 | intel_crtc_update_dpms(&crtc->base); |
11496 | 11507 | ||
11497 | if (crtc->active != crtc->base.enabled) { | 11508 | if (crtc->active != crtc->base.enabled) { |
11498 | struct intel_encoder *encoder; | 11509 | struct intel_encoder *encoder; |
11499 | 11510 | ||
11500 | /* This can happen either due to bugs in the get_hw_state | 11511 | /* This can happen either due to bugs in the get_hw_state |
11501 | * functions or because the pipe is force-enabled due to the | 11512 | * functions or because the pipe is force-enabled due to the |
11502 | * pipe A quirk. */ | 11513 | * pipe A quirk. */ |
11503 | DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", | 11514 | DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", |
11504 | crtc->base.base.id, | 11515 | crtc->base.base.id, |
11505 | crtc->base.enabled ? "enabled" : "disabled", | 11516 | crtc->base.enabled ? "enabled" : "disabled", |
11506 | crtc->active ? "enabled" : "disabled"); | 11517 | crtc->active ? "enabled" : "disabled"); |
11507 | 11518 | ||
11508 | crtc->base.enabled = crtc->active; | 11519 | crtc->base.enabled = crtc->active; |
11509 | 11520 | ||
11510 | /* Because we only establish the connector -> encoder -> | 11521 | /* Because we only establish the connector -> encoder -> |
11511 | * crtc links if something is active, this means the | 11522 | * crtc links if something is active, this means the |
11512 | * crtc is now deactivated. Break the links. connector | 11523 | * crtc is now deactivated. Break the links. connector |
11513 | * -> encoder links are only establish when things are | 11524 | * -> encoder links are only establish when things are |
11514 | * actually up, hence no need to break them. */ | 11525 | * actually up, hence no need to break them. */ |
11515 | WARN_ON(crtc->active); | 11526 | WARN_ON(crtc->active); |
11516 | 11527 | ||
11517 | for_each_encoder_on_crtc(dev, &crtc->base, encoder) { | 11528 | for_each_encoder_on_crtc(dev, &crtc->base, encoder) { |
11518 | WARN_ON(encoder->connectors_active); | 11529 | WARN_ON(encoder->connectors_active); |
11519 | encoder->base.crtc = NULL; | 11530 | encoder->base.crtc = NULL; |
11520 | } | 11531 | } |
11521 | } | 11532 | } |
11522 | if (crtc->active) { | 11533 | if (crtc->active) { |
11523 | /* | 11534 | /* |
11524 | * We start out with underrun reporting disabled to avoid races. | 11535 | * We start out with underrun reporting disabled to avoid races. |
11525 | * For correct bookkeeping mark this on active crtcs. | 11536 | * For correct bookkeeping mark this on active crtcs. |
11526 | * | 11537 | * |
11527 | * No protection against concurrent access is required - at | 11538 | * No protection against concurrent access is required - at |
11528 | * worst a fifo underrun happens which also sets this to false. | 11539 | * worst a fifo underrun happens which also sets this to false. |
11529 | */ | 11540 | */ |
11530 | crtc->cpu_fifo_underrun_disabled = true; | 11541 | crtc->cpu_fifo_underrun_disabled = true; |
11531 | crtc->pch_fifo_underrun_disabled = true; | 11542 | crtc->pch_fifo_underrun_disabled = true; |
11532 | } | 11543 | } |
11533 | } | 11544 | } |
11534 | 11545 | ||
11535 | static void intel_sanitize_encoder(struct intel_encoder *encoder) | 11546 | static void intel_sanitize_encoder(struct intel_encoder *encoder) |
11536 | { | 11547 | { |
11537 | struct intel_connector *connector; | 11548 | struct intel_connector *connector; |
11538 | struct drm_device *dev = encoder->base.dev; | 11549 | struct drm_device *dev = encoder->base.dev; |
11539 | 11550 | ||
11540 | /* We need to check both for a crtc link (meaning that the | 11551 | /* We need to check both for a crtc link (meaning that the |
11541 | * encoder is active and trying to read from a pipe) and the | 11552 | * encoder is active and trying to read from a pipe) and the |
11542 | * pipe itself being active. */ | 11553 | * pipe itself being active. */ |
11543 | bool has_active_crtc = encoder->base.crtc && | 11554 | bool has_active_crtc = encoder->base.crtc && |
11544 | to_intel_crtc(encoder->base.crtc)->active; | 11555 | to_intel_crtc(encoder->base.crtc)->active; |
11545 | 11556 | ||
11546 | if (encoder->connectors_active && !has_active_crtc) { | 11557 | if (encoder->connectors_active && !has_active_crtc) { |
11547 | DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", | 11558 | DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", |
11548 | encoder->base.base.id, | 11559 | encoder->base.base.id, |
11549 | drm_get_encoder_name(&encoder->base)); | 11560 | drm_get_encoder_name(&encoder->base)); |
11550 | 11561 | ||
11551 | /* Connector is active, but has no active pipe. This is | 11562 | /* Connector is active, but has no active pipe. This is |
11552 | * fallout from our resume register restoring. Disable | 11563 | * fallout from our resume register restoring. Disable |
11553 | * the encoder manually again. */ | 11564 | * the encoder manually again. */ |
11554 | if (encoder->base.crtc) { | 11565 | if (encoder->base.crtc) { |
11555 | DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", | 11566 | DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", |
11556 | encoder->base.base.id, | 11567 | encoder->base.base.id, |
11557 | drm_get_encoder_name(&encoder->base)); | 11568 | drm_get_encoder_name(&encoder->base)); |
11558 | encoder->disable(encoder); | 11569 | encoder->disable(encoder); |
11559 | } | 11570 | } |
11560 | 11571 | ||
11561 | /* Inconsistent output/port/pipe state happens presumably due to | 11572 | /* Inconsistent output/port/pipe state happens presumably due to |
11562 | * a bug in one of the get_hw_state functions. Or someplace else | 11573 | * a bug in one of the get_hw_state functions. Or someplace else |
11563 | * in our code, like the register restore mess on resume. Clamp | 11574 | * in our code, like the register restore mess on resume. Clamp |
11564 | * things to off as a safer default. */ | 11575 | * things to off as a safer default. */ |
11565 | list_for_each_entry(connector, | 11576 | list_for_each_entry(connector, |
11566 | &dev->mode_config.connector_list, | 11577 | &dev->mode_config.connector_list, |
11567 | base.head) { | 11578 | base.head) { |
11568 | if (connector->encoder != encoder) | 11579 | if (connector->encoder != encoder) |
11569 | continue; | 11580 | continue; |
11570 | 11581 | ||
11571 | intel_connector_break_all_links(connector); | 11582 | intel_connector_break_all_links(connector); |
11572 | } | 11583 | } |
11573 | } | 11584 | } |
11574 | /* Enabled encoders without active connectors will be fixed in | 11585 | /* Enabled encoders without active connectors will be fixed in |
11575 | * the crtc fixup. */ | 11586 | * the crtc fixup. */ |
11576 | } | 11587 | } |
11577 | 11588 | ||
11578 | void i915_redisable_vga_power_on(struct drm_device *dev) | 11589 | void i915_redisable_vga_power_on(struct drm_device *dev) |
11579 | { | 11590 | { |
11580 | struct drm_i915_private *dev_priv = dev->dev_private; | 11591 | struct drm_i915_private *dev_priv = dev->dev_private; |
11581 | u32 vga_reg = i915_vgacntrl_reg(dev); | 11592 | u32 vga_reg = i915_vgacntrl_reg(dev); |
11582 | 11593 | ||
11583 | if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { | 11594 | if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { |
11584 | DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); | 11595 | DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); |
11585 | i915_disable_vga(dev); | 11596 | i915_disable_vga(dev); |
11586 | } | 11597 | } |
11587 | } | 11598 | } |
11588 | 11599 | ||
11589 | void i915_redisable_vga(struct drm_device *dev) | 11600 | void i915_redisable_vga(struct drm_device *dev) |
11590 | { | 11601 | { |
11591 | struct drm_i915_private *dev_priv = dev->dev_private; | 11602 | struct drm_i915_private *dev_priv = dev->dev_private; |
11592 | 11603 | ||
11593 | /* This function can be called both from intel_modeset_setup_hw_state or | 11604 | /* This function can be called both from intel_modeset_setup_hw_state or |
11594 | * at a very early point in our resume sequence, where the power well | 11605 | * at a very early point in our resume sequence, where the power well |
11595 | * structures are not yet restored. Since this function is at a very | 11606 | * structures are not yet restored. Since this function is at a very |
11596 | * paranoid "someone might have enabled VGA while we were not looking" | 11607 | * paranoid "someone might have enabled VGA while we were not looking" |
11597 | * level, just check if the power well is enabled instead of trying to | 11608 | * level, just check if the power well is enabled instead of trying to |
11598 | * follow the "don't touch the power well if we don't need it" policy | 11609 | * follow the "don't touch the power well if we don't need it" policy |
11599 | * the rest of the driver uses. */ | 11610 | * the rest of the driver uses. */ |
11600 | if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA)) | 11611 | if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA)) |
11601 | return; | 11612 | return; |
11602 | 11613 | ||
11603 | i915_redisable_vga_power_on(dev); | 11614 | i915_redisable_vga_power_on(dev); |
11604 | } | 11615 | } |
11605 | 11616 | ||
11606 | static void intel_modeset_readout_hw_state(struct drm_device *dev) | 11617 | static void intel_modeset_readout_hw_state(struct drm_device *dev) |
11607 | { | 11618 | { |
11608 | struct drm_i915_private *dev_priv = dev->dev_private; | 11619 | struct drm_i915_private *dev_priv = dev->dev_private; |
11609 | enum pipe pipe; | 11620 | enum pipe pipe; |
11610 | struct intel_crtc *crtc; | 11621 | struct intel_crtc *crtc; |
11611 | struct intel_encoder *encoder; | 11622 | struct intel_encoder *encoder; |
11612 | struct intel_connector *connector; | 11623 | struct intel_connector *connector; |
11613 | int i; | 11624 | int i; |
11614 | 11625 | ||
11615 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, | 11626 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, |
11616 | base.head) { | 11627 | base.head) { |
11617 | memset(&crtc->config, 0, sizeof(crtc->config)); | 11628 | memset(&crtc->config, 0, sizeof(crtc->config)); |
11629 | |||
11630 | crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE; | ||
11618 | 11631 | ||
11619 | crtc->active = dev_priv->display.get_pipe_config(crtc, | 11632 | crtc->active = dev_priv->display.get_pipe_config(crtc, |
11620 | &crtc->config); | 11633 | &crtc->config); |
11621 | 11634 | ||
11622 | crtc->base.enabled = crtc->active; | 11635 | crtc->base.enabled = crtc->active; |
11623 | crtc->primary_enabled = crtc->active; | 11636 | crtc->primary_enabled = crtc->active; |
11624 | 11637 | ||
11625 | DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", | 11638 | DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", |
11626 | crtc->base.base.id, | 11639 | crtc->base.base.id, |
11627 | crtc->active ? "enabled" : "disabled"); | 11640 | crtc->active ? "enabled" : "disabled"); |
11628 | } | 11641 | } |
11629 | 11642 | ||
11630 | /* FIXME: Smash this into the new shared dpll infrastructure. */ | 11643 | /* FIXME: Smash this into the new shared dpll infrastructure. */ |
11631 | if (HAS_DDI(dev)) | 11644 | if (HAS_DDI(dev)) |
11632 | intel_ddi_setup_hw_pll_state(dev); | 11645 | intel_ddi_setup_hw_pll_state(dev); |
11633 | 11646 | ||
11634 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | 11647 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { |
11635 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; | 11648 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; |
11636 | 11649 | ||
11637 | pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state); | 11650 | pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state); |
11638 | pll->active = 0; | 11651 | pll->active = 0; |
11639 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, | 11652 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, |
11640 | base.head) { | 11653 | base.head) { |
11641 | if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) | 11654 | if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) |
11642 | pll->active++; | 11655 | pll->active++; |
11643 | } | 11656 | } |
11644 | pll->refcount = pll->active; | 11657 | pll->refcount = pll->active; |
11645 | 11658 | ||
11646 | DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n", | 11659 | DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n", |
11647 | pll->name, pll->refcount, pll->on); | 11660 | pll->name, pll->refcount, pll->on); |
11648 | } | 11661 | } |
11649 | 11662 | ||
11650 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 11663 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
11651 | base.head) { | 11664 | base.head) { |
11652 | pipe = 0; | 11665 | pipe = 0; |
11653 | 11666 | ||
11654 | if (encoder->get_hw_state(encoder, &pipe)) { | 11667 | if (encoder->get_hw_state(encoder, &pipe)) { |
11655 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | 11668 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
11656 | encoder->base.crtc = &crtc->base; | 11669 | encoder->base.crtc = &crtc->base; |
11657 | encoder->get_config(encoder, &crtc->config); | 11670 | encoder->get_config(encoder, &crtc->config); |
11658 | } else { | 11671 | } else { |
11659 | encoder->base.crtc = NULL; | 11672 | encoder->base.crtc = NULL; |
11660 | } | 11673 | } |
11661 | 11674 | ||
11662 | encoder->connectors_active = false; | 11675 | encoder->connectors_active = false; |
11663 | DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", | 11676 | DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", |
11664 | encoder->base.base.id, | 11677 | encoder->base.base.id, |
11665 | drm_get_encoder_name(&encoder->base), | 11678 | drm_get_encoder_name(&encoder->base), |
11666 | encoder->base.crtc ? "enabled" : "disabled", | 11679 | encoder->base.crtc ? "enabled" : "disabled", |
11667 | pipe_name(pipe)); | 11680 | pipe_name(pipe)); |
11668 | } | 11681 | } |
11669 | 11682 | ||
11670 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 11683 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
11671 | base.head) { | 11684 | base.head) { |
11672 | if (connector->get_hw_state(connector)) { | 11685 | if (connector->get_hw_state(connector)) { |
11673 | connector->base.dpms = DRM_MODE_DPMS_ON; | 11686 | connector->base.dpms = DRM_MODE_DPMS_ON; |
11674 | connector->encoder->connectors_active = true; | 11687 | connector->encoder->connectors_active = true; |
11675 | connector->base.encoder = &connector->encoder->base; | 11688 | connector->base.encoder = &connector->encoder->base; |
11676 | } else { | 11689 | } else { |
11677 | connector->base.dpms = DRM_MODE_DPMS_OFF; | 11690 | connector->base.dpms = DRM_MODE_DPMS_OFF; |
11678 | connector->base.encoder = NULL; | 11691 | connector->base.encoder = NULL; |
11679 | } | 11692 | } |
11680 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", | 11693 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", |
11681 | connector->base.base.id, | 11694 | connector->base.base.id, |
11682 | drm_get_connector_name(&connector->base), | 11695 | drm_get_connector_name(&connector->base), |
11683 | connector->base.encoder ? "enabled" : "disabled"); | 11696 | connector->base.encoder ? "enabled" : "disabled"); |
11684 | } | 11697 | } |
11685 | } | 11698 | } |
11686 | 11699 | ||
11687 | /* Scan out the current hw modeset state, sanitizes it and maps it into the drm | 11700 | /* Scan out the current hw modeset state, sanitizes it and maps it into the drm |
11688 | * and i915 state tracking structures. */ | 11701 | * and i915 state tracking structures. */ |
11689 | void intel_modeset_setup_hw_state(struct drm_device *dev, | 11702 | void intel_modeset_setup_hw_state(struct drm_device *dev, |
11690 | bool force_restore) | 11703 | bool force_restore) |
11691 | { | 11704 | { |
11692 | struct drm_i915_private *dev_priv = dev->dev_private; | 11705 | struct drm_i915_private *dev_priv = dev->dev_private; |
11693 | enum pipe pipe; | 11706 | enum pipe pipe; |
11694 | struct intel_crtc *crtc; | 11707 | struct intel_crtc *crtc; |
11695 | struct intel_encoder *encoder; | 11708 | struct intel_encoder *encoder; |
11696 | int i; | 11709 | int i; |
11697 | 11710 | ||
11698 | intel_modeset_readout_hw_state(dev); | 11711 | intel_modeset_readout_hw_state(dev); |
11699 | 11712 | ||
11700 | /* | 11713 | /* |
11701 | * Now that we have the config, copy it to each CRTC struct | 11714 | * Now that we have the config, copy it to each CRTC struct |
11702 | * Note that this could go away if we move to using crtc_config | 11715 | * Note that this could go away if we move to using crtc_config |
11703 | * checking everywhere. | 11716 | * checking everywhere. |
11704 | */ | 11717 | */ |
11705 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, | 11718 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, |
11706 | base.head) { | 11719 | base.head) { |
11707 | if (crtc->active && i915.fastboot) { | 11720 | if (crtc->active && i915.fastboot) { |
11708 | intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config); | 11721 | intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config); |
11709 | DRM_DEBUG_KMS("[CRTC:%d] found active mode: ", | 11722 | DRM_DEBUG_KMS("[CRTC:%d] found active mode: ", |
11710 | crtc->base.base.id); | 11723 | crtc->base.base.id); |
11711 | drm_mode_debug_printmodeline(&crtc->base.mode); | 11724 | drm_mode_debug_printmodeline(&crtc->base.mode); |
11712 | } | 11725 | } |
11713 | } | 11726 | } |
11714 | 11727 | ||
11715 | /* HW state is read out, now we need to sanitize this mess. */ | 11728 | /* HW state is read out, now we need to sanitize this mess. */ |
11716 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 11729 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
11717 | base.head) { | 11730 | base.head) { |
11718 | intel_sanitize_encoder(encoder); | 11731 | intel_sanitize_encoder(encoder); |
11719 | } | 11732 | } |
11720 | 11733 | ||
11721 | for_each_pipe(pipe) { | 11734 | for_each_pipe(pipe) { |
11722 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | 11735 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
11723 | intel_sanitize_crtc(crtc); | 11736 | intel_sanitize_crtc(crtc); |
11724 | intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]"); | 11737 | intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]"); |
11725 | } | 11738 | } |
11726 | 11739 | ||
11727 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | 11740 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { |
11728 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; | 11741 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; |
11729 | 11742 | ||
11730 | if (!pll->on || pll->active) | 11743 | if (!pll->on || pll->active) |
11731 | continue; | 11744 | continue; |
11732 | 11745 | ||
11733 | DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); | 11746 | DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); |
11734 | 11747 | ||
11735 | pll->disable(dev_priv, pll); | 11748 | pll->disable(dev_priv, pll); |
11736 | pll->on = false; | 11749 | pll->on = false; |
11737 | } | 11750 | } |
11738 | 11751 | ||
11739 | if (HAS_PCH_SPLIT(dev)) | 11752 | if (HAS_PCH_SPLIT(dev)) |
11740 | ilk_wm_get_hw_state(dev); | 11753 | ilk_wm_get_hw_state(dev); |
11741 | 11754 | ||
11742 | if (force_restore) { | 11755 | if (force_restore) { |
11743 | i915_redisable_vga(dev); | 11756 | i915_redisable_vga(dev); |
11744 | 11757 | ||
11745 | /* | 11758 | /* |
11746 | * We need to use raw interfaces for restoring state to avoid | 11759 | * We need to use raw interfaces for restoring state to avoid |
11747 | * checking (bogus) intermediate states. | 11760 | * checking (bogus) intermediate states. |
11748 | */ | 11761 | */ |
11749 | for_each_pipe(pipe) { | 11762 | for_each_pipe(pipe) { |
11750 | struct drm_crtc *crtc = | 11763 | struct drm_crtc *crtc = |
11751 | dev_priv->pipe_to_crtc_mapping[pipe]; | 11764 | dev_priv->pipe_to_crtc_mapping[pipe]; |
11752 | 11765 | ||
11753 | __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, | 11766 | __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, |
11754 | crtc->primary->fb); | 11767 | crtc->primary->fb); |
11755 | } | 11768 | } |
11756 | } else { | 11769 | } else { |
11757 | intel_modeset_update_staged_output_state(dev); | 11770 | intel_modeset_update_staged_output_state(dev); |
11758 | } | 11771 | } |
11759 | 11772 | ||
11760 | intel_modeset_check_state(dev); | 11773 | intel_modeset_check_state(dev); |
11761 | } | 11774 | } |
11762 | 11775 | ||
11763 | void intel_modeset_gem_init(struct drm_device *dev) | 11776 | void intel_modeset_gem_init(struct drm_device *dev) |
11764 | { | 11777 | { |
11765 | struct drm_crtc *c; | 11778 | struct drm_crtc *c; |
11766 | struct intel_framebuffer *fb; | 11779 | struct intel_framebuffer *fb; |
11767 | 11780 | ||
11768 | mutex_lock(&dev->struct_mutex); | 11781 | mutex_lock(&dev->struct_mutex); |
11769 | intel_init_gt_powersave(dev); | 11782 | intel_init_gt_powersave(dev); |
11770 | mutex_unlock(&dev->struct_mutex); | 11783 | mutex_unlock(&dev->struct_mutex); |
11771 | 11784 | ||
11772 | intel_modeset_init_hw(dev); | 11785 | intel_modeset_init_hw(dev); |
11773 | 11786 | ||
11774 | intel_setup_overlay(dev); | 11787 | intel_setup_overlay(dev); |
11775 | 11788 | ||
11776 | /* | 11789 | /* |
11777 | * Make sure any fbs we allocated at startup are properly | 11790 | * Make sure any fbs we allocated at startup are properly |
11778 | * pinned & fenced. When we do the allocation it's too early | 11791 | * pinned & fenced. When we do the allocation it's too early |
11779 | * for this. | 11792 | * for this. |
11780 | */ | 11793 | */ |
11781 | mutex_lock(&dev->struct_mutex); | 11794 | mutex_lock(&dev->struct_mutex); |
11782 | list_for_each_entry(c, &dev->mode_config.crtc_list, head) { | 11795 | list_for_each_entry(c, &dev->mode_config.crtc_list, head) { |
11783 | if (!c->primary->fb) | 11796 | if (!c->primary->fb) |
11784 | continue; | 11797 | continue; |
11785 | 11798 | ||
11786 | fb = to_intel_framebuffer(c->primary->fb); | 11799 | fb = to_intel_framebuffer(c->primary->fb); |
11787 | if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) { | 11800 | if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) { |
11788 | DRM_ERROR("failed to pin boot fb on pipe %d\n", | 11801 | DRM_ERROR("failed to pin boot fb on pipe %d\n", |
11789 | to_intel_crtc(c)->pipe); | 11802 | to_intel_crtc(c)->pipe); |
11790 | drm_framebuffer_unreference(c->primary->fb); | 11803 | drm_framebuffer_unreference(c->primary->fb); |
11791 | c->primary->fb = NULL; | 11804 | c->primary->fb = NULL; |
11792 | } | 11805 | } |
11793 | } | 11806 | } |
11794 | mutex_unlock(&dev->struct_mutex); | 11807 | mutex_unlock(&dev->struct_mutex); |
11795 | } | 11808 | } |
11796 | 11809 | ||
11797 | void intel_connector_unregister(struct intel_connector *intel_connector) | 11810 | void intel_connector_unregister(struct intel_connector *intel_connector) |
11798 | { | 11811 | { |
11799 | struct drm_connector *connector = &intel_connector->base; | 11812 | struct drm_connector *connector = &intel_connector->base; |
11800 | 11813 | ||
11801 | intel_panel_destroy_backlight(connector); | 11814 | intel_panel_destroy_backlight(connector); |
11802 | drm_sysfs_connector_remove(connector); | 11815 | drm_sysfs_connector_remove(connector); |
11803 | } | 11816 | } |
11804 | 11817 | ||
11805 | void intel_modeset_cleanup(struct drm_device *dev) | 11818 | void intel_modeset_cleanup(struct drm_device *dev) |
11806 | { | 11819 | { |
11807 | struct drm_i915_private *dev_priv = dev->dev_private; | 11820 | struct drm_i915_private *dev_priv = dev->dev_private; |
11808 | struct drm_crtc *crtc; | 11821 | struct drm_crtc *crtc; |
11809 | struct drm_connector *connector; | 11822 | struct drm_connector *connector; |
11810 | 11823 | ||
11811 | /* | 11824 | /* |
11812 | * Interrupts and polling as the first thing to avoid creating havoc. | 11825 | * Interrupts and polling as the first thing to avoid creating havoc. |
11813 | * Too much stuff here (turning of rps, connectors, ...) would | 11826 | * Too much stuff here (turning of rps, connectors, ...) would |
11814 | * experience fancy races otherwise. | 11827 | * experience fancy races otherwise. |
11815 | */ | 11828 | */ |
11816 | drm_irq_uninstall(dev); | 11829 | drm_irq_uninstall(dev); |
11817 | cancel_work_sync(&dev_priv->hotplug_work); | 11830 | cancel_work_sync(&dev_priv->hotplug_work); |
11818 | /* | 11831 | /* |
11819 | * Due to the hpd irq storm handling the hotplug work can re-arm the | 11832 | * Due to the hpd irq storm handling the hotplug work can re-arm the |
11820 | * poll handlers. Hence disable polling after hpd handling is shut down. | 11833 | * poll handlers. Hence disable polling after hpd handling is shut down. |
11821 | */ | 11834 | */ |
11822 | drm_kms_helper_poll_fini(dev); | 11835 | drm_kms_helper_poll_fini(dev); |
11823 | 11836 | ||
11824 | mutex_lock(&dev->struct_mutex); | 11837 | mutex_lock(&dev->struct_mutex); |
11825 | 11838 | ||
11826 | intel_unregister_dsm_handler(); | 11839 | intel_unregister_dsm_handler(); |
11827 | 11840 | ||
11828 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 11841 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
11829 | /* Skip inactive CRTCs */ | 11842 | /* Skip inactive CRTCs */ |
11830 | if (!crtc->primary->fb) | 11843 | if (!crtc->primary->fb) |
11831 | continue; | 11844 | continue; |
11832 | 11845 | ||
11833 | intel_increase_pllclock(crtc); | 11846 | intel_increase_pllclock(crtc); |
11834 | } | 11847 | } |
11835 | 11848 | ||
11836 | intel_disable_fbc(dev); | 11849 | intel_disable_fbc(dev); |
11837 | 11850 | ||
11838 | intel_disable_gt_powersave(dev); | 11851 | intel_disable_gt_powersave(dev); |
11839 | 11852 | ||
11840 | ironlake_teardown_rc6(dev); | 11853 | ironlake_teardown_rc6(dev); |
11841 | 11854 | ||
11842 | mutex_unlock(&dev->struct_mutex); | 11855 | mutex_unlock(&dev->struct_mutex); |
11843 | 11856 | ||
11844 | /* flush any delayed tasks or pending work */ | 11857 | /* flush any delayed tasks or pending work */ |
11845 | flush_scheduled_work(); | 11858 | flush_scheduled_work(); |
11846 | 11859 | ||
11847 | /* destroy the backlight and sysfs files before encoders/connectors */ | 11860 | /* destroy the backlight and sysfs files before encoders/connectors */ |
11848 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 11861 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
11849 | struct intel_connector *intel_connector; | 11862 | struct intel_connector *intel_connector; |
11850 | 11863 | ||
11851 | intel_connector = to_intel_connector(connector); | 11864 | intel_connector = to_intel_connector(connector); |
11852 | intel_connector->unregister(intel_connector); | 11865 | intel_connector->unregister(intel_connector); |
11853 | } | 11866 | } |
11854 | 11867 | ||
11855 | drm_mode_config_cleanup(dev); | 11868 | drm_mode_config_cleanup(dev); |
11856 | 11869 | ||
11857 | intel_cleanup_overlay(dev); | 11870 | intel_cleanup_overlay(dev); |
11858 | 11871 | ||
11859 | mutex_lock(&dev->struct_mutex); | 11872 | mutex_lock(&dev->struct_mutex); |
11860 | intel_cleanup_gt_powersave(dev); | 11873 | intel_cleanup_gt_powersave(dev); |
11861 | mutex_unlock(&dev->struct_mutex); | 11874 | mutex_unlock(&dev->struct_mutex); |
11862 | } | 11875 | } |
11863 | 11876 | ||
/*
 * Return which encoder is currently attached for connector.
 *
 * i915 connectors have a fixed 1:1 connector/encoder mapping, so this
 * simply unwraps the encoder stored in the intel_connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
	return &intel_attached_encoder(connector)->base;
}
11871 | 11884 | ||
/*
 * Bind @encoder to @connector: record the driver-side link and register
 * the pairing with the DRM core so userspace can see it.
 */
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}
11879 | 11892 | ||
/*
 * set vga decode state - true == enable VGA decode
 *
 * Toggles the VGA-disable bit in the bridge device's GMCH control word
 * via PCI config space.  Returns 0 on success (including when the bit
 * is already in the requested state) or -EIO if the config space
 * read/write fails.
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* The control word lives at a different offset on SNB and later. */
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	/* Skip the write entirely if hardware already matches @state. */
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}
11909 | 11922 | ||
/*
 * Snapshot of display hardware state taken at GPU error time, consumed
 * by intel_display_print_error_state() when the error state is dumped.
 */
struct intel_display_error_state {

	/* HSW/BDW power well driver register (HSW_PWR_WELL_DRIVER). */
	u32 power_well_driver;

	/* Number of valid entries in transcoder[] below. */
	int num_transcoders;

	/* Per-pipe cursor plane registers. */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* Software power-domain state; registers below are only
		 * captured when this is true. */
		bool power_domain_on;
		u32 source;
	} pipe[I915_MAX_PIPES];

	/* Per-pipe primary plane registers. */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		/* Timing registers for this transcoder. */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
11952 | 11965 | ||
/*
 * Capture a snapshot of display state for the GPU error dump.
 *
 * Called from the error-capture path, hence the GFP_ATOMIC allocation.
 * Pipes and transcoders whose power domain is off (per the software
 * power-domain tracking) are skipped to avoid reading powered-down
 * registers.  Returns NULL if there are no pipes or allocation fails;
 * the caller owns the returned buffer.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	/* Indexed by the transcoder capture loop below; TRANSCODER_EDP is
	 * only reached on DDI platforms (see num_transcoders bump). */
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(i) {
		error->pipe[i].power_domain_on =
			intel_display_power_enabled_sw(dev_priv,
						       POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		/* Cursor registers moved on IVB and later (except VLV). */
		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
			error->cursor[i].control = I915_READ(CURCNTR(i));
			error->cursor[i].position = I915_READ(CURPOS(i));
			error->cursor[i].base = I915_READ(CURBASE(i));
		} else {
			error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
			error->cursor[i].position = I915_READ(CURPOS_IVB(i));
			error->cursor[i].base = I915_READ(CURBASE_IVB(i));
		}

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));
	}

	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			intel_display_power_enabled_sw(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
12035 | 12048 | ||
/* Shorthand for appending formatted text to the error state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
12037 | 12050 | ||
/*
 * Dump a display error snapshot (from intel_display_capture_error_state())
 * into the error state buffer @m.  A NULL @error is a no-op.  Only fields
 * valid for the capture-time hardware generation are printed, mirroring
 * the conditions used at capture time.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   error->pipe[i].power_domain_on ? "on" : "off");
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %c\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   error->transcoder[i].power_domain_on ? "on" : "off");
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
12092 | 12105 |
drivers/gpu/drm/i915/intel_dp.c
1 | /* | 1 | /* |
2 | * Copyright © 2008 Intel Corporation | 2 | * Copyright © 2008 Intel Corporation |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation | 6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the | 8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: | 9 | * Software is furnished to do so, subject to the following conditions: |
10 | * | 10 | * |
11 | * The above copyright notice and this permission notice (including the next | 11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the | 12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. | 13 | * Software. |
14 | * | 14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
21 | * IN THE SOFTWARE. | 21 | * IN THE SOFTWARE. |
22 | * | 22 | * |
23 | * Authors: | 23 | * Authors: |
24 | * Keith Packard <keithp@keithp.com> | 24 | * Keith Packard <keithp@keithp.com> |
25 | * | 25 | * |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/i2c.h> | 28 | #include <linux/i2c.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/export.h> | 30 | #include <linux/export.h> |
31 | #include <drm/drmP.h> | 31 | #include <drm/drmP.h> |
32 | #include <drm/drm_crtc.h> | 32 | #include <drm/drm_crtc.h> |
33 | #include <drm/drm_crtc_helper.h> | 33 | #include <drm/drm_crtc_helper.h> |
34 | #include <drm/drm_edid.h> | 34 | #include <drm/drm_edid.h> |
35 | #include "intel_drv.h" | 35 | #include "intel_drv.h" |
36 | #include <drm/i915_drm.h> | 36 | #include <drm/i915_drm.h> |
37 | #include "i915_drv.h" | 37 | #include "i915_drv.h" |
38 | 38 | ||
39 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) | 39 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) |
40 | 40 | ||
/* Precomputed DPLL dividers for each supported DP link bandwidth. */
struct dp_link_dpll {
	int link_bw;		/* DP_LINK_BW_* code this entry applies to */
	struct dpll dpll;	/* divider values producing that link rate */
};

/* Gen4 (i965-class) DPLL settings. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* PCH-attached DP (ILK+) DPLL settings. */
static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* Valleyview DPLL settings. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
66 | 66 | ||
/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	/* The encoder type is fixed at init time, so this is stable. */
	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
80 | 80 | ||
/* Walk from an intel_dp back to its owning drm_device. */
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}
87 | 87 | ||
/* Return the intel_dp behind @connector's attached encoder. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
92 | 92 | ||
93 | static void intel_dp_link_down(struct intel_dp *intel_dp); | 93 | static void intel_dp_link_down(struct intel_dp *intel_dp); |
94 | static bool _edp_panel_vdd_on(struct intel_dp *intel_dp); | 94 | static bool _edp_panel_vdd_on(struct intel_dp *intel_dp); |
95 | static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); | 95 | static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); |
96 | 96 | ||
/*
 * Return the maximum usable DP link bandwidth code for this sink.
 *
 * Reads DP_MAX_LINK_RATE from the cached DPCD and clamps it: 5.4GHz
 * (HBR2) is only honoured on HSW/gen8+ sources talking to DP 1.2+
 * sinks; anything unrecognised falls back to 1.62Gbps with a warning.
 */
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
	struct drm_device *dev = intel_dp->attached_connector->base.dev;

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
		if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
		    intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
			max_link_bw = DP_LINK_BW_5_4;
		else
			max_link_bw = DP_LINK_BW_2_7;
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}
122 | 122 | ||
123 | /* | 123 | /* |
124 | * The units on the numbers in the next two are... bizarre. Examples will | 124 | * The units on the numbers in the next two are... bizarre. Examples will |
125 | * make it clearer; this one parallels an example in the eDP spec. | 125 | * make it clearer; this one parallels an example in the eDP spec. |
126 | * | 126 | * |
127 | * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: | 127 | * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: |
128 | * | 128 | * |
129 | * 270000 * 1 * 8 / 10 == 216000 | 129 | * 270000 * 1 * 8 / 10 == 216000 |
130 | * | 130 | * |
131 | * The actual data capacity of that configuration is 2.16Gbit/s, so the | 131 | * The actual data capacity of that configuration is 2.16Gbit/s, so the |
132 | * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - | 132 | * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - |
133 | * or equivalently, kilopixels per second - so for 1680x1050R it'd be | 133 | * or equivalently, kilopixels per second - so for 1680x1050R it'd be |
134 | * 119000. At 18bpp that's 2142000 kilobits per second. | 134 | * 119000. At 18bpp that's 2142000 kilobits per second. |
135 | * | 135 | * |
136 | * Thus the strange-looking division by 10 in intel_dp_link_required, to | 136 | * Thus the strange-looking division by 10 in intel_dp_link_required, to |
137 | * get the result in decakilobits instead of kilobits. | 137 | * get the result in decakilobits instead of kilobits. |
138 | */ | 138 | */ |
139 | 139 | ||
/*
 * Bandwidth needed for @pixel_clock (kHz) at @bpp bits per pixel,
 * in decakilobits per second (see the unit discussion above).
 * The +9 makes the division round up.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kbits_per_sec = pixel_clock * bpp;

	return (kbits_per_sec + 9) / 10;
}
145 | 145 | ||
/*
 * Usable payload capacity of the link in decakilobits per second:
 * raw symbol rate times lane count, scaled by 8/10 for 8b/10b coding.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int symbol_rate = max_link_clock * max_lanes;

	return symbol_rate * 8 / 10;
}
151 | 151 | ||
152 | static enum drm_mode_status | 152 | static enum drm_mode_status |
153 | intel_dp_mode_valid(struct drm_connector *connector, | 153 | intel_dp_mode_valid(struct drm_connector *connector, |
154 | struct drm_display_mode *mode) | 154 | struct drm_display_mode *mode) |
155 | { | 155 | { |
156 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 156 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
157 | struct intel_connector *intel_connector = to_intel_connector(connector); | 157 | struct intel_connector *intel_connector = to_intel_connector(connector); |
158 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; | 158 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; |
159 | int target_clock = mode->clock; | 159 | int target_clock = mode->clock; |
160 | int max_rate, mode_rate, max_lanes, max_link_clock; | 160 | int max_rate, mode_rate, max_lanes, max_link_clock; |
161 | 161 | ||
162 | if (is_edp(intel_dp) && fixed_mode) { | 162 | if (is_edp(intel_dp) && fixed_mode) { |
163 | if (mode->hdisplay > fixed_mode->hdisplay) | 163 | if (mode->hdisplay > fixed_mode->hdisplay) |
164 | return MODE_PANEL; | 164 | return MODE_PANEL; |
165 | 165 | ||
166 | if (mode->vdisplay > fixed_mode->vdisplay) | 166 | if (mode->vdisplay > fixed_mode->vdisplay) |
167 | return MODE_PANEL; | 167 | return MODE_PANEL; |
168 | 168 | ||
169 | target_clock = fixed_mode->clock; | 169 | target_clock = fixed_mode->clock; |
170 | } | 170 | } |
171 | 171 | ||
172 | max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); | 172 | max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); |
173 | max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); | 173 | max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); |
174 | 174 | ||
175 | max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); | 175 | max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); |
176 | mode_rate = intel_dp_link_required(target_clock, 18); | 176 | mode_rate = intel_dp_link_required(target_clock, 18); |
177 | 177 | ||
178 | if (mode_rate > max_rate) | 178 | if (mode_rate > max_rate) |
179 | return MODE_CLOCK_HIGH; | 179 | return MODE_CLOCK_HIGH; |
180 | 180 | ||
181 | if (mode->clock < 10000) | 181 | if (mode->clock < 10000) |
182 | return MODE_CLOCK_LOW; | 182 | return MODE_CLOCK_LOW; |
183 | 183 | ||
184 | if (mode->flags & DRM_MODE_FLAG_DBLCLK) | 184 | if (mode->flags & DRM_MODE_FLAG_DBLCLK) |
185 | return MODE_H_ILLEGAL; | 185 | return MODE_H_ILLEGAL; |
186 | 186 | ||
187 | return MODE_OK; | 187 | return MODE_OK; |
188 | } | 188 | } |
189 | 189 | ||
/*
 * Pack up to the first four bytes of @src into a big-endian u32,
 * matching the AUX channel data register layout.  Extra source
 * bytes beyond four are ignored.
 */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	uint32_t packed = 0;
	int count = src_bytes;
	int byte;

	if (count > 4)
		count = 4;
	for (byte = 0; byte < count; byte++)
		packed |= (uint32_t)src[byte] << (24 - byte * 8);

	return packed;
}
202 | 202 | ||
/*
 * Unpack a big-endian u32 AUX data word into up to four bytes of
 * @dst, the inverse of pack_aux().  At most four bytes are written.
 */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int count = dst_bytes;
	int byte;

	if (count > 4)
		count = 4;
	for (byte = 0; byte < count; byte++)
		dst[byte] = (uint8_t)(src >> (24 - byte * 8));
}
212 | 212 | ||
/* hrawclock is 1/4 the FSB frequency */
/*
 * Return the raw hardware clock in MHz, derived from the FSB frequency
 * reported in the CLKCFG register.  VLV has no CLKCFG and is hardwired
 * to 200 MHz.  Unknown CLKCFG encodings fall back to 133 MHz.
 */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}
246 | 246 | ||
247 | static void | 247 | static void |
248 | intel_dp_init_panel_power_sequencer(struct drm_device *dev, | 248 | intel_dp_init_panel_power_sequencer(struct drm_device *dev, |
249 | struct intel_dp *intel_dp, | 249 | struct intel_dp *intel_dp, |
250 | struct edp_power_seq *out); | 250 | struct edp_power_seq *out); |
251 | static void | 251 | static void |
252 | intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, | 252 | intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, |
253 | struct intel_dp *intel_dp, | 253 | struct intel_dp *intel_dp, |
254 | struct edp_power_seq *out); | 254 | struct edp_power_seq *out); |
255 | 255 | ||
/*
 * Return the pipe whose panel power sequencer is (or should be) used
 * for this DP port on VLV.  If the encoder already has a CRTC, that
 * pipe is authoritative; otherwise probe the per-pipe PP_ON_DELAYS
 * port-select field to find a pipe the BIOS pointed at this port.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	enum pipe pipe;

	/* modeset should have pipe */
	if (crtc)
		return to_intel_crtc(crtc)->pipe;

	/* init time, try to find a pipe with this port selected */
	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;
		if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
			return pipe;
		if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
			return pipe;
	}

	/* shrug: no pipe claims this port, default to pipe A */
	return PIPE_A;
}
283 | 283 | ||
284 | static u32 _pp_ctrl_reg(struct intel_dp *intel_dp) | 284 | static u32 _pp_ctrl_reg(struct intel_dp *intel_dp) |
285 | { | 285 | { |
286 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 286 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
287 | 287 | ||
288 | if (HAS_PCH_SPLIT(dev)) | 288 | if (HAS_PCH_SPLIT(dev)) |
289 | return PCH_PP_CONTROL; | 289 | return PCH_PP_CONTROL; |
290 | else | 290 | else |
291 | return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp)); | 291 | return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp)); |
292 | } | 292 | } |
293 | 293 | ||
294 | static u32 _pp_stat_reg(struct intel_dp *intel_dp) | 294 | static u32 _pp_stat_reg(struct intel_dp *intel_dp) |
295 | { | 295 | { |
296 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 296 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
297 | 297 | ||
298 | if (HAS_PCH_SPLIT(dev)) | 298 | if (HAS_PCH_SPLIT(dev)) |
299 | return PCH_PP_STATUS; | 299 | return PCH_PP_STATUS; |
300 | else | 300 | else |
301 | return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); | 301 | return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); |
302 | } | 302 | } |
303 | 303 | ||
304 | static bool edp_have_panel_power(struct intel_dp *intel_dp) | 304 | static bool edp_have_panel_power(struct intel_dp *intel_dp) |
305 | { | 305 | { |
306 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 306 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
307 | struct drm_i915_private *dev_priv = dev->dev_private; | 307 | struct drm_i915_private *dev_priv = dev->dev_private; |
308 | 308 | ||
309 | return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; | 309 | return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; |
310 | } | 310 | } |
311 | 311 | ||
312 | static bool edp_have_panel_vdd(struct intel_dp *intel_dp) | 312 | static bool edp_have_panel_vdd(struct intel_dp *intel_dp) |
313 | { | 313 | { |
314 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 314 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
315 | struct drm_i915_private *dev_priv = dev->dev_private; | 315 | struct drm_i915_private *dev_priv = dev->dev_private; |
316 | 316 | ||
317 | return !dev_priv->pm.suspended && | 317 | return !dev_priv->pm.suspended && |
318 | (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0; | 318 | (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0; |
319 | } | 319 | } |
320 | 320 | ||
321 | static void | 321 | static void |
322 | intel_dp_check_edp(struct intel_dp *intel_dp) | 322 | intel_dp_check_edp(struct intel_dp *intel_dp) |
323 | { | 323 | { |
324 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 324 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
325 | struct drm_i915_private *dev_priv = dev->dev_private; | 325 | struct drm_i915_private *dev_priv = dev->dev_private; |
326 | 326 | ||
327 | if (!is_edp(intel_dp)) | 327 | if (!is_edp(intel_dp)) |
328 | return; | 328 | return; |
329 | 329 | ||
330 | if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) { | 330 | if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) { |
331 | WARN(1, "eDP powered off while attempting aux channel communication.\n"); | 331 | WARN(1, "eDP powered off while attempting aux channel communication.\n"); |
332 | DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", | 332 | DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", |
333 | I915_READ(_pp_stat_reg(intel_dp)), | 333 | I915_READ(_pp_stat_reg(intel_dp)), |
334 | I915_READ(_pp_ctrl_reg(intel_dp))); | 334 | I915_READ(_pp_ctrl_reg(intel_dp))); |
335 | } | 335 | } |
336 | } | 336 | } |
337 | 337 | ||
/*
 * Wait for the in-flight AUX transaction to complete, i.e. for
 * DP_AUX_CH_CTL_SEND_BUSY to clear, with a 10ms timeout.  Uses the
 * gmbus/AUX interrupt when available, otherwise polls atomically.
 * Returns the last value read from the channel control register so
 * the caller can inspect DONE/error bits.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C is the wait condition; note it re-reads the control register and
 * updates 'status' as a side effect on every evaluation. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
361 | 361 | ||
362 | static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index) | 362 | static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index) |
363 | { | 363 | { |
364 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 364 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
365 | struct drm_device *dev = intel_dig_port->base.base.dev; | 365 | struct drm_device *dev = intel_dig_port->base.base.dev; |
366 | 366 | ||
367 | /* | 367 | /* |
368 | * The clock divider is based off the hrawclk, and would like to run at | 368 | * The clock divider is based off the hrawclk, and would like to run at |
369 | * 2MHz. So, take the hrawclk value and divide by 2 and use that | 369 | * 2MHz. So, take the hrawclk value and divide by 2 and use that |
370 | */ | 370 | */ |
371 | return index ? 0 : intel_hrawclk(dev) / 2; | 371 | return index ? 0 : intel_hrawclk(dev) / 2; |
372 | } | 372 | } |
373 | 373 | ||
374 | static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) | 374 | static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) |
375 | { | 375 | { |
376 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 376 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
377 | struct drm_device *dev = intel_dig_port->base.base.dev; | 377 | struct drm_device *dev = intel_dig_port->base.base.dev; |
378 | 378 | ||
379 | if (index) | 379 | if (index) |
380 | return 0; | 380 | return 0; |
381 | 381 | ||
382 | if (intel_dig_port->port == PORT_A) { | 382 | if (intel_dig_port->port == PORT_A) { |
383 | if (IS_GEN6(dev) || IS_GEN7(dev)) | 383 | if (IS_GEN6(dev) || IS_GEN7(dev)) |
384 | return 200; /* SNB & IVB eDP input clock at 400Mhz */ | 384 | return 200; /* SNB & IVB eDP input clock at 400Mhz */ |
385 | else | 385 | else |
386 | return 225; /* eDP input clock at 450Mhz */ | 386 | return 225; /* eDP input clock at 450Mhz */ |
387 | } else { | 387 | } else { |
388 | return DIV_ROUND_UP(intel_pch_rawclk(dev), 2); | 388 | return DIV_ROUND_UP(intel_pch_rawclk(dev), 2); |
389 | } | 389 | } |
390 | } | 390 | } |
391 | 391 | ||
392 | static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) | 392 | static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) |
393 | { | 393 | { |
394 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 394 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
395 | struct drm_device *dev = intel_dig_port->base.base.dev; | 395 | struct drm_device *dev = intel_dig_port->base.base.dev; |
396 | struct drm_i915_private *dev_priv = dev->dev_private; | 396 | struct drm_i915_private *dev_priv = dev->dev_private; |
397 | 397 | ||
398 | if (intel_dig_port->port == PORT_A) { | 398 | if (intel_dig_port->port == PORT_A) { |
399 | if (index) | 399 | if (index) |
400 | return 0; | 400 | return 0; |
401 | return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000); | 401 | return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000); |
402 | } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { | 402 | } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { |
403 | /* Workaround for non-ULT HSW */ | 403 | /* Workaround for non-ULT HSW */ |
404 | switch (index) { | 404 | switch (index) { |
405 | case 0: return 63; | 405 | case 0: return 63; |
406 | case 1: return 72; | 406 | case 1: return 72; |
407 | default: return 0; | 407 | default: return 0; |
408 | } | 408 | } |
409 | } else { | 409 | } else { |
410 | return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); | 410 | return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); |
411 | } | 411 | } |
412 | } | 412 | } |
413 | 413 | ||
414 | static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index) | 414 | static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index) |
415 | { | 415 | { |
416 | return index ? 0 : 100; | 416 | return index ? 0 : 100; |
417 | } | 417 | } |
418 | 418 | ||
419 | static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp, | 419 | static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp, |
420 | bool has_aux_irq, | 420 | bool has_aux_irq, |
421 | int send_bytes, | 421 | int send_bytes, |
422 | uint32_t aux_clock_divider) | 422 | uint32_t aux_clock_divider) |
423 | { | 423 | { |
424 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 424 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
425 | struct drm_device *dev = intel_dig_port->base.base.dev; | 425 | struct drm_device *dev = intel_dig_port->base.base.dev; |
426 | uint32_t precharge, timeout; | 426 | uint32_t precharge, timeout; |
427 | 427 | ||
428 | if (IS_GEN6(dev)) | 428 | if (IS_GEN6(dev)) |
429 | precharge = 3; | 429 | precharge = 3; |
430 | else | 430 | else |
431 | precharge = 5; | 431 | precharge = 5; |
432 | 432 | ||
433 | if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL) | 433 | if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL) |
434 | timeout = DP_AUX_CH_CTL_TIME_OUT_600us; | 434 | timeout = DP_AUX_CH_CTL_TIME_OUT_600us; |
435 | else | 435 | else |
436 | timeout = DP_AUX_CH_CTL_TIME_OUT_400us; | 436 | timeout = DP_AUX_CH_CTL_TIME_OUT_400us; |
437 | 437 | ||
438 | return DP_AUX_CH_CTL_SEND_BUSY | | 438 | return DP_AUX_CH_CTL_SEND_BUSY | |
439 | DP_AUX_CH_CTL_DONE | | 439 | DP_AUX_CH_CTL_DONE | |
440 | (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | | 440 | (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | |
441 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | 441 | DP_AUX_CH_CTL_TIME_OUT_ERROR | |
442 | timeout | | 442 | timeout | |
443 | DP_AUX_CH_CTL_RECEIVE_ERROR | | 443 | DP_AUX_CH_CTL_RECEIVE_ERROR | |
444 | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | | 444 | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | |
445 | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | | 445 | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | |
446 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); | 446 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); |
447 | } | 447 | } |
448 | 448 | ||
/*
 * Perform one raw AUX channel transaction: write send_bytes from 'send'
 * to the data registers, kick off the transfer, and unload up to
 * recv_size reply bytes into 'recv'.  Returns the number of bytes
 * received, or a negative errno (-EBUSY, -E2BIG, -EIO, -ETIMEDOUT).
 * Takes care of eDP VDD, PM QoS and display runtime PM around the
 * hardware access.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	/* data registers immediately follow the control register */
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	/* On eDP, force VDD on for the duration; 'vdd' remembers whether
	 * we actually enabled it and so must drop it at the end. */
	vdd = _edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop walks the candidate clock dividers (divider 0 ends
	 * the list); inner loop retries the transaction per DP spec. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   pack_aux(send + i, send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	return ret;
}
577 | 577 | ||
578 | #define BARE_ADDRESS_SIZE 3 | 578 | #define BARE_ADDRESS_SIZE 3 |
579 | #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1) | 579 | #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1) |
580 | static ssize_t | 580 | static ssize_t |
581 | intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) | 581 | intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) |
582 | { | 582 | { |
583 | struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); | 583 | struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); |
584 | uint8_t txbuf[20], rxbuf[20]; | 584 | uint8_t txbuf[20], rxbuf[20]; |
585 | size_t txsize, rxsize; | 585 | size_t txsize, rxsize; |
586 | int ret; | 586 | int ret; |
587 | 587 | ||
588 | txbuf[0] = msg->request << 4; | 588 | txbuf[0] = msg->request << 4; |
589 | txbuf[1] = msg->address >> 8; | 589 | txbuf[1] = msg->address >> 8; |
590 | txbuf[2] = msg->address & 0xff; | 590 | txbuf[2] = msg->address & 0xff; |
591 | txbuf[3] = msg->size - 1; | 591 | txbuf[3] = msg->size - 1; |
592 | 592 | ||
593 | switch (msg->request & ~DP_AUX_I2C_MOT) { | 593 | switch (msg->request & ~DP_AUX_I2C_MOT) { |
594 | case DP_AUX_NATIVE_WRITE: | 594 | case DP_AUX_NATIVE_WRITE: |
595 | case DP_AUX_I2C_WRITE: | 595 | case DP_AUX_I2C_WRITE: |
596 | txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE; | 596 | txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE; |
597 | rxsize = 1; | 597 | rxsize = 1; |
598 | 598 | ||
599 | if (WARN_ON(txsize > 20)) | 599 | if (WARN_ON(txsize > 20)) |
600 | return -E2BIG; | 600 | return -E2BIG; |
601 | 601 | ||
602 | memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); | 602 | memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); |
603 | 603 | ||
604 | ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); | 604 | ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); |
605 | if (ret > 0) { | 605 | if (ret > 0) { |
606 | msg->reply = rxbuf[0] >> 4; | 606 | msg->reply = rxbuf[0] >> 4; |
607 | 607 | ||
608 | /* Return payload size. */ | 608 | /* Return payload size. */ |
609 | ret = msg->size; | 609 | ret = msg->size; |
610 | } | 610 | } |
611 | break; | 611 | break; |
612 | 612 | ||
613 | case DP_AUX_NATIVE_READ: | 613 | case DP_AUX_NATIVE_READ: |
614 | case DP_AUX_I2C_READ: | 614 | case DP_AUX_I2C_READ: |
615 | txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE; | 615 | txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE; |
616 | rxsize = msg->size + 1; | 616 | rxsize = msg->size + 1; |
617 | 617 | ||
618 | if (WARN_ON(rxsize > 20)) | 618 | if (WARN_ON(rxsize > 20)) |
619 | return -E2BIG; | 619 | return -E2BIG; |
620 | 620 | ||
621 | ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); | 621 | ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); |
622 | if (ret > 0) { | 622 | if (ret > 0) { |
623 | msg->reply = rxbuf[0] >> 4; | 623 | msg->reply = rxbuf[0] >> 4; |
624 | /* | 624 | /* |
625 | * Assume happy day, and copy the data. The caller is | 625 | * Assume happy day, and copy the data. The caller is |
626 | * expected to check msg->reply before touching it. | 626 | * expected to check msg->reply before touching it. |
627 | * | 627 | * |
628 | * Return payload size. | 628 | * Return payload size. |
629 | */ | 629 | */ |
630 | ret--; | 630 | ret--; |
631 | memcpy(msg->buffer, rxbuf + 1, ret); | 631 | memcpy(msg->buffer, rxbuf + 1, ret); |
632 | } | 632 | } |
633 | break; | 633 | break; |
634 | 634 | ||
635 | default: | 635 | default: |
636 | ret = -EINVAL; | 636 | ret = -EINVAL; |
637 | break; | 637 | break; |
638 | } | 638 | } |
639 | 639 | ||
640 | return ret; | 640 | return ret; |
641 | } | 641 | } |
642 | 642 | ||
/*
 * Set up the AUX channel for this DP port and register it with the DRM
 * DP AUX i2c infrastructure: picks the per-port AUX_CH_CTL register
 * (DDI platforms use the dedicated registers, others derive it from
 * output_reg + 0x10), registers the i2c bus, and links the ddc device
 * into the connector's sysfs directory.  Failures are logged; on sysfs
 * link failure the i2c bus registration is rolled back.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/* Non-DDI platforms locate the AUX control reg relative to the
	 * port's output register instead. */
	if (!HAS_DDI(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register_i2c_bus(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register_i2c_bus() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	/* Expose the ddc device under the connector's sysfs directory;
	 * removed again in intel_dp_connector_unregister(). */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
	}
}
698 | 698 | ||
699 | static void | 699 | static void |
700 | intel_dp_connector_unregister(struct intel_connector *intel_connector) | 700 | intel_dp_connector_unregister(struct intel_connector *intel_connector) |
701 | { | 701 | { |
702 | struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); | 702 | struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); |
703 | 703 | ||
704 | sysfs_remove_link(&intel_connector->base.kdev->kobj, | 704 | sysfs_remove_link(&intel_connector->base.kdev->kobj, |
705 | intel_dp->aux.ddc.dev.kobj.name); | 705 | intel_dp->aux.ddc.dev.kobj.name); |
706 | intel_connector_unregister(intel_connector); | 706 | intel_connector_unregister(intel_connector); |
707 | } | 707 | } |
708 | 708 | ||
709 | static void | 709 | static void |
710 | intel_dp_set_clock(struct intel_encoder *encoder, | 710 | intel_dp_set_clock(struct intel_encoder *encoder, |
711 | struct intel_crtc_config *pipe_config, int link_bw) | 711 | struct intel_crtc_config *pipe_config, int link_bw) |
712 | { | 712 | { |
713 | struct drm_device *dev = encoder->base.dev; | 713 | struct drm_device *dev = encoder->base.dev; |
714 | const struct dp_link_dpll *divisor = NULL; | 714 | const struct dp_link_dpll *divisor = NULL; |
715 | int i, count = 0; | 715 | int i, count = 0; |
716 | 716 | ||
717 | if (IS_G4X(dev)) { | 717 | if (IS_G4X(dev)) { |
718 | divisor = gen4_dpll; | 718 | divisor = gen4_dpll; |
719 | count = ARRAY_SIZE(gen4_dpll); | 719 | count = ARRAY_SIZE(gen4_dpll); |
720 | } else if (IS_HASWELL(dev)) { | 720 | } else if (IS_HASWELL(dev)) { |
721 | /* Haswell has special-purpose DP DDI clocks. */ | 721 | /* Haswell has special-purpose DP DDI clocks. */ |
722 | } else if (HAS_PCH_SPLIT(dev)) { | 722 | } else if (HAS_PCH_SPLIT(dev)) { |
723 | divisor = pch_dpll; | 723 | divisor = pch_dpll; |
724 | count = ARRAY_SIZE(pch_dpll); | 724 | count = ARRAY_SIZE(pch_dpll); |
725 | } else if (IS_VALLEYVIEW(dev)) { | 725 | } else if (IS_VALLEYVIEW(dev)) { |
726 | divisor = vlv_dpll; | 726 | divisor = vlv_dpll; |
727 | count = ARRAY_SIZE(vlv_dpll); | 727 | count = ARRAY_SIZE(vlv_dpll); |
728 | } | 728 | } |
729 | 729 | ||
730 | if (divisor && count) { | 730 | if (divisor && count) { |
731 | for (i = 0; i < count; i++) { | 731 | for (i = 0; i < count; i++) { |
732 | if (link_bw == divisor[i].link_bw) { | 732 | if (link_bw == divisor[i].link_bw) { |
733 | pipe_config->dpll = divisor[i].dpll; | 733 | pipe_config->dpll = divisor[i].dpll; |
734 | pipe_config->clock_set = true; | 734 | pipe_config->clock_set = true; |
735 | break; | 735 | break; |
736 | } | 736 | } |
737 | } | 737 | } |
738 | } | 738 | } |
739 | } | 739 | } |
740 | 740 | ||
741 | bool | 741 | bool |
742 | intel_dp_compute_config(struct intel_encoder *encoder, | 742 | intel_dp_compute_config(struct intel_encoder *encoder, |
743 | struct intel_crtc_config *pipe_config) | 743 | struct intel_crtc_config *pipe_config) |
744 | { | 744 | { |
745 | struct drm_device *dev = encoder->base.dev; | 745 | struct drm_device *dev = encoder->base.dev; |
746 | struct drm_i915_private *dev_priv = dev->dev_private; | 746 | struct drm_i915_private *dev_priv = dev->dev_private; |
747 | struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; | 747 | struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; |
748 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 748 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
749 | enum port port = dp_to_dig_port(intel_dp)->port; | 749 | enum port port = dp_to_dig_port(intel_dp)->port; |
750 | struct intel_crtc *intel_crtc = encoder->new_crtc; | 750 | struct intel_crtc *intel_crtc = encoder->new_crtc; |
751 | struct intel_connector *intel_connector = intel_dp->attached_connector; | 751 | struct intel_connector *intel_connector = intel_dp->attached_connector; |
752 | int lane_count, clock; | 752 | int lane_count, clock; |
753 | int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); | 753 | int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); |
754 | /* Conveniently, the link BW constants become indices with a shift...*/ | 754 | /* Conveniently, the link BW constants become indices with a shift...*/ |
755 | int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; | 755 | int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; |
756 | int bpp, mode_rate; | 756 | int bpp, mode_rate; |
757 | static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; | 757 | static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; |
758 | int link_avail, link_clock; | 758 | int link_avail, link_clock; |
759 | 759 | ||
760 | if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) | 760 | if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) |
761 | pipe_config->has_pch_encoder = true; | 761 | pipe_config->has_pch_encoder = true; |
762 | 762 | ||
763 | pipe_config->has_dp_encoder = true; | 763 | pipe_config->has_dp_encoder = true; |
764 | 764 | ||
765 | if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { | 765 | if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { |
766 | intel_fixed_panel_mode(intel_connector->panel.fixed_mode, | 766 | intel_fixed_panel_mode(intel_connector->panel.fixed_mode, |
767 | adjusted_mode); | 767 | adjusted_mode); |
768 | if (!HAS_PCH_SPLIT(dev)) | 768 | if (!HAS_PCH_SPLIT(dev)) |
769 | intel_gmch_panel_fitting(intel_crtc, pipe_config, | 769 | intel_gmch_panel_fitting(intel_crtc, pipe_config, |
770 | intel_connector->panel.fitting_mode); | 770 | intel_connector->panel.fitting_mode); |
771 | else | 771 | else |
772 | intel_pch_panel_fitting(intel_crtc, pipe_config, | 772 | intel_pch_panel_fitting(intel_crtc, pipe_config, |
773 | intel_connector->panel.fitting_mode); | 773 | intel_connector->panel.fitting_mode); |
774 | } | 774 | } |
775 | 775 | ||
776 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) | 776 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) |
777 | return false; | 777 | return false; |
778 | 778 | ||
779 | DRM_DEBUG_KMS("DP link computation with max lane count %i " | 779 | DRM_DEBUG_KMS("DP link computation with max lane count %i " |
780 | "max bw %02x pixel clock %iKHz\n", | 780 | "max bw %02x pixel clock %iKHz\n", |
781 | max_lane_count, bws[max_clock], | 781 | max_lane_count, bws[max_clock], |
782 | adjusted_mode->crtc_clock); | 782 | adjusted_mode->crtc_clock); |
783 | 783 | ||
784 | /* Walk through all bpp values. Luckily they're all nicely spaced with 2 | 784 | /* Walk through all bpp values. Luckily they're all nicely spaced with 2 |
785 | * bpc in between. */ | 785 | * bpc in between. */ |
786 | bpp = pipe_config->pipe_bpp; | 786 | bpp = pipe_config->pipe_bpp; |
787 | if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && | 787 | if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && |
788 | dev_priv->vbt.edp_bpp < bpp) { | 788 | dev_priv->vbt.edp_bpp < bpp) { |
789 | DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", | 789 | DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", |
790 | dev_priv->vbt.edp_bpp); | 790 | dev_priv->vbt.edp_bpp); |
791 | bpp = dev_priv->vbt.edp_bpp; | 791 | bpp = dev_priv->vbt.edp_bpp; |
792 | } | 792 | } |
793 | 793 | ||
794 | for (; bpp >= 6*3; bpp -= 2*3) { | 794 | for (; bpp >= 6*3; bpp -= 2*3) { |
795 | mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, | 795 | mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, |
796 | bpp); | 796 | bpp); |
797 | 797 | ||
798 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 798 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
799 | for (clock = 0; clock <= max_clock; clock++) { | 799 | for (clock = 0; clock <= max_clock; clock++) { |
800 | link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); | 800 | link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); |
801 | link_avail = intel_dp_max_data_rate(link_clock, | 801 | link_avail = intel_dp_max_data_rate(link_clock, |
802 | lane_count); | 802 | lane_count); |
803 | 803 | ||
804 | if (mode_rate <= link_avail) { | 804 | if (mode_rate <= link_avail) { |
805 | goto found; | 805 | goto found; |
806 | } | 806 | } |
807 | } | 807 | } |
808 | } | 808 | } |
809 | } | 809 | } |
810 | 810 | ||
811 | return false; | 811 | return false; |
812 | 812 | ||
813 | found: | 813 | found: |
814 | if (intel_dp->color_range_auto) { | 814 | if (intel_dp->color_range_auto) { |
815 | /* | 815 | /* |
816 | * See: | 816 | * See: |
817 | * CEA-861-E - 5.1 Default Encoding Parameters | 817 | * CEA-861-E - 5.1 Default Encoding Parameters |
818 | * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry | 818 | * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry |
819 | */ | 819 | */ |
820 | if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1) | 820 | if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1) |
821 | intel_dp->color_range = DP_COLOR_RANGE_16_235; | 821 | intel_dp->color_range = DP_COLOR_RANGE_16_235; |
822 | else | 822 | else |
823 | intel_dp->color_range = 0; | 823 | intel_dp->color_range = 0; |
824 | } | 824 | } |
825 | 825 | ||
826 | if (intel_dp->color_range) | 826 | if (intel_dp->color_range) |
827 | pipe_config->limited_color_range = true; | 827 | pipe_config->limited_color_range = true; |
828 | 828 | ||
829 | intel_dp->link_bw = bws[clock]; | 829 | intel_dp->link_bw = bws[clock]; |
830 | intel_dp->lane_count = lane_count; | 830 | intel_dp->lane_count = lane_count; |
831 | pipe_config->pipe_bpp = bpp; | 831 | pipe_config->pipe_bpp = bpp; |
832 | pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); | 832 | pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); |
833 | 833 | ||
834 | DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", | 834 | DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", |
835 | intel_dp->link_bw, intel_dp->lane_count, | 835 | intel_dp->link_bw, intel_dp->lane_count, |
836 | pipe_config->port_clock, bpp); | 836 | pipe_config->port_clock, bpp); |
837 | DRM_DEBUG_KMS("DP link bw required %i available %i\n", | 837 | DRM_DEBUG_KMS("DP link bw required %i available %i\n", |
838 | mode_rate, link_avail); | 838 | mode_rate, link_avail); |
839 | 839 | ||
840 | intel_link_compute_m_n(bpp, lane_count, | 840 | intel_link_compute_m_n(bpp, lane_count, |
841 | adjusted_mode->crtc_clock, | 841 | adjusted_mode->crtc_clock, |
842 | pipe_config->port_clock, | 842 | pipe_config->port_clock, |
843 | &pipe_config->dp_m_n); | 843 | &pipe_config->dp_m_n); |
844 | 844 | ||
845 | intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); | 845 | intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); |
846 | 846 | ||
847 | return true; | 847 | return true; |
848 | } | 848 | } |
849 | 849 | ||
/*
 * Program the CPU eDP (DP_A) PLL frequency select bits for the configured
 * port clock: 160MHz select for a 162000 kHz link, 270MHz select otherwise.
 * Mirrors the chosen bits into intel_dp->DP so later DP register writes
 * carry the same PLL selection.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
	/* Read-modify-write: clear the frequency field before setting it. */
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config.port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	/* Flush the write, then allow the PLL time to settle. */
	POSTING_READ(DP_A);
	udelay(500);
}
879 | 879 | ||
/*
 * Build the DP port register value (cached in intel_dp->DP) for the mode
 * being set: lane count, training-off state, sync polarities, audio enable
 * and pipe selection, with per-platform register layout differences handled
 * below.  The value is written to the hardware later in the enable sequence.
 */
static void intel_dp_mode_set(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 * 	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(&encoder->base, adjusted_mode);
	}

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU eDP: CPT-style training bits, pipe select at bit 29. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX/CPU layout: color range lives here on gmch platforms. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;
	} else {
		/* CPT PCH: most config moved to TRANS_DP_CTL (see comment above). */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (port == PORT_A && !IS_VALLEYVIEW(dev))
		ironlake_set_pll_cpu_edp(intel_dp);
}
957 | 957 | ||
/*
 * Mask/value pairs for wait_panel_status(): which PP_STATUS bits must
 * match for the panel to be considered fully on, fully off, or done with
 * a power cycle.  The literal 0 placeholders keep the four fields of each
 * pair visually aligned across the three definitions.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
966 | 966 | ||
967 | static void wait_panel_status(struct intel_dp *intel_dp, | 967 | static void wait_panel_status(struct intel_dp *intel_dp, |
968 | u32 mask, | 968 | u32 mask, |
969 | u32 value) | 969 | u32 value) |
970 | { | 970 | { |
971 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 971 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
972 | struct drm_i915_private *dev_priv = dev->dev_private; | 972 | struct drm_i915_private *dev_priv = dev->dev_private; |
973 | u32 pp_stat_reg, pp_ctrl_reg; | 973 | u32 pp_stat_reg, pp_ctrl_reg; |
974 | 974 | ||
975 | pp_stat_reg = _pp_stat_reg(intel_dp); | 975 | pp_stat_reg = _pp_stat_reg(intel_dp); |
976 | pp_ctrl_reg = _pp_ctrl_reg(intel_dp); | 976 | pp_ctrl_reg = _pp_ctrl_reg(intel_dp); |
977 | 977 | ||
978 | DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", | 978 | DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", |
979 | mask, value, | 979 | mask, value, |
980 | I915_READ(pp_stat_reg), | 980 | I915_READ(pp_stat_reg), |
981 | I915_READ(pp_ctrl_reg)); | 981 | I915_READ(pp_ctrl_reg)); |
982 | 982 | ||
983 | if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) { | 983 | if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) { |
984 | DRM_ERROR("Panel status timeout: status %08x control %08x\n", | 984 | DRM_ERROR("Panel status timeout: status %08x control %08x\n", |
985 | I915_READ(pp_stat_reg), | 985 | I915_READ(pp_stat_reg), |
986 | I915_READ(pp_ctrl_reg)); | 986 | I915_READ(pp_ctrl_reg)); |
987 | } | 987 | } |
988 | 988 | ||
989 | DRM_DEBUG_KMS("Wait complete\n"); | 989 | DRM_DEBUG_KMS("Wait complete\n"); |
990 | } | 990 | } |
991 | 991 | ||
/* Block until the power sequencer reports the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
997 | 997 | ||
/* Block until the power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1003 | 1003 | ||
/*
 * Honour the panel's minimum power-cycle delay: wait out any remainder of
 * the delay since the last power-down, then wait for the sequencer to
 * report the off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1015 | 1015 | ||
/* Wait out the remainder of the panel-power-to-backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1021 | 1021 | ||
/* Wait out the remainder of the backlight-off-to-power-down delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1027 | 1027 | ||
1028 | /* Read the current pp_control value, unlocking the register if it | 1028 | /* Read the current pp_control value, unlocking the register if it |
1029 | * is locked | 1029 | * is locked |
1030 | */ | 1030 | */ |
1031 | 1031 | ||
1032 | static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) | 1032 | static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) |
1033 | { | 1033 | { |
1034 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 1034 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1035 | struct drm_i915_private *dev_priv = dev->dev_private; | 1035 | struct drm_i915_private *dev_priv = dev->dev_private; |
1036 | u32 control; | 1036 | u32 control; |
1037 | 1037 | ||
1038 | control = I915_READ(_pp_ctrl_reg(intel_dp)); | 1038 | control = I915_READ(_pp_ctrl_reg(intel_dp)); |
1039 | control &= ~PANEL_UNLOCK_MASK; | 1039 | control &= ~PANEL_UNLOCK_MASK; |
1040 | control |= PANEL_UNLOCK_REGS; | 1040 | control |= PANEL_UNLOCK_REGS; |
1041 | return control; | 1041 | return control; |
1042 | } | 1042 | } |
1043 | 1043 | ||
/*
 * Force the eDP panel VDD rail on so the AUX channel can be used before
 * the panel is fully powered.  Returns true if this call newly requested
 * VDD (i.e. the caller is responsible for the matching vdd-off), false if
 * VDD was already requested or this is not an eDP panel.
 */
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	/* Capture "was VDD already wanted?" before we set the flag below. */
	bool need_to_disable = !intel_dp->want_panel_vdd;

	if (!is_edp(intel_dp))
		return false;

	intel_dp->want_panel_vdd = true;

	/* VDD already up in hardware: nothing to program, just report
	 * whether we newly claimed the request. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	/* Balanced by intel_runtime_pm_put() in edp_panel_vdd_off_sync(). */
	intel_runtime_pm_get(dev_priv);

	DRM_DEBUG_KMS("Turning eDP VDD on\n");

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1087 | 1087 | ||
1088 | void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) | 1088 | void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) |
1089 | { | 1089 | { |
1090 | if (is_edp(intel_dp)) { | 1090 | if (is_edp(intel_dp)) { |
1091 | bool vdd = _edp_panel_vdd_on(intel_dp); | 1091 | bool vdd = _edp_panel_vdd_on(intel_dp); |
1092 | 1092 | ||
1093 | WARN(!vdd, "eDP VDD already requested on\n"); | 1093 | WARN(!vdd, "eDP VDD already requested on\n"); |
1094 | } | 1094 | } |
1095 | } | 1095 | } |
1096 | 1096 | ||
/*
 * Synchronously drop the VDD force-on bit, provided nobody still wants
 * VDD and the hardware currently has it forced.  Releases the runtime PM
 * reference taken in _edp_panel_vdd_on().  Caller must hold
 * mode_config.mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("Turning eDP VDD off\n");

		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_FORCE_VDD;

		pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		pp_stat_reg = _pp_stat_reg(intel_dp);

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

		/* Dropping VDD with the panel off starts a power cycle;
		 * record when, so the next power-up can honour the delay. */
		if ((pp & POWER_TARGET_ON) == 0)
			intel_dp->last_power_cycle = jiffies;

		/* Pairs with intel_runtime_pm_get() in _edp_panel_vdd_on(). */
		intel_runtime_pm_put(dev_priv);
	}
}
1128 | 1128 | ||
1129 | static void edp_panel_vdd_work(struct work_struct *__work) | 1129 | static void edp_panel_vdd_work(struct work_struct *__work) |
1130 | { | 1130 | { |
1131 | struct intel_dp *intel_dp = container_of(to_delayed_work(__work), | 1131 | struct intel_dp *intel_dp = container_of(to_delayed_work(__work), |
1132 | struct intel_dp, panel_vdd_work); | 1132 | struct intel_dp, panel_vdd_work); |
1133 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 1133 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1134 | 1134 | ||
1135 | mutex_lock(&dev->mode_config.mutex); | 1135 | mutex_lock(&dev->mode_config.mutex); |
1136 | edp_panel_vdd_off_sync(intel_dp); | 1136 | edp_panel_vdd_off_sync(intel_dp); |
1137 | mutex_unlock(&dev->mode_config.mutex); | 1137 | mutex_unlock(&dev->mode_config.mutex); |
1138 | } | 1138 | } |
1139 | 1139 | ||
/*
 * Release the VDD force that was requested earlier (want_panel_vdd).
 *
 * @sync: if true, turn VDD off synchronously right now; if false, queue
 *        the delayed work so VDD stays up across a burst of AUX/panel
 *        operations and is only dropped later.
 *
 * No-op for non-eDP ports.  Warns if VDD was not actually forced on.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		edp_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}
1161 | 1161 | ||
/*
 * Turn on eDP panel power via the panel power sequencer.
 *
 * Waits for any previous power cycle to complete first, then sets
 * POWER_TARGET_ON and waits until the panel reports powered up.
 * No-op for non-eDP ports or if panel power is already on.
 */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	/* Honour the panel's minimum power-cycle delay before powering up. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
1206 | 1206 | ||
/*
 * Turn off eDP panel power.
 *
 * Requires VDD to be forced on (warns otherwise); clears panel power,
 * panel reset, forced VDD and backlight enable in one write, then waits
 * for the panel to report powered off and drops the runtime-PM reference
 * that was taken when VDD was enabled.  No-op for non-eDP ports.
 */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	/* Respect the minimum delay after backlight off before power off. */
	edp_wait_backlight_off(intel_dp);

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	intel_runtime_pm_put(dev_priv);
}
1242 | 1242 | ||
/*
 * Enable the eDP backlight through the panel power sequencer, after
 * honouring the panel's power-on-to-backlight delay, then hand off to
 * the generic panel backlight code.  No-op for non-eDP ports.
 */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_panel_enable_backlight(intel_dp->attached_connector);
}
1272 | 1272 | ||
/*
 * Disable the eDP backlight: first the generic panel backlight, then the
 * sequencer's backlight-enable bit.  Records the time so a later panel
 * power-off can honour the backlight-off delay.  No-op for non-eDP ports.
 */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(intel_dp->attached_connector);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	/* Timestamp used by edp_wait_backlight_off() before panel power off. */
	intel_dp->last_backlight_off = jiffies;
}
1295 | 1295 | ||
/*
 * Enable the eDP PLL on the DP_A port (Ironlake).  The pipe must already
 * be disabled; warns if the PLL or port is unexpectedly on.  The 200us
 * delay after enabling gives the PLL time to lock.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
1321 | 1321 | ||
/*
 * Disable the eDP PLL on DP_A (Ironlake).  The pipe must already be
 * disabled and the port off; warns otherwise.  Writes the hardware value
 * read back rather than intel_dp->DP (see comment below).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}
1346 | 1346 | ||
1347 | /* If the sink supports it, try to set the power state appropriately */ | 1347 | /* If the sink supports it, try to set the power state appropriately */ |
1348 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) | 1348 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) |
1349 | { | 1349 | { |
1350 | int ret, i; | 1350 | int ret, i; |
1351 | 1351 | ||
1352 | /* Should have a valid DPCD by this point */ | 1352 | /* Should have a valid DPCD by this point */ |
1353 | if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) | 1353 | if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) |
1354 | return; | 1354 | return; |
1355 | 1355 | ||
1356 | if (mode != DRM_MODE_DPMS_ON) { | 1356 | if (mode != DRM_MODE_DPMS_ON) { |
1357 | ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, | 1357 | ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, |
1358 | DP_SET_POWER_D3); | 1358 | DP_SET_POWER_D3); |
1359 | if (ret != 1) | 1359 | if (ret != 1) |
1360 | DRM_DEBUG_DRIVER("failed to write sink power state\n"); | 1360 | DRM_DEBUG_DRIVER("failed to write sink power state\n"); |
1361 | } else { | 1361 | } else { |
1362 | /* | 1362 | /* |
1363 | * When turning on, we need to retry for 1ms to give the sink | 1363 | * When turning on, we need to retry for 1ms to give the sink |
1364 | * time to wake up. | 1364 | * time to wake up. |
1365 | */ | 1365 | */ |
1366 | for (i = 0; i < 3; i++) { | 1366 | for (i = 0; i < 3; i++) { |
1367 | ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, | 1367 | ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, |
1368 | DP_SET_POWER_D0); | 1368 | DP_SET_POWER_D0); |
1369 | if (ret == 1) | 1369 | if (ret == 1) |
1370 | break; | 1370 | break; |
1371 | msleep(1); | 1371 | msleep(1); |
1372 | } | 1372 | } |
1373 | } | 1373 | } |
1374 | } | 1374 | } |
1375 | 1375 | ||
/*
 * Read out whether this DP encoder is enabled in hardware and, if so,
 * which pipe drives it.
 *
 * Returns false if the port's power domain is off or the port-enable bit
 * is clear.  Returns true and fills *pipe otherwise; on CPT PCHs the
 * pipe comes from the transcoder's port-select field instead of the port
 * register itself.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/* Reading the register with the domain powered down is invalid. */
	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* CPT: find the transcoder that selected this port. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}
1432 | 1432 | ||
/*
 * Fill in pipe_config from the current hardware state for this DP
 * encoder: sync polarity flags, link m/n values, port clock (for the
 * CPU eDP port A) and the resulting dotclock.  Also contains a VBT
 * bpp-override hack for broken UEFI-provided eDP data (see below).
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_config *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	/* On CPT (except port A) polarity lives in the transcoder register. */
	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		tmp = I915_READ(intel_dp->output_reg);
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->adjusted_mode.flags |= flags;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A's link rate is encoded in the DP_A PLL frequency field. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
1509 | 1509 | ||
1510 | static bool is_edp_psr(struct drm_device *dev) | 1510 | static bool is_edp_psr(struct drm_device *dev) |
1511 | { | 1511 | { |
1512 | struct drm_i915_private *dev_priv = dev->dev_private; | 1512 | struct drm_i915_private *dev_priv = dev->dev_private; |
1513 | 1513 | ||
1514 | return dev_priv->psr.sink_support; | 1514 | return dev_priv->psr.sink_support; |
1515 | } | 1515 | } |
1516 | 1516 | ||
1517 | static bool intel_edp_is_psr_enabled(struct drm_device *dev) | 1517 | static bool intel_edp_is_psr_enabled(struct drm_device *dev) |
1518 | { | 1518 | { |
1519 | struct drm_i915_private *dev_priv = dev->dev_private; | 1519 | struct drm_i915_private *dev_priv = dev->dev_private; |
1520 | 1520 | ||
1521 | if (!HAS_PSR(dev)) | 1521 | if (!HAS_PSR(dev)) |
1522 | return false; | 1522 | return false; |
1523 | 1523 | ||
1524 | return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; | 1524 | return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; |
1525 | } | 1525 | } |
1526 | 1526 | ||
/*
 * Program the PSR VSC SDP into the transcoder's video DIP data buffer
 * (Haswell-style registers).  Unused tail of the DIP buffer is zeroed.
 * The DIP must be disabled while its data registers are written; it is
 * re-enabled for VSC at the end.
 */
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
				    struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before program video DIP data buffer
	   registers for DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	/* Copy the packet one dword at a time; pad the buffer with zeros. */
	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
		if (i < sizeof(struct edp_vsc_psr))
			I915_WRITE(data_reg + i, *data++);
		else
			I915_WRITE(data_reg + i, 0);
	}

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}
1555 | 1555 | ||
/*
 * One-time PSR setup for this eDP port: send the VSC SDP header and mask
 * the events (memory-up, hotplug, LPSP) that would otherwise cause
 * continuous PSR exits.  Guarded by psr_setup_done so it runs only once.
 */
static void intel_edp_psr_setup(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_vsc_psr psr_vsc;

	if (intel_dp->psr_setup_done)
		return;

	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);

	/* Avoid continuous PSR exit by masking memup and hpd */
	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

	intel_dp->psr_setup_done = true;
}
1579 | 1579 | ||
/*
 * Enable PSR on the sink via DPCD (main link kept active only when the
 * sink requires retraining on exit), and program the hardware AUX
 * channel registers the source uses for its own PSR DPCD transactions.
 */
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider;
	int precharge = 0x3;
	int msg_size = 5; /* Header(4) + Message(1) */

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Enable PSR in sink */
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
	else
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);

	/* Setup AUX registers */
	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
	I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
	I915_WRITE(EDP_PSR_AUX_CTL(dev),
		   DP_AUX_CH_CTL_TIME_OUT_400us |
		   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
1607 | 1607 | ||
/*
 * Enable PSR on the source (display controller) side.
 *
 * Builds the EDP_PSR_CTL value — link behaviour while in PSR (standby
 * vs. fully disabled), TP1/TP2/TP3 training pattern times, max sleep
 * time and idle-frame threshold — and writes it with EDP_PSR_ENABLE set.
 */
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t max_sleep_time = 0x1f;
	uint32_t idle_frames = 1;
	uint32_t val = 0x0;
	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
		/* Sink can exit PSR without retraining: keep the main link
		 * in standby, use zero training pattern times and skip the
		 * AUX handshake on exit. */
		val |= EDP_PSR_LINK_STANDBY;
		val |= EDP_PSR_TP2_TP3_TIME_0us;
		val |= EDP_PSR_TP1_TIME_0us;
		val |= EDP_PSR_SKIP_AUX_EXIT;
	} else
		val |= EDP_PSR_LINK_DISABLE;

	/* NOTE(review): link_entry_time is skipped on BDW — presumably the
	 * field moved/disappeared there; confirm against the BDW PRM. */
	I915_WRITE(EDP_PSR_CTL(dev), val |
		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
		   EDP_PSR_ENABLE);
}
1631 | 1631 | ||
1632 | static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) | 1632 | static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) |
1633 | { | 1633 | { |
1634 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 1634 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
1635 | struct drm_device *dev = dig_port->base.base.dev; | 1635 | struct drm_device *dev = dig_port->base.base.dev; |
1636 | struct drm_i915_private *dev_priv = dev->dev_private; | 1636 | struct drm_i915_private *dev_priv = dev->dev_private; |
1637 | struct drm_crtc *crtc = dig_port->base.base.crtc; | 1637 | struct drm_crtc *crtc = dig_port->base.base.crtc; |
1638 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1638 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1639 | struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj; | 1639 | struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj; |
1640 | struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; | 1640 | struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; |
1641 | 1641 | ||
1642 | dev_priv->psr.source_ok = false; | 1642 | dev_priv->psr.source_ok = false; |
1643 | 1643 | ||
1644 | if (!HAS_PSR(dev)) { | 1644 | if (!HAS_PSR(dev)) { |
1645 | DRM_DEBUG_KMS("PSR not supported on this platform\n"); | 1645 | DRM_DEBUG_KMS("PSR not supported on this platform\n"); |
1646 | return false; | 1646 | return false; |
1647 | } | 1647 | } |
1648 | 1648 | ||
1649 | if ((intel_encoder->type != INTEL_OUTPUT_EDP) || | 1649 | if ((intel_encoder->type != INTEL_OUTPUT_EDP) || |
1650 | (dig_port->port != PORT_A)) { | 1650 | (dig_port->port != PORT_A)) { |
1651 | DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); | 1651 | DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); |
1652 | return false; | 1652 | return false; |
1653 | } | 1653 | } |
1654 | 1654 | ||
1655 | if (!i915.enable_psr) { | 1655 | if (!i915.enable_psr) { |
1656 | DRM_DEBUG_KMS("PSR disable by flag\n"); | 1656 | DRM_DEBUG_KMS("PSR disable by flag\n"); |
1657 | return false; | 1657 | return false; |
1658 | } | 1658 | } |
1659 | 1659 | ||
1660 | crtc = dig_port->base.base.crtc; | 1660 | crtc = dig_port->base.base.crtc; |
1661 | if (crtc == NULL) { | 1661 | if (crtc == NULL) { |
1662 | DRM_DEBUG_KMS("crtc not active for PSR\n"); | 1662 | DRM_DEBUG_KMS("crtc not active for PSR\n"); |
1663 | return false; | 1663 | return false; |
1664 | } | 1664 | } |
1665 | 1665 | ||
1666 | intel_crtc = to_intel_crtc(crtc); | 1666 | intel_crtc = to_intel_crtc(crtc); |
1667 | if (!intel_crtc_active(crtc)) { | 1667 | if (!intel_crtc_active(crtc)) { |
1668 | DRM_DEBUG_KMS("crtc not active for PSR\n"); | 1668 | DRM_DEBUG_KMS("crtc not active for PSR\n"); |
1669 | return false; | 1669 | return false; |
1670 | } | 1670 | } |
1671 | 1671 | ||
1672 | obj = to_intel_framebuffer(crtc->primary->fb)->obj; | 1672 | obj = to_intel_framebuffer(crtc->primary->fb)->obj; |
1673 | if (obj->tiling_mode != I915_TILING_X || | 1673 | if (obj->tiling_mode != I915_TILING_X || |
1674 | obj->fence_reg == I915_FENCE_REG_NONE) { | 1674 | obj->fence_reg == I915_FENCE_REG_NONE) { |
1675 | DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); | 1675 | DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); |
1676 | return false; | 1676 | return false; |
1677 | } | 1677 | } |
1678 | 1678 | ||
1679 | if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { | 1679 | if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { |
1680 | DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); | 1680 | DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); |
1681 | return false; | 1681 | return false; |
1682 | } | 1682 | } |
1683 | 1683 | ||
1684 | if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & | 1684 | if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & |
1685 | S3D_ENABLE) { | 1685 | S3D_ENABLE) { |
1686 | DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); | 1686 | DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); |
1687 | return false; | 1687 | return false; |
1688 | } | 1688 | } |
1689 | 1689 | ||
1690 | if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { | 1690 | if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { |
1691 | DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); | 1691 | DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); |
1692 | return false; | 1692 | return false; |
1693 | } | 1693 | } |
1694 | 1694 | ||
1695 | dev_priv->psr.source_ok = true; | 1695 | dev_priv->psr.source_ok = true; |
1696 | return true; | 1696 | return true; |
1697 | } | 1697 | } |
1698 | 1698 | ||
/*
 * Unconditional worker for enabling PSR: re-validates the preconditions
 * and then programs sink first, source last.
 */
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	/* Bail out unless every PSR precondition holds... */
	if (!intel_edp_psr_match_conditions(intel_dp))
		return;
	/* ...and PSR is not already running. */
	if (intel_edp_is_psr_enabled(dev))
		return;

	/* One-time AUX/command setup. */
	intel_edp_psr_setup(intel_dp);

	/* Enable PSR on the panel, then on the host. */
	intel_edp_psr_enable_sink(intel_dp);
	intel_edp_psr_enable_source(intel_dp);
}
1716 | 1716 | ||
/*
 * Public entry point for turning PSR on. Only proceeds when the
 * preconditions hold and PSR is not already enabled.
 */
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (!intel_edp_psr_match_conditions(intel_dp))
		return;
	if (intel_edp_is_psr_enabled(dev))
		return;

	intel_edp_psr_do_enable(intel_dp);
}
1725 | 1725 | ||
/*
 * Disable PSR on the source and wait for the hardware to report idle.
 */
void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Nothing to do if PSR was never enabled on the source. */
	if (!intel_edp_is_psr_enabled(dev))
		return;

	I915_WRITE(EDP_PSR_CTL(dev),
		   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

	/* Wait till PSR is idle (up to 2s, polling every 10ms). */
	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
		       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
		DRM_ERROR("Timed out waiting for PSR Idle State\n");
}
1742 | 1742 | ||
1743 | void intel_edp_psr_update(struct drm_device *dev) | 1743 | void intel_edp_psr_update(struct drm_device *dev) |
1744 | { | 1744 | { |
1745 | struct intel_encoder *encoder; | 1745 | struct intel_encoder *encoder; |
1746 | struct intel_dp *intel_dp = NULL; | 1746 | struct intel_dp *intel_dp = NULL; |
1747 | 1747 | ||
1748 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) | 1748 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) |
1749 | if (encoder->type == INTEL_OUTPUT_EDP) { | 1749 | if (encoder->type == INTEL_OUTPUT_EDP) { |
1750 | intel_dp = enc_to_intel_dp(&encoder->base); | 1750 | intel_dp = enc_to_intel_dp(&encoder->base); |
1751 | 1751 | ||
1752 | if (!is_edp_psr(dev)) | 1752 | if (!is_edp_psr(dev)) |
1753 | return; | 1753 | return; |
1754 | 1754 | ||
1755 | if (!intel_edp_psr_match_conditions(intel_dp)) | 1755 | if (!intel_edp_psr_match_conditions(intel_dp)) |
1756 | intel_edp_psr_disable(intel_dp); | 1756 | intel_edp_psr_disable(intel_dp); |
1757 | else | 1757 | else |
1758 | if (!intel_edp_is_psr_enabled(dev)) | 1758 | if (!intel_edp_is_psr_enabled(dev)) |
1759 | intel_edp_psr_do_enable(intel_dp); | 1759 | intel_edp_psr_do_enable(intel_dp); |
1760 | } | 1760 | } |
1761 | } | 1761 | } |
1762 | 1762 | ||
/*
 * Disable hook: powers down the panel. Runs before the pipe/plane is
 * shut down, so CPU eDP link-down is deferred to post_disable.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
		intel_dp_link_down(intel_dp);
}
1780 | 1780 | ||
1781 | static void intel_post_disable_dp(struct intel_encoder *encoder) | 1781 | static void intel_post_disable_dp(struct intel_encoder *encoder) |
1782 | { | 1782 | { |
1783 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 1783 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
1784 | enum port port = dp_to_dig_port(intel_dp)->port; | 1784 | enum port port = dp_to_dig_port(intel_dp)->port; |
1785 | struct drm_device *dev = encoder->base.dev; | 1785 | struct drm_device *dev = encoder->base.dev; |
1786 | 1786 | ||
1787 | if (port == PORT_A || IS_VALLEYVIEW(dev)) { | 1787 | if (port == PORT_A || IS_VALLEYVIEW(dev)) { |
1788 | intel_dp_link_down(intel_dp); | 1788 | intel_dp_link_down(intel_dp); |
1789 | if (!IS_VALLEYVIEW(dev)) | 1789 | if (!IS_VALLEYVIEW(dev)) |
1790 | ironlake_edp_pll_off(intel_dp); | 1790 | ironlake_edp_pll_off(intel_dp); |
1791 | } | 1791 | } |
1792 | } | 1792 | } |
1793 | 1793 | ||
/*
 * Enable hook: powers up the panel and runs full link training.
 * The interleaving below (start training, panel on, VDD off, complete
 * training) is deliberate — do not reorder without checking the eDP
 * panel power sequencing requirements.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	/* The port is expected to still be off at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	/* Hold VDD while we wake the sink and start training. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);
}
1812 | 1812 | ||
1813 | static void g4x_enable_dp(struct intel_encoder *encoder) | 1813 | static void g4x_enable_dp(struct intel_encoder *encoder) |
1814 | { | 1814 | { |
1815 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 1815 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
1816 | 1816 | ||
1817 | intel_enable_dp(encoder); | 1817 | intel_enable_dp(encoder); |
1818 | intel_edp_backlight_on(intel_dp); | 1818 | intel_edp_backlight_on(intel_dp); |
1819 | } | 1819 | } |
1820 | 1820 | ||
1821 | static void vlv_enable_dp(struct intel_encoder *encoder) | 1821 | static void vlv_enable_dp(struct intel_encoder *encoder) |
1822 | { | 1822 | { |
1823 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 1823 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
1824 | 1824 | ||
1825 | intel_edp_backlight_on(intel_dp); | 1825 | intel_edp_backlight_on(intel_dp); |
1826 | } | 1826 | } |
1827 | 1827 | ||
1828 | static void g4x_pre_enable_dp(struct intel_encoder *encoder) | 1828 | static void g4x_pre_enable_dp(struct intel_encoder *encoder) |
1829 | { | 1829 | { |
1830 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 1830 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
1831 | struct intel_digital_port *dport = dp_to_dig_port(intel_dp); | 1831 | struct intel_digital_port *dport = dp_to_dig_port(intel_dp); |
1832 | 1832 | ||
1833 | if (dport->port == PORT_A) | 1833 | if (dport->port == PORT_A) |
1834 | ironlake_edp_pll_on(intel_dp); | 1834 | ironlake_edp_pll_on(intel_dp); |
1835 | } | 1835 | } |
1836 | 1836 | ||
/*
 * VLV pre_enable hook: program the DPIO PHY lanes for this pipe/port,
 * (re)initialize the eDP panel power sequencer if needed, then enable
 * the port and wait for the PHY to report ready.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	struct edp_power_seq power_seq;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/* NOTE(review): the read result is discarded (val is immediately
	 * zeroed); presumably the read is a required posting/latch access —
	 * confirm against the VLV DPIO documentation.
	 * Bit 21 selects the pipe; 0x001000c4 is an opaque magic value. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->dpio_lock);

	if (is_edp(intel_dp)) {
		/* init power sequencer on this pipe and port */
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
							      &power_seq);
	}

	intel_enable_dp(encoder);

	vlv_wait_port_ready(dev_priv, dport);
}
1875 | 1875 | ||
/*
 * VLV pre_pll_enable hook: reset the DPIO TX lanes to their default
 * state and apply the inter-pair skew workaround, all under dpio_lock.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure (opaque workaround values). */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->dpio_lock);
}
1903 | 1903 | ||
1904 | /* | 1904 | /* |
1905 | * Native read with retry for link status and receiver capability reads for | 1905 | * Native read with retry for link status and receiver capability reads for |
1906 | * cases where the sink may still be asleep. | 1906 | * cases where the sink may still be asleep. |
1907 | * | 1907 | * |
1908 | * Sinks are *supposed* to come up within 1ms from an off state, but we're also | 1908 | * Sinks are *supposed* to come up within 1ms from an off state, but we're also |
1909 | * supposed to retry 3 times per the spec. | 1909 | * supposed to retry 3 times per the spec. |
1910 | */ | 1910 | */ |
1911 | static ssize_t | 1911 | static ssize_t |
1912 | intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset, | 1912 | intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset, |
1913 | void *buffer, size_t size) | 1913 | void *buffer, size_t size) |
1914 | { | 1914 | { |
1915 | ssize_t ret; | 1915 | ssize_t ret; |
1916 | int i; | 1916 | int i; |
1917 | 1917 | ||
1918 | for (i = 0; i < 3; i++) { | 1918 | for (i = 0; i < 3; i++) { |
1919 | ret = drm_dp_dpcd_read(aux, offset, buffer, size); | 1919 | ret = drm_dp_dpcd_read(aux, offset, buffer, size); |
1920 | if (ret == size) | 1920 | if (ret == size) |
1921 | return ret; | 1921 | return ret; |
1922 | msleep(1); | 1922 | msleep(1); |
1923 | } | 1923 | } |
1924 | 1924 | ||
1925 | return ret; | 1925 | return ret; |
1926 | } | 1926 | } |
1927 | 1927 | ||
1928 | /* | 1928 | /* |
1929 | * Fetch AUX CH registers 0x202 - 0x207 which contain | 1929 | * Fetch AUX CH registers 0x202 - 0x207 which contain |
1930 | * link status information | 1930 | * link status information |
1931 | */ | 1931 | */ |
1932 | static bool | 1932 | static bool |
1933 | intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) | 1933 | intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) |
1934 | { | 1934 | { |
1935 | return intel_dp_dpcd_read_wake(&intel_dp->aux, | 1935 | return intel_dp_dpcd_read_wake(&intel_dp->aux, |
1936 | DP_LANE0_1_STATUS, | 1936 | DP_LANE0_1_STATUS, |
1937 | link_status, | 1937 | link_status, |
1938 | DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; | 1938 | DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; |
1939 | } | 1939 | } |
1940 | 1940 | ||
1941 | /* | 1941 | /* |
1942 | * These are source-specific values; current Intel hardware supports | 1942 | * These are source-specific values; current Intel hardware supports |
1943 | * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB | 1943 | * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB |
1944 | */ | 1944 | */ |
1945 | 1945 | ||
1946 | static uint8_t | 1946 | static uint8_t |
1947 | intel_dp_voltage_max(struct intel_dp *intel_dp) | 1947 | intel_dp_voltage_max(struct intel_dp *intel_dp) |
1948 | { | 1948 | { |
1949 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 1949 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1950 | enum port port = dp_to_dig_port(intel_dp)->port; | 1950 | enum port port = dp_to_dig_port(intel_dp)->port; |
1951 | 1951 | ||
1952 | if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev)) | 1952 | if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev)) |
1953 | return DP_TRAIN_VOLTAGE_SWING_1200; | 1953 | return DP_TRAIN_VOLTAGE_SWING_1200; |
1954 | else if (IS_GEN7(dev) && port == PORT_A) | 1954 | else if (IS_GEN7(dev) && port == PORT_A) |
1955 | return DP_TRAIN_VOLTAGE_SWING_800; | 1955 | return DP_TRAIN_VOLTAGE_SWING_800; |
1956 | else if (HAS_PCH_CPT(dev) && port != PORT_A) | 1956 | else if (HAS_PCH_CPT(dev) && port != PORT_A) |
1957 | return DP_TRAIN_VOLTAGE_SWING_1200; | 1957 | return DP_TRAIN_VOLTAGE_SWING_1200; |
1958 | else | 1958 | else |
1959 | return DP_TRAIN_VOLTAGE_SWING_800; | 1959 | return DP_TRAIN_VOLTAGE_SWING_800; |
1960 | } | 1960 | } |
1961 | 1961 | ||
1962 | static uint8_t | 1962 | static uint8_t |
1963 | intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) | 1963 | intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) |
1964 | { | 1964 | { |
1965 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 1965 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1966 | enum port port = dp_to_dig_port(intel_dp)->port; | 1966 | enum port port = dp_to_dig_port(intel_dp)->port; |
1967 | 1967 | ||
1968 | if (IS_BROADWELL(dev)) { | 1968 | if (IS_BROADWELL(dev)) { |
1969 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { | 1969 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { |
1970 | case DP_TRAIN_VOLTAGE_SWING_400: | 1970 | case DP_TRAIN_VOLTAGE_SWING_400: |
1971 | case DP_TRAIN_VOLTAGE_SWING_600: | 1971 | case DP_TRAIN_VOLTAGE_SWING_600: |
1972 | return DP_TRAIN_PRE_EMPHASIS_6; | 1972 | return DP_TRAIN_PRE_EMPHASIS_6; |
1973 | case DP_TRAIN_VOLTAGE_SWING_800: | 1973 | case DP_TRAIN_VOLTAGE_SWING_800: |
1974 | return DP_TRAIN_PRE_EMPHASIS_3_5; | 1974 | return DP_TRAIN_PRE_EMPHASIS_3_5; |
1975 | case DP_TRAIN_VOLTAGE_SWING_1200: | 1975 | case DP_TRAIN_VOLTAGE_SWING_1200: |
1976 | default: | 1976 | default: |
1977 | return DP_TRAIN_PRE_EMPHASIS_0; | 1977 | return DP_TRAIN_PRE_EMPHASIS_0; |
1978 | } | 1978 | } |
1979 | } else if (IS_HASWELL(dev)) { | 1979 | } else if (IS_HASWELL(dev)) { |
1980 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { | 1980 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { |
1981 | case DP_TRAIN_VOLTAGE_SWING_400: | 1981 | case DP_TRAIN_VOLTAGE_SWING_400: |
1982 | return DP_TRAIN_PRE_EMPHASIS_9_5; | 1982 | return DP_TRAIN_PRE_EMPHASIS_9_5; |
1983 | case DP_TRAIN_VOLTAGE_SWING_600: | 1983 | case DP_TRAIN_VOLTAGE_SWING_600: |
1984 | return DP_TRAIN_PRE_EMPHASIS_6; | 1984 | return DP_TRAIN_PRE_EMPHASIS_6; |
1985 | case DP_TRAIN_VOLTAGE_SWING_800: | 1985 | case DP_TRAIN_VOLTAGE_SWING_800: |
1986 | return DP_TRAIN_PRE_EMPHASIS_3_5; | 1986 | return DP_TRAIN_PRE_EMPHASIS_3_5; |
1987 | case DP_TRAIN_VOLTAGE_SWING_1200: | 1987 | case DP_TRAIN_VOLTAGE_SWING_1200: |
1988 | default: | 1988 | default: |
1989 | return DP_TRAIN_PRE_EMPHASIS_0; | 1989 | return DP_TRAIN_PRE_EMPHASIS_0; |
1990 | } | 1990 | } |
1991 | } else if (IS_VALLEYVIEW(dev)) { | 1991 | } else if (IS_VALLEYVIEW(dev)) { |
1992 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { | 1992 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { |
1993 | case DP_TRAIN_VOLTAGE_SWING_400: | 1993 | case DP_TRAIN_VOLTAGE_SWING_400: |
1994 | return DP_TRAIN_PRE_EMPHASIS_9_5; | 1994 | return DP_TRAIN_PRE_EMPHASIS_9_5; |
1995 | case DP_TRAIN_VOLTAGE_SWING_600: | 1995 | case DP_TRAIN_VOLTAGE_SWING_600: |
1996 | return DP_TRAIN_PRE_EMPHASIS_6; | 1996 | return DP_TRAIN_PRE_EMPHASIS_6; |
1997 | case DP_TRAIN_VOLTAGE_SWING_800: | 1997 | case DP_TRAIN_VOLTAGE_SWING_800: |
1998 | return DP_TRAIN_PRE_EMPHASIS_3_5; | 1998 | return DP_TRAIN_PRE_EMPHASIS_3_5; |
1999 | case DP_TRAIN_VOLTAGE_SWING_1200: | 1999 | case DP_TRAIN_VOLTAGE_SWING_1200: |
2000 | default: | 2000 | default: |
2001 | return DP_TRAIN_PRE_EMPHASIS_0; | 2001 | return DP_TRAIN_PRE_EMPHASIS_0; |
2002 | } | 2002 | } |
2003 | } else if (IS_GEN7(dev) && port == PORT_A) { | 2003 | } else if (IS_GEN7(dev) && port == PORT_A) { |
2004 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { | 2004 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { |
2005 | case DP_TRAIN_VOLTAGE_SWING_400: | 2005 | case DP_TRAIN_VOLTAGE_SWING_400: |
2006 | return DP_TRAIN_PRE_EMPHASIS_6; | 2006 | return DP_TRAIN_PRE_EMPHASIS_6; |
2007 | case DP_TRAIN_VOLTAGE_SWING_600: | 2007 | case DP_TRAIN_VOLTAGE_SWING_600: |
2008 | case DP_TRAIN_VOLTAGE_SWING_800: | 2008 | case DP_TRAIN_VOLTAGE_SWING_800: |
2009 | return DP_TRAIN_PRE_EMPHASIS_3_5; | 2009 | return DP_TRAIN_PRE_EMPHASIS_3_5; |
2010 | default: | 2010 | default: |
2011 | return DP_TRAIN_PRE_EMPHASIS_0; | 2011 | return DP_TRAIN_PRE_EMPHASIS_0; |
2012 | } | 2012 | } |
2013 | } else { | 2013 | } else { |
2014 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { | 2014 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { |
2015 | case DP_TRAIN_VOLTAGE_SWING_400: | 2015 | case DP_TRAIN_VOLTAGE_SWING_400: |
2016 | return DP_TRAIN_PRE_EMPHASIS_6; | 2016 | return DP_TRAIN_PRE_EMPHASIS_6; |
2017 | case DP_TRAIN_VOLTAGE_SWING_600: | 2017 | case DP_TRAIN_VOLTAGE_SWING_600: |
2018 | return DP_TRAIN_PRE_EMPHASIS_6; | 2018 | return DP_TRAIN_PRE_EMPHASIS_6; |
2019 | case DP_TRAIN_VOLTAGE_SWING_800: | 2019 | case DP_TRAIN_VOLTAGE_SWING_800: |
2020 | return DP_TRAIN_PRE_EMPHASIS_3_5; | 2020 | return DP_TRAIN_PRE_EMPHASIS_3_5; |
2021 | case DP_TRAIN_VOLTAGE_SWING_1200: | 2021 | case DP_TRAIN_VOLTAGE_SWING_1200: |
2022 | default: | 2022 | default: |
2023 | return DP_TRAIN_PRE_EMPHASIS_0; | 2023 | return DP_TRAIN_PRE_EMPHASIS_0; |
2024 | } | 2024 | } |
2025 | } | 2025 | } |
2026 | } | 2026 | } |
2027 | 2027 | ||
/*
 * Program the Valleyview DPIO PHY with the de-emphasis, pre-emphasis and
 * "unique transition scale" values matching the swing/pre-emphasis pair
 * requested in intel_dp->train_set[0].
 *
 * Unlike the other *_signal_levels() helpers, which return bits to be
 * merged into the port register, VLV drives the PHY through sideband
 * (vlv_dpio_write) and therefore always returns 0; unsupported
 * combinations bail out early without touching the PHY.
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/*
	 * Look up the magic PHY register values for the requested
	 * pre-emphasis/voltage-swing pair. Only the combinations listed
	 * below are valid; anything else returns without programming.
	 */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_1200:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/*
	 * Write the TX/PCS lanes under the dpio lock. The DW5 writes
	 * bracket the sequence: cleared first, then set to 0x80000000
	 * once the new values are in place.
	 */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
2127 | 2127 | ||
2128 | static void | 2128 | static void |
2129 | intel_get_adjust_train(struct intel_dp *intel_dp, | 2129 | intel_get_adjust_train(struct intel_dp *intel_dp, |
2130 | const uint8_t link_status[DP_LINK_STATUS_SIZE]) | 2130 | const uint8_t link_status[DP_LINK_STATUS_SIZE]) |
2131 | { | 2131 | { |
2132 | uint8_t v = 0; | 2132 | uint8_t v = 0; |
2133 | uint8_t p = 0; | 2133 | uint8_t p = 0; |
2134 | int lane; | 2134 | int lane; |
2135 | uint8_t voltage_max; | 2135 | uint8_t voltage_max; |
2136 | uint8_t preemph_max; | 2136 | uint8_t preemph_max; |
2137 | 2137 | ||
2138 | for (lane = 0; lane < intel_dp->lane_count; lane++) { | 2138 | for (lane = 0; lane < intel_dp->lane_count; lane++) { |
2139 | uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); | 2139 | uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); |
2140 | uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); | 2140 | uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); |
2141 | 2141 | ||
2142 | if (this_v > v) | 2142 | if (this_v > v) |
2143 | v = this_v; | 2143 | v = this_v; |
2144 | if (this_p > p) | 2144 | if (this_p > p) |
2145 | p = this_p; | 2145 | p = this_p; |
2146 | } | 2146 | } |
2147 | 2147 | ||
2148 | voltage_max = intel_dp_voltage_max(intel_dp); | 2148 | voltage_max = intel_dp_voltage_max(intel_dp); |
2149 | if (v >= voltage_max) | 2149 | if (v >= voltage_max) |
2150 | v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; | 2150 | v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; |
2151 | 2151 | ||
2152 | preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); | 2152 | preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); |
2153 | if (p >= preemph_max) | 2153 | if (p >= preemph_max) |
2154 | p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; | 2154 | p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; |
2155 | 2155 | ||
2156 | for (lane = 0; lane < 4; lane++) | 2156 | for (lane = 0; lane < 4; lane++) |
2157 | intel_dp->train_set[lane] = v | p; | 2157 | intel_dp->train_set[lane] = v | p; |
2158 | } | 2158 | } |
2159 | 2159 | ||
2160 | static uint32_t | 2160 | static uint32_t |
2161 | intel_gen4_signal_levels(uint8_t train_set) | 2161 | intel_gen4_signal_levels(uint8_t train_set) |
2162 | { | 2162 | { |
2163 | uint32_t signal_levels = 0; | 2163 | uint32_t signal_levels = 0; |
2164 | 2164 | ||
2165 | switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { | 2165 | switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { |
2166 | case DP_TRAIN_VOLTAGE_SWING_400: | 2166 | case DP_TRAIN_VOLTAGE_SWING_400: |
2167 | default: | 2167 | default: |
2168 | signal_levels |= DP_VOLTAGE_0_4; | 2168 | signal_levels |= DP_VOLTAGE_0_4; |
2169 | break; | 2169 | break; |
2170 | case DP_TRAIN_VOLTAGE_SWING_600: | 2170 | case DP_TRAIN_VOLTAGE_SWING_600: |
2171 | signal_levels |= DP_VOLTAGE_0_6; | 2171 | signal_levels |= DP_VOLTAGE_0_6; |
2172 | break; | 2172 | break; |
2173 | case DP_TRAIN_VOLTAGE_SWING_800: | 2173 | case DP_TRAIN_VOLTAGE_SWING_800: |
2174 | signal_levels |= DP_VOLTAGE_0_8; | 2174 | signal_levels |= DP_VOLTAGE_0_8; |
2175 | break; | 2175 | break; |
2176 | case DP_TRAIN_VOLTAGE_SWING_1200: | 2176 | case DP_TRAIN_VOLTAGE_SWING_1200: |
2177 | signal_levels |= DP_VOLTAGE_1_2; | 2177 | signal_levels |= DP_VOLTAGE_1_2; |
2178 | break; | 2178 | break; |
2179 | } | 2179 | } |
2180 | switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { | 2180 | switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { |
2181 | case DP_TRAIN_PRE_EMPHASIS_0: | 2181 | case DP_TRAIN_PRE_EMPHASIS_0: |
2182 | default: | 2182 | default: |
2183 | signal_levels |= DP_PRE_EMPHASIS_0; | 2183 | signal_levels |= DP_PRE_EMPHASIS_0; |
2184 | break; | 2184 | break; |
2185 | case DP_TRAIN_PRE_EMPHASIS_3_5: | 2185 | case DP_TRAIN_PRE_EMPHASIS_3_5: |
2186 | signal_levels |= DP_PRE_EMPHASIS_3_5; | 2186 | signal_levels |= DP_PRE_EMPHASIS_3_5; |
2187 | break; | 2187 | break; |
2188 | case DP_TRAIN_PRE_EMPHASIS_6: | 2188 | case DP_TRAIN_PRE_EMPHASIS_6: |
2189 | signal_levels |= DP_PRE_EMPHASIS_6; | 2189 | signal_levels |= DP_PRE_EMPHASIS_6; |
2190 | break; | 2190 | break; |
2191 | case DP_TRAIN_PRE_EMPHASIS_9_5: | 2191 | case DP_TRAIN_PRE_EMPHASIS_9_5: |
2192 | signal_levels |= DP_PRE_EMPHASIS_9_5; | 2192 | signal_levels |= DP_PRE_EMPHASIS_9_5; |
2193 | break; | 2193 | break; |
2194 | } | 2194 | } |
2195 | return signal_levels; | 2195 | return signal_levels; |
2196 | } | 2196 | } |
2197 | 2197 | ||
2198 | /* Gen6's DP voltage swing and pre-emphasis control */ | 2198 | /* Gen6's DP voltage swing and pre-emphasis control */ |
2199 | static uint32_t | 2199 | static uint32_t |
2200 | intel_gen6_edp_signal_levels(uint8_t train_set) | 2200 | intel_gen6_edp_signal_levels(uint8_t train_set) |
2201 | { | 2201 | { |
2202 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | | 2202 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | |
2203 | DP_TRAIN_PRE_EMPHASIS_MASK); | 2203 | DP_TRAIN_PRE_EMPHASIS_MASK); |
2204 | switch (signal_levels) { | 2204 | switch (signal_levels) { |
2205 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: | 2205 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: |
2206 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: | 2206 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: |
2207 | return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; | 2207 | return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; |
2208 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: | 2208 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: |
2209 | return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; | 2209 | return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; |
2210 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: | 2210 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: |
2211 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: | 2211 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: |
2212 | return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; | 2212 | return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; |
2213 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: | 2213 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: |
2214 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: | 2214 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: |
2215 | return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; | 2215 | return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; |
2216 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: | 2216 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: |
2217 | case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: | 2217 | case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: |
2218 | return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; | 2218 | return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; |
2219 | default: | 2219 | default: |
2220 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" | 2220 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" |
2221 | "0x%x\n", signal_levels); | 2221 | "0x%x\n", signal_levels); |
2222 | return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; | 2222 | return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; |
2223 | } | 2223 | } |
2224 | } | 2224 | } |
2225 | 2225 | ||
2226 | /* Gen7's DP voltage swing and pre-emphasis control */ | 2226 | /* Gen7's DP voltage swing and pre-emphasis control */ |
2227 | static uint32_t | 2227 | static uint32_t |
2228 | intel_gen7_edp_signal_levels(uint8_t train_set) | 2228 | intel_gen7_edp_signal_levels(uint8_t train_set) |
2229 | { | 2229 | { |
2230 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | | 2230 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | |
2231 | DP_TRAIN_PRE_EMPHASIS_MASK); | 2231 | DP_TRAIN_PRE_EMPHASIS_MASK); |
2232 | switch (signal_levels) { | 2232 | switch (signal_levels) { |
2233 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: | 2233 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: |
2234 | return EDP_LINK_TRAIN_400MV_0DB_IVB; | 2234 | return EDP_LINK_TRAIN_400MV_0DB_IVB; |
2235 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: | 2235 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: |
2236 | return EDP_LINK_TRAIN_400MV_3_5DB_IVB; | 2236 | return EDP_LINK_TRAIN_400MV_3_5DB_IVB; |
2237 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: | 2237 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: |
2238 | return EDP_LINK_TRAIN_400MV_6DB_IVB; | 2238 | return EDP_LINK_TRAIN_400MV_6DB_IVB; |
2239 | 2239 | ||
2240 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: | 2240 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: |
2241 | return EDP_LINK_TRAIN_600MV_0DB_IVB; | 2241 | return EDP_LINK_TRAIN_600MV_0DB_IVB; |
2242 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: | 2242 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: |
2243 | return EDP_LINK_TRAIN_600MV_3_5DB_IVB; | 2243 | return EDP_LINK_TRAIN_600MV_3_5DB_IVB; |
2244 | 2244 | ||
2245 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: | 2245 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: |
2246 | return EDP_LINK_TRAIN_800MV_0DB_IVB; | 2246 | return EDP_LINK_TRAIN_800MV_0DB_IVB; |
2247 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: | 2247 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: |
2248 | return EDP_LINK_TRAIN_800MV_3_5DB_IVB; | 2248 | return EDP_LINK_TRAIN_800MV_3_5DB_IVB; |
2249 | 2249 | ||
2250 | default: | 2250 | default: |
2251 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" | 2251 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" |
2252 | "0x%x\n", signal_levels); | 2252 | "0x%x\n", signal_levels); |
2253 | return EDP_LINK_TRAIN_500MV_0DB_IVB; | 2253 | return EDP_LINK_TRAIN_500MV_0DB_IVB; |
2254 | } | 2254 | } |
2255 | } | 2255 | } |
2256 | 2256 | ||
2257 | /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ | 2257 | /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ |
2258 | static uint32_t | 2258 | static uint32_t |
2259 | intel_hsw_signal_levels(uint8_t train_set) | 2259 | intel_hsw_signal_levels(uint8_t train_set) |
2260 | { | 2260 | { |
2261 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | | 2261 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | |
2262 | DP_TRAIN_PRE_EMPHASIS_MASK); | 2262 | DP_TRAIN_PRE_EMPHASIS_MASK); |
2263 | switch (signal_levels) { | 2263 | switch (signal_levels) { |
2264 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: | 2264 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: |
2265 | return DDI_BUF_EMP_400MV_0DB_HSW; | 2265 | return DDI_BUF_EMP_400MV_0DB_HSW; |
2266 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: | 2266 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: |
2267 | return DDI_BUF_EMP_400MV_3_5DB_HSW; | 2267 | return DDI_BUF_EMP_400MV_3_5DB_HSW; |
2268 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: | 2268 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: |
2269 | return DDI_BUF_EMP_400MV_6DB_HSW; | 2269 | return DDI_BUF_EMP_400MV_6DB_HSW; |
2270 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5: | 2270 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5: |
2271 | return DDI_BUF_EMP_400MV_9_5DB_HSW; | 2271 | return DDI_BUF_EMP_400MV_9_5DB_HSW; |
2272 | 2272 | ||
2273 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: | 2273 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: |
2274 | return DDI_BUF_EMP_600MV_0DB_HSW; | 2274 | return DDI_BUF_EMP_600MV_0DB_HSW; |
2275 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: | 2275 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: |
2276 | return DDI_BUF_EMP_600MV_3_5DB_HSW; | 2276 | return DDI_BUF_EMP_600MV_3_5DB_HSW; |
2277 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: | 2277 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: |
2278 | return DDI_BUF_EMP_600MV_6DB_HSW; | 2278 | return DDI_BUF_EMP_600MV_6DB_HSW; |
2279 | 2279 | ||
2280 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: | 2280 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: |
2281 | return DDI_BUF_EMP_800MV_0DB_HSW; | 2281 | return DDI_BUF_EMP_800MV_0DB_HSW; |
2282 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: | 2282 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: |
2283 | return DDI_BUF_EMP_800MV_3_5DB_HSW; | 2283 | return DDI_BUF_EMP_800MV_3_5DB_HSW; |
2284 | default: | 2284 | default: |
2285 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" | 2285 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" |
2286 | "0x%x\n", signal_levels); | 2286 | "0x%x\n", signal_levels); |
2287 | return DDI_BUF_EMP_400MV_0DB_HSW; | 2287 | return DDI_BUF_EMP_400MV_0DB_HSW; |
2288 | } | 2288 | } |
2289 | } | 2289 | } |
2290 | 2290 | ||
2291 | static uint32_t | 2291 | static uint32_t |
2292 | intel_bdw_signal_levels(uint8_t train_set) | 2292 | intel_bdw_signal_levels(uint8_t train_set) |
2293 | { | 2293 | { |
2294 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | | 2294 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | |
2295 | DP_TRAIN_PRE_EMPHASIS_MASK); | 2295 | DP_TRAIN_PRE_EMPHASIS_MASK); |
2296 | switch (signal_levels) { | 2296 | switch (signal_levels) { |
2297 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: | 2297 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: |
2298 | return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */ | 2298 | return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */ |
2299 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: | 2299 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: |
2300 | return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */ | 2300 | return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */ |
2301 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: | 2301 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: |
2302 | return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */ | 2302 | return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */ |
2303 | 2303 | ||
2304 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: | 2304 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: |
2305 | return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */ | 2305 | return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */ |
2306 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: | 2306 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: |
2307 | return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */ | 2307 | return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */ |
2308 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: | 2308 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: |
2309 | return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */ | 2309 | return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */ |
2310 | 2310 | ||
2311 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: | 2311 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: |
2312 | return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */ | 2312 | return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */ |
2313 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: | 2313 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: |
2314 | return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */ | 2314 | return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */ |
2315 | 2315 | ||
2316 | case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: | 2316 | case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: |
2317 | return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */ | 2317 | return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */ |
2318 | 2318 | ||
2319 | default: | 2319 | default: |
2320 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" | 2320 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" |
2321 | "0x%x\n", signal_levels); | 2321 | "0x%x\n", signal_levels); |
2322 | return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */ | 2322 | return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */ |
2323 | } | 2323 | } |
2324 | } | 2324 | } |
2325 | 2325 | ||
2326 | /* Properly updates "DP" with the correct signal levels. */ | 2326 | /* Properly updates "DP" with the correct signal levels. */ |
2327 | static void | 2327 | static void |
2328 | intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) | 2328 | intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) |
2329 | { | 2329 | { |
2330 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 2330 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
2331 | enum port port = intel_dig_port->port; | 2331 | enum port port = intel_dig_port->port; |
2332 | struct drm_device *dev = intel_dig_port->base.base.dev; | 2332 | struct drm_device *dev = intel_dig_port->base.base.dev; |
2333 | uint32_t signal_levels, mask; | 2333 | uint32_t signal_levels, mask; |
2334 | uint8_t train_set = intel_dp->train_set[0]; | 2334 | uint8_t train_set = intel_dp->train_set[0]; |
2335 | 2335 | ||
2336 | if (IS_BROADWELL(dev)) { | 2336 | if (IS_BROADWELL(dev)) { |
2337 | signal_levels = intel_bdw_signal_levels(train_set); | 2337 | signal_levels = intel_bdw_signal_levels(train_set); |
2338 | mask = DDI_BUF_EMP_MASK; | 2338 | mask = DDI_BUF_EMP_MASK; |
2339 | } else if (IS_HASWELL(dev)) { | 2339 | } else if (IS_HASWELL(dev)) { |
2340 | signal_levels = intel_hsw_signal_levels(train_set); | 2340 | signal_levels = intel_hsw_signal_levels(train_set); |
2341 | mask = DDI_BUF_EMP_MASK; | 2341 | mask = DDI_BUF_EMP_MASK; |
2342 | } else if (IS_VALLEYVIEW(dev)) { | 2342 | } else if (IS_VALLEYVIEW(dev)) { |
2343 | signal_levels = intel_vlv_signal_levels(intel_dp); | 2343 | signal_levels = intel_vlv_signal_levels(intel_dp); |
2344 | mask = 0; | 2344 | mask = 0; |
2345 | } else if (IS_GEN7(dev) && port == PORT_A) { | 2345 | } else if (IS_GEN7(dev) && port == PORT_A) { |
2346 | signal_levels = intel_gen7_edp_signal_levels(train_set); | 2346 | signal_levels = intel_gen7_edp_signal_levels(train_set); |
2347 | mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; | 2347 | mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; |
2348 | } else if (IS_GEN6(dev) && port == PORT_A) { | 2348 | } else if (IS_GEN6(dev) && port == PORT_A) { |
2349 | signal_levels = intel_gen6_edp_signal_levels(train_set); | 2349 | signal_levels = intel_gen6_edp_signal_levels(train_set); |
2350 | mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; | 2350 | mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; |
2351 | } else { | 2351 | } else { |
2352 | signal_levels = intel_gen4_signal_levels(train_set); | 2352 | signal_levels = intel_gen4_signal_levels(train_set); |
2353 | mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK; | 2353 | mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK; |
2354 | } | 2354 | } |
2355 | 2355 | ||
2356 | DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels); | 2356 | DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels); |
2357 | 2357 | ||
2358 | *DP = (*DP & ~mask) | signal_levels; | 2358 | *DP = (*DP & ~mask) | signal_levels; |
2359 | } | 2359 | } |
2360 | 2360 | ||
/*
 * Set the link-training pattern on both the source and the sink.
 *
 * The pattern is first programmed into the source's port register
 * (DP_TP_CTL on DDI platforms, the DP port register elsewhere), then
 * written to the sink's DP_TRAINING_PATTERN_SET DPCD register together
 * with the per-lane drive settings from intel_dp->train_set.
 *
 * *DP is the cached port register value; it is updated in place on the
 * non-DDI paths. Returns true if the DPCD write transferred the full
 * buffer, false otherwise.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	/* Pattern byte followed by up to lane_count drive-setting bytes. */
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	if (HAS_DDI(dev)) {
		/* DDI platforms train through DP_TP_CTL, not *DP. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		/* CPT PCH ports use the _CPT variants of the train bits. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Pattern 3 unsupported here: fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Pattern 3 unsupported here: fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	/* Latch the port register before telling the sink. */
	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	return ret == len;
}
2457 | 2457 | ||
/*
 * Restart link training from scratch: zero the cached per-lane drive
 * settings, fold the corresponding (minimum) signal levels into *DP,
 * and program the requested training pattern on source and sink.
 * Returns the result of intel_dp_set_link_train().
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			  uint8_t dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
2466 | 2466 | ||
2467 | static bool | 2467 | static bool |
2468 | intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP, | 2468 | intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP, |
2469 | const uint8_t link_status[DP_LINK_STATUS_SIZE]) | 2469 | const uint8_t link_status[DP_LINK_STATUS_SIZE]) |
2470 | { | 2470 | { |
2471 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 2471 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
2472 | struct drm_device *dev = intel_dig_port->base.base.dev; | 2472 | struct drm_device *dev = intel_dig_port->base.base.dev; |
2473 | struct drm_i915_private *dev_priv = dev->dev_private; | 2473 | struct drm_i915_private *dev_priv = dev->dev_private; |
2474 | int ret; | 2474 | int ret; |
2475 | 2475 | ||
2476 | intel_get_adjust_train(intel_dp, link_status); | 2476 | intel_get_adjust_train(intel_dp, link_status); |
2477 | intel_dp_set_signal_levels(intel_dp, DP); | 2477 | intel_dp_set_signal_levels(intel_dp, DP); |
2478 | 2478 | ||
2479 | I915_WRITE(intel_dp->output_reg, *DP); | 2479 | I915_WRITE(intel_dp->output_reg, *DP); |
2480 | POSTING_READ(intel_dp->output_reg); | 2480 | POSTING_READ(intel_dp->output_reg); |
2481 | 2481 | ||
2482 | ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, | 2482 | ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, |
2483 | intel_dp->train_set, intel_dp->lane_count); | 2483 | intel_dp->train_set, intel_dp->lane_count); |
2484 | 2484 | ||
2485 | return ret == intel_dp->lane_count; | 2485 | return ret == intel_dp->lane_count; |
2486 | } | 2486 | } |
2487 | 2487 | ||
2488 | static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) | 2488 | static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) |
2489 | { | 2489 | { |
2490 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 2490 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
2491 | struct drm_device *dev = intel_dig_port->base.base.dev; | 2491 | struct drm_device *dev = intel_dig_port->base.base.dev; |
2492 | struct drm_i915_private *dev_priv = dev->dev_private; | 2492 | struct drm_i915_private *dev_priv = dev->dev_private; |
2493 | enum port port = intel_dig_port->port; | 2493 | enum port port = intel_dig_port->port; |
2494 | uint32_t val; | 2494 | uint32_t val; |
2495 | 2495 | ||
2496 | if (!HAS_DDI(dev)) | 2496 | if (!HAS_DDI(dev)) |
2497 | return; | 2497 | return; |
2498 | 2498 | ||
2499 | val = I915_READ(DP_TP_CTL(port)); | 2499 | val = I915_READ(DP_TP_CTL(port)); |
2500 | val &= ~DP_TP_CTL_LINK_TRAIN_MASK; | 2500 | val &= ~DP_TP_CTL_LINK_TRAIN_MASK; |
2501 | val |= DP_TP_CTL_LINK_TRAIN_IDLE; | 2501 | val |= DP_TP_CTL_LINK_TRAIN_IDLE; |
2502 | I915_WRITE(DP_TP_CTL(port), val); | 2502 | I915_WRITE(DP_TP_CTL(port), val); |
2503 | 2503 | ||
2504 | /* | 2504 | /* |
2505 | * On PORT_A we can have only eDP in SST mode. There the only reason | 2505 | * On PORT_A we can have only eDP in SST mode. There the only reason |
2506 | * we need to set idle transmission mode is to work around a HW issue | 2506 | * we need to set idle transmission mode is to work around a HW issue |
2507 | * where we enable the pipe while not in idle link-training mode. | 2507 | * where we enable the pipe while not in idle link-training mode. |
2508 | * In this case there is requirement to wait for a minimum number of | 2508 | * In this case there is requirement to wait for a minimum number of |
2509 | * idle patterns to be sent. | 2509 | * idle patterns to be sent. |
2510 | */ | 2510 | */ |
2511 | if (port == PORT_A) | 2511 | if (port == PORT_A) |
2512 | return; | 2512 | return; |
2513 | 2513 | ||
2514 | if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE), | 2514 | if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE), |
2515 | 1)) | 2515 | 1)) |
2516 | DRM_ERROR("Timed out waiting for DP idle patterns\n"); | 2516 | DRM_ERROR("Timed out waiting for DP idle patterns\n"); |
2517 | } | 2517 | } |
2518 | 2518 | ||
/* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data: bandwidth + lane count,
	 * with enhanced framing if the sink advertises it. */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);

	/* No downspread, standard ANSI 8b/10b channel coding. */
	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery: start from zero drive settings with TPS1,
	 * scrambling disabled as the spec requires during training. */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff;		/* sentinel: no voltage level seen yet */
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* Every lane is maxed out: restart training from
			 * scratch, allowing up to 5 full restarts. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	/* Preserve the final port register value for the channel-eq phase. */
	intel_dp->DP = DP;
}
2609 | 2609 | ||
/*
 * Channel-equalization phase of DP link training.  Assumes clock
 * recovery (intel_dp_start_link_train) has already run; falls back to
 * re-running clock recovery if the link loses CR lock mid-way.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* Give up after 5 clock-recovery restarts. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
2690 | 2690 | ||
/* Tell the sink that link training is over (scrambling re-enabled). */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
2696 | 2696 | ||
/*
 * Bring the DP link down on non-DDI hardware: idle the training state,
 * apply the IBX transcoder-select workaround, then disable the port.
 * The exact register write/vblank-wait ordering below is load-bearing.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (HAS_DDI(dev))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Put the port into idle-pattern mode before disabling it. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* We don't really know why we're doing this */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit. */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* Finally kill audio and the port itself, then honor the panel's
	 * power-down delay before anyone touches it again. */
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
2776 | 2776 | ||
2777 | static bool | 2777 | static bool |
2778 | intel_dp_get_dpcd(struct intel_dp *intel_dp) | 2778 | intel_dp_get_dpcd(struct intel_dp *intel_dp) |
2779 | { | 2779 | { |
2780 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 2780 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
2781 | struct drm_device *dev = dig_port->base.base.dev; | 2781 | struct drm_device *dev = dig_port->base.base.dev; |
2782 | struct drm_i915_private *dev_priv = dev->dev_private; | 2782 | struct drm_i915_private *dev_priv = dev->dev_private; |
2783 | 2783 | ||
2784 | char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; | 2784 | char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; |
2785 | 2785 | ||
2786 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd, | 2786 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd, |
2787 | sizeof(intel_dp->dpcd)) < 0) | 2787 | sizeof(intel_dp->dpcd)) < 0) |
2788 | return false; /* aux transfer failed */ | 2788 | return false; /* aux transfer failed */ |
2789 | 2789 | ||
2790 | hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), | 2790 | hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), |
2791 | 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); | 2791 | 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); |
2792 | DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); | 2792 | DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); |
2793 | 2793 | ||
2794 | if (intel_dp->dpcd[DP_DPCD_REV] == 0) | 2794 | if (intel_dp->dpcd[DP_DPCD_REV] == 0) |
2795 | return false; /* DPCD not present */ | 2795 | return false; /* DPCD not present */ |
2796 | 2796 | ||
2797 | /* Check if the panel supports PSR */ | 2797 | /* Check if the panel supports PSR */ |
2798 | memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); | 2798 | memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); |
2799 | if (is_edp(intel_dp)) { | 2799 | if (is_edp(intel_dp)) { |
2800 | intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT, | 2800 | intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT, |
2801 | intel_dp->psr_dpcd, | 2801 | intel_dp->psr_dpcd, |
2802 | sizeof(intel_dp->psr_dpcd)); | 2802 | sizeof(intel_dp->psr_dpcd)); |
2803 | if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) { | 2803 | if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) { |
2804 | dev_priv->psr.sink_support = true; | 2804 | dev_priv->psr.sink_support = true; |
2805 | DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); | 2805 | DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); |
2806 | } | 2806 | } |
2807 | } | 2807 | } |
2808 | 2808 | ||
2809 | /* Training Pattern 3 support */ | 2809 | /* Training Pattern 3 support */ |
2810 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 && | 2810 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 && |
2811 | intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) { | 2811 | intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) { |
2812 | intel_dp->use_tps3 = true; | 2812 | intel_dp->use_tps3 = true; |
2813 | DRM_DEBUG_KMS("Displayport TPS3 supported"); | 2813 | DRM_DEBUG_KMS("Displayport TPS3 supported"); |
2814 | } else | 2814 | } else |
2815 | intel_dp->use_tps3 = false; | 2815 | intel_dp->use_tps3 = false; |
2816 | 2816 | ||
2817 | if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & | 2817 | if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & |
2818 | DP_DWN_STRM_PORT_PRESENT)) | 2818 | DP_DWN_STRM_PORT_PRESENT)) |
2819 | return true; /* native DP sink */ | 2819 | return true; /* native DP sink */ |
2820 | 2820 | ||
2821 | if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) | 2821 | if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) |
2822 | return true; /* no per-port downstream info */ | 2822 | return true; /* no per-port downstream info */ |
2823 | 2823 | ||
2824 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0, | 2824 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0, |
2825 | intel_dp->downstream_ports, | 2825 | intel_dp->downstream_ports, |
2826 | DP_MAX_DOWNSTREAM_PORTS) < 0) | 2826 | DP_MAX_DOWNSTREAM_PORTS) < 0) |
2827 | return false; /* downstream port status fetch failed */ | 2827 | return false; /* downstream port status fetch failed */ |
2828 | 2828 | ||
2829 | return true; | 2829 | return true; |
2830 | } | 2830 | } |
2831 | 2831 | ||
/*
 * Read and log the sink and branch OUIs (IEEE vendor identifiers) if
 * the sink advertises OUI support.  Purely informational.  The eDP VDD
 * bracketing keeps the panel powered for the AUX reads.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	intel_edp_panel_vdd_on(intel_dp);

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	edp_panel_vdd_off(intel_dp, false);
}
2852 | 2852 | ||
/*
 * Ask a CRC-capable sink to compute a frame CRC and copy the 6-byte
 * result into @crc.  Returns 0 on success, -ENOTTY if the sink lacks
 * CRC support, -EAGAIN on AUX transfer failure.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	u8 buf[1];

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
		return -EAGAIN;

	if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	/* Start the sink-side CRC calculation. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       DP_TEST_SINK_START) < 0)
		return -EAGAIN;

	/* Wait 2 vblanks to be sure we will have the correct CRC value */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
		return -EAGAIN;

	/* Stop the sink CRC engine again (best effort, result ignored). */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
	return 0;
}
2881 | 2881 | ||
2882 | static bool | 2882 | static bool |
2883 | intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) | 2883 | intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) |
2884 | { | 2884 | { |
2885 | return intel_dp_dpcd_read_wake(&intel_dp->aux, | 2885 | return intel_dp_dpcd_read_wake(&intel_dp->aux, |
2886 | DP_DEVICE_SERVICE_IRQ_VECTOR, | 2886 | DP_DEVICE_SERVICE_IRQ_VECTOR, |
2887 | sink_irq_vector, 1) == 1; | 2887 | sink_irq_vector, 1) == 1; |
2888 | } | 2888 | } |
2889 | 2889 | ||
/*
 * Respond to a sink automated-test request.  No tests are implemented,
 * so unconditionally NAK the request in DP_TEST_RESPONSE.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
2896 | 2896 | ||
2897 | /* | 2897 | /* |
2898 | * According to DP spec | 2898 | * According to DP spec |
2899 | * 5.1.2: | 2899 | * 5.1.2: |
2900 | * 1. Read DPCD | 2900 | * 1. Read DPCD |
2901 | * 2. Configure link according to Receiver Capabilities | 2901 | * 2. Configure link according to Receiver Capabilities |
2902 | * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 | 2902 | * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 |
2903 | * 4. Check link status on receipt of hot-plug interrupt | 2903 | * 4. Check link status on receipt of hot-plug interrupt |
2904 | */ | 2904 | */ |
2905 | 2905 | ||
/*
 * intel_dp_check_link_status - re-validate the DP link on a hotplug IRQ
 * @intel_dp: DP encoder state
 *
 * Implements step 4 of the DP spec sequence quoted above (5.1.2): on a
 * hot-plug interrupt, re-read the sink's link status and DPCD, ack and
 * dispatch any pending sink IRQs, and retrain the link if channel
 * equalization has been lost.
 */
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Nothing to do unless this encoder is actively driving something. */
	if (!intel_encoder->connectors_active)
		return;

	/* Active connectors without a crtc is an inconsistent state. */
	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain if the sink no longer reports channel equalization. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_encoder->base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
2951 | 2951 | ||
2952 | /* XXX this is probably wrong for multiple downstream ports */ | 2952 | /* XXX this is probably wrong for multiple downstream ports */ |
2953 | static enum drm_connector_status | 2953 | static enum drm_connector_status |
2954 | intel_dp_detect_dpcd(struct intel_dp *intel_dp) | 2954 | intel_dp_detect_dpcd(struct intel_dp *intel_dp) |
2955 | { | 2955 | { |
2956 | uint8_t *dpcd = intel_dp->dpcd; | 2956 | uint8_t *dpcd = intel_dp->dpcd; |
2957 | uint8_t type; | 2957 | uint8_t type; |
2958 | 2958 | ||
2959 | if (!intel_dp_get_dpcd(intel_dp)) | 2959 | if (!intel_dp_get_dpcd(intel_dp)) |
2960 | return connector_status_disconnected; | 2960 | return connector_status_disconnected; |
2961 | 2961 | ||
2962 | /* if there's no downstream port, we're done */ | 2962 | /* if there's no downstream port, we're done */ |
2963 | if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) | 2963 | if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) |
2964 | return connector_status_connected; | 2964 | return connector_status_connected; |
2965 | 2965 | ||
2966 | /* If we're HPD-aware, SINK_COUNT changes dynamically */ | 2966 | /* If we're HPD-aware, SINK_COUNT changes dynamically */ |
2967 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && | 2967 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && |
2968 | intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { | 2968 | intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { |
2969 | uint8_t reg; | 2969 | uint8_t reg; |
2970 | 2970 | ||
2971 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT, | 2971 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT, |
2972 | ®, 1) < 0) | 2972 | ®, 1) < 0) |
2973 | return connector_status_unknown; | 2973 | return connector_status_unknown; |
2974 | 2974 | ||
2975 | return DP_GET_SINK_COUNT(reg) ? connector_status_connected | 2975 | return DP_GET_SINK_COUNT(reg) ? connector_status_connected |
2976 | : connector_status_disconnected; | 2976 | : connector_status_disconnected; |
2977 | } | 2977 | } |
2978 | 2978 | ||
2979 | /* If no HPD, poke DDC gently */ | 2979 | /* If no HPD, poke DDC gently */ |
2980 | if (drm_probe_ddc(&intel_dp->aux.ddc)) | 2980 | if (drm_probe_ddc(&intel_dp->aux.ddc)) |
2981 | return connector_status_connected; | 2981 | return connector_status_connected; |
2982 | 2982 | ||
2983 | /* Well we tried, say unknown for unreliable port types */ | 2983 | /* Well we tried, say unknown for unreliable port types */ |
2984 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { | 2984 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { |
2985 | type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; | 2985 | type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; |
2986 | if (type == DP_DS_PORT_TYPE_VGA || | 2986 | if (type == DP_DS_PORT_TYPE_VGA || |
2987 | type == DP_DS_PORT_TYPE_NON_EDID) | 2987 | type == DP_DS_PORT_TYPE_NON_EDID) |
2988 | return connector_status_unknown; | 2988 | return connector_status_unknown; |
2989 | } else { | 2989 | } else { |
2990 | type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & | 2990 | type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & |
2991 | DP_DWN_STRM_PORT_TYPE_MASK; | 2991 | DP_DWN_STRM_PORT_TYPE_MASK; |
2992 | if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || | 2992 | if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || |
2993 | type == DP_DWN_STRM_PORT_TYPE_OTHER) | 2993 | type == DP_DWN_STRM_PORT_TYPE_OTHER) |
2994 | return connector_status_unknown; | 2994 | return connector_status_unknown; |
2995 | } | 2995 | } |
2996 | 2996 | ||
2997 | /* Anything else is out of spec, warn and ignore */ | 2997 | /* Anything else is out of spec, warn and ignore */ |
2998 | DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); | 2998 | DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); |
2999 | return connector_status_disconnected; | 2999 | return connector_status_disconnected; |
3000 | } | 3000 | } |
3001 | 3001 | ||
3002 | static enum drm_connector_status | 3002 | static enum drm_connector_status |
3003 | ironlake_dp_detect(struct intel_dp *intel_dp) | 3003 | ironlake_dp_detect(struct intel_dp *intel_dp) |
3004 | { | 3004 | { |
3005 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 3005 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
3006 | struct drm_i915_private *dev_priv = dev->dev_private; | 3006 | struct drm_i915_private *dev_priv = dev->dev_private; |
3007 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 3007 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
3008 | enum drm_connector_status status; | 3008 | enum drm_connector_status status; |
3009 | 3009 | ||
3010 | /* Can't disconnect eDP, but you can close the lid... */ | 3010 | /* Can't disconnect eDP, but you can close the lid... */ |
3011 | if (is_edp(intel_dp)) { | 3011 | if (is_edp(intel_dp)) { |
3012 | status = intel_panel_detect(dev); | 3012 | status = intel_panel_detect(dev); |
3013 | if (status == connector_status_unknown) | 3013 | if (status == connector_status_unknown) |
3014 | status = connector_status_connected; | 3014 | status = connector_status_connected; |
3015 | return status; | 3015 | return status; |
3016 | } | 3016 | } |
3017 | 3017 | ||
3018 | if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) | 3018 | if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) |
3019 | return connector_status_disconnected; | 3019 | return connector_status_disconnected; |
3020 | 3020 | ||
3021 | return intel_dp_detect_dpcd(intel_dp); | 3021 | return intel_dp_detect_dpcd(intel_dp); |
3022 | } | 3022 | } |
3023 | 3023 | ||
3024 | static enum drm_connector_status | 3024 | static enum drm_connector_status |
3025 | g4x_dp_detect(struct intel_dp *intel_dp) | 3025 | g4x_dp_detect(struct intel_dp *intel_dp) |
3026 | { | 3026 | { |
3027 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 3027 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
3028 | struct drm_i915_private *dev_priv = dev->dev_private; | 3028 | struct drm_i915_private *dev_priv = dev->dev_private; |
3029 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 3029 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
3030 | uint32_t bit; | 3030 | uint32_t bit; |
3031 | 3031 | ||
3032 | /* Can't disconnect eDP, but you can close the lid... */ | 3032 | /* Can't disconnect eDP, but you can close the lid... */ |
3033 | if (is_edp(intel_dp)) { | 3033 | if (is_edp(intel_dp)) { |
3034 | enum drm_connector_status status; | 3034 | enum drm_connector_status status; |
3035 | 3035 | ||
3036 | status = intel_panel_detect(dev); | 3036 | status = intel_panel_detect(dev); |
3037 | if (status == connector_status_unknown) | 3037 | if (status == connector_status_unknown) |
3038 | status = connector_status_connected; | 3038 | status = connector_status_connected; |
3039 | return status; | 3039 | return status; |
3040 | } | 3040 | } |
3041 | 3041 | ||
3042 | if (IS_VALLEYVIEW(dev)) { | 3042 | if (IS_VALLEYVIEW(dev)) { |
3043 | switch (intel_dig_port->port) { | 3043 | switch (intel_dig_port->port) { |
3044 | case PORT_B: | 3044 | case PORT_B: |
3045 | bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; | 3045 | bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; |
3046 | break; | 3046 | break; |
3047 | case PORT_C: | 3047 | case PORT_C: |
3048 | bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; | 3048 | bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; |
3049 | break; | 3049 | break; |
3050 | case PORT_D: | 3050 | case PORT_D: |
3051 | bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; | 3051 | bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; |
3052 | break; | 3052 | break; |
3053 | default: | 3053 | default: |
3054 | return connector_status_unknown; | 3054 | return connector_status_unknown; |
3055 | } | 3055 | } |
3056 | } else { | 3056 | } else { |
3057 | switch (intel_dig_port->port) { | 3057 | switch (intel_dig_port->port) { |
3058 | case PORT_B: | 3058 | case PORT_B: |
3059 | bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; | 3059 | bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; |
3060 | break; | 3060 | break; |
3061 | case PORT_C: | 3061 | case PORT_C: |
3062 | bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; | 3062 | bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; |
3063 | break; | 3063 | break; |
3064 | case PORT_D: | 3064 | case PORT_D: |
3065 | bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; | 3065 | bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; |
3066 | break; | 3066 | break; |
3067 | default: | 3067 | default: |
3068 | return connector_status_unknown; | 3068 | return connector_status_unknown; |
3069 | } | 3069 | } |
3070 | } | 3070 | } |
3071 | 3071 | ||
3072 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) | 3072 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) |
3073 | return connector_status_disconnected; | 3073 | return connector_status_disconnected; |
3074 | 3074 | ||
3075 | return intel_dp_detect_dpcd(intel_dp); | 3075 | return intel_dp_detect_dpcd(intel_dp); |
3076 | } | 3076 | } |
3077 | 3077 | ||
3078 | static struct edid * | 3078 | static struct edid * |
3079 | intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) | 3079 | intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) |
3080 | { | 3080 | { |
3081 | struct intel_connector *intel_connector = to_intel_connector(connector); | 3081 | struct intel_connector *intel_connector = to_intel_connector(connector); |
3082 | 3082 | ||
3083 | /* use cached edid if we have one */ | 3083 | /* use cached edid if we have one */ |
3084 | if (intel_connector->edid) { | 3084 | if (intel_connector->edid) { |
3085 | /* invalid edid */ | 3085 | /* invalid edid */ |
3086 | if (IS_ERR(intel_connector->edid)) | 3086 | if (IS_ERR(intel_connector->edid)) |
3087 | return NULL; | 3087 | return NULL; |
3088 | 3088 | ||
3089 | return drm_edid_duplicate(intel_connector->edid); | 3089 | return drm_edid_duplicate(intel_connector->edid); |
3090 | } | 3090 | } |
3091 | 3091 | ||
3092 | return drm_get_edid(connector, adapter); | 3092 | return drm_get_edid(connector, adapter); |
3093 | } | 3093 | } |
3094 | 3094 | ||
3095 | static int | 3095 | static int |
3096 | intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) | 3096 | intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) |
3097 | { | 3097 | { |
3098 | struct intel_connector *intel_connector = to_intel_connector(connector); | 3098 | struct intel_connector *intel_connector = to_intel_connector(connector); |
3099 | 3099 | ||
3100 | /* use cached edid if we have one */ | 3100 | /* use cached edid if we have one */ |
3101 | if (intel_connector->edid) { | 3101 | if (intel_connector->edid) { |
3102 | /* invalid edid */ | 3102 | /* invalid edid */ |
3103 | if (IS_ERR(intel_connector->edid)) | 3103 | if (IS_ERR(intel_connector->edid)) |
3104 | return 0; | 3104 | return 0; |
3105 | 3105 | ||
3106 | return intel_connector_update_modes(connector, | 3106 | return intel_connector_update_modes(connector, |
3107 | intel_connector->edid); | 3107 | intel_connector->edid); |
3108 | } | 3108 | } |
3109 | 3109 | ||
3110 | return intel_ddc_get_modes(connector, adapter); | 3110 | return intel_ddc_get_modes(connector, adapter); |
3111 | } | 3111 | } |
3112 | 3112 | ||
/*
 * Connector ->detect hook: determine whether a sink is attached and, if
 * so, probe its OUI and audio capability.  Holds a runtime-PM reference
 * and the port's display power domain across all register/AUX accesses.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	struct edid *edid = NULL;

	intel_runtime_pm_get(dev_priv);

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector));

	/* Assume no audio until the EDID (or the force property) says so. */
	intel_dp->has_audio = false;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	/* Honour the force-audio property; otherwise consult the EDID. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	/* A connected non-eDP port is, by definition, DisplayPort. */
	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

out:
	intel_display_power_put(dev_priv, power_domain);

	intel_runtime_pm_put(dev_priv);

	return status;
}
3166 | 3166 | ||
3167 | static int intel_dp_get_modes(struct drm_connector *connector) | 3167 | static int intel_dp_get_modes(struct drm_connector *connector) |
3168 | { | 3168 | { |
3169 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 3169 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
3170 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 3170 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
3171 | struct intel_encoder *intel_encoder = &intel_dig_port->base; | 3171 | struct intel_encoder *intel_encoder = &intel_dig_port->base; |
3172 | struct intel_connector *intel_connector = to_intel_connector(connector); | 3172 | struct intel_connector *intel_connector = to_intel_connector(connector); |
3173 | struct drm_device *dev = connector->dev; | 3173 | struct drm_device *dev = connector->dev; |
3174 | struct drm_i915_private *dev_priv = dev->dev_private; | 3174 | struct drm_i915_private *dev_priv = dev->dev_private; |
3175 | enum intel_display_power_domain power_domain; | 3175 | enum intel_display_power_domain power_domain; |
3176 | int ret; | 3176 | int ret; |
3177 | 3177 | ||
3178 | /* We should parse the EDID data and find out if it has an audio sink | 3178 | /* We should parse the EDID data and find out if it has an audio sink |
3179 | */ | 3179 | */ |
3180 | 3180 | ||
3181 | power_domain = intel_display_port_power_domain(intel_encoder); | 3181 | power_domain = intel_display_port_power_domain(intel_encoder); |
3182 | intel_display_power_get(dev_priv, power_domain); | 3182 | intel_display_power_get(dev_priv, power_domain); |
3183 | 3183 | ||
3184 | ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc); | 3184 | ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc); |
3185 | intel_display_power_put(dev_priv, power_domain); | 3185 | intel_display_power_put(dev_priv, power_domain); |
3186 | if (ret) | 3186 | if (ret) |
3187 | return ret; | 3187 | return ret; |
3188 | 3188 | ||
3189 | /* if eDP has no EDID, fall back to fixed mode */ | 3189 | /* if eDP has no EDID, fall back to fixed mode */ |
3190 | if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { | 3190 | if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { |
3191 | struct drm_display_mode *mode; | 3191 | struct drm_display_mode *mode; |
3192 | mode = drm_mode_duplicate(dev, | 3192 | mode = drm_mode_duplicate(dev, |
3193 | intel_connector->panel.fixed_mode); | 3193 | intel_connector->panel.fixed_mode); |
3194 | if (mode) { | 3194 | if (mode) { |
3195 | drm_mode_probed_add(connector, mode); | 3195 | drm_mode_probed_add(connector, mode); |
3196 | return 1; | 3196 | return 1; |
3197 | } | 3197 | } |
3198 | } | 3198 | } |
3199 | return 0; | 3199 | return 0; |
3200 | } | 3200 | } |
3201 | 3201 | ||
3202 | static bool | 3202 | static bool |
3203 | intel_dp_detect_audio(struct drm_connector *connector) | 3203 | intel_dp_detect_audio(struct drm_connector *connector) |
3204 | { | 3204 | { |
3205 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 3205 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
3206 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 3206 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
3207 | struct intel_encoder *intel_encoder = &intel_dig_port->base; | 3207 | struct intel_encoder *intel_encoder = &intel_dig_port->base; |
3208 | struct drm_device *dev = connector->dev; | 3208 | struct drm_device *dev = connector->dev; |
3209 | struct drm_i915_private *dev_priv = dev->dev_private; | 3209 | struct drm_i915_private *dev_priv = dev->dev_private; |
3210 | enum intel_display_power_domain power_domain; | 3210 | enum intel_display_power_domain power_domain; |
3211 | struct edid *edid; | 3211 | struct edid *edid; |
3212 | bool has_audio = false; | 3212 | bool has_audio = false; |
3213 | 3213 | ||
3214 | power_domain = intel_display_port_power_domain(intel_encoder); | 3214 | power_domain = intel_display_port_power_domain(intel_encoder); |
3215 | intel_display_power_get(dev_priv, power_domain); | 3215 | intel_display_power_get(dev_priv, power_domain); |
3216 | 3216 | ||
3217 | edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc); | 3217 | edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc); |
3218 | if (edid) { | 3218 | if (edid) { |
3219 | has_audio = drm_detect_monitor_audio(edid); | 3219 | has_audio = drm_detect_monitor_audio(edid); |
3220 | kfree(edid); | 3220 | kfree(edid); |
3221 | } | 3221 | } |
3222 | 3222 | ||
3223 | intel_display_power_put(dev_priv, power_domain); | 3223 | intel_display_power_put(dev_priv, power_domain); |
3224 | 3224 | ||
3225 | return has_audio; | 3225 | return has_audio; |
3226 | } | 3226 | } |
3227 | 3227 | ||
/*
 * Connector ->set_property hook.  Handles the force-audio, broadcast-RGB
 * and (for eDP) scaling-mode properties; any property whose value change
 * affects the output triggers a modeset via intel_crtc_restore_mode().
 * Returns 0 on success or a negative error code.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the drm object first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO means consult the monitor's EDID. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* No effective change: skip the modeset. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the new property value with a full modeset if active. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
3315 | 3315 | ||
3316 | static void | 3316 | static void |
3317 | intel_dp_connector_destroy(struct drm_connector *connector) | 3317 | intel_dp_connector_destroy(struct drm_connector *connector) |
3318 | { | 3318 | { |
3319 | struct intel_connector *intel_connector = to_intel_connector(connector); | 3319 | struct intel_connector *intel_connector = to_intel_connector(connector); |
3320 | 3320 | ||
3321 | if (!IS_ERR_OR_NULL(intel_connector->edid)) | 3321 | if (!IS_ERR_OR_NULL(intel_connector->edid)) |
3322 | kfree(intel_connector->edid); | 3322 | kfree(intel_connector->edid); |
3323 | 3323 | ||
3324 | /* Can't call is_edp() since the encoder may have been destroyed | 3324 | /* Can't call is_edp() since the encoder may have been destroyed |
3325 | * already. */ | 3325 | * already. */ |
3326 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) | 3326 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) |
3327 | intel_panel_fini(&intel_connector->panel); | 3327 | intel_panel_fini(&intel_connector->panel); |
3328 | 3328 | ||
3329 | drm_connector_cleanup(connector); | 3329 | drm_connector_cleanup(connector); |
3330 | kfree(connector); | 3330 | kfree(connector); |
3331 | } | 3331 | } |
3332 | 3332 | ||
/*
 * Encoder ->destroy hook: tear down the AUX i2c bus, the drm encoder and
 * (for eDP) any pending panel-VDD work before freeing the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		/*
		 * Flush the delayed VDD-off work, then force VDD off under
		 * the mode_config lock so nothing races the teardown.
		 */
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		mutex_lock(&dev->mode_config.mutex);
		edp_panel_vdd_off_sync(intel_dp);
		mutex_unlock(&dev->mode_config.mutex);
	}
	kfree(intel_dig_port);
}
3349 | 3349 | ||
/* drm_connector_funcs vtable shared by all DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_connector_destroy,
};
3357 | 3357 | ||
/* Probe helpers: mode enumeration/validation and encoder lookup. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
3363 | 3363 | ||
3364 | static const struct drm_encoder_funcs intel_dp_enc_funcs = { | 3364 | static const struct drm_encoder_funcs intel_dp_enc_funcs = { |
3365 | .destroy = intel_dp_encoder_destroy, | 3365 | .destroy = intel_dp_encoder_destroy, |
3366 | }; | 3366 | }; |
3367 | 3367 | ||
3368 | static void | 3368 | static void |
3369 | intel_dp_hot_plug(struct intel_encoder *intel_encoder) | 3369 | intel_dp_hot_plug(struct intel_encoder *intel_encoder) |
3370 | { | 3370 | { |
3371 | struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); | 3371 | struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); |
3372 | 3372 | ||
3373 | intel_dp_check_link_status(intel_dp); | 3373 | intel_dp_check_link_status(intel_dp); |
3374 | } | 3374 | } |
3375 | 3375 | ||
3376 | /* Return which DP Port should be selected for Transcoder DP control */ | 3376 | /* Return which DP Port should be selected for Transcoder DP control */ |
3377 | int | 3377 | int |
3378 | intel_trans_dp_port_sel(struct drm_crtc *crtc) | 3378 | intel_trans_dp_port_sel(struct drm_crtc *crtc) |
3379 | { | 3379 | { |
3380 | struct drm_device *dev = crtc->dev; | 3380 | struct drm_device *dev = crtc->dev; |
3381 | struct intel_encoder *intel_encoder; | 3381 | struct intel_encoder *intel_encoder; |
3382 | struct intel_dp *intel_dp; | 3382 | struct intel_dp *intel_dp; |
3383 | 3383 | ||
3384 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { | 3384 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
3385 | intel_dp = enc_to_intel_dp(&intel_encoder->base); | 3385 | intel_dp = enc_to_intel_dp(&intel_encoder->base); |
3386 | 3386 | ||
3387 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || | 3387 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || |
3388 | intel_encoder->type == INTEL_OUTPUT_EDP) | 3388 | intel_encoder->type == INTEL_OUTPUT_EDP) |
3389 | return intel_dp->output_reg; | 3389 | return intel_dp->output_reg; |
3390 | } | 3390 | } |
3391 | 3391 | ||
3392 | return -1; | 3392 | return -1; |
3393 | } | 3393 | } |
3394 | 3394 | ||
3395 | /* check the VBT to see whether the eDP is on DP-D port */ | 3395 | /* check the VBT to see whether the eDP is on DP-D port */ |
3396 | bool intel_dp_is_edp(struct drm_device *dev, enum port port) | 3396 | bool intel_dp_is_edp(struct drm_device *dev, enum port port) |
3397 | { | 3397 | { |
3398 | struct drm_i915_private *dev_priv = dev->dev_private; | 3398 | struct drm_i915_private *dev_priv = dev->dev_private; |
3399 | union child_device_config *p_child; | 3399 | union child_device_config *p_child; |
3400 | int i; | 3400 | int i; |
3401 | static const short port_mapping[] = { | 3401 | static const short port_mapping[] = { |
3402 | [PORT_B] = PORT_IDPB, | 3402 | [PORT_B] = PORT_IDPB, |
3403 | [PORT_C] = PORT_IDPC, | 3403 | [PORT_C] = PORT_IDPC, |
3404 | [PORT_D] = PORT_IDPD, | 3404 | [PORT_D] = PORT_IDPD, |
3405 | }; | 3405 | }; |
3406 | 3406 | ||
3407 | if (port == PORT_A) | 3407 | if (port == PORT_A) |
3408 | return true; | 3408 | return true; |
3409 | 3409 | ||
3410 | if (!dev_priv->vbt.child_dev_num) | 3410 | if (!dev_priv->vbt.child_dev_num) |
3411 | return false; | 3411 | return false; |
3412 | 3412 | ||
3413 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { | 3413 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { |
3414 | p_child = dev_priv->vbt.child_dev + i; | 3414 | p_child = dev_priv->vbt.child_dev + i; |
3415 | 3415 | ||
3416 | if (p_child->common.dvo_port == port_mapping[port] && | 3416 | if (p_child->common.dvo_port == port_mapping[port] && |
3417 | (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) == | 3417 | (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) == |
3418 | (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) | 3418 | (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) |
3419 | return true; | 3419 | return true; |
3420 | } | 3420 | } |
3421 | return false; | 3421 | return false; |
3422 | } | 3422 | } |
3423 | 3423 | ||
3424 | static void | 3424 | static void |
3425 | intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) | 3425 | intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) |
3426 | { | 3426 | { |
3427 | struct intel_connector *intel_connector = to_intel_connector(connector); | 3427 | struct intel_connector *intel_connector = to_intel_connector(connector); |
3428 | 3428 | ||
3429 | intel_attach_force_audio_property(connector); | 3429 | intel_attach_force_audio_property(connector); |
3430 | intel_attach_broadcast_rgb_property(connector); | 3430 | intel_attach_broadcast_rgb_property(connector); |
3431 | intel_dp->color_range_auto = true; | 3431 | intel_dp->color_range_auto = true; |
3432 | 3432 | ||
3433 | if (is_edp(intel_dp)) { | 3433 | if (is_edp(intel_dp)) { |
3434 | drm_mode_create_scaling_mode_property(connector->dev); | 3434 | drm_mode_create_scaling_mode_property(connector->dev); |
3435 | drm_object_attach_property( | 3435 | drm_object_attach_property( |
3436 | &connector->base, | 3436 | &connector->base, |
3437 | connector->dev->mode_config.scaling_mode_property, | 3437 | connector->dev->mode_config.scaling_mode_property, |
3438 | DRM_MODE_SCALE_ASPECT); | 3438 | DRM_MODE_SCALE_ASPECT); |
3439 | intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; | 3439 | intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; |
3440 | } | 3440 | } |
3441 | } | 3441 | } |
3442 | 3442 | ||
3443 | static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) | 3443 | static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) |
3444 | { | 3444 | { |
3445 | intel_dp->last_power_cycle = jiffies; | 3445 | intel_dp->last_power_cycle = jiffies; |
3446 | intel_dp->last_power_on = jiffies; | 3446 | intel_dp->last_power_on = jiffies; |
3447 | intel_dp->last_backlight_off = jiffies; | 3447 | intel_dp->last_backlight_off = jiffies; |
3448 | } | 3448 | } |
3449 | 3449 | ||
3450 | static void | 3450 | static void |
3451 | intel_dp_init_panel_power_sequencer(struct drm_device *dev, | 3451 | intel_dp_init_panel_power_sequencer(struct drm_device *dev, |
3452 | struct intel_dp *intel_dp, | 3452 | struct intel_dp *intel_dp, |
3453 | struct edp_power_seq *out) | 3453 | struct edp_power_seq *out) |
3454 | { | 3454 | { |
3455 | struct drm_i915_private *dev_priv = dev->dev_private; | 3455 | struct drm_i915_private *dev_priv = dev->dev_private; |
3456 | struct edp_power_seq cur, vbt, spec, final; | 3456 | struct edp_power_seq cur, vbt, spec, final; |
3457 | u32 pp_on, pp_off, pp_div, pp; | 3457 | u32 pp_on, pp_off, pp_div, pp; |
3458 | int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg; | 3458 | int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg; |
3459 | 3459 | ||
3460 | if (HAS_PCH_SPLIT(dev)) { | 3460 | if (HAS_PCH_SPLIT(dev)) { |
3461 | pp_ctrl_reg = PCH_PP_CONTROL; | 3461 | pp_ctrl_reg = PCH_PP_CONTROL; |
3462 | pp_on_reg = PCH_PP_ON_DELAYS; | 3462 | pp_on_reg = PCH_PP_ON_DELAYS; |
3463 | pp_off_reg = PCH_PP_OFF_DELAYS; | 3463 | pp_off_reg = PCH_PP_OFF_DELAYS; |
3464 | pp_div_reg = PCH_PP_DIVISOR; | 3464 | pp_div_reg = PCH_PP_DIVISOR; |
3465 | } else { | 3465 | } else { |
3466 | enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); | 3466 | enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); |
3467 | 3467 | ||
3468 | pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); | 3468 | pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); |
3469 | pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); | 3469 | pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); |
3470 | pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe); | 3470 | pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe); |
3471 | pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); | 3471 | pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); |
3472 | } | 3472 | } |
3473 | 3473 | ||
3474 | /* Workaround: Need to write PP_CONTROL with the unlock key as | 3474 | /* Workaround: Need to write PP_CONTROL with the unlock key as |
3475 | * the very first thing. */ | 3475 | * the very first thing. */ |
3476 | pp = ironlake_get_pp_control(intel_dp); | 3476 | pp = ironlake_get_pp_control(intel_dp); |
3477 | I915_WRITE(pp_ctrl_reg, pp); | 3477 | I915_WRITE(pp_ctrl_reg, pp); |
3478 | 3478 | ||
3479 | pp_on = I915_READ(pp_on_reg); | 3479 | pp_on = I915_READ(pp_on_reg); |
3480 | pp_off = I915_READ(pp_off_reg); | 3480 | pp_off = I915_READ(pp_off_reg); |
3481 | pp_div = I915_READ(pp_div_reg); | 3481 | pp_div = I915_READ(pp_div_reg); |
3482 | 3482 | ||
3483 | /* Pull timing values out of registers */ | 3483 | /* Pull timing values out of registers */ |
3484 | cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> | 3484 | cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> |
3485 | PANEL_POWER_UP_DELAY_SHIFT; | 3485 | PANEL_POWER_UP_DELAY_SHIFT; |
3486 | 3486 | ||
3487 | cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> | 3487 | cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> |
3488 | PANEL_LIGHT_ON_DELAY_SHIFT; | 3488 | PANEL_LIGHT_ON_DELAY_SHIFT; |
3489 | 3489 | ||
3490 | cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> | 3490 | cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> |
3491 | PANEL_LIGHT_OFF_DELAY_SHIFT; | 3491 | PANEL_LIGHT_OFF_DELAY_SHIFT; |
3492 | 3492 | ||
3493 | cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> | 3493 | cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> |
3494 | PANEL_POWER_DOWN_DELAY_SHIFT; | 3494 | PANEL_POWER_DOWN_DELAY_SHIFT; |
3495 | 3495 | ||
3496 | cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> | 3496 | cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> |
3497 | PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; | 3497 | PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; |
3498 | 3498 | ||
3499 | DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", | 3499 | DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", |
3500 | cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); | 3500 | cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); |
3501 | 3501 | ||
3502 | vbt = dev_priv->vbt.edp_pps; | 3502 | vbt = dev_priv->vbt.edp_pps; |
3503 | 3503 | ||
3504 | /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of | 3504 | /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of |
3505 | * our hw here, which are all in 100usec. */ | 3505 | * our hw here, which are all in 100usec. */ |
3506 | spec.t1_t3 = 210 * 10; | 3506 | spec.t1_t3 = 210 * 10; |
3507 | spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ | 3507 | spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ |
3508 | spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ | 3508 | spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ |
3509 | spec.t10 = 500 * 10; | 3509 | spec.t10 = 500 * 10; |
3510 | /* This one is special and actually in units of 100ms, but zero | 3510 | /* This one is special and actually in units of 100ms, but zero |
3511 | * based in the hw (so we need to add 100 ms). But the sw vbt | 3511 | * based in the hw (so we need to add 100 ms). But the sw vbt |
3512 | * table multiplies it with 1000 to make it in units of 100usec, | 3512 | * table multiplies it with 1000 to make it in units of 100usec, |
3513 | * too. */ | 3513 | * too. */ |
3514 | spec.t11_t12 = (510 + 100) * 10; | 3514 | spec.t11_t12 = (510 + 100) * 10; |
3515 | 3515 | ||
3516 | DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", | 3516 | DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", |
3517 | vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); | 3517 | vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); |
3518 | 3518 | ||
3519 | /* Use the max of the register settings and vbt. If both are | 3519 | /* Use the max of the register settings and vbt. If both are |
3520 | * unset, fall back to the spec limits. */ | 3520 | * unset, fall back to the spec limits. */ |
3521 | #define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \ | 3521 | #define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \ |
3522 | spec.field : \ | 3522 | spec.field : \ |
3523 | max(cur.field, vbt.field)) | 3523 | max(cur.field, vbt.field)) |
3524 | assign_final(t1_t3); | 3524 | assign_final(t1_t3); |
3525 | assign_final(t8); | 3525 | assign_final(t8); |
3526 | assign_final(t9); | 3526 | assign_final(t9); |
3527 | assign_final(t10); | 3527 | assign_final(t10); |
3528 | assign_final(t11_t12); | 3528 | assign_final(t11_t12); |
3529 | #undef assign_final | 3529 | #undef assign_final |
3530 | 3530 | ||
3531 | #define get_delay(field) (DIV_ROUND_UP(final.field, 10)) | 3531 | #define get_delay(field) (DIV_ROUND_UP(final.field, 10)) |
3532 | intel_dp->panel_power_up_delay = get_delay(t1_t3); | 3532 | intel_dp->panel_power_up_delay = get_delay(t1_t3); |
3533 | intel_dp->backlight_on_delay = get_delay(t8); | 3533 | intel_dp->backlight_on_delay = get_delay(t8); |
3534 | intel_dp->backlight_off_delay = get_delay(t9); | 3534 | intel_dp->backlight_off_delay = get_delay(t9); |
3535 | intel_dp->panel_power_down_delay = get_delay(t10); | 3535 | intel_dp->panel_power_down_delay = get_delay(t10); |
3536 | intel_dp->panel_power_cycle_delay = get_delay(t11_t12); | 3536 | intel_dp->panel_power_cycle_delay = get_delay(t11_t12); |
3537 | #undef get_delay | 3537 | #undef get_delay |
3538 | 3538 | ||
3539 | DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", | 3539 | DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", |
3540 | intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, | 3540 | intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, |
3541 | intel_dp->panel_power_cycle_delay); | 3541 | intel_dp->panel_power_cycle_delay); |
3542 | 3542 | ||
3543 | DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", | 3543 | DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", |
3544 | intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); | 3544 | intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); |
3545 | 3545 | ||
3546 | if (out) | 3546 | if (out) |
3547 | *out = final; | 3547 | *out = final; |
3548 | } | 3548 | } |
3549 | 3549 | ||
3550 | static void | 3550 | static void |
3551 | intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, | 3551 | intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, |
3552 | struct intel_dp *intel_dp, | 3552 | struct intel_dp *intel_dp, |
3553 | struct edp_power_seq *seq) | 3553 | struct edp_power_seq *seq) |
3554 | { | 3554 | { |
3555 | struct drm_i915_private *dev_priv = dev->dev_private; | 3555 | struct drm_i915_private *dev_priv = dev->dev_private; |
3556 | u32 pp_on, pp_off, pp_div, port_sel = 0; | 3556 | u32 pp_on, pp_off, pp_div, port_sel = 0; |
3557 | int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev); | 3557 | int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev); |
3558 | int pp_on_reg, pp_off_reg, pp_div_reg; | 3558 | int pp_on_reg, pp_off_reg, pp_div_reg; |
3559 | 3559 | ||
3560 | if (HAS_PCH_SPLIT(dev)) { | 3560 | if (HAS_PCH_SPLIT(dev)) { |
3561 | pp_on_reg = PCH_PP_ON_DELAYS; | 3561 | pp_on_reg = PCH_PP_ON_DELAYS; |
3562 | pp_off_reg = PCH_PP_OFF_DELAYS; | 3562 | pp_off_reg = PCH_PP_OFF_DELAYS; |
3563 | pp_div_reg = PCH_PP_DIVISOR; | 3563 | pp_div_reg = PCH_PP_DIVISOR; |
3564 | } else { | 3564 | } else { |
3565 | enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); | 3565 | enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); |
3566 | 3566 | ||
3567 | pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); | 3567 | pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); |
3568 | pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe); | 3568 | pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe); |
3569 | pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); | 3569 | pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); |
3570 | } | 3570 | } |
3571 | 3571 | ||
3572 | /* | 3572 | /* |
3573 | * And finally store the new values in the power sequencer. The | 3573 | * And finally store the new values in the power sequencer. The |
3574 | * backlight delays are set to 1 because we do manual waits on them. For | 3574 | * backlight delays are set to 1 because we do manual waits on them. For |
3575 | * T8, even BSpec recommends doing it. For T9, if we don't do this, | 3575 | * T8, even BSpec recommends doing it. For T9, if we don't do this, |
3576 | * we'll end up waiting for the backlight off delay twice: once when we | 3576 | * we'll end up waiting for the backlight off delay twice: once when we |
3577 | * do the manual sleep, and once when we disable the panel and wait for | 3577 | * do the manual sleep, and once when we disable the panel and wait for |
3578 | * the PP_STATUS bit to become zero. | 3578 | * the PP_STATUS bit to become zero. |
3579 | */ | 3579 | */ |
3580 | pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | | 3580 | pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | |
3581 | (1 << PANEL_LIGHT_ON_DELAY_SHIFT); | 3581 | (1 << PANEL_LIGHT_ON_DELAY_SHIFT); |
3582 | pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) | | 3582 | pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) | |
3583 | (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); | 3583 | (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); |
3584 | /* Compute the divisor for the pp clock, simply match the Bspec | 3584 | /* Compute the divisor for the pp clock, simply match the Bspec |
3585 | * formula. */ | 3585 | * formula. */ |
3586 | pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT; | 3586 | pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT; |
3587 | pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) | 3587 | pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) |
3588 | << PANEL_POWER_CYCLE_DELAY_SHIFT); | 3588 | << PANEL_POWER_CYCLE_DELAY_SHIFT); |
3589 | 3589 | ||
3590 | /* Haswell doesn't have any port selection bits for the panel | 3590 | /* Haswell doesn't have any port selection bits for the panel |
3591 | * power sequencer any more. */ | 3591 | * power sequencer any more. */ |
3592 | if (IS_VALLEYVIEW(dev)) { | 3592 | if (IS_VALLEYVIEW(dev)) { |
3593 | if (dp_to_dig_port(intel_dp)->port == PORT_B) | 3593 | if (dp_to_dig_port(intel_dp)->port == PORT_B) |
3594 | port_sel = PANEL_PORT_SELECT_DPB_VLV; | 3594 | port_sel = PANEL_PORT_SELECT_DPB_VLV; |
3595 | else | 3595 | else |
3596 | port_sel = PANEL_PORT_SELECT_DPC_VLV; | 3596 | port_sel = PANEL_PORT_SELECT_DPC_VLV; |
3597 | } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { | 3597 | } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { |
3598 | if (dp_to_dig_port(intel_dp)->port == PORT_A) | 3598 | if (dp_to_dig_port(intel_dp)->port == PORT_A) |
3599 | port_sel = PANEL_PORT_SELECT_DPA; | 3599 | port_sel = PANEL_PORT_SELECT_DPA; |
3600 | else | 3600 | else |
3601 | port_sel = PANEL_PORT_SELECT_DPD; | 3601 | port_sel = PANEL_PORT_SELECT_DPD; |
3602 | } | 3602 | } |
3603 | 3603 | ||
3604 | pp_on |= port_sel; | 3604 | pp_on |= port_sel; |
3605 | 3605 | ||
3606 | I915_WRITE(pp_on_reg, pp_on); | 3606 | I915_WRITE(pp_on_reg, pp_on); |
3607 | I915_WRITE(pp_off_reg, pp_off); | 3607 | I915_WRITE(pp_off_reg, pp_off); |
3608 | I915_WRITE(pp_div_reg, pp_div); | 3608 | I915_WRITE(pp_div_reg, pp_div); |
3609 | 3609 | ||
3610 | DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", | 3610 | DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", |
3611 | I915_READ(pp_on_reg), | 3611 | I915_READ(pp_on_reg), |
3612 | I915_READ(pp_off_reg), | 3612 | I915_READ(pp_off_reg), |
3613 | I915_READ(pp_div_reg)); | 3613 | I915_READ(pp_div_reg)); |
3614 | } | 3614 | } |
3615 | 3615 | ||
3616 | static bool intel_edp_init_connector(struct intel_dp *intel_dp, | 3616 | static bool intel_edp_init_connector(struct intel_dp *intel_dp, |
3617 | struct intel_connector *intel_connector, | 3617 | struct intel_connector *intel_connector, |
3618 | struct edp_power_seq *power_seq) | 3618 | struct edp_power_seq *power_seq) |
3619 | { | 3619 | { |
3620 | struct drm_connector *connector = &intel_connector->base; | 3620 | struct drm_connector *connector = &intel_connector->base; |
3621 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 3621 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
3622 | struct drm_device *dev = intel_dig_port->base.base.dev; | 3622 | struct intel_encoder *intel_encoder = &intel_dig_port->base; |
3623 | struct drm_device *dev = intel_encoder->base.dev; | ||
3623 | struct drm_i915_private *dev_priv = dev->dev_private; | 3624 | struct drm_i915_private *dev_priv = dev->dev_private; |
3624 | struct drm_display_mode *fixed_mode = NULL; | 3625 | struct drm_display_mode *fixed_mode = NULL; |
3625 | bool has_dpcd; | 3626 | bool has_dpcd; |
3626 | struct drm_display_mode *scan; | 3627 | struct drm_display_mode *scan; |
3627 | struct edid *edid; | 3628 | struct edid *edid; |
3628 | 3629 | ||
3629 | if (!is_edp(intel_dp)) | 3630 | if (!is_edp(intel_dp)) |
3630 | return true; | 3631 | return true; |
3632 | |||
3633 | /* The VDD bit needs a power domain reference, so if the bit is already | ||
3634 | * enabled when we boot, grab this reference. */ | ||
3635 | if (edp_have_panel_vdd(intel_dp)) { | ||
3636 | enum intel_display_power_domain power_domain; | ||
3637 | power_domain = intel_display_port_power_domain(intel_encoder); | ||
3638 | intel_display_power_get(dev_priv, power_domain); | ||
3639 | } | ||
3631 | 3640 | ||
3632 | /* Cache DPCD and EDID for edp. */ | 3641 | /* Cache DPCD and EDID for edp. */ |
3633 | intel_edp_panel_vdd_on(intel_dp); | 3642 | intel_edp_panel_vdd_on(intel_dp); |
3634 | has_dpcd = intel_dp_get_dpcd(intel_dp); | 3643 | has_dpcd = intel_dp_get_dpcd(intel_dp); |
3635 | edp_panel_vdd_off(intel_dp, false); | 3644 | edp_panel_vdd_off(intel_dp, false); |
3636 | 3645 | ||
3637 | if (has_dpcd) { | 3646 | if (has_dpcd) { |
3638 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) | 3647 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) |
3639 | dev_priv->no_aux_handshake = | 3648 | dev_priv->no_aux_handshake = |
3640 | intel_dp->dpcd[DP_MAX_DOWNSPREAD] & | 3649 | intel_dp->dpcd[DP_MAX_DOWNSPREAD] & |
3641 | DP_NO_AUX_HANDSHAKE_LINK_TRAINING; | 3650 | DP_NO_AUX_HANDSHAKE_LINK_TRAINING; |
3642 | } else { | 3651 | } else { |
3643 | /* if this fails, presume the device is a ghost */ | 3652 | /* if this fails, presume the device is a ghost */ |
3644 | DRM_INFO("failed to retrieve link info, disabling eDP\n"); | 3653 | DRM_INFO("failed to retrieve link info, disabling eDP\n"); |
3645 | return false; | 3654 | return false; |
3646 | } | 3655 | } |
3647 | 3656 | ||
3648 | /* We now know it's not a ghost, init power sequence regs. */ | 3657 | /* We now know it's not a ghost, init power sequence regs. */ |
3649 | intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq); | 3658 | intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq); |
3650 | 3659 | ||
3651 | mutex_lock(&dev->mode_config.mutex); | 3660 | mutex_lock(&dev->mode_config.mutex); |
3652 | edid = drm_get_edid(connector, &intel_dp->aux.ddc); | 3661 | edid = drm_get_edid(connector, &intel_dp->aux.ddc); |
3653 | if (edid) { | 3662 | if (edid) { |
3654 | if (drm_add_edid_modes(connector, edid)) { | 3663 | if (drm_add_edid_modes(connector, edid)) { |
3655 | drm_mode_connector_update_edid_property(connector, | 3664 | drm_mode_connector_update_edid_property(connector, |
3656 | edid); | 3665 | edid); |
3657 | drm_edid_to_eld(connector, edid); | 3666 | drm_edid_to_eld(connector, edid); |
3658 | } else { | 3667 | } else { |
3659 | kfree(edid); | 3668 | kfree(edid); |
3660 | edid = ERR_PTR(-EINVAL); | 3669 | edid = ERR_PTR(-EINVAL); |
3661 | } | 3670 | } |
3662 | } else { | 3671 | } else { |
3663 | edid = ERR_PTR(-ENOENT); | 3672 | edid = ERR_PTR(-ENOENT); |
3664 | } | 3673 | } |
3665 | intel_connector->edid = edid; | 3674 | intel_connector->edid = edid; |
3666 | 3675 | ||
3667 | /* prefer fixed mode from EDID if available */ | 3676 | /* prefer fixed mode from EDID if available */ |
3668 | list_for_each_entry(scan, &connector->probed_modes, head) { | 3677 | list_for_each_entry(scan, &connector->probed_modes, head) { |
3669 | if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { | 3678 | if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { |
3670 | fixed_mode = drm_mode_duplicate(dev, scan); | 3679 | fixed_mode = drm_mode_duplicate(dev, scan); |
3671 | break; | 3680 | break; |
3672 | } | 3681 | } |
3673 | } | 3682 | } |
3674 | 3683 | ||
3675 | /* fallback to VBT if available for eDP */ | 3684 | /* fallback to VBT if available for eDP */ |
3676 | if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { | 3685 | if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { |
3677 | fixed_mode = drm_mode_duplicate(dev, | 3686 | fixed_mode = drm_mode_duplicate(dev, |
3678 | dev_priv->vbt.lfp_lvds_vbt_mode); | 3687 | dev_priv->vbt.lfp_lvds_vbt_mode); |
3679 | if (fixed_mode) | 3688 | if (fixed_mode) |
3680 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; | 3689 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; |
3681 | } | 3690 | } |
3682 | mutex_unlock(&dev->mode_config.mutex); | 3691 | mutex_unlock(&dev->mode_config.mutex); |
3683 | 3692 | ||
3684 | intel_panel_init(&intel_connector->panel, fixed_mode, NULL); | 3693 | intel_panel_init(&intel_connector->panel, fixed_mode, NULL); |
3685 | intel_panel_setup_backlight(connector); | 3694 | intel_panel_setup_backlight(connector); |
3686 | 3695 | ||
3687 | return true; | 3696 | return true; |
3688 | } | 3697 | } |
3689 | 3698 | ||
3690 | bool | 3699 | bool |
3691 | intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | 3700 | intel_dp_init_connector(struct intel_digital_port *intel_dig_port, |
3692 | struct intel_connector *intel_connector) | 3701 | struct intel_connector *intel_connector) |
3693 | { | 3702 | { |
3694 | struct drm_connector *connector = &intel_connector->base; | 3703 | struct drm_connector *connector = &intel_connector->base; |
3695 | struct intel_dp *intel_dp = &intel_dig_port->dp; | 3704 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
3696 | struct intel_encoder *intel_encoder = &intel_dig_port->base; | 3705 | struct intel_encoder *intel_encoder = &intel_dig_port->base; |
3697 | struct drm_device *dev = intel_encoder->base.dev; | 3706 | struct drm_device *dev = intel_encoder->base.dev; |
3698 | struct drm_i915_private *dev_priv = dev->dev_private; | 3707 | struct drm_i915_private *dev_priv = dev->dev_private; |
3699 | enum port port = intel_dig_port->port; | 3708 | enum port port = intel_dig_port->port; |
3700 | struct edp_power_seq power_seq = { 0 }; | 3709 | struct edp_power_seq power_seq = { 0 }; |
3701 | int type; | 3710 | int type; |
3702 | 3711 | ||
3703 | /* intel_dp vfuncs */ | 3712 | /* intel_dp vfuncs */ |
3704 | if (IS_VALLEYVIEW(dev)) | 3713 | if (IS_VALLEYVIEW(dev)) |
3705 | intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider; | 3714 | intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider; |
3706 | else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | 3715 | else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
3707 | intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; | 3716 | intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; |
3708 | else if (HAS_PCH_SPLIT(dev)) | 3717 | else if (HAS_PCH_SPLIT(dev)) |
3709 | intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider; | 3718 | intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider; |
3710 | else | 3719 | else |
3711 | intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider; | 3720 | intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider; |
3712 | 3721 | ||
3713 | intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl; | 3722 | intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl; |
3714 | 3723 | ||
3715 | /* Preserve the current hw state. */ | 3724 | /* Preserve the current hw state. */ |
3716 | intel_dp->DP = I915_READ(intel_dp->output_reg); | 3725 | intel_dp->DP = I915_READ(intel_dp->output_reg); |
3717 | intel_dp->attached_connector = intel_connector; | 3726 | intel_dp->attached_connector = intel_connector; |
3718 | 3727 | ||
3719 | if (intel_dp_is_edp(dev, port)) | 3728 | if (intel_dp_is_edp(dev, port)) |
3720 | type = DRM_MODE_CONNECTOR_eDP; | 3729 | type = DRM_MODE_CONNECTOR_eDP; |
3721 | else | 3730 | else |
3722 | type = DRM_MODE_CONNECTOR_DisplayPort; | 3731 | type = DRM_MODE_CONNECTOR_DisplayPort; |
3723 | 3732 | ||
3724 | /* | 3733 | /* |
3725 | * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but | 3734 | * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but |
3726 | * for DP the encoder type can be set by the caller to | 3735 | * for DP the encoder type can be set by the caller to |
3727 | * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. | 3736 | * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. |
3728 | */ | 3737 | */ |
3729 | if (type == DRM_MODE_CONNECTOR_eDP) | 3738 | if (type == DRM_MODE_CONNECTOR_eDP) |
3730 | intel_encoder->type = INTEL_OUTPUT_EDP; | 3739 | intel_encoder->type = INTEL_OUTPUT_EDP; |
3731 | 3740 | ||
3732 | DRM_DEBUG_KMS("Adding %s connector on port %c\n", | 3741 | DRM_DEBUG_KMS("Adding %s connector on port %c\n", |
3733 | type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", | 3742 | type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", |
3734 | port_name(port)); | 3743 | port_name(port)); |
3735 | 3744 | ||
3736 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); | 3745 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); |
3737 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); | 3746 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); |
3738 | 3747 | ||
3739 | connector->interlace_allowed = true; | 3748 | connector->interlace_allowed = true; |
3740 | connector->doublescan_allowed = 0; | 3749 | connector->doublescan_allowed = 0; |
3741 | 3750 | ||
3742 | INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, | 3751 | INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, |
3743 | edp_panel_vdd_work); | 3752 | edp_panel_vdd_work); |
3744 | 3753 | ||
3745 | intel_connector_attach_encoder(intel_connector, intel_encoder); | 3754 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
3746 | drm_sysfs_connector_add(connector); | 3755 | drm_sysfs_connector_add(connector); |
3747 | 3756 | ||
3748 | if (HAS_DDI(dev)) | 3757 | if (HAS_DDI(dev)) |
3749 | intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; | 3758 | intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; |
3750 | else | 3759 | else |
3751 | intel_connector->get_hw_state = intel_connector_get_hw_state; | 3760 | intel_connector->get_hw_state = intel_connector_get_hw_state; |
3752 | intel_connector->unregister = intel_dp_connector_unregister; | 3761 | intel_connector->unregister = intel_dp_connector_unregister; |
3753 | 3762 | ||
3754 | /* Set up the hotplug pin. */ | 3763 | /* Set up the hotplug pin. */ |
3755 | switch (port) { | 3764 | switch (port) { |
3756 | case PORT_A: | 3765 | case PORT_A: |
3757 | intel_encoder->hpd_pin = HPD_PORT_A; | 3766 | intel_encoder->hpd_pin = HPD_PORT_A; |
3758 | break; | 3767 | break; |
3759 | case PORT_B: | 3768 | case PORT_B: |
3760 | intel_encoder->hpd_pin = HPD_PORT_B; | 3769 | intel_encoder->hpd_pin = HPD_PORT_B; |
3761 | break; | 3770 | break; |
3762 | case PORT_C: | 3771 | case PORT_C: |
3763 | intel_encoder->hpd_pin = HPD_PORT_C; | 3772 | intel_encoder->hpd_pin = HPD_PORT_C; |
3764 | break; | 3773 | break; |
3765 | case PORT_D: | 3774 | case PORT_D: |
3766 | intel_encoder->hpd_pin = HPD_PORT_D; | 3775 | intel_encoder->hpd_pin = HPD_PORT_D; |
3767 | break; | 3776 | break; |
3768 | default: | 3777 | default: |
3769 | BUG(); | 3778 | BUG(); |
3770 | } | 3779 | } |
3771 | 3780 | ||
3772 | if (is_edp(intel_dp)) { | 3781 | if (is_edp(intel_dp)) { |
3773 | intel_dp_init_panel_power_timestamps(intel_dp); | 3782 | intel_dp_init_panel_power_timestamps(intel_dp); |
3774 | intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); | 3783 | intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); |
3775 | } | 3784 | } |
3776 | 3785 | ||
3777 | intel_dp_aux_init(intel_dp, intel_connector); | 3786 | intel_dp_aux_init(intel_dp, intel_connector); |
3778 | 3787 | ||
3779 | intel_dp->psr_setup_done = false; | 3788 | intel_dp->psr_setup_done = false; |
3780 | 3789 | ||
3781 | if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { | 3790 | if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { |
3782 | drm_dp_aux_unregister_i2c_bus(&intel_dp->aux); | 3791 | drm_dp_aux_unregister_i2c_bus(&intel_dp->aux); |
3783 | if (is_edp(intel_dp)) { | 3792 | if (is_edp(intel_dp)) { |
3784 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); | 3793 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); |
3785 | mutex_lock(&dev->mode_config.mutex); | 3794 | mutex_lock(&dev->mode_config.mutex); |
3786 | edp_panel_vdd_off_sync(intel_dp); | 3795 | edp_panel_vdd_off_sync(intel_dp); |
3787 | mutex_unlock(&dev->mode_config.mutex); | 3796 | mutex_unlock(&dev->mode_config.mutex); |
3788 | } | 3797 | } |
3789 | drm_sysfs_connector_remove(connector); | 3798 | drm_sysfs_connector_remove(connector); |
3790 | drm_connector_cleanup(connector); | 3799 | drm_connector_cleanup(connector); |
3791 | return false; | 3800 | return false; |
3792 | } | 3801 | } |
3793 | 3802 | ||
3794 | intel_dp_add_properties(intel_dp, connector); | 3803 | intel_dp_add_properties(intel_dp, connector); |
3795 | 3804 | ||
3796 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written | 3805 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written |
3797 | * 0xd. Failure to do so will result in spurious interrupts being | 3806 | * 0xd. Failure to do so will result in spurious interrupts being |
3798 | * generated on the port when a cable is not attached. | 3807 | * generated on the port when a cable is not attached. |
3799 | */ | 3808 | */ |
3800 | if (IS_G4X(dev) && !IS_GM45(dev)) { | 3809 | if (IS_G4X(dev) && !IS_GM45(dev)) { |
3801 | u32 temp = I915_READ(PEG_BAND_GAP_DATA); | 3810 | u32 temp = I915_READ(PEG_BAND_GAP_DATA); |
3802 | I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); | 3811 | I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); |
3803 | } | 3812 | } |
3804 | 3813 | ||
3805 | return true; | 3814 | return true; |
3806 | } | 3815 | } |
3807 | 3816 | ||
3808 | void | 3817 | void |
3809 | intel_dp_init(struct drm_device *dev, int output_reg, enum port port) | 3818 | intel_dp_init(struct drm_device *dev, int output_reg, enum port port) |
3810 | { | 3819 | { |
3811 | struct intel_digital_port *intel_dig_port; | 3820 | struct intel_digital_port *intel_dig_port; |
3812 | struct intel_encoder *intel_encoder; | 3821 | struct intel_encoder *intel_encoder; |
3813 | struct drm_encoder *encoder; | 3822 | struct drm_encoder *encoder; |
3814 | struct intel_connector *intel_connector; | 3823 | struct intel_connector *intel_connector; |
3815 | 3824 | ||
3816 | intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); | 3825 | intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); |
3817 | if (!intel_dig_port) | 3826 | if (!intel_dig_port) |
3818 | return; | 3827 | return; |
3819 | 3828 | ||
3820 | intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL); | 3829 | intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL); |
3821 | if (!intel_connector) { | 3830 | if (!intel_connector) { |
3822 | kfree(intel_dig_port); | 3831 | kfree(intel_dig_port); |
3823 | return; | 3832 | return; |
3824 | } | 3833 | } |
3825 | 3834 | ||
3826 | intel_encoder = &intel_dig_port->base; | 3835 | intel_encoder = &intel_dig_port->base; |
3827 | encoder = &intel_encoder->base; | 3836 | encoder = &intel_encoder->base; |
3828 | 3837 | ||
3829 | drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, | 3838 | drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, |
3830 | DRM_MODE_ENCODER_TMDS); | 3839 | DRM_MODE_ENCODER_TMDS); |
3831 | 3840 | ||
3832 | intel_encoder->compute_config = intel_dp_compute_config; | 3841 | intel_encoder->compute_config = intel_dp_compute_config; |
3833 | intel_encoder->mode_set = intel_dp_mode_set; | 3842 | intel_encoder->mode_set = intel_dp_mode_set; |
3834 | intel_encoder->disable = intel_disable_dp; | 3843 | intel_encoder->disable = intel_disable_dp; |
3835 | intel_encoder->post_disable = intel_post_disable_dp; | 3844 | intel_encoder->post_disable = intel_post_disable_dp; |
3836 | intel_encoder->get_hw_state = intel_dp_get_hw_state; | 3845 | intel_encoder->get_hw_state = intel_dp_get_hw_state; |
3837 | intel_encoder->get_config = intel_dp_get_config; | 3846 | intel_encoder->get_config = intel_dp_get_config; |
3838 | if (IS_VALLEYVIEW(dev)) { | 3847 | if (IS_VALLEYVIEW(dev)) { |
3839 | intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; | 3848 | intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; |
3840 | intel_encoder->pre_enable = vlv_pre_enable_dp; | 3849 | intel_encoder->pre_enable = vlv_pre_enable_dp; |
3841 | intel_encoder->enable = vlv_enable_dp; | 3850 | intel_encoder->enable = vlv_enable_dp; |
3842 | } else { | 3851 | } else { |
3843 | intel_encoder->pre_enable = g4x_pre_enable_dp; | 3852 | intel_encoder->pre_enable = g4x_pre_enable_dp; |
3844 | intel_encoder->enable = g4x_enable_dp; | 3853 | intel_encoder->enable = g4x_enable_dp; |
3845 | } | 3854 | } |
3846 | 3855 | ||
3847 | intel_dig_port->port = port; | 3856 | intel_dig_port->port = port; |
3848 | intel_dig_port->dp.output_reg = output_reg; | 3857 | intel_dig_port->dp.output_reg = output_reg; |
3849 | 3858 | ||
3850 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; | 3859 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
3851 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | 3860 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
3852 | intel_encoder->cloneable = 0; | 3861 | intel_encoder->cloneable = 0; |
3853 | intel_encoder->hot_plug = intel_dp_hot_plug; | 3862 | intel_encoder->hot_plug = intel_dp_hot_plug; |
3854 | 3863 | ||
3855 | if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { | 3864 | if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { |
3856 | drm_encoder_cleanup(encoder); | 3865 | drm_encoder_cleanup(encoder); |
3857 | kfree(intel_dig_port); | 3866 | kfree(intel_dig_port); |
3858 | kfree(intel_connector); | 3867 | kfree(intel_connector); |
3859 | } | 3868 | } |
3860 | } | 3869 | } |
3861 | 3870 |
drivers/gpu/drm/i915/intel_drv.h
1 | /* | 1 | /* |
2 | * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> | 2 | * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> |
3 | * Copyright (c) 2007-2008 Intel Corporation | 3 | * Copyright (c) 2007-2008 Intel Corporation |
4 | * Jesse Barnes <jesse.barnes@intel.com> | 4 | * Jesse Barnes <jesse.barnes@intel.com> |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), | 7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation | 8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the | 10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: | 11 | * Software is furnished to do so, subject to the following conditions: |
12 | * | 12 | * |
13 | * The above copyright notice and this permission notice (including the next | 13 | * The above copyright notice and this permission notice (including the next |
14 | * paragraph) shall be included in all copies or substantial portions of the | 14 | * paragraph) shall be included in all copies or substantial portions of the |
15 | * Software. | 15 | * Software. |
16 | * | 16 | * |
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | 20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | 21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
22 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | 22 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
23 | * IN THE SOFTWARE. | 23 | * IN THE SOFTWARE. |
24 | */ | 24 | */ |
25 | #ifndef __INTEL_DRV_H__ | 25 | #ifndef __INTEL_DRV_H__ |
26 | #define __INTEL_DRV_H__ | 26 | #define __INTEL_DRV_H__ |
27 | 27 | ||
28 | #include <linux/i2c.h> | 28 | #include <linux/i2c.h> |
29 | #include <linux/hdmi.h> | 29 | #include <linux/hdmi.h> |
30 | #include <drm/i915_drm.h> | 30 | #include <drm/i915_drm.h> |
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | #include <drm/drm_crtc.h> | 32 | #include <drm/drm_crtc.h> |
33 | #include <drm/drm_crtc_helper.h> | 33 | #include <drm/drm_crtc_helper.h> |
34 | #include <drm/drm_fb_helper.h> | 34 | #include <drm/drm_fb_helper.h> |
35 | #include <drm/drm_dp_helper.h> | 35 | #include <drm/drm_dp_helper.h> |
36 | 36 | ||
37 | /** | 37 | /** |
38 | * _wait_for - magic (register) wait macro | 38 | * _wait_for - magic (register) wait macro |
39 | * | 39 | * |
40 | * Does the right thing for modeset paths when run under kdgb or similar atomic | 40 | * Does the right thing for modeset paths when run under kdgb or similar atomic |
41 | * contexts. Note that it's important that we check the condition again after | 41 | * contexts. Note that it's important that we check the condition again after |
42 | * having timed out, since the timeout could be due to preemption or similar and | 42 | * having timed out, since the timeout could be due to preemption or similar and |
43 | * we've never had a chance to check the condition before the timeout. | 43 | * we've never had a chance to check the condition before the timeout. |
44 | */ | 44 | */ |
45 | #define _wait_for(COND, MS, W) ({ \ | 45 | #define _wait_for(COND, MS, W) ({ \ |
46 | unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \ | 46 | unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \ |
47 | int ret__ = 0; \ | 47 | int ret__ = 0; \ |
48 | while (!(COND)) { \ | 48 | while (!(COND)) { \ |
49 | if (time_after(jiffies, timeout__)) { \ | 49 | if (time_after(jiffies, timeout__)) { \ |
50 | if (!(COND)) \ | 50 | if (!(COND)) \ |
51 | ret__ = -ETIMEDOUT; \ | 51 | ret__ = -ETIMEDOUT; \ |
52 | break; \ | 52 | break; \ |
53 | } \ | 53 | } \ |
54 | if (W && drm_can_sleep()) { \ | 54 | if (W && drm_can_sleep()) { \ |
55 | msleep(W); \ | 55 | msleep(W); \ |
56 | } else { \ | 56 | } else { \ |
57 | cpu_relax(); \ | 57 | cpu_relax(); \ |
58 | } \ | 58 | } \ |
59 | } \ | 59 | } \ |
60 | ret__; \ | 60 | ret__; \ |
61 | }) | 61 | }) |
62 | 62 | ||
63 | #define wait_for(COND, MS) _wait_for(COND, MS, 1) | 63 | #define wait_for(COND, MS) _wait_for(COND, MS, 1) |
64 | #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) | 64 | #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) |
65 | #define wait_for_atomic_us(COND, US) _wait_for((COND), \ | 65 | #define wait_for_atomic_us(COND, US) _wait_for((COND), \ |
66 | DIV_ROUND_UP((US), 1000), 0) | 66 | DIV_ROUND_UP((US), 1000), 0) |
67 | 67 | ||
68 | #define KHz(x) (1000 * (x)) | 68 | #define KHz(x) (1000 * (x)) |
69 | #define MHz(x) KHz(1000 * (x)) | 69 | #define MHz(x) KHz(1000 * (x)) |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * Display related stuff | 72 | * Display related stuff |
73 | */ | 73 | */ |
74 | 74 | ||
75 | /* store information about an Ixxx DVO */ | 75 | /* store information about an Ixxx DVO */ |
76 | /* The i830->i865 use multiple DVOs with multiple i2cs */ | 76 | /* The i830->i865 use multiple DVOs with multiple i2cs */ |
77 | /* the i915, i945 have a single sDVO i2c bus - which is different */ | 77 | /* the i915, i945 have a single sDVO i2c bus - which is different */ |
78 | #define MAX_OUTPUTS 6 | 78 | #define MAX_OUTPUTS 6 |
79 | /* maximum connectors per crtcs in the mode set */ | 79 | /* maximum connectors per crtcs in the mode set */ |
80 | 80 | ||
81 | /* Maximum cursor sizes */ | 81 | /* Maximum cursor sizes */ |
82 | #define GEN2_CURSOR_WIDTH 64 | 82 | #define GEN2_CURSOR_WIDTH 64 |
83 | #define GEN2_CURSOR_HEIGHT 64 | 83 | #define GEN2_CURSOR_HEIGHT 64 |
84 | #define CURSOR_WIDTH 256 | 84 | #define CURSOR_WIDTH 256 |
85 | #define CURSOR_HEIGHT 256 | 85 | #define CURSOR_HEIGHT 256 |
86 | 86 | ||
87 | #define INTEL_I2C_BUS_DVO 1 | 87 | #define INTEL_I2C_BUS_DVO 1 |
88 | #define INTEL_I2C_BUS_SDVO 2 | 88 | #define INTEL_I2C_BUS_SDVO 2 |
89 | 89 | ||
90 | /* these are outputs from the chip - integrated only | 90 | /* these are outputs from the chip - integrated only |
91 | external chips are via DVO or SDVO output */ | 91 | external chips are via DVO or SDVO output */ |
92 | #define INTEL_OUTPUT_UNUSED 0 | 92 | #define INTEL_OUTPUT_UNUSED 0 |
93 | #define INTEL_OUTPUT_ANALOG 1 | 93 | #define INTEL_OUTPUT_ANALOG 1 |
94 | #define INTEL_OUTPUT_DVO 2 | 94 | #define INTEL_OUTPUT_DVO 2 |
95 | #define INTEL_OUTPUT_SDVO 3 | 95 | #define INTEL_OUTPUT_SDVO 3 |
96 | #define INTEL_OUTPUT_LVDS 4 | 96 | #define INTEL_OUTPUT_LVDS 4 |
97 | #define INTEL_OUTPUT_TVOUT 5 | 97 | #define INTEL_OUTPUT_TVOUT 5 |
98 | #define INTEL_OUTPUT_HDMI 6 | 98 | #define INTEL_OUTPUT_HDMI 6 |
99 | #define INTEL_OUTPUT_DISPLAYPORT 7 | 99 | #define INTEL_OUTPUT_DISPLAYPORT 7 |
100 | #define INTEL_OUTPUT_EDP 8 | 100 | #define INTEL_OUTPUT_EDP 8 |
101 | #define INTEL_OUTPUT_DSI 9 | 101 | #define INTEL_OUTPUT_DSI 9 |
102 | #define INTEL_OUTPUT_UNKNOWN 10 | 102 | #define INTEL_OUTPUT_UNKNOWN 10 |
103 | 103 | ||
104 | #define INTEL_DVO_CHIP_NONE 0 | 104 | #define INTEL_DVO_CHIP_NONE 0 |
105 | #define INTEL_DVO_CHIP_LVDS 1 | 105 | #define INTEL_DVO_CHIP_LVDS 1 |
106 | #define INTEL_DVO_CHIP_TMDS 2 | 106 | #define INTEL_DVO_CHIP_TMDS 2 |
107 | #define INTEL_DVO_CHIP_TVOUT 4 | 107 | #define INTEL_DVO_CHIP_TVOUT 4 |
108 | 108 | ||
109 | #define INTEL_DSI_COMMAND_MODE 0 | 109 | #define INTEL_DSI_COMMAND_MODE 0 |
110 | #define INTEL_DSI_VIDEO_MODE 1 | 110 | #define INTEL_DSI_VIDEO_MODE 1 |
111 | 111 | ||
112 | struct intel_framebuffer { | 112 | struct intel_framebuffer { |
113 | struct drm_framebuffer base; | 113 | struct drm_framebuffer base; |
114 | struct drm_i915_gem_object *obj; | 114 | struct drm_i915_gem_object *obj; |
115 | }; | 115 | }; |
116 | 116 | ||
117 | struct intel_fbdev { | 117 | struct intel_fbdev { |
118 | struct drm_fb_helper helper; | 118 | struct drm_fb_helper helper; |
119 | struct intel_framebuffer *fb; | 119 | struct intel_framebuffer *fb; |
120 | struct list_head fbdev_list; | 120 | struct list_head fbdev_list; |
121 | struct drm_display_mode *our_mode; | 121 | struct drm_display_mode *our_mode; |
122 | int preferred_bpp; | 122 | int preferred_bpp; |
123 | }; | 123 | }; |
124 | 124 | ||
125 | struct intel_encoder { | 125 | struct intel_encoder { |
126 | struct drm_encoder base; | 126 | struct drm_encoder base; |
127 | /* | 127 | /* |
128 | * The new crtc this encoder will be driven from. Only differs from | 128 | * The new crtc this encoder will be driven from. Only differs from |
129 | * base->crtc while a modeset is in progress. | 129 | * base->crtc while a modeset is in progress. |
130 | */ | 130 | */ |
131 | struct intel_crtc *new_crtc; | 131 | struct intel_crtc *new_crtc; |
132 | 132 | ||
133 | int type; | 133 | int type; |
134 | unsigned int cloneable; | 134 | unsigned int cloneable; |
135 | bool connectors_active; | 135 | bool connectors_active; |
136 | void (*hot_plug)(struct intel_encoder *); | 136 | void (*hot_plug)(struct intel_encoder *); |
137 | bool (*compute_config)(struct intel_encoder *, | 137 | bool (*compute_config)(struct intel_encoder *, |
138 | struct intel_crtc_config *); | 138 | struct intel_crtc_config *); |
139 | void (*pre_pll_enable)(struct intel_encoder *); | 139 | void (*pre_pll_enable)(struct intel_encoder *); |
140 | void (*pre_enable)(struct intel_encoder *); | 140 | void (*pre_enable)(struct intel_encoder *); |
141 | void (*enable)(struct intel_encoder *); | 141 | void (*enable)(struct intel_encoder *); |
142 | void (*mode_set)(struct intel_encoder *intel_encoder); | 142 | void (*mode_set)(struct intel_encoder *intel_encoder); |
143 | void (*disable)(struct intel_encoder *); | 143 | void (*disable)(struct intel_encoder *); |
144 | void (*post_disable)(struct intel_encoder *); | 144 | void (*post_disable)(struct intel_encoder *); |
145 | /* Read out the current hw state of this connector, returning true if | 145 | /* Read out the current hw state of this connector, returning true if |
146 | * the encoder is active. If the encoder is enabled it also set the pipe | 146 | * the encoder is active. If the encoder is enabled it also set the pipe |
147 | * it is connected to in the pipe parameter. */ | 147 | * it is connected to in the pipe parameter. */ |
148 | bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe); | 148 | bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe); |
149 | /* Reconstructs the equivalent mode flags for the current hardware | 149 | /* Reconstructs the equivalent mode flags for the current hardware |
150 | * state. This must be called _after_ display->get_pipe_config has | 150 | * state. This must be called _after_ display->get_pipe_config has |
151 | * pre-filled the pipe config. Note that intel_encoder->base.crtc must | 151 | * pre-filled the pipe config. Note that intel_encoder->base.crtc must |
152 | * be set correctly before calling this function. */ | 152 | * be set correctly before calling this function. */ |
153 | void (*get_config)(struct intel_encoder *, | 153 | void (*get_config)(struct intel_encoder *, |
154 | struct intel_crtc_config *pipe_config); | 154 | struct intel_crtc_config *pipe_config); |
155 | int crtc_mask; | 155 | int crtc_mask; |
156 | enum hpd_pin hpd_pin; | 156 | enum hpd_pin hpd_pin; |
157 | }; | 157 | }; |
158 | 158 | ||
159 | struct intel_panel { | 159 | struct intel_panel { |
160 | struct drm_display_mode *fixed_mode; | 160 | struct drm_display_mode *fixed_mode; |
161 | struct drm_display_mode *downclock_mode; | 161 | struct drm_display_mode *downclock_mode; |
162 | int fitting_mode; | 162 | int fitting_mode; |
163 | 163 | ||
164 | /* backlight */ | 164 | /* backlight */ |
165 | struct { | 165 | struct { |
166 | bool present; | 166 | bool present; |
167 | u32 level; | 167 | u32 level; |
168 | u32 max; | 168 | u32 max; |
169 | bool enabled; | 169 | bool enabled; |
170 | bool combination_mode; /* gen 2/4 only */ | 170 | bool combination_mode; /* gen 2/4 only */ |
171 | bool active_low_pwm; | 171 | bool active_low_pwm; |
172 | struct backlight_device *device; | 172 | struct backlight_device *device; |
173 | } backlight; | 173 | } backlight; |
174 | }; | 174 | }; |
175 | 175 | ||
176 | struct intel_connector { | 176 | struct intel_connector { |
177 | struct drm_connector base; | 177 | struct drm_connector base; |
178 | /* | 178 | /* |
179 | * The fixed encoder this connector is connected to. | 179 | * The fixed encoder this connector is connected to. |
180 | */ | 180 | */ |
181 | struct intel_encoder *encoder; | 181 | struct intel_encoder *encoder; |
182 | 182 | ||
183 | /* | 183 | /* |
184 | * The new encoder this connector will be driven. Only differs from | 184 | * The new encoder this connector will be driven. Only differs from |
185 | * encoder while a modeset is in progress. | 185 | * encoder while a modeset is in progress. |
186 | */ | 186 | */ |
187 | struct intel_encoder *new_encoder; | 187 | struct intel_encoder *new_encoder; |
188 | 188 | ||
189 | /* Reads out the current hw, returning true if the connector is enabled | 189 | /* Reads out the current hw, returning true if the connector is enabled |
190 | * and active (i.e. dpms ON state). */ | 190 | * and active (i.e. dpms ON state). */ |
191 | bool (*get_hw_state)(struct intel_connector *); | 191 | bool (*get_hw_state)(struct intel_connector *); |
192 | 192 | ||
193 | /* | 193 | /* |
194 | * Removes all interfaces through which the connector is accessible | 194 | * Removes all interfaces through which the connector is accessible |
195 | * - like sysfs, debugfs entries -, so that no new operations can be | 195 | * - like sysfs, debugfs entries -, so that no new operations can be |
196 | * started on the connector. Also makes sure all currently pending | 196 | * started on the connector. Also makes sure all currently pending |
197 | * operations finish before returing. | 197 | * operations finish before returing. |
198 | */ | 198 | */ |
199 | void (*unregister)(struct intel_connector *); | 199 | void (*unregister)(struct intel_connector *); |
200 | 200 | ||
201 | /* Panel info for eDP and LVDS */ | 201 | /* Panel info for eDP and LVDS */ |
202 | struct intel_panel panel; | 202 | struct intel_panel panel; |
203 | 203 | ||
204 | /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */ | 204 | /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */ |
205 | struct edid *edid; | 205 | struct edid *edid; |
206 | 206 | ||
207 | /* since POLL and HPD connectors may use the same HPD line keep the native | 207 | /* since POLL and HPD connectors may use the same HPD line keep the native |
208 | state of connector->polled in case hotplug storm detection changes it */ | 208 | state of connector->polled in case hotplug storm detection changes it */ |
209 | u8 polled; | 209 | u8 polled; |
210 | }; | 210 | }; |
211 | 211 | ||
212 | typedef struct dpll { | 212 | typedef struct dpll { |
213 | /* given values */ | 213 | /* given values */ |
214 | int n; | 214 | int n; |
215 | int m1, m2; | 215 | int m1, m2; |
216 | int p1, p2; | 216 | int p1, p2; |
217 | /* derived values */ | 217 | /* derived values */ |
218 | int dot; | 218 | int dot; |
219 | int vco; | 219 | int vco; |
220 | int m; | 220 | int m; |
221 | int p; | 221 | int p; |
222 | } intel_clock_t; | 222 | } intel_clock_t; |
223 | 223 | ||
224 | struct intel_plane_config { | 224 | struct intel_plane_config { |
225 | bool tiled; | 225 | bool tiled; |
226 | int size; | 226 | int size; |
227 | u32 base; | 227 | u32 base; |
228 | }; | 228 | }; |
229 | 229 | ||
230 | struct intel_crtc_config { | 230 | struct intel_crtc_config { |
231 | /** | 231 | /** |
232 | * quirks - bitfield with hw state readout quirks | 232 | * quirks - bitfield with hw state readout quirks |
233 | * | 233 | * |
234 | * For various reasons the hw state readout code might not be able to | 234 | * For various reasons the hw state readout code might not be able to |
235 | * completely faithfully read out the current state. These cases are | 235 | * completely faithfully read out the current state. These cases are |
236 | * tracked with quirk flags so that fastboot and state checker can act | 236 | * tracked with quirk flags so that fastboot and state checker can act |
237 | * accordingly. | 237 | * accordingly. |
238 | */ | 238 | */ |
239 | #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ | 239 | #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ |
240 | #define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */ | ||
240 | unsigned long quirks; | 241 | unsigned long quirks; |
241 | 242 | ||
242 | /* User requested mode, only valid as a starting point to | 243 | /* User requested mode, only valid as a starting point to |
243 | * compute adjusted_mode, except in the case of (S)DVO where | 244 | * compute adjusted_mode, except in the case of (S)DVO where |
244 | * it's also for the output timings of the (S)DVO chip. | 245 | * it's also for the output timings of the (S)DVO chip. |
245 | * adjusted_mode will then correspond to the S(DVO) chip's | 246 | * adjusted_mode will then correspond to the S(DVO) chip's |
246 | * preferred input timings. */ | 247 | * preferred input timings. */ |
247 | struct drm_display_mode requested_mode; | 248 | struct drm_display_mode requested_mode; |
248 | /* Actual pipe timings ie. what we program into the pipe timing | 249 | /* Actual pipe timings ie. what we program into the pipe timing |
249 | * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */ | 250 | * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */ |
250 | struct drm_display_mode adjusted_mode; | 251 | struct drm_display_mode adjusted_mode; |
251 | 252 | ||
252 | /* Pipe source size (ie. panel fitter input size) | 253 | /* Pipe source size (ie. panel fitter input size) |
253 | * All planes will be positioned inside this space, | 254 | * All planes will be positioned inside this space, |
254 | * and get clipped at the edges. */ | 255 | * and get clipped at the edges. */ |
255 | int pipe_src_w, pipe_src_h; | 256 | int pipe_src_w, pipe_src_h; |
256 | 257 | ||
257 | /* Whether to set up the PCH/FDI. Note that we never allow sharing | 258 | /* Whether to set up the PCH/FDI. Note that we never allow sharing |
258 | * between pch encoders and cpu encoders. */ | 259 | * between pch encoders and cpu encoders. */ |
259 | bool has_pch_encoder; | 260 | bool has_pch_encoder; |
260 | 261 | ||
261 | /* CPU Transcoder for the pipe. Currently this can only differ from the | 262 | /* CPU Transcoder for the pipe. Currently this can only differ from the |
262 | * pipe on Haswell (where we have a special eDP transcoder). */ | 263 | * pipe on Haswell (where we have a special eDP transcoder). */ |
263 | enum transcoder cpu_transcoder; | 264 | enum transcoder cpu_transcoder; |
264 | 265 | ||
265 | /* | 266 | /* |
266 | * Use reduced/limited/broadcast rbg range, compressing from the full | 267 | * Use reduced/limited/broadcast rbg range, compressing from the full |
267 | * range fed into the crtcs. | 268 | * range fed into the crtcs. |
268 | */ | 269 | */ |
269 | bool limited_color_range; | 270 | bool limited_color_range; |
270 | 271 | ||
271 | /* DP has a bunch of special case unfortunately, so mark the pipe | 272 | /* DP has a bunch of special case unfortunately, so mark the pipe |
272 | * accordingly. */ | 273 | * accordingly. */ |
273 | bool has_dp_encoder; | 274 | bool has_dp_encoder; |
274 | 275 | ||
275 | /* | 276 | /* |
276 | * Enable dithering, used when the selected pipe bpp doesn't match the | 277 | * Enable dithering, used when the selected pipe bpp doesn't match the |
277 | * plane bpp. | 278 | * plane bpp. |
278 | */ | 279 | */ |
279 | bool dither; | 280 | bool dither; |
280 | 281 | ||
281 | /* Controls for the clock computation, to override various stages. */ | 282 | /* Controls for the clock computation, to override various stages. */ |
282 | bool clock_set; | 283 | bool clock_set; |
283 | 284 | ||
284 | /* SDVO TV has a bunch of special case. To make multifunction encoders | 285 | /* SDVO TV has a bunch of special case. To make multifunction encoders |
285 | * work correctly, we need to track this at runtime.*/ | 286 | * work correctly, we need to track this at runtime.*/ |
286 | bool sdvo_tv_clock; | 287 | bool sdvo_tv_clock; |
287 | 288 | ||
288 | /* | 289 | /* |
289 | * crtc bandwidth limit, don't increase pipe bpp or clock if not really | 290 | * crtc bandwidth limit, don't increase pipe bpp or clock if not really |
290 | * required. This is set in the 2nd loop of calling encoder's | 291 | * required. This is set in the 2nd loop of calling encoder's |
291 | * ->compute_config if the first pick doesn't work out. | 292 | * ->compute_config if the first pick doesn't work out. |
292 | */ | 293 | */ |
293 | bool bw_constrained; | 294 | bool bw_constrained; |
294 | 295 | ||
295 | /* Settings for the intel dpll used on pretty much everything but | 296 | /* Settings for the intel dpll used on pretty much everything but |
296 | * haswell. */ | 297 | * haswell. */ |
297 | struct dpll dpll; | 298 | struct dpll dpll; |
298 | 299 | ||
299 | /* Selected dpll when shared or DPLL_ID_PRIVATE. */ | 300 | /* Selected dpll when shared or DPLL_ID_PRIVATE. */ |
300 | enum intel_dpll_id shared_dpll; | 301 | enum intel_dpll_id shared_dpll; |
301 | 302 | ||
302 | /* Actual register state of the dpll, for shared dpll cross-checking. */ | 303 | /* Actual register state of the dpll, for shared dpll cross-checking. */ |
303 | struct intel_dpll_hw_state dpll_hw_state; | 304 | struct intel_dpll_hw_state dpll_hw_state; |
304 | 305 | ||
305 | int pipe_bpp; | 306 | int pipe_bpp; |
306 | struct intel_link_m_n dp_m_n; | 307 | struct intel_link_m_n dp_m_n; |
307 | 308 | ||
308 | /* | 309 | /* |
309 | * Frequence the dpll for the port should run at. Differs from the | 310 | * Frequence the dpll for the port should run at. Differs from the |
310 | * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also | 311 | * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also |
311 | * already multiplied by pixel_multiplier. | 312 | * already multiplied by pixel_multiplier. |
312 | */ | 313 | */ |
313 | int port_clock; | 314 | int port_clock; |
314 | 315 | ||
315 | /* Used by SDVO (and if we ever fix it, HDMI). */ | 316 | /* Used by SDVO (and if we ever fix it, HDMI). */ |
316 | unsigned pixel_multiplier; | 317 | unsigned pixel_multiplier; |
317 | 318 | ||
318 | /* Panel fitter controls for gen2-gen4 + VLV */ | 319 | /* Panel fitter controls for gen2-gen4 + VLV */ |
319 | struct { | 320 | struct { |
320 | u32 control; | 321 | u32 control; |
321 | u32 pgm_ratios; | 322 | u32 pgm_ratios; |
322 | u32 lvds_border_bits; | 323 | u32 lvds_border_bits; |
323 | } gmch_pfit; | 324 | } gmch_pfit; |
324 | 325 | ||
325 | /* Panel fitter placement and size for Ironlake+ */ | 326 | /* Panel fitter placement and size for Ironlake+ */ |
326 | struct { | 327 | struct { |
327 | u32 pos; | 328 | u32 pos; |
328 | u32 size; | 329 | u32 size; |
329 | bool enabled; | 330 | bool enabled; |
330 | } pch_pfit; | 331 | } pch_pfit; |
331 | 332 | ||
332 | /* FDI configuration, only valid if has_pch_encoder is set. */ | 333 | /* FDI configuration, only valid if has_pch_encoder is set. */ |
333 | int fdi_lanes; | 334 | int fdi_lanes; |
334 | struct intel_link_m_n fdi_m_n; | 335 | struct intel_link_m_n fdi_m_n; |
335 | 336 | ||
336 | bool ips_enabled; | 337 | bool ips_enabled; |
337 | 338 | ||
338 | bool double_wide; | 339 | bool double_wide; |
339 | }; | 340 | }; |
340 | 341 | ||
341 | struct intel_pipe_wm { | 342 | struct intel_pipe_wm { |
342 | struct intel_wm_level wm[5]; | 343 | struct intel_wm_level wm[5]; |
343 | uint32_t linetime; | 344 | uint32_t linetime; |
344 | bool fbc_wm_enabled; | 345 | bool fbc_wm_enabled; |
345 | }; | 346 | }; |
346 | 347 | ||
/*
 * i915-private CRTC state wrapping the DRM core's drm_crtc: pipe/plane
 * assignment, cursor state, the current pipe configuration and per-pipe
 * watermark state.
 */
struct intel_crtc {
	struct drm_crtc base;
	enum pipe pipe;			/* hardware pipe driven by this crtc */
	enum plane plane;		/* primary plane attached to the pipe */
	u8 lut_r[256], lut_g[256], lut_b[256];	/* gamma LUT values */
	/*
	 * Whether the crtc and the connected output pipeline is active. Implies
	 * that crtc->enabled is set, i.e. the current mode configuration has
	 * some outputs connected to this crtc.
	 */
	bool active;
	unsigned long enabled_power_domains;	/* bitmask of held power domains */
	bool eld_vld;
	bool primary_enabled; /* is the primary plane (partially) visible? */
	bool lowfreq_avail;
	struct intel_overlay *overlay;
	struct intel_unpin_work *unpin_work;	/* pending flip work, if any */

	atomic_t unpin_work_count;

	/* Display surface base address adjustement for pageflips. Note that on
	 * gen4+ this only adjusts up to a tile, offsets within a tile are
	 * handled in the hw itself (with the TILEOFF register). */
	unsigned long dspaddr_offset;

	/* Hardware cursor state. */
	struct drm_i915_gem_object *cursor_bo;
	uint32_t cursor_addr;
	int16_t cursor_x, cursor_y;
	int16_t cursor_width, cursor_height;
	int16_t max_cursor_width, max_cursor_height;
	bool cursor_visible;

	struct intel_plane_config plane_config;
	struct intel_crtc_config config;
	/* NOTE(review): new_config/new_enabled look like staged modeset
	 * state -- confirm against the modeset code in intel_display.c. */
	struct intel_crtc_config *new_config;
	bool new_enabled;

	uint32_t ddi_pll_sel;

	/* reset counter value when the last flip was submitted */
	unsigned int reset_counter;

	/* Access to these should be protected by dev_priv->irq_lock. */
	bool cpu_fifo_underrun_disabled;
	bool pch_fifo_underrun_disabled;

	/* per-pipe watermark state */
	struct {
		/* watermarks currently being used */
		struct intel_pipe_wm active;
	} wm;
};
399 | 400 | ||
/*
 * Snapshot of the plane parameters that feed the watermark calculations
 * (see the comment on intel_plane::wm for why this is stored separately).
 */
struct intel_plane_wm_parameters {
	uint32_t horiz_pixels;		/* plane width in pixels */
	uint8_t bytes_per_pixel;
	bool enabled;
	bool scaled;
};
406 | 407 | ||
/*
 * i915-private sprite/overlay plane state wrapping drm_plane, plus the
 * per-platform plane programming vtable.
 */
struct intel_plane {
	struct drm_plane base;
	int plane;
	enum pipe pipe;			/* pipe this plane is attached to */
	struct drm_i915_gem_object *obj;
	bool can_scale;			/* hardware scaling supported? */
	int max_downscale;
	u32 lut_r[1024], lut_g[1024], lut_b[1024];
	/* Current destination rectangle on the crtc ... */
	int crtc_x, crtc_y;
	unsigned int crtc_w, crtc_h;
	/* ... and source rectangle in the fb. */
	uint32_t src_x, src_y;
	uint32_t src_w, src_h;

	/* Since we need to change the watermarks before/after
	 * enabling/disabling the planes, we need to store the parameters here
	 * as the other pieces of the struct may not reflect the values we want
	 * for the watermark calculations. Currently only Haswell uses this.
	 */
	struct intel_plane_wm_parameters wm;

	/* Per-platform hooks for programming/disabling the plane and for
	 * the sprite color-key ioctls. */
	void (*update_plane)(struct drm_plane *plane,
			     struct drm_crtc *crtc,
			     struct drm_framebuffer *fb,
			     struct drm_i915_gem_object *obj,
			     int crtc_x, int crtc_y,
			     unsigned int crtc_w, unsigned int crtc_h,
			     uint32_t x, uint32_t y,
			     uint32_t src_w, uint32_t src_h);
	void (*disable_plane)(struct drm_plane *plane,
			      struct drm_crtc *crtc);
	int (*update_colorkey)(struct drm_plane *plane,
			       struct drm_intel_sprite_colorkey *key);
	void (*get_colorkey)(struct drm_plane *plane,
			     struct drm_intel_sprite_colorkey *key);
};
442 | 443 | ||
/*
 * Platform constants used when computing display FIFO watermarks.
 */
struct intel_watermark_params {
	unsigned long fifo_size;	/* total FIFO size available */
	unsigned long max_wm;		/* highest allowed watermark value */
	unsigned long default_wm;	/* fallback watermark value */
	unsigned long guard_size;
	unsigned long cacheline_size;	/* FIFO cacheline granularity */
};
450 | 451 | ||
/*
 * One row of the CxSR (self-refresh) latency lookup table, keyed by
 * platform type (desktop/DDR3) and FSB/memory frequency.
 */
struct cxsr_latency {
	int is_desktop;
	int is_ddr3;
	unsigned long fsb_freq;
	unsigned long mem_freq;
	unsigned long display_sr;		/* display self-refresh latency */
	unsigned long display_hpll_disable;
	unsigned long cursor_sr;		/* cursor self-refresh latency */
	unsigned long cursor_hpll_disable;
};
461 | 462 | ||
/* Upcasts from the embedded DRM core objects to their i915 wrappers. */
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
467 | 468 | ||
/*
 * HDMI-specific encoder state: port control register, DDC bus, audio and
 * color-range settings, plus per-platform infoframe hooks.
 */
struct intel_hdmi {
	u32 hdmi_reg;			/* HDMI port control register */
	int ddc_bus;
	uint32_t color_range;
	bool color_range_auto;		/* pick RGB range from the mode? */
	bool has_hdmi_sink;		/* sink is HDMI (vs plain DVI) */
	bool has_audio;
	enum hdmi_force_audio force_audio;	/* user override for audio */
	bool rgb_quant_range_selectable;
	/* Per-platform hooks for writing a single infoframe and for
	 * (re)programming the full infoframe set for a mode. */
	void (*write_infoframe)(struct drm_encoder *encoder,
				enum hdmi_infoframe_type type,
				const void *frame, ssize_t len);
	void (*set_infoframes)(struct drm_encoder *encoder,
			       struct drm_display_mode *adjusted_mode);
};
483 | 484 | ||
/* Maximum number of downstream-port entries cached from the branch device. */
#define DP_MAX_DOWNSTREAM_PORTS 0x10

/*
 * DisplayPort-specific encoder state: port/AUX registers, cached DPCD,
 * link-training state and the eDP panel power/backlight sequencing state.
 */
struct intel_dp {
	uint32_t output_reg;		/* DP port control register */
	uint32_t aux_ch_ctl_reg;	/* AUX channel control register */
	uint32_t DP;			/* cached port register value */
	bool has_audio;
	enum hdmi_force_audio force_audio;	/* user override for audio */
	uint32_t color_range;
	bool color_range_auto;
	uint8_t link_bw;		/* negotiated link bandwidth */
	uint8_t lane_count;		/* negotiated lane count */
	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];		/* cached sink caps */
	uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];	/* cached PSR caps */
	uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
	struct drm_dp_aux aux;
	uint8_t train_set[4];		/* per-lane training values */
	/* eDP panel power sequencing delays, plus backlight delays. */
	int panel_power_up_delay;
	int panel_power_down_delay;
	int panel_power_cycle_delay;
	int backlight_on_delay;
	int backlight_off_delay;
	struct delayed_work panel_vdd_work;	/* deferred VDD off */
	bool want_panel_vdd;
	/* Timestamps used to enforce the power sequencing delays above. */
	unsigned long last_power_cycle;
	unsigned long last_power_on;
	unsigned long last_backlight_off;
	bool psr_setup_done;
	bool use_tps3;		/* use training pattern 3 -- see link training */
	struct intel_connector *attached_connector;

	uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
	/*
	 * This function returns the value we have to program the AUX_CTL
	 * register with to kick off an AUX transaction.
	 */
	uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t aux_clock_divider);
};
525 | 526 | ||
/*
 * A digital port encoder.  Both the DP and HDMI sub-structs are embedded
 * since a physical port can drive either protocol.
 */
struct intel_digital_port {
	struct intel_encoder base;
	enum port port;
	u32 saved_port_bits;
	struct intel_dp dp;
	struct intel_hdmi hdmi;
};
533 | 534 | ||
534 | static inline int | 535 | static inline int |
535 | vlv_dport_to_channel(struct intel_digital_port *dport) | 536 | vlv_dport_to_channel(struct intel_digital_port *dport) |
536 | { | 537 | { |
537 | switch (dport->port) { | 538 | switch (dport->port) { |
538 | case PORT_B: | 539 | case PORT_B: |
539 | return DPIO_CH0; | 540 | return DPIO_CH0; |
540 | case PORT_C: | 541 | case PORT_C: |
541 | return DPIO_CH1; | 542 | return DPIO_CH1; |
542 | default: | 543 | default: |
543 | BUG(); | 544 | BUG(); |
544 | } | 545 | } |
545 | } | 546 | } |
546 | 547 | ||
547 | static inline struct drm_crtc * | 548 | static inline struct drm_crtc * |
548 | intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) | 549 | intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) |
549 | { | 550 | { |
550 | struct drm_i915_private *dev_priv = dev->dev_private; | 551 | struct drm_i915_private *dev_priv = dev->dev_private; |
551 | return dev_priv->pipe_to_crtc_mapping[pipe]; | 552 | return dev_priv->pipe_to_crtc_mapping[pipe]; |
552 | } | 553 | } |
553 | 554 | ||
554 | static inline struct drm_crtc * | 555 | static inline struct drm_crtc * |
555 | intel_get_crtc_for_plane(struct drm_device *dev, int plane) | 556 | intel_get_crtc_for_plane(struct drm_device *dev, int plane) |
556 | { | 557 | { |
557 | struct drm_i915_private *dev_priv = dev->dev_private; | 558 | struct drm_i915_private *dev_priv = dev->dev_private; |
558 | return dev_priv->plane_to_crtc_mapping[plane]; | 559 | return dev_priv->plane_to_crtc_mapping[plane]; |
559 | } | 560 | } |
560 | 561 | ||
/*
 * Deferred work item for completing a page flip: unpins the old
 * framebuffer object and delivers the vblank event, if any.
 */
struct intel_unpin_work {
	struct work_struct work;
	struct drm_crtc *crtc;
	struct drm_i915_gem_object *old_fb_obj;	/* fb being flipped away from */
	struct drm_i915_gem_object *pending_flip_obj;	/* fb being flipped to */
	struct drm_pending_vblank_event *event;	/* userspace completion event */
	/* Flip progress, one of the INTEL_FLIP_* values below. */
	atomic_t pending;
#define INTEL_FLIP_INACTIVE	0
#define INTEL_FLIP_PENDING	1
#define INTEL_FLIP_COMPLETE	2
	bool enable_stall_check;
};
573 | 574 | ||
/*
 * Scratch state for a set_config (modeset) call.  The save_* arrays hold
 * the previous connector/encoder/crtc routing -- presumably so it can be
 * restored on failure; verify against the set_config implementation.
 */
struct intel_set_config {
	struct drm_encoder **save_connector_encoders;
	struct drm_crtc **save_encoder_crtcs;
	bool *save_crtc_enabled;

	/* What kind of update this call requires. */
	bool fb_changed;
	bool mode_changed;
};
582 | 583 | ||
/*
 * State handed back by intel_get_load_detect_pipe() so that
 * intel_release_load_detect_pipe() can undo a temporary load-detect
 * modeset.
 */
struct intel_load_detect_pipe {
	struct drm_framebuffer *release_fb;	/* temp fb to release, if any */
	bool load_detect_temp;		/* was a temporary mode set? */
	int dpms_mode;			/* dpms mode to restore */
};
588 | 589 | ||
589 | static inline struct intel_encoder * | 590 | static inline struct intel_encoder * |
590 | intel_attached_encoder(struct drm_connector *connector) | 591 | intel_attached_encoder(struct drm_connector *connector) |
591 | { | 592 | { |
592 | return to_intel_connector(connector)->encoder; | 593 | return to_intel_connector(connector)->encoder; |
593 | } | 594 | } |
594 | 595 | ||
/* Upcast from a drm_encoder to its containing intel_digital_port. */
static inline struct intel_digital_port *
enc_to_dig_port(struct drm_encoder *encoder)
{
	return container_of(encoder, struct intel_digital_port, base.base);
}
600 | 601 | ||
601 | static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) | 602 | static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) |
602 | { | 603 | { |
603 | return &enc_to_dig_port(encoder)->dp; | 604 | return &enc_to_dig_port(encoder)->dp; |
604 | } | 605 | } |
605 | 606 | ||
/* Upcast from an embedded intel_dp back to its intel_digital_port. */
static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
	return container_of(intel_dp, struct intel_digital_port, dp);
}
611 | 612 | ||
/* Upcast from an embedded intel_hdmi back to its intel_digital_port. */
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
	return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
617 | 618 | ||
618 | 619 | ||
/* i915_irq.c -- interrupt management and FIFO underrun reporting */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable);
/* Caller-holds-lock variant of the above. */
bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable);
void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void hsw_runtime_pm_disable_interrupts(struct drm_device *dev);
void hsw_runtime_pm_restore_interrupts(struct drm_device *dev);


/* intel_crt.c -- analog VGA (CRT) output */
void intel_crt_init(struct drm_device *dev);
637 | 638 | ||
638 | 639 | ||
/* intel_ddi.c -- DDI port programming (HSW+) */
void intel_prepare_ddi(struct drm_device *dev);
void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
void intel_ddi_pll_init(struct drm_device *dev);
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
				       enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
bool intel_ddi_pll_select(struct intel_crtc *crtc);
void intel_ddi_pll_enable(struct intel_crtc *crtc);
void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
			  struct intel_crtc_config *pipe_config);
662 | 663 | ||
663 | 664 | ||
/* intel_display.c -- core modeset/CRTC helpers */
const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
			struct intel_ring_buffer *ring);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
void intel_encoder_destroy(struct drm_encoder *encoder);
void intel_connector_dpms(struct drm_connector *, int mode);
bool intel_connector_get_hw_state(struct intel_connector *connector);
void intel_modeset_check_state(struct drm_device *dev);
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				struct intel_digital_port *port);
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder);
struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc);
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe);
void intel_wait_for_vblank(struct drm_device *dev, int pipe);
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport);
/* Temporary load-detect modeset; release with the function below. */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old);
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old);
int intel_pin_and_fence_fb_obj(struct drm_device *dev,
			       struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *pipelined);
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj);
void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
/* Hardware state assertions; the _enabled/_disabled macros fix @state. */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
void intel_write_eld(struct drm_encoder *encoder,
		     struct drm_display_mode *mode);
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
					     unsigned int tiling_mode,
					     unsigned int bpp,
					     unsigned int pitch);
void intel_display_handle_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_config *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
void
ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
				int dotclock);
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
int valleyview_get_vco(struct drm_i915_private *dev_priv);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
				 struct intel_crtc_config *pipe_config);
int intel_format_to_fourcc(int format);
753 | 754 | ||
754 | /* intel_dp.c */ | 755 | /* intel_dp.c */ |
755 | void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); | 756 | void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); |
756 | bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | 757 | bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, |
757 | struct intel_connector *intel_connector); | 758 | struct intel_connector *intel_connector); |
758 | void intel_dp_start_link_train(struct intel_dp *intel_dp); | 759 | void intel_dp_start_link_train(struct intel_dp *intel_dp); |
759 | void intel_dp_complete_link_train(struct intel_dp *intel_dp); | 760 | void intel_dp_complete_link_train(struct intel_dp *intel_dp); |
760 | void intel_dp_stop_link_train(struct intel_dp *intel_dp); | 761 | void intel_dp_stop_link_train(struct intel_dp *intel_dp); |
761 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); | 762 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); |
762 | void intel_dp_encoder_destroy(struct drm_encoder *encoder); | 763 | void intel_dp_encoder_destroy(struct drm_encoder *encoder); |
763 | void intel_dp_check_link_status(struct intel_dp *intel_dp); | 764 | void intel_dp_check_link_status(struct intel_dp *intel_dp); |
764 | int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); | 765 | int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); |
765 | bool intel_dp_compute_config(struct intel_encoder *encoder, | 766 | bool intel_dp_compute_config(struct intel_encoder *encoder, |
766 | struct intel_crtc_config *pipe_config); | 767 | struct intel_crtc_config *pipe_config); |
767 | bool intel_dp_is_edp(struct drm_device *dev, enum port port); | 768 | bool intel_dp_is_edp(struct drm_device *dev, enum port port); |
768 | void intel_edp_backlight_on(struct intel_dp *intel_dp); | 769 | void intel_edp_backlight_on(struct intel_dp *intel_dp); |
769 | void intel_edp_backlight_off(struct intel_dp *intel_dp); | 770 | void intel_edp_backlight_off(struct intel_dp *intel_dp); |
770 | void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); | 771 | void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); |
771 | void intel_edp_panel_on(struct intel_dp *intel_dp); | 772 | void intel_edp_panel_on(struct intel_dp *intel_dp); |
772 | void intel_edp_panel_off(struct intel_dp *intel_dp); | 773 | void intel_edp_panel_off(struct intel_dp *intel_dp); |
773 | void intel_edp_psr_enable(struct intel_dp *intel_dp); | 774 | void intel_edp_psr_enable(struct intel_dp *intel_dp); |
774 | void intel_edp_psr_disable(struct intel_dp *intel_dp); | 775 | void intel_edp_psr_disable(struct intel_dp *intel_dp); |
775 | void intel_edp_psr_update(struct drm_device *dev); | 776 | void intel_edp_psr_update(struct drm_device *dev); |
776 | 777 | ||
777 | 778 | ||
778 | /* intel_dsi.c */ | 779 | /* intel_dsi.c */ |
779 | bool intel_dsi_init(struct drm_device *dev); | 780 | bool intel_dsi_init(struct drm_device *dev); |
780 | 781 | ||
781 | 782 | ||
782 | /* intel_dvo.c */ | 783 | /* intel_dvo.c */ |
783 | void intel_dvo_init(struct drm_device *dev); | 784 | void intel_dvo_init(struct drm_device *dev); |
784 | 785 | ||
785 | 786 | ||
786 | /* legacy fbdev emulation in intel_fbdev.c */ | 787 | /* legacy fbdev emulation in intel_fbdev.c */ |
787 | #ifdef CONFIG_DRM_I915_FBDEV | 788 | #ifdef CONFIG_DRM_I915_FBDEV |
788 | extern int intel_fbdev_init(struct drm_device *dev); | 789 | extern int intel_fbdev_init(struct drm_device *dev); |
789 | extern void intel_fbdev_initial_config(struct drm_device *dev); | 790 | extern void intel_fbdev_initial_config(struct drm_device *dev); |
790 | extern void intel_fbdev_fini(struct drm_device *dev); | 791 | extern void intel_fbdev_fini(struct drm_device *dev); |
791 | extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); | 792 | extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); |
792 | extern void intel_fbdev_output_poll_changed(struct drm_device *dev); | 793 | extern void intel_fbdev_output_poll_changed(struct drm_device *dev); |
793 | extern void intel_fbdev_restore_mode(struct drm_device *dev); | 794 | extern void intel_fbdev_restore_mode(struct drm_device *dev); |
794 | #else | 795 | #else |
795 | static inline int intel_fbdev_init(struct drm_device *dev) | 796 | static inline int intel_fbdev_init(struct drm_device *dev) |
796 | { | 797 | { |
797 | return 0; | 798 | return 0; |
798 | } | 799 | } |
799 | 800 | ||
800 | static inline void intel_fbdev_initial_config(struct drm_device *dev) | 801 | static inline void intel_fbdev_initial_config(struct drm_device *dev) |
801 | { | 802 | { |
802 | } | 803 | } |
803 | 804 | ||
804 | static inline void intel_fbdev_fini(struct drm_device *dev) | 805 | static inline void intel_fbdev_fini(struct drm_device *dev) |
805 | { | 806 | { |
806 | } | 807 | } |
807 | 808 | ||
808 | static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state) | 809 | static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state) |
809 | { | 810 | { |
810 | } | 811 | } |
811 | 812 | ||
812 | static inline void intel_fbdev_restore_mode(struct drm_device *dev) | 813 | static inline void intel_fbdev_restore_mode(struct drm_device *dev) |
813 | { | 814 | { |
814 | } | 815 | } |
815 | #endif | 816 | #endif |
816 | 817 | ||
817 | /* intel_hdmi.c */ | 818 | /* intel_hdmi.c */ |
818 | void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); | 819 | void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); |
819 | void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | 820 | void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, |
820 | struct intel_connector *intel_connector); | 821 | struct intel_connector *intel_connector); |
821 | struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); | 822 | struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); |
822 | bool intel_hdmi_compute_config(struct intel_encoder *encoder, | 823 | bool intel_hdmi_compute_config(struct intel_encoder *encoder, |
823 | struct intel_crtc_config *pipe_config); | 824 | struct intel_crtc_config *pipe_config); |
824 | 825 | ||
825 | 826 | ||
826 | /* intel_lvds.c */ | 827 | /* intel_lvds.c */ |
827 | void intel_lvds_init(struct drm_device *dev); | 828 | void intel_lvds_init(struct drm_device *dev); |
828 | bool intel_is_dual_link_lvds(struct drm_device *dev); | 829 | bool intel_is_dual_link_lvds(struct drm_device *dev); |
829 | 830 | ||
830 | 831 | ||
831 | /* intel_modes.c */ | 832 | /* intel_modes.c */ |
832 | int intel_connector_update_modes(struct drm_connector *connector, | 833 | int intel_connector_update_modes(struct drm_connector *connector, |
833 | struct edid *edid); | 834 | struct edid *edid); |
834 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); | 835 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); |
835 | void intel_attach_force_audio_property(struct drm_connector *connector); | 836 | void intel_attach_force_audio_property(struct drm_connector *connector); |
836 | void intel_attach_broadcast_rgb_property(struct drm_connector *connector); | 837 | void intel_attach_broadcast_rgb_property(struct drm_connector *connector); |
837 | 838 | ||
838 | 839 | ||
839 | /* intel_overlay.c */ | 840 | /* intel_overlay.c */ |
840 | void intel_setup_overlay(struct drm_device *dev); | 841 | void intel_setup_overlay(struct drm_device *dev); |
841 | void intel_cleanup_overlay(struct drm_device *dev); | 842 | void intel_cleanup_overlay(struct drm_device *dev); |
842 | int intel_overlay_switch_off(struct intel_overlay *overlay); | 843 | int intel_overlay_switch_off(struct intel_overlay *overlay); |
843 | int intel_overlay_put_image(struct drm_device *dev, void *data, | 844 | int intel_overlay_put_image(struct drm_device *dev, void *data, |
844 | struct drm_file *file_priv); | 845 | struct drm_file *file_priv); |
845 | int intel_overlay_attrs(struct drm_device *dev, void *data, | 846 | int intel_overlay_attrs(struct drm_device *dev, void *data, |
846 | struct drm_file *file_priv); | 847 | struct drm_file *file_priv); |
847 | 848 | ||
848 | 849 | ||
849 | /* intel_panel.c */ | 850 | /* intel_panel.c */ |
850 | int intel_panel_init(struct intel_panel *panel, | 851 | int intel_panel_init(struct intel_panel *panel, |
851 | struct drm_display_mode *fixed_mode, | 852 | struct drm_display_mode *fixed_mode, |
852 | struct drm_display_mode *downclock_mode); | 853 | struct drm_display_mode *downclock_mode); |
853 | void intel_panel_fini(struct intel_panel *panel); | 854 | void intel_panel_fini(struct intel_panel *panel); |
854 | void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, | 855 | void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, |
855 | struct drm_display_mode *adjusted_mode); | 856 | struct drm_display_mode *adjusted_mode); |
856 | void intel_pch_panel_fitting(struct intel_crtc *crtc, | 857 | void intel_pch_panel_fitting(struct intel_crtc *crtc, |
857 | struct intel_crtc_config *pipe_config, | 858 | struct intel_crtc_config *pipe_config, |
858 | int fitting_mode); | 859 | int fitting_mode); |
859 | void intel_gmch_panel_fitting(struct intel_crtc *crtc, | 860 | void intel_gmch_panel_fitting(struct intel_crtc *crtc, |
860 | struct intel_crtc_config *pipe_config, | 861 | struct intel_crtc_config *pipe_config, |
861 | int fitting_mode); | 862 | int fitting_mode); |
862 | void intel_panel_set_backlight(struct intel_connector *connector, u32 level, | 863 | void intel_panel_set_backlight(struct intel_connector *connector, u32 level, |
863 | u32 max); | 864 | u32 max); |
864 | int intel_panel_setup_backlight(struct drm_connector *connector); | 865 | int intel_panel_setup_backlight(struct drm_connector *connector); |
865 | void intel_panel_enable_backlight(struct intel_connector *connector); | 866 | void intel_panel_enable_backlight(struct intel_connector *connector); |
866 | void intel_panel_disable_backlight(struct intel_connector *connector); | 867 | void intel_panel_disable_backlight(struct intel_connector *connector); |
867 | void intel_panel_destroy_backlight(struct drm_connector *connector); | 868 | void intel_panel_destroy_backlight(struct drm_connector *connector); |
868 | void intel_panel_init_backlight_funcs(struct drm_device *dev); | 869 | void intel_panel_init_backlight_funcs(struct drm_device *dev); |
869 | enum drm_connector_status intel_panel_detect(struct drm_device *dev); | 870 | enum drm_connector_status intel_panel_detect(struct drm_device *dev); |
870 | extern struct drm_display_mode *intel_find_panel_downclock( | 871 | extern struct drm_display_mode *intel_find_panel_downclock( |
871 | struct drm_device *dev, | 872 | struct drm_device *dev, |
872 | struct drm_display_mode *fixed_mode, | 873 | struct drm_display_mode *fixed_mode, |
873 | struct drm_connector *connector); | 874 | struct drm_connector *connector); |
874 | 875 | ||
875 | /* intel_pm.c */ | 876 | /* intel_pm.c */ |
876 | void intel_init_clock_gating(struct drm_device *dev); | 877 | void intel_init_clock_gating(struct drm_device *dev); |
877 | void intel_suspend_hw(struct drm_device *dev); | 878 | void intel_suspend_hw(struct drm_device *dev); |
878 | void intel_update_watermarks(struct drm_crtc *crtc); | 879 | void intel_update_watermarks(struct drm_crtc *crtc); |
879 | void intel_update_sprite_watermarks(struct drm_plane *plane, | 880 | void intel_update_sprite_watermarks(struct drm_plane *plane, |
880 | struct drm_crtc *crtc, | 881 | struct drm_crtc *crtc, |
881 | uint32_t sprite_width, int pixel_size, | 882 | uint32_t sprite_width, int pixel_size, |
882 | bool enabled, bool scaled); | 883 | bool enabled, bool scaled); |
883 | void intel_init_pm(struct drm_device *dev); | 884 | void intel_init_pm(struct drm_device *dev); |
884 | void intel_pm_setup(struct drm_device *dev); | 885 | void intel_pm_setup(struct drm_device *dev); |
885 | bool intel_fbc_enabled(struct drm_device *dev); | 886 | bool intel_fbc_enabled(struct drm_device *dev); |
886 | void intel_update_fbc(struct drm_device *dev); | 887 | void intel_update_fbc(struct drm_device *dev); |
887 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv); | 888 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv); |
888 | void intel_gpu_ips_teardown(void); | 889 | void intel_gpu_ips_teardown(void); |
889 | int intel_power_domains_init(struct drm_i915_private *); | 890 | int intel_power_domains_init(struct drm_i915_private *); |
890 | void intel_power_domains_remove(struct drm_i915_private *); | 891 | void intel_power_domains_remove(struct drm_i915_private *); |
891 | bool intel_display_power_enabled(struct drm_i915_private *dev_priv, | 892 | bool intel_display_power_enabled(struct drm_i915_private *dev_priv, |
892 | enum intel_display_power_domain domain); | 893 | enum intel_display_power_domain domain); |
893 | bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv, | 894 | bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv, |
894 | enum intel_display_power_domain domain); | 895 | enum intel_display_power_domain domain); |
895 | void intel_display_power_get(struct drm_i915_private *dev_priv, | 896 | void intel_display_power_get(struct drm_i915_private *dev_priv, |
896 | enum intel_display_power_domain domain); | 897 | enum intel_display_power_domain domain); |
897 | void intel_display_power_put(struct drm_i915_private *dev_priv, | 898 | void intel_display_power_put(struct drm_i915_private *dev_priv, |
898 | enum intel_display_power_domain domain); | 899 | enum intel_display_power_domain domain); |
899 | void intel_power_domains_init_hw(struct drm_i915_private *dev_priv); | 900 | void intel_power_domains_init_hw(struct drm_i915_private *dev_priv); |
900 | void intel_init_gt_powersave(struct drm_device *dev); | 901 | void intel_init_gt_powersave(struct drm_device *dev); |
901 | void intel_cleanup_gt_powersave(struct drm_device *dev); | 902 | void intel_cleanup_gt_powersave(struct drm_device *dev); |
902 | void intel_enable_gt_powersave(struct drm_device *dev); | 903 | void intel_enable_gt_powersave(struct drm_device *dev); |
903 | void intel_disable_gt_powersave(struct drm_device *dev); | 904 | void intel_disable_gt_powersave(struct drm_device *dev); |
904 | void ironlake_teardown_rc6(struct drm_device *dev); | 905 | void ironlake_teardown_rc6(struct drm_device *dev); |
905 | void gen6_update_ring_freq(struct drm_device *dev); | 906 | void gen6_update_ring_freq(struct drm_device *dev); |
906 | void gen6_rps_idle(struct drm_i915_private *dev_priv); | 907 | void gen6_rps_idle(struct drm_i915_private *dev_priv); |
907 | void gen6_rps_boost(struct drm_i915_private *dev_priv); | 908 | void gen6_rps_boost(struct drm_i915_private *dev_priv); |
908 | void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); | 909 | void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); |
909 | void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); | 910 | void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); |
910 | void intel_runtime_pm_get(struct drm_i915_private *dev_priv); | 911 | void intel_runtime_pm_get(struct drm_i915_private *dev_priv); |
911 | void intel_runtime_pm_put(struct drm_i915_private *dev_priv); | 912 | void intel_runtime_pm_put(struct drm_i915_private *dev_priv); |
912 | void intel_init_runtime_pm(struct drm_i915_private *dev_priv); | 913 | void intel_init_runtime_pm(struct drm_i915_private *dev_priv); |
913 | void intel_fini_runtime_pm(struct drm_i915_private *dev_priv); | 914 | void intel_fini_runtime_pm(struct drm_i915_private *dev_priv); |
914 | void ilk_wm_get_hw_state(struct drm_device *dev); | 915 | void ilk_wm_get_hw_state(struct drm_device *dev); |
915 | 916 | ||
916 | 917 | ||
917 | /* intel_sdvo.c */ | 918 | /* intel_sdvo.c */ |
918 | bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); | 919 | bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); |
919 | 920 | ||
920 | 921 | ||
921 | /* intel_sprite.c */ | 922 | /* intel_sprite.c */ |
922 | int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); | 923 | int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); |
923 | void intel_flush_primary_plane(struct drm_i915_private *dev_priv, | 924 | void intel_flush_primary_plane(struct drm_i915_private *dev_priv, |
924 | enum plane plane); | 925 | enum plane plane); |
925 | void intel_plane_restore(struct drm_plane *plane); | 926 | void intel_plane_restore(struct drm_plane *plane); |
926 | void intel_plane_disable(struct drm_plane *plane); | 927 | void intel_plane_disable(struct drm_plane *plane); |
927 | int intel_sprite_set_colorkey(struct drm_device *dev, void *data, | 928 | int intel_sprite_set_colorkey(struct drm_device *dev, void *data, |
928 | struct drm_file *file_priv); | 929 | struct drm_file *file_priv); |
929 | int intel_sprite_get_colorkey(struct drm_device *dev, void *data, | 930 | int intel_sprite_get_colorkey(struct drm_device *dev, void *data, |
930 | struct drm_file *file_priv); | 931 | struct drm_file *file_priv); |
931 | 932 | ||
932 | 933 | ||
933 | /* intel_tv.c */ | 934 | /* intel_tv.c */ |
934 | void intel_tv_init(struct drm_device *dev); | 935 | void intel_tv_init(struct drm_device *dev); |
935 | 936 | ||
936 | #endif /* __INTEL_DRV_H__ */ | 937 | #endif /* __INTEL_DRV_H__ */ |
937 | 938 |
drivers/gpu/drm/i915/intel_fbdev.c
1 | /* | 1 | /* |
2 | * Copyright © 2007 David Airlie | 2 | * Copyright © 2007 David Airlie |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation | 6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the | 8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: | 9 | * Software is furnished to do so, subject to the following conditions: |
10 | * | 10 | * |
11 | * The above copyright notice and this permission notice (including the next | 11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the | 12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. | 13 | * Software. |
14 | * | 14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
21 | * DEALINGS IN THE SOFTWARE. | 21 | * DEALINGS IN THE SOFTWARE. |
22 | * | 22 | * |
23 | * Authors: | 23 | * Authors: |
24 | * David Airlie | 24 | * David Airlie |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <linux/errno.h> | 29 | #include <linux/errno.h> |
30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
31 | #include <linux/mm.h> | 31 | #include <linux/mm.h> |
32 | #include <linux/tty.h> | 32 | #include <linux/tty.h> |
33 | #include <linux/sysrq.h> | 33 | #include <linux/sysrq.h> |
34 | #include <linux/delay.h> | 34 | #include <linux/delay.h> |
35 | #include <linux/fb.h> | 35 | #include <linux/fb.h> |
36 | #include <linux/init.h> | 36 | #include <linux/init.h> |
37 | #include <linux/vga_switcheroo.h> | 37 | #include <linux/vga_switcheroo.h> |
38 | 38 | ||
39 | #include <drm/drmP.h> | 39 | #include <drm/drmP.h> |
40 | #include <drm/drm_crtc.h> | 40 | #include <drm/drm_crtc.h> |
41 | #include <drm/drm_fb_helper.h> | 41 | #include <drm/drm_fb_helper.h> |
42 | #include "intel_drv.h" | 42 | #include "intel_drv.h" |
43 | #include <drm/i915_drm.h> | 43 | #include <drm/i915_drm.h> |
44 | #include "i915_drv.h" | 44 | #include "i915_drv.h" |
45 | 45 | ||
46 | static struct fb_ops intelfb_ops = { | 46 | static struct fb_ops intelfb_ops = { |
47 | .owner = THIS_MODULE, | 47 | .owner = THIS_MODULE, |
48 | .fb_check_var = drm_fb_helper_check_var, | 48 | .fb_check_var = drm_fb_helper_check_var, |
49 | .fb_set_par = drm_fb_helper_set_par, | 49 | .fb_set_par = drm_fb_helper_set_par, |
50 | .fb_fillrect = cfb_fillrect, | 50 | .fb_fillrect = cfb_fillrect, |
51 | .fb_copyarea = cfb_copyarea, | 51 | .fb_copyarea = cfb_copyarea, |
52 | .fb_imageblit = cfb_imageblit, | 52 | .fb_imageblit = cfb_imageblit, |
53 | .fb_pan_display = drm_fb_helper_pan_display, | 53 | .fb_pan_display = drm_fb_helper_pan_display, |
54 | .fb_blank = drm_fb_helper_blank, | 54 | .fb_blank = drm_fb_helper_blank, |
55 | .fb_setcmap = drm_fb_helper_setcmap, | 55 | .fb_setcmap = drm_fb_helper_setcmap, |
56 | .fb_debug_enter = drm_fb_helper_debug_enter, | 56 | .fb_debug_enter = drm_fb_helper_debug_enter, |
57 | .fb_debug_leave = drm_fb_helper_debug_leave, | 57 | .fb_debug_leave = drm_fb_helper_debug_leave, |
58 | }; | 58 | }; |
59 | 59 | ||
60 | static int intelfb_alloc(struct drm_fb_helper *helper, | 60 | static int intelfb_alloc(struct drm_fb_helper *helper, |
61 | struct drm_fb_helper_surface_size *sizes) | 61 | struct drm_fb_helper_surface_size *sizes) |
62 | { | 62 | { |
63 | struct intel_fbdev *ifbdev = | 63 | struct intel_fbdev *ifbdev = |
64 | container_of(helper, struct intel_fbdev, helper); | 64 | container_of(helper, struct intel_fbdev, helper); |
65 | struct drm_framebuffer *fb; | 65 | struct drm_framebuffer *fb; |
66 | struct drm_device *dev = helper->dev; | 66 | struct drm_device *dev = helper->dev; |
67 | struct drm_mode_fb_cmd2 mode_cmd = {}; | 67 | struct drm_mode_fb_cmd2 mode_cmd = {}; |
68 | struct drm_i915_gem_object *obj; | 68 | struct drm_i915_gem_object *obj; |
69 | int size, ret; | 69 | int size, ret; |
70 | 70 | ||
71 | /* we don't do packed 24bpp */ | 71 | /* we don't do packed 24bpp */ |
72 | if (sizes->surface_bpp == 24) | 72 | if (sizes->surface_bpp == 24) |
73 | sizes->surface_bpp = 32; | 73 | sizes->surface_bpp = 32; |
74 | 74 | ||
75 | mode_cmd.width = sizes->surface_width; | 75 | mode_cmd.width = sizes->surface_width; |
76 | mode_cmd.height = sizes->surface_height; | 76 | mode_cmd.height = sizes->surface_height; |
77 | 77 | ||
78 | mode_cmd.pitches[0] = ALIGN(mode_cmd.width * | 78 | mode_cmd.pitches[0] = ALIGN(mode_cmd.width * |
79 | DIV_ROUND_UP(sizes->surface_bpp, 8), 64); | 79 | DIV_ROUND_UP(sizes->surface_bpp, 8), 64); |
80 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, | 80 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, |
81 | sizes->surface_depth); | 81 | sizes->surface_depth); |
82 | 82 | ||
83 | size = mode_cmd.pitches[0] * mode_cmd.height; | 83 | size = mode_cmd.pitches[0] * mode_cmd.height; |
84 | size = ALIGN(size, PAGE_SIZE); | 84 | size = ALIGN(size, PAGE_SIZE); |
85 | obj = i915_gem_object_create_stolen(dev, size); | 85 | obj = i915_gem_object_create_stolen(dev, size); |
86 | if (obj == NULL) | 86 | if (obj == NULL) |
87 | obj = i915_gem_alloc_object(dev, size); | 87 | obj = i915_gem_alloc_object(dev, size); |
88 | if (!obj) { | 88 | if (!obj) { |
89 | DRM_ERROR("failed to allocate framebuffer\n"); | 89 | DRM_ERROR("failed to allocate framebuffer\n"); |
90 | ret = -ENOMEM; | 90 | ret = -ENOMEM; |
91 | goto out; | 91 | goto out; |
92 | } | 92 | } |
93 | 93 | ||
94 | /* Flush everything out, we'll be doing GTT only from now on */ | 94 | /* Flush everything out, we'll be doing GTT only from now on */ |
95 | ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); | 95 | ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); |
96 | if (ret) { | 96 | if (ret) { |
97 | DRM_ERROR("failed to pin obj: %d\n", ret); | 97 | DRM_ERROR("failed to pin obj: %d\n", ret); |
98 | goto out_unref; | 98 | goto out_unref; |
99 | } | 99 | } |
100 | 100 | ||
101 | fb = __intel_framebuffer_create(dev, &mode_cmd, obj); | 101 | fb = __intel_framebuffer_create(dev, &mode_cmd, obj); |
102 | if (IS_ERR(fb)) { | 102 | if (IS_ERR(fb)) { |
103 | ret = PTR_ERR(fb); | 103 | ret = PTR_ERR(fb); |
104 | goto out_unpin; | 104 | goto out_unpin; |
105 | } | 105 | } |
106 | 106 | ||
107 | ifbdev->fb = to_intel_framebuffer(fb); | 107 | ifbdev->fb = to_intel_framebuffer(fb); |
108 | 108 | ||
109 | return 0; | 109 | return 0; |
110 | 110 | ||
111 | out_unpin: | 111 | out_unpin: |
112 | i915_gem_object_ggtt_unpin(obj); | 112 | i915_gem_object_ggtt_unpin(obj); |
113 | out_unref: | 113 | out_unref: |
114 | drm_gem_object_unreference(&obj->base); | 114 | drm_gem_object_unreference(&obj->base); |
115 | out: | 115 | out: |
116 | return ret; | 116 | return ret; |
117 | } | 117 | } |
118 | 118 | ||
119 | static int intelfb_create(struct drm_fb_helper *helper, | 119 | static int intelfb_create(struct drm_fb_helper *helper, |
120 | struct drm_fb_helper_surface_size *sizes) | 120 | struct drm_fb_helper_surface_size *sizes) |
121 | { | 121 | { |
122 | struct intel_fbdev *ifbdev = | 122 | struct intel_fbdev *ifbdev = |
123 | container_of(helper, struct intel_fbdev, helper); | 123 | container_of(helper, struct intel_fbdev, helper); |
124 | struct intel_framebuffer *intel_fb = ifbdev->fb; | 124 | struct intel_framebuffer *intel_fb = ifbdev->fb; |
125 | struct drm_device *dev = helper->dev; | 125 | struct drm_device *dev = helper->dev; |
126 | struct drm_i915_private *dev_priv = dev->dev_private; | 126 | struct drm_i915_private *dev_priv = dev->dev_private; |
127 | struct fb_info *info; | 127 | struct fb_info *info; |
128 | struct drm_framebuffer *fb; | 128 | struct drm_framebuffer *fb; |
129 | struct drm_i915_gem_object *obj; | 129 | struct drm_i915_gem_object *obj; |
130 | int size, ret; | 130 | int size, ret; |
131 | bool prealloc = false; | 131 | bool prealloc = false; |
132 | 132 | ||
133 | mutex_lock(&dev->struct_mutex); | 133 | mutex_lock(&dev->struct_mutex); |
134 | 134 | ||
135 | if (intel_fb && | ||
136 | (sizes->fb_width > intel_fb->base.width || | ||
137 | sizes->fb_height > intel_fb->base.height)) { | ||
138 | DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d)," | ||
139 | " releasing it\n", | ||
140 | intel_fb->base.width, intel_fb->base.height, | ||
141 | sizes->fb_width, sizes->fb_height); | ||
142 | drm_framebuffer_unreference(&intel_fb->base); | ||
143 | intel_fb = ifbdev->fb = NULL; | ||
144 | } | ||
135 | if (!intel_fb || WARN_ON(!intel_fb->obj)) { | 145 | if (!intel_fb || WARN_ON(!intel_fb->obj)) { |
136 | DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); | 146 | DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); |
137 | ret = intelfb_alloc(helper, sizes); | 147 | ret = intelfb_alloc(helper, sizes); |
138 | if (ret) | 148 | if (ret) |
139 | goto out_unlock; | 149 | goto out_unlock; |
140 | intel_fb = ifbdev->fb; | 150 | intel_fb = ifbdev->fb; |
141 | } else { | 151 | } else { |
142 | DRM_DEBUG_KMS("re-using BIOS fb\n"); | 152 | DRM_DEBUG_KMS("re-using BIOS fb\n"); |
143 | prealloc = true; | 153 | prealloc = true; |
144 | sizes->fb_width = intel_fb->base.width; | 154 | sizes->fb_width = intel_fb->base.width; |
145 | sizes->fb_height = intel_fb->base.height; | 155 | sizes->fb_height = intel_fb->base.height; |
146 | } | 156 | } |
147 | 157 | ||
148 | obj = intel_fb->obj; | 158 | obj = intel_fb->obj; |
149 | size = obj->base.size; | 159 | size = obj->base.size; |
150 | 160 | ||
151 | info = framebuffer_alloc(0, &dev->pdev->dev); | 161 | info = framebuffer_alloc(0, &dev->pdev->dev); |
152 | if (!info) { | 162 | if (!info) { |
153 | ret = -ENOMEM; | 163 | ret = -ENOMEM; |
154 | goto out_unpin; | 164 | goto out_unpin; |
155 | } | 165 | } |
156 | 166 | ||
157 | info->par = helper; | 167 | info->par = helper; |
158 | 168 | ||
159 | fb = &ifbdev->fb->base; | 169 | fb = &ifbdev->fb->base; |
160 | 170 | ||
161 | ifbdev->helper.fb = fb; | 171 | ifbdev->helper.fb = fb; |
162 | ifbdev->helper.fbdev = info; | 172 | ifbdev->helper.fbdev = info; |
163 | 173 | ||
164 | strcpy(info->fix.id, "inteldrmfb"); | 174 | strcpy(info->fix.id, "inteldrmfb"); |
165 | 175 | ||
166 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; | 176 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; |
167 | info->fbops = &intelfb_ops; | 177 | info->fbops = &intelfb_ops; |
168 | 178 | ||
169 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | 179 | ret = fb_alloc_cmap(&info->cmap, 256, 0); |
170 | if (ret) { | 180 | if (ret) { |
171 | ret = -ENOMEM; | 181 | ret = -ENOMEM; |
172 | goto out_unpin; | 182 | goto out_unpin; |
173 | } | 183 | } |
174 | /* setup aperture base/size for vesafb takeover */ | 184 | /* setup aperture base/size for vesafb takeover */ |
175 | info->apertures = alloc_apertures(1); | 185 | info->apertures = alloc_apertures(1); |
176 | if (!info->apertures) { | 186 | if (!info->apertures) { |
177 | ret = -ENOMEM; | 187 | ret = -ENOMEM; |
178 | goto out_unpin; | 188 | goto out_unpin; |
179 | } | 189 | } |
180 | info->apertures->ranges[0].base = dev->mode_config.fb_base; | 190 | info->apertures->ranges[0].base = dev->mode_config.fb_base; |
181 | info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; | 191 | info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; |
182 | 192 | ||
183 | info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj); | 193 | info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj); |
184 | info->fix.smem_len = size; | 194 | info->fix.smem_len = size; |
185 | 195 | ||
186 | info->screen_base = | 196 | info->screen_base = |
187 | ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), | 197 | ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), |
188 | size); | 198 | size); |
189 | if (!info->screen_base) { | 199 | if (!info->screen_base) { |
190 | ret = -ENOSPC; | 200 | ret = -ENOSPC; |
191 | goto out_unpin; | 201 | goto out_unpin; |
192 | } | 202 | } |
193 | info->screen_size = size; | 203 | info->screen_size = size; |
194 | 204 | ||
195 | /* This driver doesn't need a VT switch to restore the mode on resume */ | 205 | /* This driver doesn't need a VT switch to restore the mode on resume */ |
196 | info->skip_vt_switch = true; | 206 | info->skip_vt_switch = true; |
197 | 207 | ||
198 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); | 208 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); |
199 | drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); | 209 | drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); |
200 | 210 | ||
201 | /* If the object is shmemfs backed, it will have given us zeroed pages. | 211 | /* If the object is shmemfs backed, it will have given us zeroed pages. |
202 | * If the object is stolen however, it will be full of whatever | 212 | * If the object is stolen however, it will be full of whatever |
203 | * garbage was left in there. | 213 | * garbage was left in there. |
204 | */ | 214 | */ |
205 | if (ifbdev->fb->obj->stolen && !prealloc) | 215 | if (ifbdev->fb->obj->stolen && !prealloc) |
206 | memset_io(info->screen_base, 0, info->screen_size); | 216 | memset_io(info->screen_base, 0, info->screen_size); |
207 | 217 | ||
208 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ | 218 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ |
209 | 219 | ||
210 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n", | 220 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n", |
211 | fb->width, fb->height, | 221 | fb->width, fb->height, |
212 | i915_gem_obj_ggtt_offset(obj), obj); | 222 | i915_gem_obj_ggtt_offset(obj), obj); |
213 | 223 | ||
214 | mutex_unlock(&dev->struct_mutex); | 224 | mutex_unlock(&dev->struct_mutex); |
215 | vga_switcheroo_client_fb_set(dev->pdev, info); | 225 | vga_switcheroo_client_fb_set(dev->pdev, info); |
216 | return 0; | 226 | return 0; |
217 | 227 | ||
218 | out_unpin: | 228 | out_unpin: |
219 | i915_gem_object_ggtt_unpin(obj); | 229 | i915_gem_object_ggtt_unpin(obj); |
220 | drm_gem_object_unreference(&obj->base); | 230 | drm_gem_object_unreference(&obj->base); |
221 | out_unlock: | 231 | out_unlock: |
222 | mutex_unlock(&dev->struct_mutex); | 232 | mutex_unlock(&dev->struct_mutex); |
223 | return ret; | 233 | return ret; |
224 | } | 234 | } |
225 | 235 | ||
226 | /** Sets the color ramps on behalf of RandR */ | 236 | /** Sets the color ramps on behalf of RandR */ |
227 | static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 237 | static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
228 | u16 blue, int regno) | 238 | u16 blue, int regno) |
229 | { | 239 | { |
230 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 240 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
231 | 241 | ||
232 | intel_crtc->lut_r[regno] = red >> 8; | 242 | intel_crtc->lut_r[regno] = red >> 8; |
233 | intel_crtc->lut_g[regno] = green >> 8; | 243 | intel_crtc->lut_g[regno] = green >> 8; |
234 | intel_crtc->lut_b[regno] = blue >> 8; | 244 | intel_crtc->lut_b[regno] = blue >> 8; |
235 | } | 245 | } |
236 | 246 | ||
237 | static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | 247 | static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, |
238 | u16 *blue, int regno) | 248 | u16 *blue, int regno) |
239 | { | 249 | { |
240 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 250 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
241 | 251 | ||
242 | *red = intel_crtc->lut_r[regno] << 8; | 252 | *red = intel_crtc->lut_r[regno] << 8; |
243 | *green = intel_crtc->lut_g[regno] << 8; | 253 | *green = intel_crtc->lut_g[regno] << 8; |
244 | *blue = intel_crtc->lut_b[regno] << 8; | 254 | *blue = intel_crtc->lut_b[regno] << 8; |
245 | } | 255 | } |
246 | 256 | ||
247 | static struct drm_fb_helper_crtc * | 257 | static struct drm_fb_helper_crtc * |
248 | intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc) | 258 | intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc) |
249 | { | 259 | { |
250 | int i; | 260 | int i; |
251 | 261 | ||
252 | for (i = 0; i < fb_helper->crtc_count; i++) | 262 | for (i = 0; i < fb_helper->crtc_count; i++) |
253 | if (fb_helper->crtc_info[i].mode_set.crtc == crtc) | 263 | if (fb_helper->crtc_info[i].mode_set.crtc == crtc) |
254 | return &fb_helper->crtc_info[i]; | 264 | return &fb_helper->crtc_info[i]; |
255 | 265 | ||
256 | return NULL; | 266 | return NULL; |
257 | } | 267 | } |
258 | 268 | ||
/*
 * Try to read the BIOS display configuration and use it for the initial
 * fb configuration.
 *
 * The BIOS or boot loader will generally create an initial display
 * configuration for us that includes some set of active pipes and displays.
 * This routine tries to figure out which pipes and connectors are active
 * and stuffs them into the crtcs and modes array given to us by the
 * drm_fb_helper code.
 *
 * The overall sequence is:
 * intel_fbdev_init - from driver load
 * intel_fbdev_init_bios - initialize the intel_fbdev using BIOS data
 * drm_fb_helper_init - build fb helper structs
 * drm_fb_helper_single_add_all_connectors - more fb helper structs
 * intel_fbdev_initial_config - apply the config
 * drm_fb_helper_initial_config - call ->probe then register_framebuffer()
 * drm_setup_crtcs - build crtc config for fbdev
 * intel_fb_initial_config - find active connectors etc
 * drm_fb_helper_single_fb_probe - set up fbdev
 * intelfb_create - re-use or alloc fb, build out fbdev structs
 *
 * Note that we don't make special consideration whether we could actually
 * switch to the selected modes without a full modeset. E.g. when the display
 * is in VGA mode we need to recalculate watermarks and set a new high-res
 * framebuffer anyway.
 *
 * Returns true when the firmware config was adopted (crtcs/modes/enabled
 * filled in from the BIOS state), false to let the fb helper fall back to
 * its own mode selection.
 */
static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
				    struct drm_fb_helper_crtc **crtcs,
				    struct drm_display_mode **modes,
				    bool *enabled, int width, int height)
{
	struct drm_device *dev = fb_helper->dev;
	int i, j;
	bool *save_enabled;
	bool fallback = true;
	int num_connectors_enabled = 0;
	int num_connectors_detected = 0;

	/*
	 * If the user specified any force options, just bail here
	 * and use that config.
	 */
	for (i = 0; i < fb_helper->connector_count; i++) {
		struct drm_fb_helper_connector *fb_conn;
		struct drm_connector *connector;

		fb_conn = fb_helper->connector_info[i];
		connector = fb_conn->connector;

		if (!enabled[i])
			continue;

		if (connector->force != DRM_FORCE_UNSPECIFIED)
			return false;
	}

	/*
	 * Snapshot enabled[] so it can be restored verbatim if we decide
	 * to fall back to the helper's own configuration below.
	 */
	save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
			       GFP_KERNEL);
	if (!save_enabled)
		return false;

	/* NOTE: byte-wise copy relies on sizeof(bool) == 1 here. */
	memcpy(save_enabled, enabled, dev->mode_config.num_connector);

	for (i = 0; i < fb_helper->connector_count; i++) {
		struct drm_fb_helper_connector *fb_conn;
		struct drm_connector *connector;
		struct drm_encoder *encoder;
		struct drm_fb_helper_crtc *new_crtc;

		fb_conn = fb_helper->connector_info[i];
		connector = fb_conn->connector;

		if (connector->status == connector_status_connected)
			num_connectors_detected++;

		if (!enabled[i]) {
			DRM_DEBUG_KMS("connector %d not enabled, skipping\n",
				      connector->base.id);
			continue;
		}

		/* Only connectors the BIOS already routed to a crtc count. */
		encoder = connector->encoder;
		if (!encoder || WARN_ON(!encoder->crtc)) {
			DRM_DEBUG_KMS("connector %d has no encoder or crtc, skipping\n",
				      connector->base.id);
			enabled[i] = false;
			continue;
		}

		num_connectors_enabled++;

		new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);

		/*
		 * Make sure we're not trying to drive multiple connectors
		 * with a single CRTC, since our cloning support may not
		 * match the BIOS.
		 */
		for (j = 0; j < fb_helper->connector_count; j++) {
			if (crtcs[j] == new_crtc) {
				DRM_DEBUG_KMS("fallback: cloned configuration\n");
				fallback = true;
				goto out;
			}
		}

		DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
			      fb_conn->connector->base.id);

		/* go for command line mode first */
		modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);

		/* try for preferred next */
		if (!modes[i]) {
			DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
				      fb_conn->connector->base.id);
			modes[i] = drm_has_preferred_mode(fb_conn, width,
							  height);
		}

		/* last resort: use current mode */
		if (!modes[i]) {
			/*
			 * IMPORTANT: We want to use the adjusted mode (i.e.
			 * after the panel fitter upscaling) as the initial
			 * config, not the input mode, which is what crtc->mode
			 * usually contains. But since our current fastboot
			 * code puts a mode derived from the post-pfit timings
			 * into crtc->mode this works out correctly. We don't
			 * use hwmode anywhere right now, so use it for this
			 * since the fb helper layer wants a pointer to
			 * something we own.
			 */
			intel_mode_from_pipe_config(&encoder->crtc->hwmode,
						    &to_intel_crtc(encoder->crtc)->config);
			modes[i] = &encoder->crtc->hwmode;
		}
		crtcs[i] = new_crtc;

		DRM_DEBUG_KMS("connector %s on crtc %d: %s\n",
			      drm_get_connector_name(connector),
			      encoder->crtc->base.id,
			      modes[i]->name);

		/* At least one connector was successfully mapped. */
		fallback = false;
	}

	/*
	 * If the BIOS didn't enable everything it could, fall back to have the
	 * same user experiencing of lighting up as much as possible like the
	 * fbdev helper library.
	 */
	if (num_connectors_enabled != num_connectors_detected &&
	    num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
		DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
		DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
			      num_connectors_detected);
		fallback = true;
	}

out:
	if (fallback) {
		DRM_DEBUG_KMS("Not using firmware configuration\n");
		/* Undo any enabled[i] = false edits made above. */
		memcpy(enabled, save_enabled, dev->mode_config.num_connector);
		kfree(save_enabled);
		return false;
	}

	kfree(save_enabled);
	return true;
}
431 | 441 | ||
/* Callbacks wiring the i915 fbdev implementation into drm_fb_helper. */
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
	.initial_config = intel_fb_initial_config,
	.gamma_set = intel_crtc_fb_gamma_set,
	.gamma_get = intel_crtc_fb_gamma_get,
	.fb_probe = intelfb_create,
};
438 | 448 | ||
439 | static void intel_fbdev_destroy(struct drm_device *dev, | 449 | static void intel_fbdev_destroy(struct drm_device *dev, |
440 | struct intel_fbdev *ifbdev) | 450 | struct intel_fbdev *ifbdev) |
441 | { | 451 | { |
442 | if (ifbdev->helper.fbdev) { | 452 | if (ifbdev->helper.fbdev) { |
443 | struct fb_info *info = ifbdev->helper.fbdev; | 453 | struct fb_info *info = ifbdev->helper.fbdev; |
444 | 454 | ||
445 | unregister_framebuffer(info); | 455 | unregister_framebuffer(info); |
446 | iounmap(info->screen_base); | 456 | iounmap(info->screen_base); |
447 | if (info->cmap.len) | 457 | if (info->cmap.len) |
448 | fb_dealloc_cmap(&info->cmap); | 458 | fb_dealloc_cmap(&info->cmap); |
449 | 459 | ||
450 | framebuffer_release(info); | 460 | framebuffer_release(info); |
451 | } | 461 | } |
452 | 462 | ||
453 | drm_fb_helper_fini(&ifbdev->helper); | 463 | drm_fb_helper_fini(&ifbdev->helper); |
454 | 464 | ||
455 | drm_framebuffer_unregister_private(&ifbdev->fb->base); | 465 | drm_framebuffer_unregister_private(&ifbdev->fb->base); |
456 | drm_framebuffer_remove(&ifbdev->fb->base); | 466 | drm_framebuffer_remove(&ifbdev->fb->base); |
457 | } | 467 | } |
458 | 468 | ||
/*
 * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
 * The core display code will have read out the current plane configuration,
 * so we use that to figure out if there's an object for us to use as the
 * fb, and if so, we re-use it for the fbdev configuration.
 *
 * Note we only support a single fb shared across pipes for boot (mostly for
 * fbcon), so we just find the biggest and use that.
 *
 * Returns true when a suitable BIOS fb was found and stashed in ifbdev
 * (with preferred_bpp set and a reference taken), false otherwise.
 */
static bool intel_fbdev_init_bios(struct drm_device *dev,
				  struct intel_fbdev *ifbdev)
{
	struct intel_framebuffer *fb = NULL;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct intel_plane_config *plane_config = NULL;
	unsigned int max_size = 0;

	/* Inheriting the BIOS fb is opt-in via the fastboot module param. */
	if (!i915.fastboot)
		return false;

	/* Find the largest fb */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);

		if (!intel_crtc->active || !crtc->primary->fb) {
			DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
				      pipe_name(intel_crtc->pipe));
			continue;
		}

		if (intel_crtc->plane_config.size > max_size) {
			DRM_DEBUG_KMS("found possible fb from plane %c\n",
				      pipe_name(intel_crtc->pipe));
			plane_config = &intel_crtc->plane_config;
			fb = to_intel_framebuffer(crtc->primary->fb);
			max_size = plane_config->size;
		}
	}

	if (!fb) {
		DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
		goto out;
	}

	/* Now make sure all the pipes will fit into it */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		unsigned int cur_size;

		intel_crtc = to_intel_crtc(crtc);

		if (!intel_crtc->active) {
			DRM_DEBUG_KMS("pipe %c not active, skipping\n",
				      pipe_name(intel_crtc->pipe));
			continue;
		}

		DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
			      pipe_name(intel_crtc->pipe));

		/*
		 * See if the plane fb we found above will fit on this
		 * pipe. Note we need to use the selected fb's pitch and bpp
		 * rather than the current pipe's, since they differ.
		 */
		cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay;
		cur_size = cur_size * fb->base.bits_per_pixel / 8;
		if (fb->base.pitches[0] < cur_size) {
			DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
				      pipe_name(intel_crtc->pipe),
				      cur_size, fb->base.pitches[0]);
			plane_config = NULL;
			fb = NULL;
			break;
		}

		/*
		 * Vertical size must honour the tile-row alignment of the
		 * chosen fb (16 rows on gen2, 8 otherwise when tiled).
		 */
		cur_size = intel_crtc->config.adjusted_mode.crtc_vdisplay;
		cur_size = ALIGN(cur_size, plane_config->tiled ? (IS_GEN2(dev) ? 16 : 8) : 1);
		cur_size *= fb->base.pitches[0];
		DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
			      pipe_name(intel_crtc->pipe),
			      intel_crtc->config.adjusted_mode.crtc_hdisplay,
			      intel_crtc->config.adjusted_mode.crtc_vdisplay,
			      fb->base.bits_per_pixel,
			      cur_size);

		if (cur_size > max_size) {
			DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
				      pipe_name(intel_crtc->pipe),
				      cur_size, max_size);
			plane_config = NULL;
			fb = NULL;
			break;
		}

		DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
			      pipe_name(intel_crtc->pipe),
			      max_size, cur_size);
	}

	if (!fb) {
		DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
		goto out;
	}

	ifbdev->preferred_bpp = fb->base.bits_per_pixel;
	ifbdev->fb = fb;

	/* Keep the BIOS fb alive for the lifetime of the fbdev. */
	drm_framebuffer_reference(&ifbdev->fb->base);

	/* Final pass to check if any active pipes don't have fbs */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);

		if (!intel_crtc->active)
			continue;

		WARN(!crtc->primary->fb,
		     "re-used BIOS config but lost an fb on crtc %d\n",
		     crtc->base.id);
	}


	DRM_DEBUG_KMS("using BIOS fb for initial console\n");
	return true;

out:

	return false;
}
589 | 599 | ||
590 | int intel_fbdev_init(struct drm_device *dev) | 600 | int intel_fbdev_init(struct drm_device *dev) |
591 | { | 601 | { |
592 | struct intel_fbdev *ifbdev; | 602 | struct intel_fbdev *ifbdev; |
593 | struct drm_i915_private *dev_priv = dev->dev_private; | 603 | struct drm_i915_private *dev_priv = dev->dev_private; |
594 | int ret; | 604 | int ret; |
595 | 605 | ||
596 | if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0)) | 606 | if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0)) |
597 | return -ENODEV; | 607 | return -ENODEV; |
598 | 608 | ||
599 | ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); | 609 | ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); |
600 | if (ifbdev == NULL) | 610 | if (ifbdev == NULL) |
601 | return -ENOMEM; | 611 | return -ENOMEM; |
602 | 612 | ||
603 | ifbdev->helper.funcs = &intel_fb_helper_funcs; | 613 | ifbdev->helper.funcs = &intel_fb_helper_funcs; |
604 | if (!intel_fbdev_init_bios(dev, ifbdev)) | 614 | if (!intel_fbdev_init_bios(dev, ifbdev)) |
605 | ifbdev->preferred_bpp = 32; | 615 | ifbdev->preferred_bpp = 32; |
606 | 616 | ||
607 | ret = drm_fb_helper_init(dev, &ifbdev->helper, | 617 | ret = drm_fb_helper_init(dev, &ifbdev->helper, |
608 | INTEL_INFO(dev)->num_pipes, 4); | 618 | INTEL_INFO(dev)->num_pipes, 4); |
609 | if (ret) { | 619 | if (ret) { |
610 | kfree(ifbdev); | 620 | kfree(ifbdev); |
611 | return ret; | 621 | return ret; |
612 | } | 622 | } |
613 | 623 | ||
614 | dev_priv->fbdev = ifbdev; | 624 | dev_priv->fbdev = ifbdev; |
615 | drm_fb_helper_single_add_all_connectors(&ifbdev->helper); | 625 | drm_fb_helper_single_add_all_connectors(&ifbdev->helper); |
616 | 626 | ||
617 | return 0; | 627 | return 0; |
618 | } | 628 | } |
619 | 629 | ||
620 | void intel_fbdev_initial_config(struct drm_device *dev) | 630 | void intel_fbdev_initial_config(struct drm_device *dev) |
621 | { | 631 | { |
622 | struct drm_i915_private *dev_priv = dev->dev_private; | 632 | struct drm_i915_private *dev_priv = dev->dev_private; |
623 | struct intel_fbdev *ifbdev = dev_priv->fbdev; | 633 | struct intel_fbdev *ifbdev = dev_priv->fbdev; |
624 | 634 | ||
625 | /* Due to peculiar init order wrt to hpd handling this is separate. */ | 635 | /* Due to peculiar init order wrt to hpd handling this is separate. */ |
626 | drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp); | 636 | drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp); |
627 | } | 637 | } |
628 | 638 | ||
629 | void intel_fbdev_fini(struct drm_device *dev) | 639 | void intel_fbdev_fini(struct drm_device *dev) |
630 | { | 640 | { |
631 | struct drm_i915_private *dev_priv = dev->dev_private; | 641 | struct drm_i915_private *dev_priv = dev->dev_private; |
632 | if (!dev_priv->fbdev) | 642 | if (!dev_priv->fbdev) |
633 | return; | 643 | return; |
634 | 644 | ||
635 | intel_fbdev_destroy(dev, dev_priv->fbdev); | 645 | intel_fbdev_destroy(dev, dev_priv->fbdev); |
636 | kfree(dev_priv->fbdev); | 646 | kfree(dev_priv->fbdev); |
637 | dev_priv->fbdev = NULL; | 647 | dev_priv->fbdev = NULL; |
638 | } | 648 | } |
639 | 649 | ||
640 | void intel_fbdev_set_suspend(struct drm_device *dev, int state) | 650 | void intel_fbdev_set_suspend(struct drm_device *dev, int state) |
641 | { | 651 | { |
642 | struct drm_i915_private *dev_priv = dev->dev_private; | 652 | struct drm_i915_private *dev_priv = dev->dev_private; |
643 | struct intel_fbdev *ifbdev = dev_priv->fbdev; | 653 | struct intel_fbdev *ifbdev = dev_priv->fbdev; |
644 | struct fb_info *info; | 654 | struct fb_info *info; |
645 | 655 | ||
646 | if (!ifbdev) | 656 | if (!ifbdev) |
647 | return; | 657 | return; |
648 | 658 | ||
649 | info = ifbdev->helper.fbdev; | 659 | info = ifbdev->helper.fbdev; |
650 | 660 | ||
651 | /* On resume from hibernation: If the object is shmemfs backed, it has | 661 | /* On resume from hibernation: If the object is shmemfs backed, it has |
652 | * been restored from swap. If the object is stolen however, it will be | 662 | * been restored from swap. If the object is stolen however, it will be |
653 | * full of whatever garbage was left in there. | 663 | * full of whatever garbage was left in there. |
654 | */ | 664 | */ |
655 | if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen) | 665 | if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen) |
656 | memset_io(info->screen_base, 0, info->screen_size); | 666 | memset_io(info->screen_base, 0, info->screen_size); |
657 | 667 | ||
658 | fb_set_suspend(info, state); | 668 | fb_set_suspend(info, state); |
659 | } | 669 | } |
660 | 670 | ||
661 | void intel_fbdev_output_poll_changed(struct drm_device *dev) | 671 | void intel_fbdev_output_poll_changed(struct drm_device *dev) |
662 | { | 672 | { |
663 | struct drm_i915_private *dev_priv = dev->dev_private; | 673 | struct drm_i915_private *dev_priv = dev->dev_private; |
664 | if (dev_priv->fbdev) | 674 | if (dev_priv->fbdev) |
665 | drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); | 675 | drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); |
666 | } | 676 | } |
667 | 677 | ||
668 | void intel_fbdev_restore_mode(struct drm_device *dev) | 678 | void intel_fbdev_restore_mode(struct drm_device *dev) |
669 | { | 679 | { |
670 | int ret; | 680 | int ret; |
671 | struct drm_i915_private *dev_priv = dev->dev_private; | 681 | struct drm_i915_private *dev_priv = dev->dev_private; |
672 | 682 | ||
673 | if (!dev_priv->fbdev) | 683 | if (!dev_priv->fbdev) |
674 | return; | 684 | return; |
675 | 685 | ||
676 | drm_modeset_lock_all(dev); | 686 | drm_modeset_lock_all(dev); |
677 | 687 | ||
678 | ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); | 688 | ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); |
679 | if (ret) | 689 | if (ret) |
680 | DRM_DEBUG("failed to restore crtc mode\n"); | 690 | DRM_DEBUG("failed to restore crtc mode\n"); |
681 | 691 | ||
682 | drm_modeset_unlock_all(dev); | 692 | drm_modeset_unlock_all(dev); |
683 | } | 693 | } |
684 | 694 |
drivers/gpu/drm/i915/intel_hdmi.c
1 | /* | 1 | /* |
2 | * Copyright 2006 Dave Airlie <airlied@linux.ie> | 2 | * Copyright 2006 Dave Airlie <airlied@linux.ie> |
3 | * Copyright © 2006-2009 Intel Corporation | 3 | * Copyright © 2006-2009 Intel Corporation |
4 | * | 4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a | 5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the "Software"), | 6 | * copy of this software and associated documentation files (the "Software"), |
7 | * to deal in the Software without restriction, including without limitation | 7 | * to deal in the Software without restriction, including without limitation |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * and/or sell copies of the Software, and to permit persons to whom the | 9 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * Software is furnished to do so, subject to the following conditions: | 10 | * Software is furnished to do so, subject to the following conditions: |
11 | * | 11 | * |
12 | * The above copyright notice and this permission notice (including the next | 12 | * The above copyright notice and this permission notice (including the next |
13 | * paragraph) shall be included in all copies or substantial portions of the | 13 | * paragraph) shall be included in all copies or substantial portions of the |
14 | * Software. | 14 | * Software. |
15 | * | 15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | 19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | 21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
22 | * DEALINGS IN THE SOFTWARE. | 22 | * DEALINGS IN THE SOFTWARE. |
23 | * | 23 | * |
24 | * Authors: | 24 | * Authors: |
25 | * Eric Anholt <eric@anholt.net> | 25 | * Eric Anholt <eric@anholt.net> |
26 | * Jesse Barnes <jesse.barnes@intel.com> | 26 | * Jesse Barnes <jesse.barnes@intel.com> |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #include <linux/i2c.h> | 29 | #include <linux/i2c.h> |
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
32 | #include <linux/hdmi.h> | 32 | #include <linux/hdmi.h> |
33 | #include <drm/drmP.h> | 33 | #include <drm/drmP.h> |
34 | #include <drm/drm_crtc.h> | 34 | #include <drm/drm_crtc.h> |
35 | #include <drm/drm_edid.h> | 35 | #include <drm/drm_edid.h> |
36 | #include "intel_drv.h" | 36 | #include "intel_drv.h" |
37 | #include <drm/i915_drm.h> | 37 | #include <drm/i915_drm.h> |
38 | #include "i915_drv.h" | 38 | #include "i915_drv.h" |
39 | 39 | ||
40 | static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi) | 40 | static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi) |
41 | { | 41 | { |
42 | return hdmi_to_dig_port(intel_hdmi)->base.base.dev; | 42 | return hdmi_to_dig_port(intel_hdmi)->base.base.dev; |
43 | } | 43 | } |
44 | 44 | ||
45 | static void | 45 | static void |
46 | assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) | 46 | assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) |
47 | { | 47 | { |
48 | struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); | 48 | struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); |
49 | struct drm_i915_private *dev_priv = dev->dev_private; | 49 | struct drm_i915_private *dev_priv = dev->dev_private; |
50 | uint32_t enabled_bits; | 50 | uint32_t enabled_bits; |
51 | 51 | ||
52 | enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; | 52 | enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; |
53 | 53 | ||
54 | WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits, | 54 | WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits, |
55 | "HDMI port enabled, expecting disabled\n"); | 55 | "HDMI port enabled, expecting disabled\n"); |
56 | } | 56 | } |
57 | 57 | ||
58 | struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) | 58 | struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) |
59 | { | 59 | { |
60 | struct intel_digital_port *intel_dig_port = | 60 | struct intel_digital_port *intel_dig_port = |
61 | container_of(encoder, struct intel_digital_port, base.base); | 61 | container_of(encoder, struct intel_digital_port, base.base); |
62 | return &intel_dig_port->hdmi; | 62 | return &intel_dig_port->hdmi; |
63 | } | 63 | } |
64 | 64 | ||
65 | static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) | 65 | static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) |
66 | { | 66 | { |
67 | return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base); | 67 | return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base); |
68 | } | 68 | } |
69 | 69 | ||
70 | static u32 g4x_infoframe_index(enum hdmi_infoframe_type type) | 70 | static u32 g4x_infoframe_index(enum hdmi_infoframe_type type) |
71 | { | 71 | { |
72 | switch (type) { | 72 | switch (type) { |
73 | case HDMI_INFOFRAME_TYPE_AVI: | 73 | case HDMI_INFOFRAME_TYPE_AVI: |
74 | return VIDEO_DIP_SELECT_AVI; | 74 | return VIDEO_DIP_SELECT_AVI; |
75 | case HDMI_INFOFRAME_TYPE_SPD: | 75 | case HDMI_INFOFRAME_TYPE_SPD: |
76 | return VIDEO_DIP_SELECT_SPD; | 76 | return VIDEO_DIP_SELECT_SPD; |
77 | case HDMI_INFOFRAME_TYPE_VENDOR: | 77 | case HDMI_INFOFRAME_TYPE_VENDOR: |
78 | return VIDEO_DIP_SELECT_VENDOR; | 78 | return VIDEO_DIP_SELECT_VENDOR; |
79 | default: | 79 | default: |
80 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); | 80 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); |
81 | return 0; | 81 | return 0; |
82 | } | 82 | } |
83 | } | 83 | } |
84 | 84 | ||
85 | static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type) | 85 | static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type) |
86 | { | 86 | { |
87 | switch (type) { | 87 | switch (type) { |
88 | case HDMI_INFOFRAME_TYPE_AVI: | 88 | case HDMI_INFOFRAME_TYPE_AVI: |
89 | return VIDEO_DIP_ENABLE_AVI; | 89 | return VIDEO_DIP_ENABLE_AVI; |
90 | case HDMI_INFOFRAME_TYPE_SPD: | 90 | case HDMI_INFOFRAME_TYPE_SPD: |
91 | return VIDEO_DIP_ENABLE_SPD; | 91 | return VIDEO_DIP_ENABLE_SPD; |
92 | case HDMI_INFOFRAME_TYPE_VENDOR: | 92 | case HDMI_INFOFRAME_TYPE_VENDOR: |
93 | return VIDEO_DIP_ENABLE_VENDOR; | 93 | return VIDEO_DIP_ENABLE_VENDOR; |
94 | default: | 94 | default: |
95 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); | 95 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); |
96 | return 0; | 96 | return 0; |
97 | } | 97 | } |
98 | } | 98 | } |
99 | 99 | ||
100 | static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type) | 100 | static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type) |
101 | { | 101 | { |
102 | switch (type) { | 102 | switch (type) { |
103 | case HDMI_INFOFRAME_TYPE_AVI: | 103 | case HDMI_INFOFRAME_TYPE_AVI: |
104 | return VIDEO_DIP_ENABLE_AVI_HSW; | 104 | return VIDEO_DIP_ENABLE_AVI_HSW; |
105 | case HDMI_INFOFRAME_TYPE_SPD: | 105 | case HDMI_INFOFRAME_TYPE_SPD: |
106 | return VIDEO_DIP_ENABLE_SPD_HSW; | 106 | return VIDEO_DIP_ENABLE_SPD_HSW; |
107 | case HDMI_INFOFRAME_TYPE_VENDOR: | 107 | case HDMI_INFOFRAME_TYPE_VENDOR: |
108 | return VIDEO_DIP_ENABLE_VS_HSW; | 108 | return VIDEO_DIP_ENABLE_VS_HSW; |
109 | default: | 109 | default: |
110 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); | 110 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); |
111 | return 0; | 111 | return 0; |
112 | } | 112 | } |
113 | } | 113 | } |
114 | 114 | ||
115 | static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type, | 115 | static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type, |
116 | enum transcoder cpu_transcoder, | 116 | enum transcoder cpu_transcoder, |
117 | struct drm_i915_private *dev_priv) | 117 | struct drm_i915_private *dev_priv) |
118 | { | 118 | { |
119 | switch (type) { | 119 | switch (type) { |
120 | case HDMI_INFOFRAME_TYPE_AVI: | 120 | case HDMI_INFOFRAME_TYPE_AVI: |
121 | return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder); | 121 | return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder); |
122 | case HDMI_INFOFRAME_TYPE_SPD: | 122 | case HDMI_INFOFRAME_TYPE_SPD: |
123 | return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder); | 123 | return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder); |
124 | case HDMI_INFOFRAME_TYPE_VENDOR: | 124 | case HDMI_INFOFRAME_TYPE_VENDOR: |
125 | return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder); | 125 | return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder); |
126 | default: | 126 | default: |
127 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); | 127 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); |
128 | return 0; | 128 | return 0; |
129 | } | 129 | } |
130 | } | 130 | } |
131 | 131 | ||
132 | static void g4x_write_infoframe(struct drm_encoder *encoder, | 132 | static void g4x_write_infoframe(struct drm_encoder *encoder, |
133 | enum hdmi_infoframe_type type, | 133 | enum hdmi_infoframe_type type, |
134 | const void *frame, ssize_t len) | 134 | const void *frame, ssize_t len) |
135 | { | 135 | { |
136 | const uint32_t *data = frame; | 136 | const uint32_t *data = frame; |
137 | struct drm_device *dev = encoder->dev; | 137 | struct drm_device *dev = encoder->dev; |
138 | struct drm_i915_private *dev_priv = dev->dev_private; | 138 | struct drm_i915_private *dev_priv = dev->dev_private; |
139 | u32 val = I915_READ(VIDEO_DIP_CTL); | 139 | u32 val = I915_READ(VIDEO_DIP_CTL); |
140 | int i; | 140 | int i; |
141 | 141 | ||
142 | WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); | 142 | WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); |
143 | 143 | ||
144 | val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ | 144 | val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ |
145 | val |= g4x_infoframe_index(type); | 145 | val |= g4x_infoframe_index(type); |
146 | 146 | ||
147 | val &= ~g4x_infoframe_enable(type); | 147 | val &= ~g4x_infoframe_enable(type); |
148 | 148 | ||
149 | I915_WRITE(VIDEO_DIP_CTL, val); | 149 | I915_WRITE(VIDEO_DIP_CTL, val); |
150 | 150 | ||
151 | mmiowb(); | 151 | mmiowb(); |
152 | for (i = 0; i < len; i += 4) { | 152 | for (i = 0; i < len; i += 4) { |
153 | I915_WRITE(VIDEO_DIP_DATA, *data); | 153 | I915_WRITE(VIDEO_DIP_DATA, *data); |
154 | data++; | 154 | data++; |
155 | } | 155 | } |
156 | /* Write every possible data byte to force correct ECC calculation. */ | 156 | /* Write every possible data byte to force correct ECC calculation. */ |
157 | for (; i < VIDEO_DIP_DATA_SIZE; i += 4) | 157 | for (; i < VIDEO_DIP_DATA_SIZE; i += 4) |
158 | I915_WRITE(VIDEO_DIP_DATA, 0); | 158 | I915_WRITE(VIDEO_DIP_DATA, 0); |
159 | mmiowb(); | 159 | mmiowb(); |
160 | 160 | ||
161 | val |= g4x_infoframe_enable(type); | 161 | val |= g4x_infoframe_enable(type); |
162 | val &= ~VIDEO_DIP_FREQ_MASK; | 162 | val &= ~VIDEO_DIP_FREQ_MASK; |
163 | val |= VIDEO_DIP_FREQ_VSYNC; | 163 | val |= VIDEO_DIP_FREQ_VSYNC; |
164 | 164 | ||
165 | I915_WRITE(VIDEO_DIP_CTL, val); | 165 | I915_WRITE(VIDEO_DIP_CTL, val); |
166 | POSTING_READ(VIDEO_DIP_CTL); | 166 | POSTING_READ(VIDEO_DIP_CTL); |
167 | } | 167 | } |
168 | 168 | ||
169 | static void ibx_write_infoframe(struct drm_encoder *encoder, | 169 | static void ibx_write_infoframe(struct drm_encoder *encoder, |
170 | enum hdmi_infoframe_type type, | 170 | enum hdmi_infoframe_type type, |
171 | const void *frame, ssize_t len) | 171 | const void *frame, ssize_t len) |
172 | { | 172 | { |
173 | const uint32_t *data = frame; | 173 | const uint32_t *data = frame; |
174 | struct drm_device *dev = encoder->dev; | 174 | struct drm_device *dev = encoder->dev; |
175 | struct drm_i915_private *dev_priv = dev->dev_private; | 175 | struct drm_i915_private *dev_priv = dev->dev_private; |
176 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 176 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
177 | int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | 177 | int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); |
178 | u32 val = I915_READ(reg); | 178 | u32 val = I915_READ(reg); |
179 | 179 | ||
180 | WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); | 180 | WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); |
181 | 181 | ||
182 | val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ | 182 | val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ |
183 | val |= g4x_infoframe_index(type); | 183 | val |= g4x_infoframe_index(type); |
184 | 184 | ||
185 | val &= ~g4x_infoframe_enable(type); | 185 | val &= ~g4x_infoframe_enable(type); |
186 | 186 | ||
187 | I915_WRITE(reg, val); | 187 | I915_WRITE(reg, val); |
188 | 188 | ||
189 | mmiowb(); | 189 | mmiowb(); |
190 | for (i = 0; i < len; i += 4) { | 190 | for (i = 0; i < len; i += 4) { |
191 | I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); | 191 | I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); |
192 | data++; | 192 | data++; |
193 | } | 193 | } |
194 | /* Write every possible data byte to force correct ECC calculation. */ | 194 | /* Write every possible data byte to force correct ECC calculation. */ |
195 | for (; i < VIDEO_DIP_DATA_SIZE; i += 4) | 195 | for (; i < VIDEO_DIP_DATA_SIZE; i += 4) |
196 | I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); | 196 | I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); |
197 | mmiowb(); | 197 | mmiowb(); |
198 | 198 | ||
199 | val |= g4x_infoframe_enable(type); | 199 | val |= g4x_infoframe_enable(type); |
200 | val &= ~VIDEO_DIP_FREQ_MASK; | 200 | val &= ~VIDEO_DIP_FREQ_MASK; |
201 | val |= VIDEO_DIP_FREQ_VSYNC; | 201 | val |= VIDEO_DIP_FREQ_VSYNC; |
202 | 202 | ||
203 | I915_WRITE(reg, val); | 203 | I915_WRITE(reg, val); |
204 | POSTING_READ(reg); | 204 | POSTING_READ(reg); |
205 | } | 205 | } |
206 | 206 | ||
207 | static void cpt_write_infoframe(struct drm_encoder *encoder, | 207 | static void cpt_write_infoframe(struct drm_encoder *encoder, |
208 | enum hdmi_infoframe_type type, | 208 | enum hdmi_infoframe_type type, |
209 | const void *frame, ssize_t len) | 209 | const void *frame, ssize_t len) |
210 | { | 210 | { |
211 | const uint32_t *data = frame; | 211 | const uint32_t *data = frame; |
212 | struct drm_device *dev = encoder->dev; | 212 | struct drm_device *dev = encoder->dev; |
213 | struct drm_i915_private *dev_priv = dev->dev_private; | 213 | struct drm_i915_private *dev_priv = dev->dev_private; |
214 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 214 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
215 | int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | 215 | int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); |
216 | u32 val = I915_READ(reg); | 216 | u32 val = I915_READ(reg); |
217 | 217 | ||
218 | WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); | 218 | WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); |
219 | 219 | ||
220 | val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ | 220 | val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ |
221 | val |= g4x_infoframe_index(type); | 221 | val |= g4x_infoframe_index(type); |
222 | 222 | ||
223 | /* The DIP control register spec says that we need to update the AVI | 223 | /* The DIP control register spec says that we need to update the AVI |
224 | * infoframe without clearing its enable bit */ | 224 | * infoframe without clearing its enable bit */ |
225 | if (type != HDMI_INFOFRAME_TYPE_AVI) | 225 | if (type != HDMI_INFOFRAME_TYPE_AVI) |
226 | val &= ~g4x_infoframe_enable(type); | 226 | val &= ~g4x_infoframe_enable(type); |
227 | 227 | ||
228 | I915_WRITE(reg, val); | 228 | I915_WRITE(reg, val); |
229 | 229 | ||
230 | mmiowb(); | 230 | mmiowb(); |
231 | for (i = 0; i < len; i += 4) { | 231 | for (i = 0; i < len; i += 4) { |
232 | I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); | 232 | I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); |
233 | data++; | 233 | data++; |
234 | } | 234 | } |
235 | /* Write every possible data byte to force correct ECC calculation. */ | 235 | /* Write every possible data byte to force correct ECC calculation. */ |
236 | for (; i < VIDEO_DIP_DATA_SIZE; i += 4) | 236 | for (; i < VIDEO_DIP_DATA_SIZE; i += 4) |
237 | I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); | 237 | I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); |
238 | mmiowb(); | 238 | mmiowb(); |
239 | 239 | ||
240 | val |= g4x_infoframe_enable(type); | 240 | val |= g4x_infoframe_enable(type); |
241 | val &= ~VIDEO_DIP_FREQ_MASK; | 241 | val &= ~VIDEO_DIP_FREQ_MASK; |
242 | val |= VIDEO_DIP_FREQ_VSYNC; | 242 | val |= VIDEO_DIP_FREQ_VSYNC; |
243 | 243 | ||
244 | I915_WRITE(reg, val); | 244 | I915_WRITE(reg, val); |
245 | POSTING_READ(reg); | 245 | POSTING_READ(reg); |
246 | } | 246 | } |
247 | 247 | ||
248 | static void vlv_write_infoframe(struct drm_encoder *encoder, | 248 | static void vlv_write_infoframe(struct drm_encoder *encoder, |
249 | enum hdmi_infoframe_type type, | 249 | enum hdmi_infoframe_type type, |
250 | const void *frame, ssize_t len) | 250 | const void *frame, ssize_t len) |
251 | { | 251 | { |
252 | const uint32_t *data = frame; | 252 | const uint32_t *data = frame; |
253 | struct drm_device *dev = encoder->dev; | 253 | struct drm_device *dev = encoder->dev; |
254 | struct drm_i915_private *dev_priv = dev->dev_private; | 254 | struct drm_i915_private *dev_priv = dev->dev_private; |
255 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 255 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
256 | int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); | 256 | int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); |
257 | u32 val = I915_READ(reg); | 257 | u32 val = I915_READ(reg); |
258 | 258 | ||
259 | WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); | 259 | WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); |
260 | 260 | ||
261 | val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ | 261 | val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ |
262 | val |= g4x_infoframe_index(type); | 262 | val |= g4x_infoframe_index(type); |
263 | 263 | ||
264 | val &= ~g4x_infoframe_enable(type); | 264 | val &= ~g4x_infoframe_enable(type); |
265 | 265 | ||
266 | I915_WRITE(reg, val); | 266 | I915_WRITE(reg, val); |
267 | 267 | ||
268 | mmiowb(); | 268 | mmiowb(); |
269 | for (i = 0; i < len; i += 4) { | 269 | for (i = 0; i < len; i += 4) { |
270 | I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); | 270 | I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); |
271 | data++; | 271 | data++; |
272 | } | 272 | } |
273 | /* Write every possible data byte to force correct ECC calculation. */ | 273 | /* Write every possible data byte to force correct ECC calculation. */ |
274 | for (; i < VIDEO_DIP_DATA_SIZE; i += 4) | 274 | for (; i < VIDEO_DIP_DATA_SIZE; i += 4) |
275 | I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0); | 275 | I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0); |
276 | mmiowb(); | 276 | mmiowb(); |
277 | 277 | ||
278 | val |= g4x_infoframe_enable(type); | 278 | val |= g4x_infoframe_enable(type); |
279 | val &= ~VIDEO_DIP_FREQ_MASK; | 279 | val &= ~VIDEO_DIP_FREQ_MASK; |
280 | val |= VIDEO_DIP_FREQ_VSYNC; | 280 | val |= VIDEO_DIP_FREQ_VSYNC; |
281 | 281 | ||
282 | I915_WRITE(reg, val); | 282 | I915_WRITE(reg, val); |
283 | POSTING_READ(reg); | 283 | POSTING_READ(reg); |
284 | } | 284 | } |
285 | 285 | ||
286 | static void hsw_write_infoframe(struct drm_encoder *encoder, | 286 | static void hsw_write_infoframe(struct drm_encoder *encoder, |
287 | enum hdmi_infoframe_type type, | 287 | enum hdmi_infoframe_type type, |
288 | const void *frame, ssize_t len) | 288 | const void *frame, ssize_t len) |
289 | { | 289 | { |
290 | const uint32_t *data = frame; | 290 | const uint32_t *data = frame; |
291 | struct drm_device *dev = encoder->dev; | 291 | struct drm_device *dev = encoder->dev; |
292 | struct drm_i915_private *dev_priv = dev->dev_private; | 292 | struct drm_i915_private *dev_priv = dev->dev_private; |
293 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 293 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
294 | u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder); | 294 | u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder); |
295 | u32 data_reg; | 295 | u32 data_reg; |
296 | int i; | 296 | int i; |
297 | u32 val = I915_READ(ctl_reg); | 297 | u32 val = I915_READ(ctl_reg); |
298 | 298 | ||
299 | data_reg = hsw_infoframe_data_reg(type, | 299 | data_reg = hsw_infoframe_data_reg(type, |
300 | intel_crtc->config.cpu_transcoder, | 300 | intel_crtc->config.cpu_transcoder, |
301 | dev_priv); | 301 | dev_priv); |
302 | if (data_reg == 0) | 302 | if (data_reg == 0) |
303 | return; | 303 | return; |
304 | 304 | ||
305 | val &= ~hsw_infoframe_enable(type); | 305 | val &= ~hsw_infoframe_enable(type); |
306 | I915_WRITE(ctl_reg, val); | 306 | I915_WRITE(ctl_reg, val); |
307 | 307 | ||
308 | mmiowb(); | 308 | mmiowb(); |
309 | for (i = 0; i < len; i += 4) { | 309 | for (i = 0; i < len; i += 4) { |
310 | I915_WRITE(data_reg + i, *data); | 310 | I915_WRITE(data_reg + i, *data); |
311 | data++; | 311 | data++; |
312 | } | 312 | } |
313 | /* Write every possible data byte to force correct ECC calculation. */ | 313 | /* Write every possible data byte to force correct ECC calculation. */ |
314 | for (; i < VIDEO_DIP_DATA_SIZE; i += 4) | 314 | for (; i < VIDEO_DIP_DATA_SIZE; i += 4) |
315 | I915_WRITE(data_reg + i, 0); | 315 | I915_WRITE(data_reg + i, 0); |
316 | mmiowb(); | 316 | mmiowb(); |
317 | 317 | ||
318 | val |= hsw_infoframe_enable(type); | 318 | val |= hsw_infoframe_enable(type); |
319 | I915_WRITE(ctl_reg, val); | 319 | I915_WRITE(ctl_reg, val); |
320 | POSTING_READ(ctl_reg); | 320 | POSTING_READ(ctl_reg); |
321 | } | 321 | } |
322 | 322 | ||
323 | /* | 323 | /* |
324 | * The data we write to the DIP data buffer registers is 1 byte bigger than the | 324 | * The data we write to the DIP data buffer registers is 1 byte bigger than the |
325 | * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting | 325 | * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting |
326 | * at 0). It's also a byte used by DisplayPort so the same DIP registers can be | 326 | * at 0). It's also a byte used by DisplayPort so the same DIP registers can be |
327 | * used for both technologies. | 327 | * used for both technologies. |
328 | * | 328 | * |
329 | * DW0: Reserved/ECC/DP | HB2 | HB1 | HB0 | 329 | * DW0: Reserved/ECC/DP | HB2 | HB1 | HB0 |
330 | * DW1: DB3 | DB2 | DB1 | DB0 | 330 | * DW1: DB3 | DB2 | DB1 | DB0 |
331 | * DW2: DB7 | DB6 | DB5 | DB4 | 331 | * DW2: DB7 | DB6 | DB5 | DB4 |
332 | * DW3: ... | 332 | * DW3: ... |
333 | * | 333 | * |
334 | * (HB is Header Byte, DB is Data Byte) | 334 | * (HB is Header Byte, DB is Data Byte) |
335 | * | 335 | * |
336 | * The hdmi pack() functions don't know about that hardware specific hole so we | 336 | * The hdmi pack() functions don't know about that hardware specific hole so we |
337 | * trick them by giving an offset into the buffer and moving back the header | 337 | * trick them by giving an offset into the buffer and moving back the header |
338 | * bytes by one. | 338 | * bytes by one. |
339 | */ | 339 | */ |
340 | static void intel_write_infoframe(struct drm_encoder *encoder, | 340 | static void intel_write_infoframe(struct drm_encoder *encoder, |
341 | union hdmi_infoframe *frame) | 341 | union hdmi_infoframe *frame) |
342 | { | 342 | { |
343 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 343 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
344 | uint8_t buffer[VIDEO_DIP_DATA_SIZE]; | 344 | uint8_t buffer[VIDEO_DIP_DATA_SIZE]; |
345 | ssize_t len; | 345 | ssize_t len; |
346 | 346 | ||
347 | /* see comment above for the reason for this offset */ | 347 | /* see comment above for the reason for this offset */ |
348 | len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1); | 348 | len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1); |
349 | if (len < 0) | 349 | if (len < 0) |
350 | return; | 350 | return; |
351 | 351 | ||
352 | /* Insert the 'hole' (see big comment above) at position 3 */ | 352 | /* Insert the 'hole' (see big comment above) at position 3 */ |
353 | buffer[0] = buffer[1]; | 353 | buffer[0] = buffer[1]; |
354 | buffer[1] = buffer[2]; | 354 | buffer[1] = buffer[2]; |
355 | buffer[2] = buffer[3]; | 355 | buffer[2] = buffer[3]; |
356 | buffer[3] = 0; | 356 | buffer[3] = 0; |
357 | len++; | 357 | len++; |
358 | 358 | ||
359 | intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len); | 359 | intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len); |
360 | } | 360 | } |
361 | 361 | ||
362 | static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, | 362 | static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, |
363 | struct drm_display_mode *adjusted_mode) | 363 | struct drm_display_mode *adjusted_mode) |
364 | { | 364 | { |
365 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 365 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
366 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 366 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
367 | union hdmi_infoframe frame; | 367 | union hdmi_infoframe frame; |
368 | int ret; | 368 | int ret; |
369 | 369 | ||
370 | ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, | 370 | ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, |
371 | adjusted_mode); | 371 | adjusted_mode); |
372 | if (ret < 0) { | 372 | if (ret < 0) { |
373 | DRM_ERROR("couldn't fill AVI infoframe\n"); | 373 | DRM_ERROR("couldn't fill AVI infoframe\n"); |
374 | return; | 374 | return; |
375 | } | 375 | } |
376 | 376 | ||
377 | if (intel_hdmi->rgb_quant_range_selectable) { | 377 | if (intel_hdmi->rgb_quant_range_selectable) { |
378 | if (intel_crtc->config.limited_color_range) | 378 | if (intel_crtc->config.limited_color_range) |
379 | frame.avi.quantization_range = | 379 | frame.avi.quantization_range = |
380 | HDMI_QUANTIZATION_RANGE_LIMITED; | 380 | HDMI_QUANTIZATION_RANGE_LIMITED; |
381 | else | 381 | else |
382 | frame.avi.quantization_range = | 382 | frame.avi.quantization_range = |
383 | HDMI_QUANTIZATION_RANGE_FULL; | 383 | HDMI_QUANTIZATION_RANGE_FULL; |
384 | } | 384 | } |
385 | 385 | ||
386 | intel_write_infoframe(encoder, &frame); | 386 | intel_write_infoframe(encoder, &frame); |
387 | } | 387 | } |
388 | 388 | ||
389 | static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) | 389 | static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) |
390 | { | 390 | { |
391 | union hdmi_infoframe frame; | 391 | union hdmi_infoframe frame; |
392 | int ret; | 392 | int ret; |
393 | 393 | ||
394 | ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx"); | 394 | ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx"); |
395 | if (ret < 0) { | 395 | if (ret < 0) { |
396 | DRM_ERROR("couldn't fill SPD infoframe\n"); | 396 | DRM_ERROR("couldn't fill SPD infoframe\n"); |
397 | return; | 397 | return; |
398 | } | 398 | } |
399 | 399 | ||
400 | frame.spd.sdi = HDMI_SPD_SDI_PC; | 400 | frame.spd.sdi = HDMI_SPD_SDI_PC; |
401 | 401 | ||
402 | intel_write_infoframe(encoder, &frame); | 402 | intel_write_infoframe(encoder, &frame); |
403 | } | 403 | } |
404 | 404 | ||
405 | static void | 405 | static void |
406 | intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, | 406 | intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, |
407 | struct drm_display_mode *adjusted_mode) | 407 | struct drm_display_mode *adjusted_mode) |
408 | { | 408 | { |
409 | union hdmi_infoframe frame; | 409 | union hdmi_infoframe frame; |
410 | int ret; | 410 | int ret; |
411 | 411 | ||
412 | ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi, | 412 | ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi, |
413 | adjusted_mode); | 413 | adjusted_mode); |
414 | if (ret < 0) | 414 | if (ret < 0) |
415 | return; | 415 | return; |
416 | 416 | ||
417 | intel_write_infoframe(encoder, &frame); | 417 | intel_write_infoframe(encoder, &frame); |
418 | } | 418 | } |
419 | 419 | ||
/*
 * Configure DIP (infoframe) transmission on G4x-class hardware via the
 * single global VIDEO_DIP_CTL register, then send the AVI, SPD and HDMI
 * vendor infoframes.  For a non-HDMI (DVI) sink, DIP transmission is
 * turned off instead.  The HDMI port must be disabled while this runs
 * (asserted below).
 */
static void g4x_set_infoframes(struct drm_encoder *encoder,
			       struct drm_display_mode *adjusted_mode)
{
	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
	u32 reg = VIDEO_DIP_CTL;
	u32 val = I915_READ(reg);
	u32 port = VIDEO_DIP_PORT(intel_dig_port->port);

	assert_hdmi_port_disabled(intel_hdmi);

	/* If the registers were not initialized yet, they might be zeroes,
	 * which means we're selecting the AVI DIP and we're setting its
	 * frequency to once. This seems to really confuse the HW and make
	 * things stop working (the register spec says the AVI always needs to
	 * be sent every VSync). So here we avoid writing to the register more
	 * than we need and also explicitly select the AVI DIP and explicitly
	 * set its frequency to every VSync. Avoiding to write it twice seems to
	 * be enough to solve the problem, but being defensive shouldn't hurt us
	 * either. */
	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;

	if (!intel_hdmi->has_hdmi_sink) {
		/* DVI sink: ensure DIPs are disabled, touching the register
		 * only if it was actually enabled. */
		if (!(val & VIDEO_DIP_ENABLE))
			return;
		val &= ~VIDEO_DIP_ENABLE;
		I915_WRITE(reg, val);
		POSTING_READ(reg);
		return;
	}

	if (port != (val & VIDEO_DIP_PORT_MASK)) {
		/* Switching the DIP port selection: disable DIPs first,
		 * then reprogram the port bits. */
		if (val & VIDEO_DIP_ENABLE) {
			val &= ~VIDEO_DIP_ENABLE;
			I915_WRITE(reg, val);
			POSTING_READ(reg);
		}
		val &= ~VIDEO_DIP_PORT_MASK;
		val |= port;
	}

	val |= VIDEO_DIP_ENABLE;
	/* No vendor infoframe support on this generation's DIP block. */
	val &= ~VIDEO_DIP_ENABLE_VENDOR;

	I915_WRITE(reg, val);
	POSTING_READ(reg);

	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
	intel_hdmi_set_spd_infoframe(encoder);
	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
472 | 472 | ||
/*
 * Configure DIP (infoframe) transmission on IBX PCH platforms using the
 * per-pipe TVIDEO_DIP_CTL register, then send the AVI, SPD and HDMI
 * vendor infoframes.  DVI sinks get DIPs disabled instead.  The HDMI
 * port must be disabled while this runs (asserted below).
 */
static void ibx_set_infoframes(struct drm_encoder *encoder,
			       struct drm_display_mode *adjusted_mode)
{
	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
	u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
	u32 val = I915_READ(reg);
	u32 port = VIDEO_DIP_PORT(intel_dig_port->port);

	assert_hdmi_port_disabled(intel_hdmi);

	/* See the big comment in g4x_set_infoframes() */
	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;

	if (!intel_hdmi->has_hdmi_sink) {
		/* DVI sink: ensure DIPs are disabled, touching the register
		 * only if it was actually enabled. */
		if (!(val & VIDEO_DIP_ENABLE))
			return;
		val &= ~VIDEO_DIP_ENABLE;
		I915_WRITE(reg, val);
		POSTING_READ(reg);
		return;
	}

	if (port != (val & VIDEO_DIP_PORT_MASK)) {
		/* Switching the DIP port selection: disable DIPs first,
		 * then reprogram the port bits. */
		if (val & VIDEO_DIP_ENABLE) {
			val &= ~VIDEO_DIP_ENABLE;
			I915_WRITE(reg, val);
			POSTING_READ(reg);
		}
		val &= ~VIDEO_DIP_PORT_MASK;
		val |= port;
	}

	val |= VIDEO_DIP_ENABLE;
	/* Leave only the DIP types we actually send enabled. */
	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
		 VIDEO_DIP_ENABLE_GCP);

	I915_WRITE(reg, val);
	POSTING_READ(reg);

	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
	intel_hdmi_set_spd_infoframe(encoder);
	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
519 | 519 | ||
/*
 * Configure DIP (infoframe) transmission on CPT PCH platforms via the
 * per-pipe TVIDEO_DIP_CTL register, then send the AVI, SPD and HDMI
 * vendor infoframes.  On CPT the global enable and the AVI enable bits
 * must be set/cleared together.  The HDMI port must be disabled while
 * this runs (asserted below).
 */
static void cpt_set_infoframes(struct drm_encoder *encoder,
			       struct drm_display_mode *adjusted_mode)
{
	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
	u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
	u32 val = I915_READ(reg);

	assert_hdmi_port_disabled(intel_hdmi);

	/* See the big comment in g4x_set_infoframes() */
	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;

	if (!intel_hdmi->has_hdmi_sink) {
		/* DVI sink: ensure DIPs are disabled, touching the register
		 * only if it was actually enabled. */
		if (!(val & VIDEO_DIP_ENABLE))
			return;
		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
		I915_WRITE(reg, val);
		POSTING_READ(reg);
		return;
	}

	/* Set both together, unset both together: see the spec. */
	val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
		 VIDEO_DIP_ENABLE_GCP);

	I915_WRITE(reg, val);
	POSTING_READ(reg);

	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
	intel_hdmi_set_spd_infoframe(encoder);
	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
555 | 555 | ||
/*
 * Configure DIP (infoframe) transmission on Valleyview via the per-pipe
 * VLV_TVIDEO_DIP_CTL register, then send the AVI, SPD and HDMI vendor
 * infoframes.  DVI sinks get DIPs disabled instead.  The HDMI port must
 * be disabled while this runs (asserted below).
 */
static void vlv_set_infoframes(struct drm_encoder *encoder,
			       struct drm_display_mode *adjusted_mode)
{
	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
	u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
	u32 val = I915_READ(reg);

	assert_hdmi_port_disabled(intel_hdmi);

	/* See the big comment in g4x_set_infoframes() */
	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;

	if (!intel_hdmi->has_hdmi_sink) {
		/* DVI sink: ensure DIPs are disabled, touching the register
		 * only if it was actually enabled. */
		if (!(val & VIDEO_DIP_ENABLE))
			return;
		val &= ~VIDEO_DIP_ENABLE;
		I915_WRITE(reg, val);
		POSTING_READ(reg);
		return;
	}

	val |= VIDEO_DIP_ENABLE;
	/* Leave only the DIP types we actually send enabled. */
	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
		 VIDEO_DIP_ENABLE_GCP);

	I915_WRITE(reg, val);
	POSTING_READ(reg);

	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
	intel_hdmi_set_spd_infoframe(encoder);
	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
590 | 590 | ||
/*
 * Configure DIP (infoframe) transmission on Haswell+ via the
 * per-transcoder HSW_TVIDEO_DIP_CTL register, then send the AVI, SPD
 * and HDMI vendor infoframes.  The HSW DIP control layout has one
 * enable bit per infoframe type (no global enable); for DVI sinks the
 * whole register is simply cleared.  The HDMI port must be disabled
 * while this runs (asserted below).
 */
static void hsw_set_infoframes(struct drm_encoder *encoder,
			       struct drm_display_mode *adjusted_mode)
{
	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
	u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
	u32 val = I915_READ(reg);

	assert_hdmi_port_disabled(intel_hdmi);

	if (!intel_hdmi->has_hdmi_sink) {
		/* DVI sink: disable all DIP transmission. */
		I915_WRITE(reg, 0);
		POSTING_READ(reg);
		return;
	}

	/* Clear the enables for the infoframe types we don't send. */
	val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
		 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);

	I915_WRITE(reg, val);
	POSTING_READ(reg);

	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
	intel_hdmi_set_spd_infoframe(encoder);
	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
618 | 618 | ||
/*
 * Program the HDMI port register for the current pipe configuration:
 * encoding, color range, sync polarities, color depth, HDMI-vs-DVI mode
 * select, audio enable and pipe selection — then hand off to the
 * platform-specific infoframe setup hook.
 */
static void intel_hdmi_mode_set(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
	u32 hdmi_val;

	hdmi_val = SDVO_ENCODING_HDMI;
	/* Color range is only programmed in the port register pre-PCH-split. */
	if (!HAS_PCH_SPLIT(dev))
		hdmi_val |= intel_hdmi->color_range;
	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
		hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
		hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;

	/* HDMI is either 12bpc or 8bpc; anything above 24bpp means 12bpc. */
	if (crtc->config.pipe_bpp > 24)
		hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
	else
		hdmi_val |= SDVO_COLOR_FORMAT_8bpc;

	/* Required on CPT */
	if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
		hdmi_val |= HDMI_MODE_SELECT_HDMI;

	if (intel_hdmi->has_audio) {
		DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		hdmi_val |= SDVO_AUDIO_ENABLE;
		/* Audio requires HDMI (not DVI) signalling. */
		hdmi_val |= HDMI_MODE_SELECT_HDMI;
		intel_write_eld(&encoder->base, adjusted_mode);
	}

	/* CPT encodes the pipe selection differently from earlier gens. */
	if (HAS_PCH_CPT(dev))
		hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
	else
		hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);

	I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
	POSTING_READ(intel_hdmi->hdmi_reg);

	intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
}
663 | 663 | ||
664 | static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, | 664 | static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, |
665 | enum pipe *pipe) | 665 | enum pipe *pipe) |
666 | { | 666 | { |
667 | struct drm_device *dev = encoder->base.dev; | 667 | struct drm_device *dev = encoder->base.dev; |
668 | struct drm_i915_private *dev_priv = dev->dev_private; | 668 | struct drm_i915_private *dev_priv = dev->dev_private; |
669 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 669 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
670 | enum intel_display_power_domain power_domain; | 670 | enum intel_display_power_domain power_domain; |
671 | u32 tmp; | 671 | u32 tmp; |
672 | 672 | ||
673 | power_domain = intel_display_port_power_domain(encoder); | 673 | power_domain = intel_display_port_power_domain(encoder); |
674 | if (!intel_display_power_enabled(dev_priv, power_domain)) | 674 | if (!intel_display_power_enabled(dev_priv, power_domain)) |
675 | return false; | 675 | return false; |
676 | 676 | ||
677 | tmp = I915_READ(intel_hdmi->hdmi_reg); | 677 | tmp = I915_READ(intel_hdmi->hdmi_reg); |
678 | 678 | ||
679 | if (!(tmp & SDVO_ENABLE)) | 679 | if (!(tmp & SDVO_ENABLE)) |
680 | return false; | 680 | return false; |
681 | 681 | ||
682 | if (HAS_PCH_CPT(dev)) | 682 | if (HAS_PCH_CPT(dev)) |
683 | *pipe = PORT_TO_PIPE_CPT(tmp); | 683 | *pipe = PORT_TO_PIPE_CPT(tmp); |
684 | else | 684 | else |
685 | *pipe = PORT_TO_PIPE(tmp); | 685 | *pipe = PORT_TO_PIPE(tmp); |
686 | 686 | ||
687 | return true; | 687 | return true; |
688 | } | 688 | } |
689 | 689 | ||
690 | static void intel_hdmi_get_config(struct intel_encoder *encoder, | 690 | static void intel_hdmi_get_config(struct intel_encoder *encoder, |
691 | struct intel_crtc_config *pipe_config) | 691 | struct intel_crtc_config *pipe_config) |
692 | { | 692 | { |
693 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 693 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
694 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 694 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; |
695 | u32 tmp, flags = 0; | 695 | u32 tmp, flags = 0; |
696 | int dotclock; | 696 | int dotclock; |
697 | 697 | ||
698 | tmp = I915_READ(intel_hdmi->hdmi_reg); | 698 | tmp = I915_READ(intel_hdmi->hdmi_reg); |
699 | 699 | ||
700 | if (tmp & SDVO_HSYNC_ACTIVE_HIGH) | 700 | if (tmp & SDVO_HSYNC_ACTIVE_HIGH) |
701 | flags |= DRM_MODE_FLAG_PHSYNC; | 701 | flags |= DRM_MODE_FLAG_PHSYNC; |
702 | else | 702 | else |
703 | flags |= DRM_MODE_FLAG_NHSYNC; | 703 | flags |= DRM_MODE_FLAG_NHSYNC; |
704 | 704 | ||
705 | if (tmp & SDVO_VSYNC_ACTIVE_HIGH) | 705 | if (tmp & SDVO_VSYNC_ACTIVE_HIGH) |
706 | flags |= DRM_MODE_FLAG_PVSYNC; | 706 | flags |= DRM_MODE_FLAG_PVSYNC; |
707 | else | 707 | else |
708 | flags |= DRM_MODE_FLAG_NVSYNC; | 708 | flags |= DRM_MODE_FLAG_NVSYNC; |
709 | 709 | ||
710 | pipe_config->adjusted_mode.flags |= flags; | 710 | pipe_config->adjusted_mode.flags |= flags; |
711 | 711 | ||
712 | if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc) | 712 | if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc) |
713 | dotclock = pipe_config->port_clock * 2 / 3; | 713 | dotclock = pipe_config->port_clock * 2 / 3; |
714 | else | 714 | else |
715 | dotclock = pipe_config->port_clock; | 715 | dotclock = pipe_config->port_clock; |
716 | 716 | ||
717 | if (HAS_PCH_SPLIT(dev_priv->dev)) | 717 | if (HAS_PCH_SPLIT(dev_priv->dev)) |
718 | ironlake_check_encoder_dotclock(pipe_config, dotclock); | 718 | ironlake_check_encoder_dotclock(pipe_config, dotclock); |
719 | 719 | ||
720 | pipe_config->adjusted_mode.crtc_clock = dotclock; | 720 | pipe_config->adjusted_mode.crtc_clock = dotclock; |
721 | } | 721 | } |
722 | 722 | ||
/*
 * Enable the HDMI port, applying the PCH-split hardware workarounds:
 * toggle the enable bit off first, then write the enable value twice,
 * since the first write may be masked by the hardware.
 */
static void intel_enable_hdmi(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
	u32 temp;
	u32 enable_bits = SDVO_ENABLE;

	if (intel_hdmi->has_audio)
		enable_bits |= SDVO_AUDIO_ENABLE;

	temp = I915_READ(intel_hdmi->hdmi_reg);

	/* HW workaround for IBX, we need to move the port to transcoder A
	 * before disabling it, so restore the transcoder select bit here. */
	if (HAS_PCH_IBX(dev))
		enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);

	/* HW workaround, need to toggle enable bit off and on for 12bpc, but
	 * we do this anyway which shows more stable in testing.
	 */
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
		POSTING_READ(intel_hdmi->hdmi_reg);
	}

	temp |= enable_bits;

	I915_WRITE(intel_hdmi->hdmi_reg, temp);
	POSTING_READ(intel_hdmi->hdmi_reg);

	/* HW workaround, need to write this twice for issue that may result
	 * in first write getting masked.
	 */
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(intel_hdmi->hdmi_reg, temp);
		POSTING_READ(intel_hdmi->hdmi_reg);
	}
}
763 | 763 | ||
/*
 * Intentionally empty: on Valleyview the port enable work happens in
 * other encoder hooks (NOTE(review): presumably the pre_enable hook —
 * confirm against the VLV encoder setup elsewhere in this file).
 */
static void vlv_enable_hdmi(struct intel_encoder *encoder)
{
}
767 | 767 | ||
/*
 * Disable the HDMI port.  On IBX the port must first be moved back to
 * transcoder A (a hardware workaround), waiting a vblank for the
 * transcoder selection to take effect; the enable bits are then
 * cleared with the usual PCH-split toggle/double-write workarounds.
 */
static void intel_disable_hdmi(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
	u32 temp;
	u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;

	temp = I915_READ(intel_hdmi->hdmi_reg);

	/* HW workaround for IBX, we need to move the port to transcoder A
	 * before disabling it. */
	if (HAS_PCH_IBX(dev)) {
		struct drm_crtc *crtc = encoder->base.crtc;
		/* -1 when the encoder has no crtc; only used when crtc != NULL. */
		int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;

		if (temp & SDVO_PIPE_B_SELECT) {
			temp &= ~SDVO_PIPE_B_SELECT;
			I915_WRITE(intel_hdmi->hdmi_reg, temp);
			POSTING_READ(intel_hdmi->hdmi_reg);

			/* Again we need to write this twice. */
			I915_WRITE(intel_hdmi->hdmi_reg, temp);
			POSTING_READ(intel_hdmi->hdmi_reg);

			/* Transcoder selection bits only update
			 * effectively on vblank. */
			if (crtc)
				intel_wait_for_vblank(dev, pipe);
			else
				msleep(50);
		}
	}

	/* HW workaround, need to toggle enable bit off and on for 12bpc, but
	 * we do this anyway which shows more stable in testing.
	 */
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
		POSTING_READ(intel_hdmi->hdmi_reg);
	}

	temp &= ~enable_bits;

	I915_WRITE(intel_hdmi->hdmi_reg, temp);
	POSTING_READ(intel_hdmi->hdmi_reg);

	/* HW workaround, need to write this twice for issue that may result
	 * in first write getting masked.
	 */
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(intel_hdmi->hdmi_reg, temp);
		POSTING_READ(intel_hdmi->hdmi_reg);
	}
}
823 | 823 | ||
824 | static int hdmi_portclock_limit(struct intel_hdmi *hdmi) | 824 | static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) |
825 | { | 825 | { |
826 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); | 826 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); |
827 | 827 | ||
828 | if (!hdmi->has_hdmi_sink || IS_G4X(dev)) | 828 | if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev)) |
829 | return 165000; | 829 | return 165000; |
830 | else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) | 830 | else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) |
831 | return 300000; | 831 | return 300000; |
832 | else | 832 | else |
833 | return 225000; | 833 | return 225000; |
834 | } | 834 | } |
835 | 835 | ||
836 | static enum drm_mode_status | 836 | static enum drm_mode_status |
837 | intel_hdmi_mode_valid(struct drm_connector *connector, | 837 | intel_hdmi_mode_valid(struct drm_connector *connector, |
838 | struct drm_display_mode *mode) | 838 | struct drm_display_mode *mode) |
839 | { | 839 | { |
840 | if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector))) | 840 | if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector), |
841 | true)) | ||
841 | return MODE_CLOCK_HIGH; | 842 | return MODE_CLOCK_HIGH; |
842 | if (mode->clock < 20000) | 843 | if (mode->clock < 20000) |
843 | return MODE_CLOCK_LOW; | 844 | return MODE_CLOCK_LOW; |
844 | 845 | ||
845 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 846 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
846 | return MODE_NO_DBLESCAN; | 847 | return MODE_NO_DBLESCAN; |
847 | 848 | ||
848 | return MODE_OK; | 849 | return MODE_OK; |
849 | } | 850 | } |
850 | 851 | ||
851 | static bool hdmi_12bpc_possible(struct intel_crtc *crtc) | 852 | static bool hdmi_12bpc_possible(struct intel_crtc *crtc) |
852 | { | 853 | { |
853 | struct drm_device *dev = crtc->base.dev; | 854 | struct drm_device *dev = crtc->base.dev; |
854 | struct intel_encoder *encoder; | 855 | struct intel_encoder *encoder; |
855 | int count = 0, count_hdmi = 0; | 856 | int count = 0, count_hdmi = 0; |
856 | 857 | ||
857 | if (!HAS_PCH_SPLIT(dev)) | 858 | if (!HAS_PCH_SPLIT(dev)) |
858 | return false; | 859 | return false; |
859 | 860 | ||
860 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { | 861 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
861 | if (encoder->new_crtc != crtc) | 862 | if (encoder->new_crtc != crtc) |
862 | continue; | 863 | continue; |
863 | 864 | ||
864 | count_hdmi += encoder->type == INTEL_OUTPUT_HDMI; | 865 | count_hdmi += encoder->type == INTEL_OUTPUT_HDMI; |
865 | count++; | 866 | count++; |
866 | } | 867 | } |
867 | 868 | ||
868 | /* | 869 | /* |
869 | * HDMI 12bpc affects the clocks, so it's only possible | 870 | * HDMI 12bpc affects the clocks, so it's only possible |
870 | * when not cloning with other encoder types. | 871 | * when not cloning with other encoder types. |
871 | */ | 872 | */ |
872 | return count_hdmi > 0 && count_hdmi == count; | 873 | return count_hdmi > 0 && count_hdmi == count; |
873 | } | 874 | } |
874 | 875 | ||
875 | bool intel_hdmi_compute_config(struct intel_encoder *encoder, | 876 | bool intel_hdmi_compute_config(struct intel_encoder *encoder, |
876 | struct intel_crtc_config *pipe_config) | 877 | struct intel_crtc_config *pipe_config) |
877 | { | 878 | { |
878 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 879 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
879 | struct drm_device *dev = encoder->base.dev; | 880 | struct drm_device *dev = encoder->base.dev; |
880 | struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; | 881 | struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; |
881 | int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2; | 882 | int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2; |
882 | int portclock_limit = hdmi_portclock_limit(intel_hdmi); | 883 | int portclock_limit = hdmi_portclock_limit(intel_hdmi, false); |
883 | int desired_bpp; | 884 | int desired_bpp; |
884 | 885 | ||
885 | if (intel_hdmi->color_range_auto) { | 886 | if (intel_hdmi->color_range_auto) { |
886 | /* See CEA-861-E - 5.1 Default Encoding Parameters */ | 887 | /* See CEA-861-E - 5.1 Default Encoding Parameters */ |
887 | if (intel_hdmi->has_hdmi_sink && | 888 | if (intel_hdmi->has_hdmi_sink && |
888 | drm_match_cea_mode(adjusted_mode) > 1) | 889 | drm_match_cea_mode(adjusted_mode) > 1) |
889 | intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235; | 890 | intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235; |
890 | else | 891 | else |
891 | intel_hdmi->color_range = 0; | 892 | intel_hdmi->color_range = 0; |
892 | } | 893 | } |
893 | 894 | ||
894 | if (intel_hdmi->color_range) | 895 | if (intel_hdmi->color_range) |
895 | pipe_config->limited_color_range = true; | 896 | pipe_config->limited_color_range = true; |
896 | 897 | ||
897 | if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) | 898 | if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) |
898 | pipe_config->has_pch_encoder = true; | 899 | pipe_config->has_pch_encoder = true; |
899 | 900 | ||
900 | /* | 901 | /* |
901 | * HDMI is either 12 or 8, so if the display lets 10bpc sneak | 902 | * HDMI is either 12 or 8, so if the display lets 10bpc sneak |
902 | * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi | 903 | * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi |
903 | * outputs. We also need to check that the higher clock still fits | 904 | * outputs. We also need to check that the higher clock still fits |
904 | * within limits. | 905 | * within limits. |
905 | */ | 906 | */ |
906 | if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink && | 907 | if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink && |
907 | clock_12bpc <= portclock_limit && | 908 | clock_12bpc <= portclock_limit && |
908 | hdmi_12bpc_possible(encoder->new_crtc)) { | 909 | hdmi_12bpc_possible(encoder->new_crtc)) { |
909 | DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); | 910 | DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); |
910 | desired_bpp = 12*3; | 911 | desired_bpp = 12*3; |
911 | 912 | ||
912 | /* Need to adjust the port link by 1.5x for 12bpc. */ | 913 | /* Need to adjust the port link by 1.5x for 12bpc. */ |
913 | pipe_config->port_clock = clock_12bpc; | 914 | pipe_config->port_clock = clock_12bpc; |
914 | } else { | 915 | } else { |
915 | DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n"); | 916 | DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n"); |
916 | desired_bpp = 8*3; | 917 | desired_bpp = 8*3; |
917 | } | 918 | } |
918 | 919 | ||
919 | if (!pipe_config->bw_constrained) { | 920 | if (!pipe_config->bw_constrained) { |
920 | DRM_DEBUG_KMS("forcing pipe bpc to %i for HDMI\n", desired_bpp); | 921 | DRM_DEBUG_KMS("forcing pipe bpc to %i for HDMI\n", desired_bpp); |
921 | pipe_config->pipe_bpp = desired_bpp; | 922 | pipe_config->pipe_bpp = desired_bpp; |
922 | } | 923 | } |
923 | 924 | ||
924 | if (adjusted_mode->crtc_clock > portclock_limit) { | 925 | if (adjusted_mode->crtc_clock > portclock_limit) { |
925 | DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n"); | 926 | DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n"); |
926 | return false; | 927 | return false; |
927 | } | 928 | } |
928 | 929 | ||
929 | return true; | 930 | return true; |
930 | } | 931 | } |
931 | 932 | ||
932 | static enum drm_connector_status | 933 | static enum drm_connector_status |
933 | intel_hdmi_detect(struct drm_connector *connector, bool force) | 934 | intel_hdmi_detect(struct drm_connector *connector, bool force) |
934 | { | 935 | { |
935 | struct drm_device *dev = connector->dev; | 936 | struct drm_device *dev = connector->dev; |
936 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); | 937 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
937 | struct intel_digital_port *intel_dig_port = | 938 | struct intel_digital_port *intel_dig_port = |
938 | hdmi_to_dig_port(intel_hdmi); | 939 | hdmi_to_dig_port(intel_hdmi); |
939 | struct intel_encoder *intel_encoder = &intel_dig_port->base; | 940 | struct intel_encoder *intel_encoder = &intel_dig_port->base; |
940 | struct drm_i915_private *dev_priv = dev->dev_private; | 941 | struct drm_i915_private *dev_priv = dev->dev_private; |
941 | struct edid *edid; | 942 | struct edid *edid; |
942 | enum intel_display_power_domain power_domain; | 943 | enum intel_display_power_domain power_domain; |
943 | enum drm_connector_status status = connector_status_disconnected; | 944 | enum drm_connector_status status = connector_status_disconnected; |
944 | 945 | ||
945 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | 946 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", |
946 | connector->base.id, drm_get_connector_name(connector)); | 947 | connector->base.id, drm_get_connector_name(connector)); |
947 | 948 | ||
948 | power_domain = intel_display_port_power_domain(intel_encoder); | 949 | power_domain = intel_display_port_power_domain(intel_encoder); |
949 | intel_display_power_get(dev_priv, power_domain); | 950 | intel_display_power_get(dev_priv, power_domain); |
950 | 951 | ||
951 | intel_hdmi->has_hdmi_sink = false; | 952 | intel_hdmi->has_hdmi_sink = false; |
952 | intel_hdmi->has_audio = false; | 953 | intel_hdmi->has_audio = false; |
953 | intel_hdmi->rgb_quant_range_selectable = false; | 954 | intel_hdmi->rgb_quant_range_selectable = false; |
954 | edid = drm_get_edid(connector, | 955 | edid = drm_get_edid(connector, |
955 | intel_gmbus_get_adapter(dev_priv, | 956 | intel_gmbus_get_adapter(dev_priv, |
956 | intel_hdmi->ddc_bus)); | 957 | intel_hdmi->ddc_bus)); |
957 | 958 | ||
958 | if (edid) { | 959 | if (edid) { |
959 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { | 960 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
960 | status = connector_status_connected; | 961 | status = connector_status_connected; |
961 | if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI) | 962 | if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI) |
962 | intel_hdmi->has_hdmi_sink = | 963 | intel_hdmi->has_hdmi_sink = |
963 | drm_detect_hdmi_monitor(edid); | 964 | drm_detect_hdmi_monitor(edid); |
964 | intel_hdmi->has_audio = drm_detect_monitor_audio(edid); | 965 | intel_hdmi->has_audio = drm_detect_monitor_audio(edid); |
965 | intel_hdmi->rgb_quant_range_selectable = | 966 | intel_hdmi->rgb_quant_range_selectable = |
966 | drm_rgb_quant_range_selectable(edid); | 967 | drm_rgb_quant_range_selectable(edid); |
967 | } | 968 | } |
968 | kfree(edid); | 969 | kfree(edid); |
969 | } | 970 | } |
970 | 971 | ||
971 | if (status == connector_status_connected) { | 972 | if (status == connector_status_connected) { |
972 | if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) | 973 | if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) |
973 | intel_hdmi->has_audio = | 974 | intel_hdmi->has_audio = |
974 | (intel_hdmi->force_audio == HDMI_AUDIO_ON); | 975 | (intel_hdmi->force_audio == HDMI_AUDIO_ON); |
975 | intel_encoder->type = INTEL_OUTPUT_HDMI; | 976 | intel_encoder->type = INTEL_OUTPUT_HDMI; |
976 | } | 977 | } |
977 | 978 | ||
978 | intel_display_power_put(dev_priv, power_domain); | 979 | intel_display_power_put(dev_priv, power_domain); |
979 | 980 | ||
980 | return status; | 981 | return status; |
981 | } | 982 | } |
982 | 983 | ||
983 | static int intel_hdmi_get_modes(struct drm_connector *connector) | 984 | static int intel_hdmi_get_modes(struct drm_connector *connector) |
984 | { | 985 | { |
985 | struct intel_encoder *intel_encoder = intel_attached_encoder(connector); | 986 | struct intel_encoder *intel_encoder = intel_attached_encoder(connector); |
986 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); | 987 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); |
987 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 988 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
988 | enum intel_display_power_domain power_domain; | 989 | enum intel_display_power_domain power_domain; |
989 | int ret; | 990 | int ret; |
990 | 991 | ||
991 | /* We should parse the EDID data and find out if it's an HDMI sink so | 992 | /* We should parse the EDID data and find out if it's an HDMI sink so |
992 | * we can send audio to it. | 993 | * we can send audio to it. |
993 | */ | 994 | */ |
994 | 995 | ||
995 | power_domain = intel_display_port_power_domain(intel_encoder); | 996 | power_domain = intel_display_port_power_domain(intel_encoder); |
996 | intel_display_power_get(dev_priv, power_domain); | 997 | intel_display_power_get(dev_priv, power_domain); |
997 | 998 | ||
998 | ret = intel_ddc_get_modes(connector, | 999 | ret = intel_ddc_get_modes(connector, |
999 | intel_gmbus_get_adapter(dev_priv, | 1000 | intel_gmbus_get_adapter(dev_priv, |
1000 | intel_hdmi->ddc_bus)); | 1001 | intel_hdmi->ddc_bus)); |
1001 | 1002 | ||
1002 | intel_display_power_put(dev_priv, power_domain); | 1003 | intel_display_power_put(dev_priv, power_domain); |
1003 | 1004 | ||
1004 | return ret; | 1005 | return ret; |
1005 | } | 1006 | } |
1006 | 1007 | ||
1007 | static bool | 1008 | static bool |
1008 | intel_hdmi_detect_audio(struct drm_connector *connector) | 1009 | intel_hdmi_detect_audio(struct drm_connector *connector) |
1009 | { | 1010 | { |
1010 | struct intel_encoder *intel_encoder = intel_attached_encoder(connector); | 1011 | struct intel_encoder *intel_encoder = intel_attached_encoder(connector); |
1011 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); | 1012 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); |
1012 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 1013 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
1013 | enum intel_display_power_domain power_domain; | 1014 | enum intel_display_power_domain power_domain; |
1014 | struct edid *edid; | 1015 | struct edid *edid; |
1015 | bool has_audio = false; | 1016 | bool has_audio = false; |
1016 | 1017 | ||
1017 | power_domain = intel_display_port_power_domain(intel_encoder); | 1018 | power_domain = intel_display_port_power_domain(intel_encoder); |
1018 | intel_display_power_get(dev_priv, power_domain); | 1019 | intel_display_power_get(dev_priv, power_domain); |
1019 | 1020 | ||
1020 | edid = drm_get_edid(connector, | 1021 | edid = drm_get_edid(connector, |
1021 | intel_gmbus_get_adapter(dev_priv, | 1022 | intel_gmbus_get_adapter(dev_priv, |
1022 | intel_hdmi->ddc_bus)); | 1023 | intel_hdmi->ddc_bus)); |
1023 | if (edid) { | 1024 | if (edid) { |
1024 | if (edid->input & DRM_EDID_INPUT_DIGITAL) | 1025 | if (edid->input & DRM_EDID_INPUT_DIGITAL) |
1025 | has_audio = drm_detect_monitor_audio(edid); | 1026 | has_audio = drm_detect_monitor_audio(edid); |
1026 | kfree(edid); | 1027 | kfree(edid); |
1027 | } | 1028 | } |
1028 | 1029 | ||
1029 | intel_display_power_put(dev_priv, power_domain); | 1030 | intel_display_power_put(dev_priv, power_domain); |
1030 | 1031 | ||
1031 | return has_audio; | 1032 | return has_audio; |
1032 | } | 1033 | } |
1033 | 1034 | ||
1034 | static int | 1035 | static int |
1035 | intel_hdmi_set_property(struct drm_connector *connector, | 1036 | intel_hdmi_set_property(struct drm_connector *connector, |
1036 | struct drm_property *property, | 1037 | struct drm_property *property, |
1037 | uint64_t val) | 1038 | uint64_t val) |
1038 | { | 1039 | { |
1039 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); | 1040 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
1040 | struct intel_digital_port *intel_dig_port = | 1041 | struct intel_digital_port *intel_dig_port = |
1041 | hdmi_to_dig_port(intel_hdmi); | 1042 | hdmi_to_dig_port(intel_hdmi); |
1042 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 1043 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
1043 | int ret; | 1044 | int ret; |
1044 | 1045 | ||
1045 | ret = drm_object_property_set_value(&connector->base, property, val); | 1046 | ret = drm_object_property_set_value(&connector->base, property, val); |
1046 | if (ret) | 1047 | if (ret) |
1047 | return ret; | 1048 | return ret; |
1048 | 1049 | ||
1049 | if (property == dev_priv->force_audio_property) { | 1050 | if (property == dev_priv->force_audio_property) { |
1050 | enum hdmi_force_audio i = val; | 1051 | enum hdmi_force_audio i = val; |
1051 | bool has_audio; | 1052 | bool has_audio; |
1052 | 1053 | ||
1053 | if (i == intel_hdmi->force_audio) | 1054 | if (i == intel_hdmi->force_audio) |
1054 | return 0; | 1055 | return 0; |
1055 | 1056 | ||
1056 | intel_hdmi->force_audio = i; | 1057 | intel_hdmi->force_audio = i; |
1057 | 1058 | ||
1058 | if (i == HDMI_AUDIO_AUTO) | 1059 | if (i == HDMI_AUDIO_AUTO) |
1059 | has_audio = intel_hdmi_detect_audio(connector); | 1060 | has_audio = intel_hdmi_detect_audio(connector); |
1060 | else | 1061 | else |
1061 | has_audio = (i == HDMI_AUDIO_ON); | 1062 | has_audio = (i == HDMI_AUDIO_ON); |
1062 | 1063 | ||
1063 | if (i == HDMI_AUDIO_OFF_DVI) | 1064 | if (i == HDMI_AUDIO_OFF_DVI) |
1064 | intel_hdmi->has_hdmi_sink = 0; | 1065 | intel_hdmi->has_hdmi_sink = 0; |
1065 | 1066 | ||
1066 | intel_hdmi->has_audio = has_audio; | 1067 | intel_hdmi->has_audio = has_audio; |
1067 | goto done; | 1068 | goto done; |
1068 | } | 1069 | } |
1069 | 1070 | ||
1070 | if (property == dev_priv->broadcast_rgb_property) { | 1071 | if (property == dev_priv->broadcast_rgb_property) { |
1071 | bool old_auto = intel_hdmi->color_range_auto; | 1072 | bool old_auto = intel_hdmi->color_range_auto; |
1072 | uint32_t old_range = intel_hdmi->color_range; | 1073 | uint32_t old_range = intel_hdmi->color_range; |
1073 | 1074 | ||
1074 | switch (val) { | 1075 | switch (val) { |
1075 | case INTEL_BROADCAST_RGB_AUTO: | 1076 | case INTEL_BROADCAST_RGB_AUTO: |
1076 | intel_hdmi->color_range_auto = true; | 1077 | intel_hdmi->color_range_auto = true; |
1077 | break; | 1078 | break; |
1078 | case INTEL_BROADCAST_RGB_FULL: | 1079 | case INTEL_BROADCAST_RGB_FULL: |
1079 | intel_hdmi->color_range_auto = false; | 1080 | intel_hdmi->color_range_auto = false; |
1080 | intel_hdmi->color_range = 0; | 1081 | intel_hdmi->color_range = 0; |
1081 | break; | 1082 | break; |
1082 | case INTEL_BROADCAST_RGB_LIMITED: | 1083 | case INTEL_BROADCAST_RGB_LIMITED: |
1083 | intel_hdmi->color_range_auto = false; | 1084 | intel_hdmi->color_range_auto = false; |
1084 | intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235; | 1085 | intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235; |
1085 | break; | 1086 | break; |
1086 | default: | 1087 | default: |
1087 | return -EINVAL; | 1088 | return -EINVAL; |
1088 | } | 1089 | } |
1089 | 1090 | ||
1090 | if (old_auto == intel_hdmi->color_range_auto && | 1091 | if (old_auto == intel_hdmi->color_range_auto && |
1091 | old_range == intel_hdmi->color_range) | 1092 | old_range == intel_hdmi->color_range) |
1092 | return 0; | 1093 | return 0; |
1093 | 1094 | ||
1094 | goto done; | 1095 | goto done; |
1095 | } | 1096 | } |
1096 | 1097 | ||
1097 | return -EINVAL; | 1098 | return -EINVAL; |
1098 | 1099 | ||
1099 | done: | 1100 | done: |
1100 | if (intel_dig_port->base.base.crtc) | 1101 | if (intel_dig_port->base.base.crtc) |
1101 | intel_crtc_restore_mode(intel_dig_port->base.base.crtc); | 1102 | intel_crtc_restore_mode(intel_dig_port->base.base.crtc); |
1102 | 1103 | ||
1103 | return 0; | 1104 | return 0; |
1104 | } | 1105 | } |
1105 | 1106 | ||
1106 | static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) | 1107 | static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) |
1107 | { | 1108 | { |
1108 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); | 1109 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); |
1109 | struct drm_device *dev = encoder->base.dev; | 1110 | struct drm_device *dev = encoder->base.dev; |
1110 | struct drm_i915_private *dev_priv = dev->dev_private; | 1111 | struct drm_i915_private *dev_priv = dev->dev_private; |
1111 | struct intel_crtc *intel_crtc = | 1112 | struct intel_crtc *intel_crtc = |
1112 | to_intel_crtc(encoder->base.crtc); | 1113 | to_intel_crtc(encoder->base.crtc); |
1113 | enum dpio_channel port = vlv_dport_to_channel(dport); | 1114 | enum dpio_channel port = vlv_dport_to_channel(dport); |
1114 | int pipe = intel_crtc->pipe; | 1115 | int pipe = intel_crtc->pipe; |
1115 | u32 val; | 1116 | u32 val; |
1116 | 1117 | ||
1117 | if (!IS_VALLEYVIEW(dev)) | 1118 | if (!IS_VALLEYVIEW(dev)) |
1118 | return; | 1119 | return; |
1119 | 1120 | ||
1120 | /* Enable clock channels for this port */ | 1121 | /* Enable clock channels for this port */ |
1121 | mutex_lock(&dev_priv->dpio_lock); | 1122 | mutex_lock(&dev_priv->dpio_lock); |
1122 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); | 1123 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); |
1123 | val = 0; | 1124 | val = 0; |
1124 | if (pipe) | 1125 | if (pipe) |
1125 | val |= (1<<21); | 1126 | val |= (1<<21); |
1126 | else | 1127 | else |
1127 | val &= ~(1<<21); | 1128 | val &= ~(1<<21); |
1128 | val |= 0x001000c4; | 1129 | val |= 0x001000c4; |
1129 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); | 1130 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); |
1130 | 1131 | ||
1131 | /* HDMI 1.0V-2dB */ | 1132 | /* HDMI 1.0V-2dB */ |
1132 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0); | 1133 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0); |
1133 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f); | 1134 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f); |
1134 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a); | 1135 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a); |
1135 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040); | 1136 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040); |
1136 | vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878); | 1137 | vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878); |
1137 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); | 1138 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); |
1138 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000); | 1139 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000); |
1139 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); | 1140 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); |
1140 | 1141 | ||
1141 | /* Program lane clock */ | 1142 | /* Program lane clock */ |
1142 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); | 1143 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); |
1143 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); | 1144 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); |
1144 | mutex_unlock(&dev_priv->dpio_lock); | 1145 | mutex_unlock(&dev_priv->dpio_lock); |
1145 | 1146 | ||
1146 | intel_enable_hdmi(encoder); | 1147 | intel_enable_hdmi(encoder); |
1147 | 1148 | ||
1148 | vlv_wait_port_ready(dev_priv, dport); | 1149 | vlv_wait_port_ready(dev_priv, dport); |
1149 | } | 1150 | } |
1150 | 1151 | ||
1151 | static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) | 1152 | static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) |
1152 | { | 1153 | { |
1153 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); | 1154 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); |
1154 | struct drm_device *dev = encoder->base.dev; | 1155 | struct drm_device *dev = encoder->base.dev; |
1155 | struct drm_i915_private *dev_priv = dev->dev_private; | 1156 | struct drm_i915_private *dev_priv = dev->dev_private; |
1156 | struct intel_crtc *intel_crtc = | 1157 | struct intel_crtc *intel_crtc = |
1157 | to_intel_crtc(encoder->base.crtc); | 1158 | to_intel_crtc(encoder->base.crtc); |
1158 | enum dpio_channel port = vlv_dport_to_channel(dport); | 1159 | enum dpio_channel port = vlv_dport_to_channel(dport); |
1159 | int pipe = intel_crtc->pipe; | 1160 | int pipe = intel_crtc->pipe; |
1160 | 1161 | ||
1161 | if (!IS_VALLEYVIEW(dev)) | 1162 | if (!IS_VALLEYVIEW(dev)) |
1162 | return; | 1163 | return; |
1163 | 1164 | ||
1164 | /* Program Tx lane resets to default */ | 1165 | /* Program Tx lane resets to default */ |
1165 | mutex_lock(&dev_priv->dpio_lock); | 1166 | mutex_lock(&dev_priv->dpio_lock); |
1166 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), | 1167 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), |
1167 | DPIO_PCS_TX_LANE2_RESET | | 1168 | DPIO_PCS_TX_LANE2_RESET | |
1168 | DPIO_PCS_TX_LANE1_RESET); | 1169 | DPIO_PCS_TX_LANE1_RESET); |
1169 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), | 1170 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), |
1170 | DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | | 1171 | DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | |
1171 | DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | | 1172 | DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | |
1172 | (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | | 1173 | (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | |
1173 | DPIO_PCS_CLK_SOFT_RESET); | 1174 | DPIO_PCS_CLK_SOFT_RESET); |
1174 | 1175 | ||
1175 | /* Fix up inter-pair skew failure */ | 1176 | /* Fix up inter-pair skew failure */ |
1176 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); | 1177 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); |
1177 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); | 1178 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); |
1178 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); | 1179 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); |
1179 | 1180 | ||
1180 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000); | 1181 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000); |
1181 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); | 1182 | vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); |
1182 | mutex_unlock(&dev_priv->dpio_lock); | 1183 | mutex_unlock(&dev_priv->dpio_lock); |
1183 | } | 1184 | } |
1184 | 1185 | ||
1185 | static void vlv_hdmi_post_disable(struct intel_encoder *encoder) | 1186 | static void vlv_hdmi_post_disable(struct intel_encoder *encoder) |
1186 | { | 1187 | { |
1187 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); | 1188 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); |
1188 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 1189 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; |
1189 | struct intel_crtc *intel_crtc = | 1190 | struct intel_crtc *intel_crtc = |
1190 | to_intel_crtc(encoder->base.crtc); | 1191 | to_intel_crtc(encoder->base.crtc); |
1191 | enum dpio_channel port = vlv_dport_to_channel(dport); | 1192 | enum dpio_channel port = vlv_dport_to_channel(dport); |
1192 | int pipe = intel_crtc->pipe; | 1193 | int pipe = intel_crtc->pipe; |
1193 | 1194 | ||
1194 | /* Reset lanes to avoid HDMI flicker (VLV w/a) */ | 1195 | /* Reset lanes to avoid HDMI flicker (VLV w/a) */ |
1195 | mutex_lock(&dev_priv->dpio_lock); | 1196 | mutex_lock(&dev_priv->dpio_lock); |
1196 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000); | 1197 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000); |
1197 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060); | 1198 | vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060); |
1198 | mutex_unlock(&dev_priv->dpio_lock); | 1199 | mutex_unlock(&dev_priv->dpio_lock); |
1199 | } | 1200 | } |
1200 | 1201 | ||
1201 | static void intel_hdmi_destroy(struct drm_connector *connector) | 1202 | static void intel_hdmi_destroy(struct drm_connector *connector) |
1202 | { | 1203 | { |
1203 | drm_connector_cleanup(connector); | 1204 | drm_connector_cleanup(connector); |
1204 | kfree(connector); | 1205 | kfree(connector); |
1205 | } | 1206 | } |
1206 | 1207 | ||
1207 | static const struct drm_connector_funcs intel_hdmi_connector_funcs = { | 1208 | static const struct drm_connector_funcs intel_hdmi_connector_funcs = { |
1208 | .dpms = intel_connector_dpms, | 1209 | .dpms = intel_connector_dpms, |
1209 | .detect = intel_hdmi_detect, | 1210 | .detect = intel_hdmi_detect, |
1210 | .fill_modes = drm_helper_probe_single_connector_modes, | 1211 | .fill_modes = drm_helper_probe_single_connector_modes, |
1211 | .set_property = intel_hdmi_set_property, | 1212 | .set_property = intel_hdmi_set_property, |
1212 | .destroy = intel_hdmi_destroy, | 1213 | .destroy = intel_hdmi_destroy, |
1213 | }; | 1214 | }; |
1214 | 1215 | ||
1215 | static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { | 1216 | static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { |
1216 | .get_modes = intel_hdmi_get_modes, | 1217 | .get_modes = intel_hdmi_get_modes, |
1217 | .mode_valid = intel_hdmi_mode_valid, | 1218 | .mode_valid = intel_hdmi_mode_valid, |
1218 | .best_encoder = intel_best_encoder, | 1219 | .best_encoder = intel_best_encoder, |
1219 | }; | 1220 | }; |
1220 | 1221 | ||
1221 | static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { | 1222 | static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { |
1222 | .destroy = intel_encoder_destroy, | 1223 | .destroy = intel_encoder_destroy, |
1223 | }; | 1224 | }; |
1224 | 1225 | ||
1225 | static void | 1226 | static void |
1226 | intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) | 1227 | intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) |
1227 | { | 1228 | { |
1228 | intel_attach_force_audio_property(connector); | 1229 | intel_attach_force_audio_property(connector); |
1229 | intel_attach_broadcast_rgb_property(connector); | 1230 | intel_attach_broadcast_rgb_property(connector); |
1230 | intel_hdmi->color_range_auto = true; | 1231 | intel_hdmi->color_range_auto = true; |
1231 | } | 1232 | } |
1232 | 1233 | ||
1233 | void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | 1234 | void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, |
1234 | struct intel_connector *intel_connector) | 1235 | struct intel_connector *intel_connector) |
1235 | { | 1236 | { |
1236 | struct drm_connector *connector = &intel_connector->base; | 1237 | struct drm_connector *connector = &intel_connector->base; |
1237 | struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; | 1238 | struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; |
1238 | struct intel_encoder *intel_encoder = &intel_dig_port->base; | 1239 | struct intel_encoder *intel_encoder = &intel_dig_port->base; |
1239 | struct drm_device *dev = intel_encoder->base.dev; | 1240 | struct drm_device *dev = intel_encoder->base.dev; |
1240 | struct drm_i915_private *dev_priv = dev->dev_private; | 1241 | struct drm_i915_private *dev_priv = dev->dev_private; |
1241 | enum port port = intel_dig_port->port; | 1242 | enum port port = intel_dig_port->port; |
1242 | 1243 | ||
1243 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, | 1244 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, |
1244 | DRM_MODE_CONNECTOR_HDMIA); | 1245 | DRM_MODE_CONNECTOR_HDMIA); |
1245 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); | 1246 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); |
1246 | 1247 | ||
1247 | connector->interlace_allowed = 1; | 1248 | connector->interlace_allowed = 1; |
1248 | connector->doublescan_allowed = 0; | 1249 | connector->doublescan_allowed = 0; |
1249 | connector->stereo_allowed = 1; | 1250 | connector->stereo_allowed = 1; |
1250 | 1251 | ||
1251 | switch (port) { | 1252 | switch (port) { |
1252 | case PORT_B: | 1253 | case PORT_B: |
1253 | intel_hdmi->ddc_bus = GMBUS_PORT_DPB; | 1254 | intel_hdmi->ddc_bus = GMBUS_PORT_DPB; |
1254 | intel_encoder->hpd_pin = HPD_PORT_B; | 1255 | intel_encoder->hpd_pin = HPD_PORT_B; |
1255 | break; | 1256 | break; |
1256 | case PORT_C: | 1257 | case PORT_C: |
1257 | intel_hdmi->ddc_bus = GMBUS_PORT_DPC; | 1258 | intel_hdmi->ddc_bus = GMBUS_PORT_DPC; |
1258 | intel_encoder->hpd_pin = HPD_PORT_C; | 1259 | intel_encoder->hpd_pin = HPD_PORT_C; |
1259 | break; | 1260 | break; |
1260 | case PORT_D: | 1261 | case PORT_D: |
1261 | intel_hdmi->ddc_bus = GMBUS_PORT_DPD; | 1262 | intel_hdmi->ddc_bus = GMBUS_PORT_DPD; |
1262 | intel_encoder->hpd_pin = HPD_PORT_D; | 1263 | intel_encoder->hpd_pin = HPD_PORT_D; |
1263 | break; | 1264 | break; |
1264 | case PORT_A: | 1265 | case PORT_A: |
1265 | intel_encoder->hpd_pin = HPD_PORT_A; | 1266 | intel_encoder->hpd_pin = HPD_PORT_A; |
1266 | /* Internal port only for eDP. */ | 1267 | /* Internal port only for eDP. */ |
1267 | default: | 1268 | default: |
1268 | BUG(); | 1269 | BUG(); |
1269 | } | 1270 | } |
1270 | 1271 | ||
1271 | if (IS_VALLEYVIEW(dev)) { | 1272 | if (IS_VALLEYVIEW(dev)) { |
1272 | intel_hdmi->write_infoframe = vlv_write_infoframe; | 1273 | intel_hdmi->write_infoframe = vlv_write_infoframe; |
1273 | intel_hdmi->set_infoframes = vlv_set_infoframes; | 1274 | intel_hdmi->set_infoframes = vlv_set_infoframes; |
1274 | } else if (!HAS_PCH_SPLIT(dev)) { | 1275 | } else if (!HAS_PCH_SPLIT(dev)) { |
1275 | intel_hdmi->write_infoframe = g4x_write_infoframe; | 1276 | intel_hdmi->write_infoframe = g4x_write_infoframe; |
1276 | intel_hdmi->set_infoframes = g4x_set_infoframes; | 1277 | intel_hdmi->set_infoframes = g4x_set_infoframes; |
1277 | } else if (HAS_DDI(dev)) { | 1278 | } else if (HAS_DDI(dev)) { |
1278 | intel_hdmi->write_infoframe = hsw_write_infoframe; | 1279 | intel_hdmi->write_infoframe = hsw_write_infoframe; |
1279 | intel_hdmi->set_infoframes = hsw_set_infoframes; | 1280 | intel_hdmi->set_infoframes = hsw_set_infoframes; |
1280 | } else if (HAS_PCH_IBX(dev)) { | 1281 | } else if (HAS_PCH_IBX(dev)) { |
1281 | intel_hdmi->write_infoframe = ibx_write_infoframe; | 1282 | intel_hdmi->write_infoframe = ibx_write_infoframe; |
1282 | intel_hdmi->set_infoframes = ibx_set_infoframes; | 1283 | intel_hdmi->set_infoframes = ibx_set_infoframes; |
1283 | } else { | 1284 | } else { |
1284 | intel_hdmi->write_infoframe = cpt_write_infoframe; | 1285 | intel_hdmi->write_infoframe = cpt_write_infoframe; |
1285 | intel_hdmi->set_infoframes = cpt_set_infoframes; | 1286 | intel_hdmi->set_infoframes = cpt_set_infoframes; |
1286 | } | 1287 | } |
1287 | 1288 | ||
1288 | if (HAS_DDI(dev)) | 1289 | if (HAS_DDI(dev)) |
1289 | intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; | 1290 | intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; |
1290 | else | 1291 | else |
1291 | intel_connector->get_hw_state = intel_connector_get_hw_state; | 1292 | intel_connector->get_hw_state = intel_connector_get_hw_state; |
1292 | intel_connector->unregister = intel_connector_unregister; | 1293 | intel_connector->unregister = intel_connector_unregister; |
1293 | 1294 | ||
1294 | intel_hdmi_add_properties(intel_hdmi, connector); | 1295 | intel_hdmi_add_properties(intel_hdmi, connector); |
1295 | 1296 | ||
1296 | intel_connector_attach_encoder(intel_connector, intel_encoder); | 1297 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
1297 | drm_sysfs_connector_add(connector); | 1298 | drm_sysfs_connector_add(connector); |
1298 | 1299 | ||
1299 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written | 1300 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written |
1300 | * 0xd. Failure to do so will result in spurious interrupts being | 1301 | * 0xd. Failure to do so will result in spurious interrupts being |
1301 | * generated on the port when a cable is not attached. | 1302 | * generated on the port when a cable is not attached. |
1302 | */ | 1303 | */ |
1303 | if (IS_G4X(dev) && !IS_GM45(dev)) { | 1304 | if (IS_G4X(dev) && !IS_GM45(dev)) { |
1304 | u32 temp = I915_READ(PEG_BAND_GAP_DATA); | 1305 | u32 temp = I915_READ(PEG_BAND_GAP_DATA); |
1305 | I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); | 1306 | I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); |
1306 | } | 1307 | } |
1307 | } | 1308 | } |
1308 | 1309 | ||
1309 | void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) | 1310 | void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) |
1310 | { | 1311 | { |
1311 | struct intel_digital_port *intel_dig_port; | 1312 | struct intel_digital_port *intel_dig_port; |
1312 | struct intel_encoder *intel_encoder; | 1313 | struct intel_encoder *intel_encoder; |
1313 | struct intel_connector *intel_connector; | 1314 | struct intel_connector *intel_connector; |
1314 | 1315 | ||
1315 | intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); | 1316 | intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); |
1316 | if (!intel_dig_port) | 1317 | if (!intel_dig_port) |
1317 | return; | 1318 | return; |
1318 | 1319 | ||
1319 | intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL); | 1320 | intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL); |
1320 | if (!intel_connector) { | 1321 | if (!intel_connector) { |
1321 | kfree(intel_dig_port); | 1322 | kfree(intel_dig_port); |
1322 | return; | 1323 | return; |
1323 | } | 1324 | } |
1324 | 1325 | ||
1325 | intel_encoder = &intel_dig_port->base; | 1326 | intel_encoder = &intel_dig_port->base; |
1326 | 1327 | ||
1327 | drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, | 1328 | drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, |
1328 | DRM_MODE_ENCODER_TMDS); | 1329 | DRM_MODE_ENCODER_TMDS); |
1329 | 1330 | ||
1330 | intel_encoder->compute_config = intel_hdmi_compute_config; | 1331 | intel_encoder->compute_config = intel_hdmi_compute_config; |
1331 | intel_encoder->mode_set = intel_hdmi_mode_set; | 1332 | intel_encoder->mode_set = intel_hdmi_mode_set; |
1332 | intel_encoder->disable = intel_disable_hdmi; | 1333 | intel_encoder->disable = intel_disable_hdmi; |
1333 | intel_encoder->get_hw_state = intel_hdmi_get_hw_state; | 1334 | intel_encoder->get_hw_state = intel_hdmi_get_hw_state; |
1334 | intel_encoder->get_config = intel_hdmi_get_config; | 1335 | intel_encoder->get_config = intel_hdmi_get_config; |
1335 | if (IS_VALLEYVIEW(dev)) { | 1336 | if (IS_VALLEYVIEW(dev)) { |
1336 | intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable; | 1337 | intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable; |
1337 | intel_encoder->pre_enable = vlv_hdmi_pre_enable; | 1338 | intel_encoder->pre_enable = vlv_hdmi_pre_enable; |
1338 | intel_encoder->enable = vlv_enable_hdmi; | 1339 | intel_encoder->enable = vlv_enable_hdmi; |
1339 | intel_encoder->post_disable = vlv_hdmi_post_disable; | 1340 | intel_encoder->post_disable = vlv_hdmi_post_disable; |
1340 | } else { | 1341 | } else { |
1341 | intel_encoder->enable = intel_enable_hdmi; | 1342 | intel_encoder->enable = intel_enable_hdmi; |
1342 | } | 1343 | } |
1343 | 1344 | ||
1344 | intel_encoder->type = INTEL_OUTPUT_HDMI; | 1345 | intel_encoder->type = INTEL_OUTPUT_HDMI; |
1345 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | 1346 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
1346 | intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG; | 1347 | intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG; |
1347 | /* | 1348 | /* |
1348 | * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems | 1349 | * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems |
1349 | * to work on real hardware. And since g4x can send infoframes to | 1350 | * to work on real hardware. And since g4x can send infoframes to |
1350 | * only one port anyway, nothing is lost by allowing it. | 1351 | * only one port anyway, nothing is lost by allowing it. |
1351 | */ | 1352 | */ |
1352 | if (IS_G4X(dev)) | 1353 | if (IS_G4X(dev)) |
1353 | intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI; | 1354 | intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI; |
1354 | 1355 | ||
1355 | intel_dig_port->port = port; | 1356 | intel_dig_port->port = port; |
1356 | intel_dig_port->hdmi.hdmi_reg = hdmi_reg; | 1357 | intel_dig_port->hdmi.hdmi_reg = hdmi_reg; |
1357 | intel_dig_port->dp.output_reg = 0; | 1358 | intel_dig_port->dp.output_reg = 0; |
1358 | 1359 | ||
1359 | intel_hdmi_init_connector(intel_dig_port, intel_connector); | 1360 | intel_hdmi_init_connector(intel_dig_port, intel_connector); |
1360 | } | 1361 | } |
1361 | 1362 |
drivers/gpu/drm/i915/intel_ringbuffer.c
1 | /* | 1 | /* |
2 | * Copyright © 2008-2010 Intel Corporation | 2 | * Copyright © 2008-2010 Intel Corporation |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation | 6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the | 8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: | 9 | * Software is furnished to do so, subject to the following conditions: |
10 | * | 10 | * |
11 | * The above copyright notice and this permission notice (including the next | 11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the | 12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. | 13 | * Software. |
14 | * | 14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
21 | * IN THE SOFTWARE. | 21 | * IN THE SOFTWARE. |
22 | * | 22 | * |
23 | * Authors: | 23 | * Authors: |
24 | * Eric Anholt <eric@anholt.net> | 24 | * Eric Anholt <eric@anholt.net> |
25 | * Zou Nan hai <nanhai.zou@intel.com> | 25 | * Zou Nan hai <nanhai.zou@intel.com> |
26 | * Xiang Hai hao<haihao.xiang@intel.com> | 26 | * Xiang Hai hao<haihao.xiang@intel.com> |
27 | * | 27 | * |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <drm/drmP.h> | 30 | #include <drm/drmP.h> |
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | #include <drm/i915_drm.h> | 32 | #include <drm/i915_drm.h> |
33 | #include "i915_trace.h" | 33 | #include "i915_trace.h" |
34 | #include "intel_drv.h" | 34 | #include "intel_drv.h" |
35 | 35 | ||
36 | static inline int ring_space(struct intel_ring_buffer *ring) | 36 | static inline int ring_space(struct intel_ring_buffer *ring) |
37 | { | 37 | { |
38 | int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); | 38 | int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); |
39 | if (space < 0) | 39 | if (space < 0) |
40 | space += ring->size; | 40 | space += ring->size; |
41 | return space; | 41 | return space; |
42 | } | 42 | } |
43 | 43 | ||
44 | void __intel_ring_advance(struct intel_ring_buffer *ring) | 44 | void __intel_ring_advance(struct intel_ring_buffer *ring) |
45 | { | 45 | { |
46 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 46 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
47 | 47 | ||
48 | ring->tail &= ring->size - 1; | 48 | ring->tail &= ring->size - 1; |
49 | if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring)) | 49 | if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring)) |
50 | return; | 50 | return; |
51 | ring->write_tail(ring, ring->tail); | 51 | ring->write_tail(ring, ring->tail); |
52 | } | 52 | } |
53 | 53 | ||
54 | static int | 54 | static int |
55 | gen2_render_ring_flush(struct intel_ring_buffer *ring, | 55 | gen2_render_ring_flush(struct intel_ring_buffer *ring, |
56 | u32 invalidate_domains, | 56 | u32 invalidate_domains, |
57 | u32 flush_domains) | 57 | u32 flush_domains) |
58 | { | 58 | { |
59 | u32 cmd; | 59 | u32 cmd; |
60 | int ret; | 60 | int ret; |
61 | 61 | ||
62 | cmd = MI_FLUSH; | 62 | cmd = MI_FLUSH; |
63 | if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0) | 63 | if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0) |
64 | cmd |= MI_NO_WRITE_FLUSH; | 64 | cmd |= MI_NO_WRITE_FLUSH; |
65 | 65 | ||
66 | if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) | 66 | if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) |
67 | cmd |= MI_READ_FLUSH; | 67 | cmd |= MI_READ_FLUSH; |
68 | 68 | ||
69 | ret = intel_ring_begin(ring, 2); | 69 | ret = intel_ring_begin(ring, 2); |
70 | if (ret) | 70 | if (ret) |
71 | return ret; | 71 | return ret; |
72 | 72 | ||
73 | intel_ring_emit(ring, cmd); | 73 | intel_ring_emit(ring, cmd); |
74 | intel_ring_emit(ring, MI_NOOP); | 74 | intel_ring_emit(ring, MI_NOOP); |
75 | intel_ring_advance(ring); | 75 | intel_ring_advance(ring); |
76 | 76 | ||
77 | return 0; | 77 | return 0; |
78 | } | 78 | } |
79 | 79 | ||
/*
 * Gen3/4/5 render flush: MI_FLUSH with per-domain modifier bits.
 * Returns 0 on success or the error from intel_ring_begin().
 */
static int
gen4_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	/* Start with writes suppressed; re-enable only if render is involved. */
	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	/* Indirect-state pointer invalidate, only available on g4x/ilk. */
	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
137 | 137 | ||
138 | /** | 138 | /** |
139 | * Emits a PIPE_CONTROL with a non-zero post-sync operation, for | 139 | * Emits a PIPE_CONTROL with a non-zero post-sync operation, for |
140 | * implementing two workarounds on gen6. From section 1.4.7.1 | 140 | * implementing two workarounds on gen6. From section 1.4.7.1 |
141 | * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1: | 141 | * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1: |
142 | * | 142 | * |
143 | * [DevSNB-C+{W/A}] Before any depth stall flush (including those | 143 | * [DevSNB-C+{W/A}] Before any depth stall flush (including those |
144 | * produced by non-pipelined state commands), software needs to first | 144 | * produced by non-pipelined state commands), software needs to first |
145 | * send a PIPE_CONTROL with no bits set except Post-Sync Operation != | 145 | * send a PIPE_CONTROL with no bits set except Post-Sync Operation != |
146 | * 0. | 146 | * 0. |
147 | * | 147 | * |
148 | * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable | 148 | * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable |
149 | * =1, a PIPE_CONTROL with any non-zero post-sync-op is required. | 149 | * =1, a PIPE_CONTROL with any non-zero post-sync-op is required. |
150 | * | 150 | * |
151 | * And the workaround for these two requires this workaround first: | 151 | * And the workaround for these two requires this workaround first: |
152 | * | 152 | * |
153 | * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent | 153 | * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent |
154 | * BEFORE the pipe-control with a post-sync op and no write-cache | 154 | * BEFORE the pipe-control with a post-sync op and no write-cache |
155 | * flushes. | 155 | * flushes. |
156 | * | 156 | * |
157 | * And this last workaround is tricky because of the requirements on | 157 | * And this last workaround is tricky because of the requirements on |
158 | * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM | 158 | * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM |
159 | * volume 2 part 1: | 159 | * volume 2 part 1: |
160 | * | 160 | * |
161 | * "1 of the following must also be set: | 161 | * "1 of the following must also be set: |
162 | * - Render Target Cache Flush Enable ([12] of DW1) | 162 | * - Render Target Cache Flush Enable ([12] of DW1) |
163 | * - Depth Cache Flush Enable ([0] of DW1) | 163 | * - Depth Cache Flush Enable ([0] of DW1) |
164 | * - Stall at Pixel Scoreboard ([1] of DW1) | 164 | * - Stall at Pixel Scoreboard ([1] of DW1) |
165 | * - Depth Stall ([13] of DW1) | 165 | * - Depth Stall ([13] of DW1) |
166 | * - Post-Sync Operation ([13] of DW1) | 166 | * - Post-Sync Operation ([13] of DW1) |
167 | * - Notify Enable ([8] of DW1)" | 167 | * - Notify Enable ([8] of DW1)" |
168 | * | 168 | * |
169 | * The cache flushes require the workaround flush that triggered this | 169 | * The cache flushes require the workaround flush that triggered this |
170 | * one, so we can't use it. Depth stall would trigger the same. | 170 | * one, so we can't use it. Depth stall would trigger the same. |
171 | * Post-sync nonzero is what triggered this second workaround, so we | 171 | * Post-sync nonzero is what triggered this second workaround, so we |
172 | * can't use that one either. Notify enable is IRQs, which aren't | 172 | * can't use that one either. Notify enable is IRQs, which aren't |
173 | * really our business. That leaves only stall at scoreboard. | 173 | * really our business. That leaves only stall at scoreboard. |
174 | */ | 174 | */ |
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
	/* Target of the dummy post-sync write; +128 presumably keeps clear
	 * of other scratch-page users (gen7_ring_fbc_flush uses +256) —
	 * TODO confirm the scratch-page layout. */
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;


	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* First PIPE_CONTROL: CS stall at pixel scoreboard, no write
	 * (the [Dev-SNB{W/A}] prerequisite described above). */
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* Second PIPE_CONTROL: the actual non-zero post-sync op (QW write
	 * to scratch space), satisfying the workaround proper. */
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
209 | 209 | ||
/*
 * Gen6 (SNB) render flush via PIPE_CONTROL.  Emits the SNB post-sync
 * workaround sequence first, then a single PIPE_CONTROL carrying the
 * flush/invalidate bits.  Returns 0 or the error from ring reservation.
 */
static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	/* Scratch-page slot used for the mandatory post-sync write. */
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}
261 | 261 | ||
262 | static int | 262 | static int |
263 | gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) | 263 | gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) |
264 | { | 264 | { |
265 | int ret; | 265 | int ret; |
266 | 266 | ||
267 | ret = intel_ring_begin(ring, 4); | 267 | ret = intel_ring_begin(ring, 4); |
268 | if (ret) | 268 | if (ret) |
269 | return ret; | 269 | return ret; |
270 | 270 | ||
271 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); | 271 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); |
272 | intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | | 272 | intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | |
273 | PIPE_CONTROL_STALL_AT_SCOREBOARD); | 273 | PIPE_CONTROL_STALL_AT_SCOREBOARD); |
274 | intel_ring_emit(ring, 0); | 274 | intel_ring_emit(ring, 0); |
275 | intel_ring_emit(ring, 0); | 275 | intel_ring_emit(ring, 0); |
276 | intel_ring_advance(ring); | 276 | intel_ring_advance(ring); |
277 | 277 | ||
278 | return 0; | 278 | return 0; |
279 | } | 279 | } |
280 | 280 | ||
/*
 * Nuke/update FBC render-tracking state from the ring when rendering has
 * dirtied it (WaFbcNukeOn3DBlt:ivb/hsw).  Writes 'value' to
 * MSG_FBC_REND_STATE via LRI, then stores the register back to scratch
 * space.  No-op when fbc_dirty is clear.
 */
static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
{
	int ret;

	if (!ring->fbc_dirty)
		return 0;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;
	/* WaFbcNukeOn3DBlt:ivb/hsw */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, MSG_FBC_REND_STATE);
	intel_ring_emit(ring, value);
	/* Read the register back into the scratch page — presumably to
	 * serialise the LRI write above; TODO confirm against the PRM. */
	intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
	intel_ring_emit(ring, MSG_FBC_REND_STATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
	intel_ring_advance(ring);

	ring->fbc_dirty = false;
	return 0;
}
303 | 303 | ||
304 | static int | 304 | static int |
305 | gen7_render_ring_flush(struct intel_ring_buffer *ring, | 305 | gen7_render_ring_flush(struct intel_ring_buffer *ring, |
306 | u32 invalidate_domains, u32 flush_domains) | 306 | u32 invalidate_domains, u32 flush_domains) |
307 | { | 307 | { |
308 | u32 flags = 0; | 308 | u32 flags = 0; |
309 | u32 scratch_addr = ring->scratch.gtt_offset + 128; | 309 | u32 scratch_addr = ring->scratch.gtt_offset + 128; |
310 | int ret; | 310 | int ret; |
311 | 311 | ||
312 | /* | 312 | /* |
313 | * Ensure that any following seqno writes only happen when the render | 313 | * Ensure that any following seqno writes only happen when the render |
314 | * cache is indeed flushed. | 314 | * cache is indeed flushed. |
315 | * | 315 | * |
316 | * Workaround: 4th PIPE_CONTROL command (except the ones with only | 316 | * Workaround: 4th PIPE_CONTROL command (except the ones with only |
317 | * read-cache invalidate bits set) must have the CS_STALL bit set. We | 317 | * read-cache invalidate bits set) must have the CS_STALL bit set. We |
318 | * don't try to be clever and just set it unconditionally. | 318 | * don't try to be clever and just set it unconditionally. |
319 | */ | 319 | */ |
320 | flags |= PIPE_CONTROL_CS_STALL; | 320 | flags |= PIPE_CONTROL_CS_STALL; |
321 | 321 | ||
322 | /* Just flush everything. Experiments have shown that reducing the | 322 | /* Just flush everything. Experiments have shown that reducing the |
323 | * number of bits based on the write domains has little performance | 323 | * number of bits based on the write domains has little performance |
324 | * impact. | 324 | * impact. |
325 | */ | 325 | */ |
326 | if (flush_domains) { | 326 | if (flush_domains) { |
327 | flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; | 327 | flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; |
328 | flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; | 328 | flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; |
329 | } | 329 | } |
330 | if (invalidate_domains) { | 330 | if (invalidate_domains) { |
331 | flags |= PIPE_CONTROL_TLB_INVALIDATE; | 331 | flags |= PIPE_CONTROL_TLB_INVALIDATE; |
332 | flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; | 332 | flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; |
333 | flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; | 333 | flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; |
334 | flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; | 334 | flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; |
335 | flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; | 335 | flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; |
336 | flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; | 336 | flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; |
337 | /* | 337 | /* |
338 | * TLB invalidate requires a post-sync write. | 338 | * TLB invalidate requires a post-sync write. |
339 | */ | 339 | */ |
340 | flags |= PIPE_CONTROL_QW_WRITE; | 340 | flags |= PIPE_CONTROL_QW_WRITE; |
341 | flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; | 341 | flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; |
342 | 342 | ||
343 | /* Workaround: we must issue a pipe_control with CS-stall bit | 343 | /* Workaround: we must issue a pipe_control with CS-stall bit |
344 | * set before a pipe_control command that has the state cache | 344 | * set before a pipe_control command that has the state cache |
345 | * invalidate bit set. */ | 345 | * invalidate bit set. */ |
346 | gen7_render_ring_cs_stall_wa(ring); | 346 | gen7_render_ring_cs_stall_wa(ring); |
347 | } | 347 | } |
348 | 348 | ||
349 | ret = intel_ring_begin(ring, 4); | 349 | ret = intel_ring_begin(ring, 4); |
350 | if (ret) | 350 | if (ret) |
351 | return ret; | 351 | return ret; |
352 | 352 | ||
353 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); | 353 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); |
354 | intel_ring_emit(ring, flags); | 354 | intel_ring_emit(ring, flags); |
355 | intel_ring_emit(ring, scratch_addr); | 355 | intel_ring_emit(ring, scratch_addr); |
356 | intel_ring_emit(ring, 0); | 356 | intel_ring_emit(ring, 0); |
357 | intel_ring_advance(ring); | 357 | intel_ring_advance(ring); |
358 | 358 | ||
359 | if (!invalidate_domains && flush_domains) | 359 | if (!invalidate_domains && flush_domains) |
360 | return gen7_ring_fbc_flush(ring, FBC_REND_NUKE); | 360 | return gen7_ring_fbc_flush(ring, FBC_REND_NUKE); |
361 | 361 | ||
362 | return 0; | 362 | return 0; |
363 | } | 363 | } |
364 | 364 | ||
/*
 * Gen8 (BDW) render flush: a single 6-dword PIPE_CONTROL carrying the
 * flush/invalidate bits, with CS_STALL always set.  Returns 0 or the
 * error from intel_ring_begin().
 */
static int
gen8_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	/* Scratch-page slot used as the post-sync write target. */
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/* TLB invalidate requires a post-sync write. */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
	}

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;

}
405 | 405 | ||
406 | static void ring_write_tail(struct intel_ring_buffer *ring, | 406 | static void ring_write_tail(struct intel_ring_buffer *ring, |
407 | u32 value) | 407 | u32 value) |
408 | { | 408 | { |
409 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 409 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
410 | I915_WRITE_TAIL(ring, value); | 410 | I915_WRITE_TAIL(ring, value); |
411 | } | 411 | } |
412 | 412 | ||
413 | u64 intel_ring_get_active_head(struct intel_ring_buffer *ring) | 413 | u64 intel_ring_get_active_head(struct intel_ring_buffer *ring) |
414 | { | 414 | { |
415 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 415 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
416 | u64 acthd; | 416 | u64 acthd; |
417 | 417 | ||
418 | if (INTEL_INFO(ring->dev)->gen >= 8) | 418 | if (INTEL_INFO(ring->dev)->gen >= 8) |
419 | acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base), | 419 | acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base), |
420 | RING_ACTHD_UDW(ring->mmio_base)); | 420 | RING_ACTHD_UDW(ring->mmio_base)); |
421 | else if (INTEL_INFO(ring->dev)->gen >= 4) | 421 | else if (INTEL_INFO(ring->dev)->gen >= 4) |
422 | acthd = I915_READ(RING_ACTHD(ring->mmio_base)); | 422 | acthd = I915_READ(RING_ACTHD(ring->mmio_base)); |
423 | else | 423 | else |
424 | acthd = I915_READ(ACTHD); | 424 | acthd = I915_READ(ACTHD); |
425 | 425 | ||
426 | return acthd; | 426 | return acthd; |
427 | } | 427 | } |
428 | 428 | ||
429 | static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) | 429 | static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) |
430 | { | 430 | { |
431 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 431 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
432 | u32 addr; | 432 | u32 addr; |
433 | 433 | ||
434 | addr = dev_priv->status_page_dmah->busaddr; | 434 | addr = dev_priv->status_page_dmah->busaddr; |
435 | if (INTEL_INFO(ring->dev)->gen >= 4) | 435 | if (INTEL_INFO(ring->dev)->gen >= 4) |
436 | addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; | 436 | addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; |
437 | I915_WRITE(HWS_PGA, addr); | 437 | I915_WRITE(HWS_PGA, addr); |
438 | } | 438 | } |
439 | 439 | ||
440 | static int init_ring_common(struct intel_ring_buffer *ring) | 440 | static bool stop_ring(struct intel_ring_buffer *ring) |
441 | { | 441 | { |
442 | struct drm_device *dev = ring->dev; | 442 | struct drm_i915_private *dev_priv = to_i915(ring->dev); |
443 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
444 | struct drm_i915_gem_object *obj = ring->obj; | ||
445 | int ret = 0; | ||
446 | u32 head; | ||
447 | 443 | ||
448 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); | 444 | if (!IS_GEN2(ring->dev)) { |
445 | I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); | ||
446 | if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { | ||
447 | DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); | ||
448 | return false; | ||
449 | } | ||
450 | } | ||
449 | 451 | ||
450 | /* Stop the ring if it's running. */ | ||
451 | I915_WRITE_CTL(ring, 0); | 452 | I915_WRITE_CTL(ring, 0); |
452 | I915_WRITE_HEAD(ring, 0); | 453 | I915_WRITE_HEAD(ring, 0); |
453 | ring->write_tail(ring, 0); | 454 | ring->write_tail(ring, 0); |
454 | if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) | ||
455 | DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); | ||
456 | 455 | ||
457 | if (I915_NEED_GFX_HWS(dev)) | 456 | if (!IS_GEN2(ring->dev)) { |
458 | intel_ring_setup_status_page(ring); | 457 | (void)I915_READ_CTL(ring); |
459 | else | 458 | I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); |
460 | ring_setup_phys_status_page(ring); | 459 | } |
461 | 460 | ||
462 | head = I915_READ_HEAD(ring) & HEAD_ADDR; | 461 | return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0; |
462 | } | ||
463 | 463 | ||
464 | /* G45 ring initialization fails to reset head to zero */ | 464 | static int init_ring_common(struct intel_ring_buffer *ring) |
465 | if (head != 0) { | 465 | { |
466 | struct drm_device *dev = ring->dev; | ||
467 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
468 | struct drm_i915_gem_object *obj = ring->obj; | ||
469 | int ret = 0; | ||
470 | |||
471 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); | ||
472 | |||
473 | if (!stop_ring(ring)) { | ||
474 | /* G45 ring initialization often fails to reset head to zero */ | ||
466 | DRM_DEBUG_KMS("%s head not reset to zero " | 475 | DRM_DEBUG_KMS("%s head not reset to zero " |
467 | "ctl %08x head %08x tail %08x start %08x\n", | 476 | "ctl %08x head %08x tail %08x start %08x\n", |
468 | ring->name, | 477 | ring->name, |
469 | I915_READ_CTL(ring), | 478 | I915_READ_CTL(ring), |
470 | I915_READ_HEAD(ring), | 479 | I915_READ_HEAD(ring), |
471 | I915_READ_TAIL(ring), | 480 | I915_READ_TAIL(ring), |
472 | I915_READ_START(ring)); | 481 | I915_READ_START(ring)); |
473 | 482 | ||
474 | I915_WRITE_HEAD(ring, 0); | 483 | if (!stop_ring(ring)) { |
475 | |||
476 | if (I915_READ_HEAD(ring) & HEAD_ADDR) { | ||
477 | DRM_ERROR("failed to set %s head to zero " | 484 | DRM_ERROR("failed to set %s head to zero " |
478 | "ctl %08x head %08x tail %08x start %08x\n", | 485 | "ctl %08x head %08x tail %08x start %08x\n", |
479 | ring->name, | 486 | ring->name, |
480 | I915_READ_CTL(ring), | 487 | I915_READ_CTL(ring), |
481 | I915_READ_HEAD(ring), | 488 | I915_READ_HEAD(ring), |
482 | I915_READ_TAIL(ring), | 489 | I915_READ_TAIL(ring), |
483 | I915_READ_START(ring)); | 490 | I915_READ_START(ring)); |
491 | ret = -EIO; | ||
492 | goto out; | ||
484 | } | 493 | } |
485 | } | 494 | } |
495 | |||
496 | if (I915_NEED_GFX_HWS(dev)) | ||
497 | intel_ring_setup_status_page(ring); | ||
498 | else | ||
499 | ring_setup_phys_status_page(ring); | ||
486 | 500 | ||
487 | /* Initialize the ring. This must happen _after_ we've cleared the ring | 501 | /* Initialize the ring. This must happen _after_ we've cleared the ring |
488 | * registers with the above sequence (the readback of the HEAD registers | 502 | * registers with the above sequence (the readback of the HEAD registers |
489 | * also enforces ordering), otherwise the hw might lose the new ring | 503 | * also enforces ordering), otherwise the hw might lose the new ring |
490 | * register values. */ | 504 | * register values. */ |
491 | I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj)); | 505 | I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj)); |
492 | I915_WRITE_CTL(ring, | 506 | I915_WRITE_CTL(ring, |
493 | ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | 507 | ((ring->size - PAGE_SIZE) & RING_NR_PAGES) |
494 | | RING_VALID); | 508 | | RING_VALID); |
495 | 509 | ||
496 | /* If the head is still not zero, the ring is dead */ | 510 | /* If the head is still not zero, the ring is dead */ |
497 | if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && | 511 | if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && |
498 | I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) && | 512 | I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) && |
499 | (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { | 513 | (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { |
500 | DRM_ERROR("%s initialization failed " | 514 | DRM_ERROR("%s initialization failed " |
501 | "ctl %08x head %08x tail %08x start %08x\n", | 515 | "ctl %08x head %08x tail %08x start %08x\n", |
502 | ring->name, | 516 | ring->name, |
503 | I915_READ_CTL(ring), | 517 | I915_READ_CTL(ring), |
504 | I915_READ_HEAD(ring), | 518 | I915_READ_HEAD(ring), |
505 | I915_READ_TAIL(ring), | 519 | I915_READ_TAIL(ring), |
506 | I915_READ_START(ring)); | 520 | I915_READ_START(ring)); |
507 | ret = -EIO; | 521 | ret = -EIO; |
508 | goto out; | 522 | goto out; |
509 | } | 523 | } |
510 | 524 | ||
511 | if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) | 525 | if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) |
512 | i915_kernel_lost_context(ring->dev); | 526 | i915_kernel_lost_context(ring->dev); |
513 | else { | 527 | else { |
514 | ring->head = I915_READ_HEAD(ring); | 528 | ring->head = I915_READ_HEAD(ring); |
515 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; | 529 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; |
516 | ring->space = ring_space(ring); | 530 | ring->space = ring_space(ring); |
517 | ring->last_retired_head = -1; | 531 | ring->last_retired_head = -1; |
518 | } | 532 | } |
519 | 533 | ||
520 | memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); | 534 | memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); |
521 | 535 | ||
522 | out: | 536 | out: |
523 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); | 537 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); |
524 | 538 | ||
525 | return ret; | 539 | return ret; |
526 | } | 540 | } |
527 | 541 | ||
528 | static int | 542 | static int |
529 | init_pipe_control(struct intel_ring_buffer *ring) | 543 | init_pipe_control(struct intel_ring_buffer *ring) |
530 | { | 544 | { |
531 | int ret; | 545 | int ret; |
532 | 546 | ||
533 | if (ring->scratch.obj) | 547 | if (ring->scratch.obj) |
534 | return 0; | 548 | return 0; |
535 | 549 | ||
536 | ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096); | 550 | ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096); |
537 | if (ring->scratch.obj == NULL) { | 551 | if (ring->scratch.obj == NULL) { |
538 | DRM_ERROR("Failed to allocate seqno page\n"); | 552 | DRM_ERROR("Failed to allocate seqno page\n"); |
539 | ret = -ENOMEM; | 553 | ret = -ENOMEM; |
540 | goto err; | 554 | goto err; |
541 | } | 555 | } |
542 | 556 | ||
543 | ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); | 557 | ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); |
544 | if (ret) | 558 | if (ret) |
545 | goto err_unref; | 559 | goto err_unref; |
546 | 560 | ||
547 | ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0); | 561 | ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0); |
548 | if (ret) | 562 | if (ret) |
549 | goto err_unref; | 563 | goto err_unref; |
550 | 564 | ||
551 | ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj); | 565 | ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj); |
552 | ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl)); | 566 | ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl)); |
553 | if (ring->scratch.cpu_page == NULL) { | 567 | if (ring->scratch.cpu_page == NULL) { |
554 | ret = -ENOMEM; | 568 | ret = -ENOMEM; |
555 | goto err_unpin; | 569 | goto err_unpin; |
556 | } | 570 | } |
557 | 571 | ||
558 | DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", | 572 | DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", |
559 | ring->name, ring->scratch.gtt_offset); | 573 | ring->name, ring->scratch.gtt_offset); |
560 | return 0; | 574 | return 0; |
561 | 575 | ||
562 | err_unpin: | 576 | err_unpin: |
563 | i915_gem_object_ggtt_unpin(ring->scratch.obj); | 577 | i915_gem_object_ggtt_unpin(ring->scratch.obj); |
564 | err_unref: | 578 | err_unref: |
565 | drm_gem_object_unreference(&ring->scratch.obj->base); | 579 | drm_gem_object_unreference(&ring->scratch.obj->base); |
566 | err: | 580 | err: |
567 | return ret; | 581 | return ret; |
568 | } | 582 | } |
569 | 583 | ||
570 | static int init_render_ring(struct intel_ring_buffer *ring) | 584 | static int init_render_ring(struct intel_ring_buffer *ring) |
571 | { | 585 | { |
572 | struct drm_device *dev = ring->dev; | 586 | struct drm_device *dev = ring->dev; |
573 | struct drm_i915_private *dev_priv = dev->dev_private; | 587 | struct drm_i915_private *dev_priv = dev->dev_private; |
574 | int ret = init_ring_common(ring); | 588 | int ret = init_ring_common(ring); |
575 | 589 | ||
576 | /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ | 590 | /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ |
577 | if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) | 591 | if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) |
578 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); | 592 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); |
579 | 593 | ||
580 | /* We need to disable the AsyncFlip performance optimisations in order | 594 | /* We need to disable the AsyncFlip performance optimisations in order |
581 | * to use MI_WAIT_FOR_EVENT within the CS. It should already be | 595 | * to use MI_WAIT_FOR_EVENT within the CS. It should already be |
582 | * programmed to '1' on all products. | 596 | * programmed to '1' on all products. |
583 | * | 597 | * |
584 | * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw | 598 | * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw |
585 | */ | 599 | */ |
586 | if (INTEL_INFO(dev)->gen >= 6) | 600 | if (INTEL_INFO(dev)->gen >= 6) |
587 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); | 601 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); |
588 | 602 | ||
589 | /* Required for the hardware to program scanline values for waiting */ | 603 | /* Required for the hardware to program scanline values for waiting */ |
590 | if (INTEL_INFO(dev)->gen == 6) | 604 | if (INTEL_INFO(dev)->gen == 6) |
591 | I915_WRITE(GFX_MODE, | 605 | I915_WRITE(GFX_MODE, |
592 | _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS)); | 606 | _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS)); |
593 | 607 | ||
594 | if (IS_GEN7(dev)) | 608 | if (IS_GEN7(dev)) |
595 | I915_WRITE(GFX_MODE_GEN7, | 609 | I915_WRITE(GFX_MODE_GEN7, |
596 | _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | | 610 | _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | |
597 | _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); | 611 | _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); |
598 | 612 | ||
599 | if (INTEL_INFO(dev)->gen >= 5) { | 613 | if (INTEL_INFO(dev)->gen >= 5) { |
600 | ret = init_pipe_control(ring); | 614 | ret = init_pipe_control(ring); |
601 | if (ret) | 615 | if (ret) |
602 | return ret; | 616 | return ret; |
603 | } | 617 | } |
604 | 618 | ||
605 | if (IS_GEN6(dev)) { | 619 | if (IS_GEN6(dev)) { |
606 | /* From the Sandybridge PRM, volume 1 part 3, page 24: | 620 | /* From the Sandybridge PRM, volume 1 part 3, page 24: |
607 | * "If this bit is set, STCunit will have LRA as replacement | 621 | * "If this bit is set, STCunit will have LRA as replacement |
608 | * policy. [...] This bit must be reset. LRA replacement | 622 | * policy. [...] This bit must be reset. LRA replacement |
609 | * policy is not supported." | 623 | * policy is not supported." |
610 | */ | 624 | */ |
611 | I915_WRITE(CACHE_MODE_0, | 625 | I915_WRITE(CACHE_MODE_0, |
612 | _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); | 626 | _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); |
613 | 627 | ||
614 | /* This is not explicitly set for GEN6, so read the register. | 628 | /* This is not explicitly set for GEN6, so read the register. |
615 | * see intel_ring_mi_set_context() for why we care. | 629 | * see intel_ring_mi_set_context() for why we care. |
616 | * TODO: consider explicitly setting the bit for GEN5 | 630 | * TODO: consider explicitly setting the bit for GEN5 |
617 | */ | 631 | */ |
618 | ring->itlb_before_ctx_switch = | 632 | ring->itlb_before_ctx_switch = |
619 | !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS); | 633 | !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS); |
620 | } | 634 | } |
621 | 635 | ||
622 | if (INTEL_INFO(dev)->gen >= 6) | 636 | if (INTEL_INFO(dev)->gen >= 6) |
623 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); | 637 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); |
624 | 638 | ||
625 | if (HAS_L3_DPF(dev)) | 639 | if (HAS_L3_DPF(dev)) |
626 | I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); | 640 | I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); |
627 | 641 | ||
628 | return ret; | 642 | return ret; |
629 | } | 643 | } |
630 | 644 | ||
631 | static void render_ring_cleanup(struct intel_ring_buffer *ring) | 645 | static void render_ring_cleanup(struct intel_ring_buffer *ring) |
632 | { | 646 | { |
633 | struct drm_device *dev = ring->dev; | 647 | struct drm_device *dev = ring->dev; |
634 | 648 | ||
635 | if (ring->scratch.obj == NULL) | 649 | if (ring->scratch.obj == NULL) |
636 | return; | 650 | return; |
637 | 651 | ||
638 | if (INTEL_INFO(dev)->gen >= 5) { | 652 | if (INTEL_INFO(dev)->gen >= 5) { |
639 | kunmap(sg_page(ring->scratch.obj->pages->sgl)); | 653 | kunmap(sg_page(ring->scratch.obj->pages->sgl)); |
640 | i915_gem_object_ggtt_unpin(ring->scratch.obj); | 654 | i915_gem_object_ggtt_unpin(ring->scratch.obj); |
641 | } | 655 | } |
642 | 656 | ||
643 | drm_gem_object_unreference(&ring->scratch.obj->base); | 657 | drm_gem_object_unreference(&ring->scratch.obj->base); |
644 | ring->scratch.obj = NULL; | 658 | ring->scratch.obj = NULL; |
645 | } | 659 | } |
646 | 660 | ||
647 | static void | 661 | static void |
648 | update_mboxes(struct intel_ring_buffer *ring, | 662 | update_mboxes(struct intel_ring_buffer *ring, |
649 | u32 mmio_offset) | 663 | u32 mmio_offset) |
650 | { | 664 | { |
651 | /* NB: In order to be able to do semaphore MBOX updates for varying number | 665 | /* NB: In order to be able to do semaphore MBOX updates for varying number |
652 | * of rings, it's easiest if we round up each individual update to a | 666 | * of rings, it's easiest if we round up each individual update to a |
653 | * multiple of 2 (since ring updates must always be a multiple of 2) | 667 | * multiple of 2 (since ring updates must always be a multiple of 2) |
654 | * even though the actual update only requires 3 dwords. | 668 | * even though the actual update only requires 3 dwords. |
655 | */ | 669 | */ |
656 | #define MBOX_UPDATE_DWORDS 4 | 670 | #define MBOX_UPDATE_DWORDS 4 |
657 | intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); | 671 | intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); |
658 | intel_ring_emit(ring, mmio_offset); | 672 | intel_ring_emit(ring, mmio_offset); |
659 | intel_ring_emit(ring, ring->outstanding_lazy_seqno); | 673 | intel_ring_emit(ring, ring->outstanding_lazy_seqno); |
660 | intel_ring_emit(ring, MI_NOOP); | 674 | intel_ring_emit(ring, MI_NOOP); |
661 | } | 675 | } |
662 | 676 | ||
663 | /** | 677 | /** |
664 | * gen6_add_request - Update the semaphore mailbox registers | 678 | * gen6_add_request - Update the semaphore mailbox registers |
665 | * | 679 | * |
666 | * @ring - ring that is adding a request | 680 | * @ring - ring that is adding a request |
667 | * @seqno - return seqno stuck into the ring | 681 | * @seqno - return seqno stuck into the ring |
668 | * | 682 | * |
669 | * Update the mailbox registers in the *other* rings with the current seqno. | 683 | * Update the mailbox registers in the *other* rings with the current seqno. |
670 | * This acts like a signal in the canonical semaphore. | 684 | * This acts like a signal in the canonical semaphore. |
671 | */ | 685 | */ |
672 | static int | 686 | static int |
673 | gen6_add_request(struct intel_ring_buffer *ring) | 687 | gen6_add_request(struct intel_ring_buffer *ring) |
674 | { | 688 | { |
675 | struct drm_device *dev = ring->dev; | 689 | struct drm_device *dev = ring->dev; |
676 | struct drm_i915_private *dev_priv = dev->dev_private; | 690 | struct drm_i915_private *dev_priv = dev->dev_private; |
677 | struct intel_ring_buffer *useless; | 691 | struct intel_ring_buffer *useless; |
678 | int i, ret, num_dwords = 4; | 692 | int i, ret, num_dwords = 4; |
679 | 693 | ||
680 | if (i915_semaphore_is_enabled(dev)) | 694 | if (i915_semaphore_is_enabled(dev)) |
681 | num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS); | 695 | num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS); |
682 | #undef MBOX_UPDATE_DWORDS | 696 | #undef MBOX_UPDATE_DWORDS |
683 | 697 | ||
684 | ret = intel_ring_begin(ring, num_dwords); | 698 | ret = intel_ring_begin(ring, num_dwords); |
685 | if (ret) | 699 | if (ret) |
686 | return ret; | 700 | return ret; |
687 | 701 | ||
688 | if (i915_semaphore_is_enabled(dev)) { | 702 | if (i915_semaphore_is_enabled(dev)) { |
689 | for_each_ring(useless, dev_priv, i) { | 703 | for_each_ring(useless, dev_priv, i) { |
690 | u32 mbox_reg = ring->signal_mbox[i]; | 704 | u32 mbox_reg = ring->signal_mbox[i]; |
691 | if (mbox_reg != GEN6_NOSYNC) | 705 | if (mbox_reg != GEN6_NOSYNC) |
692 | update_mboxes(ring, mbox_reg); | 706 | update_mboxes(ring, mbox_reg); |
693 | } | 707 | } |
694 | } | 708 | } |
695 | 709 | ||
696 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | 710 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); |
697 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 711 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
698 | intel_ring_emit(ring, ring->outstanding_lazy_seqno); | 712 | intel_ring_emit(ring, ring->outstanding_lazy_seqno); |
699 | intel_ring_emit(ring, MI_USER_INTERRUPT); | 713 | intel_ring_emit(ring, MI_USER_INTERRUPT); |
700 | __intel_ring_advance(ring); | 714 | __intel_ring_advance(ring); |
701 | 715 | ||
702 | return 0; | 716 | return 0; |
703 | } | 717 | } |
704 | 718 | ||
705 | static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, | 719 | static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, |
706 | u32 seqno) | 720 | u32 seqno) |
707 | { | 721 | { |
708 | struct drm_i915_private *dev_priv = dev->dev_private; | 722 | struct drm_i915_private *dev_priv = dev->dev_private; |
709 | return dev_priv->last_seqno < seqno; | 723 | return dev_priv->last_seqno < seqno; |
710 | } | 724 | } |
711 | 725 | ||
712 | /** | 726 | /** |
713 | * intel_ring_sync - sync the waiter to the signaller on seqno | 727 | * intel_ring_sync - sync the waiter to the signaller on seqno |
714 | * | 728 | * |
715 | * @waiter - ring that is waiting | 729 | * @waiter - ring that is waiting |
716 | * @signaller - ring which has, or will signal | 730 | * @signaller - ring which has, or will signal |
717 | * @seqno - seqno which the waiter will block on | 731 | * @seqno - seqno which the waiter will block on |
718 | */ | 732 | */ |
719 | static int | 733 | static int |
720 | gen6_ring_sync(struct intel_ring_buffer *waiter, | 734 | gen6_ring_sync(struct intel_ring_buffer *waiter, |
721 | struct intel_ring_buffer *signaller, | 735 | struct intel_ring_buffer *signaller, |
722 | u32 seqno) | 736 | u32 seqno) |
723 | { | 737 | { |
724 | int ret; | 738 | int ret; |
725 | u32 dw1 = MI_SEMAPHORE_MBOX | | 739 | u32 dw1 = MI_SEMAPHORE_MBOX | |
726 | MI_SEMAPHORE_COMPARE | | 740 | MI_SEMAPHORE_COMPARE | |
727 | MI_SEMAPHORE_REGISTER; | 741 | MI_SEMAPHORE_REGISTER; |
728 | 742 | ||
729 | /* Throughout all of the GEM code, seqno passed implies our current | 743 | /* Throughout all of the GEM code, seqno passed implies our current |
730 | * seqno is >= the last seqno executed. However for hardware the | 744 | * seqno is >= the last seqno executed. However for hardware the |
731 | * comparison is strictly greater than. | 745 | * comparison is strictly greater than. |
732 | */ | 746 | */ |
733 | seqno -= 1; | 747 | seqno -= 1; |
734 | 748 | ||
735 | WARN_ON(signaller->semaphore_register[waiter->id] == | 749 | WARN_ON(signaller->semaphore_register[waiter->id] == |
736 | MI_SEMAPHORE_SYNC_INVALID); | 750 | MI_SEMAPHORE_SYNC_INVALID); |
737 | 751 | ||
738 | ret = intel_ring_begin(waiter, 4); | 752 | ret = intel_ring_begin(waiter, 4); |
739 | if (ret) | 753 | if (ret) |
740 | return ret; | 754 | return ret; |
741 | 755 | ||
742 | /* If seqno wrap happened, omit the wait with no-ops */ | 756 | /* If seqno wrap happened, omit the wait with no-ops */ |
743 | if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { | 757 | if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { |
744 | intel_ring_emit(waiter, | 758 | intel_ring_emit(waiter, |
745 | dw1 | | 759 | dw1 | |
746 | signaller->semaphore_register[waiter->id]); | 760 | signaller->semaphore_register[waiter->id]); |
747 | intel_ring_emit(waiter, seqno); | 761 | intel_ring_emit(waiter, seqno); |
748 | intel_ring_emit(waiter, 0); | 762 | intel_ring_emit(waiter, 0); |
749 | intel_ring_emit(waiter, MI_NOOP); | 763 | intel_ring_emit(waiter, MI_NOOP); |
750 | } else { | 764 | } else { |
751 | intel_ring_emit(waiter, MI_NOOP); | 765 | intel_ring_emit(waiter, MI_NOOP); |
752 | intel_ring_emit(waiter, MI_NOOP); | 766 | intel_ring_emit(waiter, MI_NOOP); |
753 | intel_ring_emit(waiter, MI_NOOP); | 767 | intel_ring_emit(waiter, MI_NOOP); |
754 | intel_ring_emit(waiter, MI_NOOP); | 768 | intel_ring_emit(waiter, MI_NOOP); |
755 | } | 769 | } |
756 | intel_ring_advance(waiter); | 770 | intel_ring_advance(waiter); |
757 | 771 | ||
758 | return 0; | 772 | return 0; |
759 | } | 773 | } |
760 | 774 | ||
761 | #define PIPE_CONTROL_FLUSH(ring__, addr__) \ | 775 | #define PIPE_CONTROL_FLUSH(ring__, addr__) \ |
762 | do { \ | 776 | do { \ |
763 | intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ | 777 | intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ |
764 | PIPE_CONTROL_DEPTH_STALL); \ | 778 | PIPE_CONTROL_DEPTH_STALL); \ |
765 | intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ | 779 | intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ |
766 | intel_ring_emit(ring__, 0); \ | 780 | intel_ring_emit(ring__, 0); \ |
767 | intel_ring_emit(ring__, 0); \ | 781 | intel_ring_emit(ring__, 0); \ |
768 | } while (0) | 782 | } while (0) |
769 | 783 | ||
770 | static int | 784 | static int |
771 | pc_render_add_request(struct intel_ring_buffer *ring) | 785 | pc_render_add_request(struct intel_ring_buffer *ring) |
772 | { | 786 | { |
773 | u32 scratch_addr = ring->scratch.gtt_offset + 128; | 787 | u32 scratch_addr = ring->scratch.gtt_offset + 128; |
774 | int ret; | 788 | int ret; |
775 | 789 | ||
776 | /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently | 790 | /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently |
777 | * incoherent with writes to memory, i.e. completely fubar, | 791 | * incoherent with writes to memory, i.e. completely fubar, |
778 | * so we need to use PIPE_NOTIFY instead. | 792 | * so we need to use PIPE_NOTIFY instead. |
779 | * | 793 | * |
780 | * However, we also need to workaround the qword write | 794 | * However, we also need to workaround the qword write |
781 | * incoherence by flushing the 6 PIPE_NOTIFY buffers out to | 795 | * incoherence by flushing the 6 PIPE_NOTIFY buffers out to |
782 | * memory before requesting an interrupt. | 796 | * memory before requesting an interrupt. |
783 | */ | 797 | */ |
784 | ret = intel_ring_begin(ring, 32); | 798 | ret = intel_ring_begin(ring, 32); |
785 | if (ret) | 799 | if (ret) |
786 | return ret; | 800 | return ret; |
787 | 801 | ||
788 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | | 802 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | |
789 | PIPE_CONTROL_WRITE_FLUSH | | 803 | PIPE_CONTROL_WRITE_FLUSH | |
790 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); | 804 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); |
791 | intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | 805 | intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
792 | intel_ring_emit(ring, ring->outstanding_lazy_seqno); | 806 | intel_ring_emit(ring, ring->outstanding_lazy_seqno); |
793 | intel_ring_emit(ring, 0); | 807 | intel_ring_emit(ring, 0); |
794 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 808 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
795 | scratch_addr += 128; /* write to separate cachelines */ | 809 | scratch_addr += 128; /* write to separate cachelines */ |
796 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 810 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
797 | scratch_addr += 128; | 811 | scratch_addr += 128; |
798 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 812 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
799 | scratch_addr += 128; | 813 | scratch_addr += 128; |
800 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 814 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
801 | scratch_addr += 128; | 815 | scratch_addr += 128; |
802 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 816 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
803 | scratch_addr += 128; | 817 | scratch_addr += 128; |
804 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 818 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
805 | 819 | ||
806 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | | 820 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | |
807 | PIPE_CONTROL_WRITE_FLUSH | | 821 | PIPE_CONTROL_WRITE_FLUSH | |
808 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | | 822 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | |
809 | PIPE_CONTROL_NOTIFY); | 823 | PIPE_CONTROL_NOTIFY); |
810 | intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | 824 | intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
811 | intel_ring_emit(ring, ring->outstanding_lazy_seqno); | 825 | intel_ring_emit(ring, ring->outstanding_lazy_seqno); |
812 | intel_ring_emit(ring, 0); | 826 | intel_ring_emit(ring, 0); |
813 | __intel_ring_advance(ring); | 827 | __intel_ring_advance(ring); |
814 | 828 | ||
815 | return 0; | 829 | return 0; |
816 | } | 830 | } |
817 | 831 | ||
818 | static u32 | 832 | static u32 |
819 | gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) | 833 | gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) |
820 | { | 834 | { |
821 | /* Workaround to force correct ordering between irq and seqno writes on | 835 | /* Workaround to force correct ordering between irq and seqno writes on |
822 | * ivb (and maybe also on snb) by reading from a CS register (like | 836 | * ivb (and maybe also on snb) by reading from a CS register (like |
823 | * ACTHD) before reading the status page. */ | 837 | * ACTHD) before reading the status page. */ |
824 | if (!lazy_coherency) { | 838 | if (!lazy_coherency) { |
825 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 839 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
826 | POSTING_READ(RING_ACTHD(ring->mmio_base)); | 840 | POSTING_READ(RING_ACTHD(ring->mmio_base)); |
827 | } | 841 | } |
828 | 842 | ||
829 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | 843 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); |
830 | } | 844 | } |
831 | 845 | ||
832 | static u32 | 846 | static u32 |
833 | ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) | 847 | ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) |
834 | { | 848 | { |
835 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | 849 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); |
836 | } | 850 | } |
837 | 851 | ||
838 | static void | 852 | static void |
839 | ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno) | 853 | ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno) |
840 | { | 854 | { |
841 | intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); | 855 | intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); |
842 | } | 856 | } |
843 | 857 | ||
844 | static u32 | 858 | static u32 |
845 | pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) | 859 | pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) |
846 | { | 860 | { |
847 | return ring->scratch.cpu_page[0]; | 861 | return ring->scratch.cpu_page[0]; |
848 | } | 862 | } |
849 | 863 | ||
850 | static void | 864 | static void |
851 | pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) | 865 | pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) |
852 | { | 866 | { |
853 | ring->scratch.cpu_page[0] = seqno; | 867 | ring->scratch.cpu_page[0] = seqno; |
854 | } | 868 | } |
855 | 869 | ||
856 | static bool | 870 | static bool |
857 | gen5_ring_get_irq(struct intel_ring_buffer *ring) | 871 | gen5_ring_get_irq(struct intel_ring_buffer *ring) |
858 | { | 872 | { |
859 | struct drm_device *dev = ring->dev; | 873 | struct drm_device *dev = ring->dev; |
860 | struct drm_i915_private *dev_priv = dev->dev_private; | 874 | struct drm_i915_private *dev_priv = dev->dev_private; |
861 | unsigned long flags; | 875 | unsigned long flags; |
862 | 876 | ||
863 | if (!dev->irq_enabled) | 877 | if (!dev->irq_enabled) |
864 | return false; | 878 | return false; |
865 | 879 | ||
866 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 880 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
867 | if (ring->irq_refcount++ == 0) | 881 | if (ring->irq_refcount++ == 0) |
868 | ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); | 882 | ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); |
869 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 883 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
870 | 884 | ||
871 | return true; | 885 | return true; |
872 | } | 886 | } |
873 | 887 | ||
874 | static void | 888 | static void |
875 | gen5_ring_put_irq(struct intel_ring_buffer *ring) | 889 | gen5_ring_put_irq(struct intel_ring_buffer *ring) |
876 | { | 890 | { |
877 | struct drm_device *dev = ring->dev; | 891 | struct drm_device *dev = ring->dev; |
878 | struct drm_i915_private *dev_priv = dev->dev_private; | 892 | struct drm_i915_private *dev_priv = dev->dev_private; |
879 | unsigned long flags; | 893 | unsigned long flags; |
880 | 894 | ||
881 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 895 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
882 | if (--ring->irq_refcount == 0) | 896 | if (--ring->irq_refcount == 0) |
883 | ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); | 897 | ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); |
884 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 898 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
885 | } | 899 | } |
886 | 900 | ||
887 | static bool | 901 | static bool |
888 | i9xx_ring_get_irq(struct intel_ring_buffer *ring) | 902 | i9xx_ring_get_irq(struct intel_ring_buffer *ring) |
889 | { | 903 | { |
890 | struct drm_device *dev = ring->dev; | 904 | struct drm_device *dev = ring->dev; |
891 | struct drm_i915_private *dev_priv = dev->dev_private; | 905 | struct drm_i915_private *dev_priv = dev->dev_private; |
892 | unsigned long flags; | 906 | unsigned long flags; |
893 | 907 | ||
894 | if (!dev->irq_enabled) | 908 | if (!dev->irq_enabled) |
895 | return false; | 909 | return false; |
896 | 910 | ||
897 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 911 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
898 | if (ring->irq_refcount++ == 0) { | 912 | if (ring->irq_refcount++ == 0) { |
899 | dev_priv->irq_mask &= ~ring->irq_enable_mask; | 913 | dev_priv->irq_mask &= ~ring->irq_enable_mask; |
900 | I915_WRITE(IMR, dev_priv->irq_mask); | 914 | I915_WRITE(IMR, dev_priv->irq_mask); |
901 | POSTING_READ(IMR); | 915 | POSTING_READ(IMR); |
902 | } | 916 | } |
903 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 917 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
904 | 918 | ||
905 | return true; | 919 | return true; |
906 | } | 920 | } |
907 | 921 | ||
908 | static void | 922 | static void |
909 | i9xx_ring_put_irq(struct intel_ring_buffer *ring) | 923 | i9xx_ring_put_irq(struct intel_ring_buffer *ring) |
910 | { | 924 | { |
911 | struct drm_device *dev = ring->dev; | 925 | struct drm_device *dev = ring->dev; |
912 | struct drm_i915_private *dev_priv = dev->dev_private; | 926 | struct drm_i915_private *dev_priv = dev->dev_private; |
913 | unsigned long flags; | 927 | unsigned long flags; |
914 | 928 | ||
915 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 929 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
916 | if (--ring->irq_refcount == 0) { | 930 | if (--ring->irq_refcount == 0) { |
917 | dev_priv->irq_mask |= ring->irq_enable_mask; | 931 | dev_priv->irq_mask |= ring->irq_enable_mask; |
918 | I915_WRITE(IMR, dev_priv->irq_mask); | 932 | I915_WRITE(IMR, dev_priv->irq_mask); |
919 | POSTING_READ(IMR); | 933 | POSTING_READ(IMR); |
920 | } | 934 | } |
921 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 935 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
922 | } | 936 | } |
923 | 937 | ||
924 | static bool | 938 | static bool |
925 | i8xx_ring_get_irq(struct intel_ring_buffer *ring) | 939 | i8xx_ring_get_irq(struct intel_ring_buffer *ring) |
926 | { | 940 | { |
927 | struct drm_device *dev = ring->dev; | 941 | struct drm_device *dev = ring->dev; |
928 | struct drm_i915_private *dev_priv = dev->dev_private; | 942 | struct drm_i915_private *dev_priv = dev->dev_private; |
929 | unsigned long flags; | 943 | unsigned long flags; |
930 | 944 | ||
931 | if (!dev->irq_enabled) | 945 | if (!dev->irq_enabled) |
932 | return false; | 946 | return false; |
933 | 947 | ||
934 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 948 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
935 | if (ring->irq_refcount++ == 0) { | 949 | if (ring->irq_refcount++ == 0) { |
936 | dev_priv->irq_mask &= ~ring->irq_enable_mask; | 950 | dev_priv->irq_mask &= ~ring->irq_enable_mask; |
937 | I915_WRITE16(IMR, dev_priv->irq_mask); | 951 | I915_WRITE16(IMR, dev_priv->irq_mask); |
938 | POSTING_READ16(IMR); | 952 | POSTING_READ16(IMR); |
939 | } | 953 | } |
940 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 954 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
941 | 955 | ||
942 | return true; | 956 | return true; |
943 | } | 957 | } |
944 | 958 | ||
945 | static void | 959 | static void |
946 | i8xx_ring_put_irq(struct intel_ring_buffer *ring) | 960 | i8xx_ring_put_irq(struct intel_ring_buffer *ring) |
947 | { | 961 | { |
948 | struct drm_device *dev = ring->dev; | 962 | struct drm_device *dev = ring->dev; |
949 | struct drm_i915_private *dev_priv = dev->dev_private; | 963 | struct drm_i915_private *dev_priv = dev->dev_private; |
950 | unsigned long flags; | 964 | unsigned long flags; |
951 | 965 | ||
952 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 966 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
953 | if (--ring->irq_refcount == 0) { | 967 | if (--ring->irq_refcount == 0) { |
954 | dev_priv->irq_mask |= ring->irq_enable_mask; | 968 | dev_priv->irq_mask |= ring->irq_enable_mask; |
955 | I915_WRITE16(IMR, dev_priv->irq_mask); | 969 | I915_WRITE16(IMR, dev_priv->irq_mask); |
956 | POSTING_READ16(IMR); | 970 | POSTING_READ16(IMR); |
957 | } | 971 | } |
958 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 972 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
959 | } | 973 | } |
960 | 974 | ||
961 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring) | 975 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring) |
962 | { | 976 | { |
963 | struct drm_device *dev = ring->dev; | 977 | struct drm_device *dev = ring->dev; |
964 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 978 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
965 | u32 mmio = 0; | 979 | u32 mmio = 0; |
966 | 980 | ||
967 | /* The ring status page addresses are no longer next to the rest of | 981 | /* The ring status page addresses are no longer next to the rest of |
968 | * the ring registers as of gen7. | 982 | * the ring registers as of gen7. |
969 | */ | 983 | */ |
970 | if (IS_GEN7(dev)) { | 984 | if (IS_GEN7(dev)) { |
971 | switch (ring->id) { | 985 | switch (ring->id) { |
972 | case RCS: | 986 | case RCS: |
973 | mmio = RENDER_HWS_PGA_GEN7; | 987 | mmio = RENDER_HWS_PGA_GEN7; |
974 | break; | 988 | break; |
975 | case BCS: | 989 | case BCS: |
976 | mmio = BLT_HWS_PGA_GEN7; | 990 | mmio = BLT_HWS_PGA_GEN7; |
977 | break; | 991 | break; |
978 | case VCS: | 992 | case VCS: |
979 | mmio = BSD_HWS_PGA_GEN7; | 993 | mmio = BSD_HWS_PGA_GEN7; |
980 | break; | 994 | break; |
981 | case VECS: | 995 | case VECS: |
982 | mmio = VEBOX_HWS_PGA_GEN7; | 996 | mmio = VEBOX_HWS_PGA_GEN7; |
983 | break; | 997 | break; |
984 | } | 998 | } |
985 | } else if (IS_GEN6(ring->dev)) { | 999 | } else if (IS_GEN6(ring->dev)) { |
986 | mmio = RING_HWS_PGA_GEN6(ring->mmio_base); | 1000 | mmio = RING_HWS_PGA_GEN6(ring->mmio_base); |
987 | } else { | 1001 | } else { |
988 | /* XXX: gen8 returns to sanity */ | 1002 | /* XXX: gen8 returns to sanity */ |
989 | mmio = RING_HWS_PGA(ring->mmio_base); | 1003 | mmio = RING_HWS_PGA(ring->mmio_base); |
990 | } | 1004 | } |
991 | 1005 | ||
992 | I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); | 1006 | I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); |
993 | POSTING_READ(mmio); | 1007 | POSTING_READ(mmio); |
994 | 1008 | ||
995 | /* | 1009 | /* |
996 | * Flush the TLB for this page | 1010 | * Flush the TLB for this page |
997 | * | 1011 | * |
998 | * FIXME: These two bits have disappeared on gen8, so a question | 1012 | * FIXME: These two bits have disappeared on gen8, so a question |
999 | * arises: do we still need this and if so how should we go about | 1013 | * arises: do we still need this and if so how should we go about |
1000 | * invalidating the TLB? | 1014 | * invalidating the TLB? |
1001 | */ | 1015 | */ |
1002 | if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { | 1016 | if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { |
1003 | u32 reg = RING_INSTPM(ring->mmio_base); | 1017 | u32 reg = RING_INSTPM(ring->mmio_base); |
1004 | 1018 | ||
1005 | /* ring should be idle before issuing a sync flush*/ | 1019 | /* ring should be idle before issuing a sync flush*/ |
1006 | WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); | 1020 | WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); |
1007 | 1021 | ||
1008 | I915_WRITE(reg, | 1022 | I915_WRITE(reg, |
1009 | _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | | 1023 | _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | |
1010 | INSTPM_SYNC_FLUSH)); | 1024 | INSTPM_SYNC_FLUSH)); |
1011 | if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, | 1025 | if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, |
1012 | 1000)) | 1026 | 1000)) |
1013 | DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", | 1027 | DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", |
1014 | ring->name); | 1028 | ring->name); |
1015 | } | 1029 | } |
1016 | } | 1030 | } |
1017 | 1031 | ||
1018 | static int | 1032 | static int |
1019 | bsd_ring_flush(struct intel_ring_buffer *ring, | 1033 | bsd_ring_flush(struct intel_ring_buffer *ring, |
1020 | u32 invalidate_domains, | 1034 | u32 invalidate_domains, |
1021 | u32 flush_domains) | 1035 | u32 flush_domains) |
1022 | { | 1036 | { |
1023 | int ret; | 1037 | int ret; |
1024 | 1038 | ||
1025 | ret = intel_ring_begin(ring, 2); | 1039 | ret = intel_ring_begin(ring, 2); |
1026 | if (ret) | 1040 | if (ret) |
1027 | return ret; | 1041 | return ret; |
1028 | 1042 | ||
1029 | intel_ring_emit(ring, MI_FLUSH); | 1043 | intel_ring_emit(ring, MI_FLUSH); |
1030 | intel_ring_emit(ring, MI_NOOP); | 1044 | intel_ring_emit(ring, MI_NOOP); |
1031 | intel_ring_advance(ring); | 1045 | intel_ring_advance(ring); |
1032 | return 0; | 1046 | return 0; |
1033 | } | 1047 | } |
1034 | 1048 | ||
1035 | static int | 1049 | static int |
1036 | i9xx_add_request(struct intel_ring_buffer *ring) | 1050 | i9xx_add_request(struct intel_ring_buffer *ring) |
1037 | { | 1051 | { |
1038 | int ret; | 1052 | int ret; |
1039 | 1053 | ||
1040 | ret = intel_ring_begin(ring, 4); | 1054 | ret = intel_ring_begin(ring, 4); |
1041 | if (ret) | 1055 | if (ret) |
1042 | return ret; | 1056 | return ret; |
1043 | 1057 | ||
1044 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | 1058 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); |
1045 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 1059 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
1046 | intel_ring_emit(ring, ring->outstanding_lazy_seqno); | 1060 | intel_ring_emit(ring, ring->outstanding_lazy_seqno); |
1047 | intel_ring_emit(ring, MI_USER_INTERRUPT); | 1061 | intel_ring_emit(ring, MI_USER_INTERRUPT); |
1048 | __intel_ring_advance(ring); | 1062 | __intel_ring_advance(ring); |
1049 | 1063 | ||
1050 | return 0; | 1064 | return 0; |
1051 | } | 1065 | } |
1052 | 1066 | ||
1053 | static bool | 1067 | static bool |
1054 | gen6_ring_get_irq(struct intel_ring_buffer *ring) | 1068 | gen6_ring_get_irq(struct intel_ring_buffer *ring) |
1055 | { | 1069 | { |
1056 | struct drm_device *dev = ring->dev; | 1070 | struct drm_device *dev = ring->dev; |
1057 | struct drm_i915_private *dev_priv = dev->dev_private; | 1071 | struct drm_i915_private *dev_priv = dev->dev_private; |
1058 | unsigned long flags; | 1072 | unsigned long flags; |
1059 | 1073 | ||
1060 | if (!dev->irq_enabled) | 1074 | if (!dev->irq_enabled) |
1061 | return false; | 1075 | return false; |
1062 | 1076 | ||
1063 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 1077 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
1064 | if (ring->irq_refcount++ == 0) { | 1078 | if (ring->irq_refcount++ == 0) { |
1065 | if (HAS_L3_DPF(dev) && ring->id == RCS) | 1079 | if (HAS_L3_DPF(dev) && ring->id == RCS) |
1066 | I915_WRITE_IMR(ring, | 1080 | I915_WRITE_IMR(ring, |
1067 | ~(ring->irq_enable_mask | | 1081 | ~(ring->irq_enable_mask | |
1068 | GT_PARITY_ERROR(dev))); | 1082 | GT_PARITY_ERROR(dev))); |
1069 | else | 1083 | else |
1070 | I915_WRITE_IMR(ring, ~ring->irq_enable_mask); | 1084 | I915_WRITE_IMR(ring, ~ring->irq_enable_mask); |
1071 | ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); | 1085 | ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); |
1072 | } | 1086 | } |
1073 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 1087 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
1074 | 1088 | ||
1075 | return true; | 1089 | return true; |
1076 | } | 1090 | } |
1077 | 1091 | ||
1078 | static void | 1092 | static void |
1079 | gen6_ring_put_irq(struct intel_ring_buffer *ring) | 1093 | gen6_ring_put_irq(struct intel_ring_buffer *ring) |
1080 | { | 1094 | { |
1081 | struct drm_device *dev = ring->dev; | 1095 | struct drm_device *dev = ring->dev; |
1082 | struct drm_i915_private *dev_priv = dev->dev_private; | 1096 | struct drm_i915_private *dev_priv = dev->dev_private; |
1083 | unsigned long flags; | 1097 | unsigned long flags; |
1084 | 1098 | ||
1085 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 1099 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
1086 | if (--ring->irq_refcount == 0) { | 1100 | if (--ring->irq_refcount == 0) { |
1087 | if (HAS_L3_DPF(dev) && ring->id == RCS) | 1101 | if (HAS_L3_DPF(dev) && ring->id == RCS) |
1088 | I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); | 1102 | I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); |
1089 | else | 1103 | else |
1090 | I915_WRITE_IMR(ring, ~0); | 1104 | I915_WRITE_IMR(ring, ~0); |
1091 | ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); | 1105 | ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); |
1092 | } | 1106 | } |
1093 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 1107 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
1094 | } | 1108 | } |
1095 | 1109 | ||
1096 | static bool | 1110 | static bool |
1097 | hsw_vebox_get_irq(struct intel_ring_buffer *ring) | 1111 | hsw_vebox_get_irq(struct intel_ring_buffer *ring) |
1098 | { | 1112 | { |
1099 | struct drm_device *dev = ring->dev; | 1113 | struct drm_device *dev = ring->dev; |
1100 | struct drm_i915_private *dev_priv = dev->dev_private; | 1114 | struct drm_i915_private *dev_priv = dev->dev_private; |
1101 | unsigned long flags; | 1115 | unsigned long flags; |
1102 | 1116 | ||
1103 | if (!dev->irq_enabled) | 1117 | if (!dev->irq_enabled) |
1104 | return false; | 1118 | return false; |
1105 | 1119 | ||
1106 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 1120 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
1107 | if (ring->irq_refcount++ == 0) { | 1121 | if (ring->irq_refcount++ == 0) { |
1108 | I915_WRITE_IMR(ring, ~ring->irq_enable_mask); | 1122 | I915_WRITE_IMR(ring, ~ring->irq_enable_mask); |
1109 | snb_enable_pm_irq(dev_priv, ring->irq_enable_mask); | 1123 | snb_enable_pm_irq(dev_priv, ring->irq_enable_mask); |
1110 | } | 1124 | } |
1111 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 1125 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
1112 | 1126 | ||
1113 | return true; | 1127 | return true; |
1114 | } | 1128 | } |
1115 | 1129 | ||
1116 | static void | 1130 | static void |
1117 | hsw_vebox_put_irq(struct intel_ring_buffer *ring) | 1131 | hsw_vebox_put_irq(struct intel_ring_buffer *ring) |
1118 | { | 1132 | { |
1119 | struct drm_device *dev = ring->dev; | 1133 | struct drm_device *dev = ring->dev; |
1120 | struct drm_i915_private *dev_priv = dev->dev_private; | 1134 | struct drm_i915_private *dev_priv = dev->dev_private; |
1121 | unsigned long flags; | 1135 | unsigned long flags; |
1122 | 1136 | ||
1123 | if (!dev->irq_enabled) | 1137 | if (!dev->irq_enabled) |
1124 | return; | 1138 | return; |
1125 | 1139 | ||
1126 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 1140 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
1127 | if (--ring->irq_refcount == 0) { | 1141 | if (--ring->irq_refcount == 0) { |
1128 | I915_WRITE_IMR(ring, ~0); | 1142 | I915_WRITE_IMR(ring, ~0); |
1129 | snb_disable_pm_irq(dev_priv, ring->irq_enable_mask); | 1143 | snb_disable_pm_irq(dev_priv, ring->irq_enable_mask); |
1130 | } | 1144 | } |
1131 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 1145 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
1132 | } | 1146 | } |
1133 | 1147 | ||
1134 | static bool | 1148 | static bool |
1135 | gen8_ring_get_irq(struct intel_ring_buffer *ring) | 1149 | gen8_ring_get_irq(struct intel_ring_buffer *ring) |
1136 | { | 1150 | { |
1137 | struct drm_device *dev = ring->dev; | 1151 | struct drm_device *dev = ring->dev; |
1138 | struct drm_i915_private *dev_priv = dev->dev_private; | 1152 | struct drm_i915_private *dev_priv = dev->dev_private; |
1139 | unsigned long flags; | 1153 | unsigned long flags; |
1140 | 1154 | ||
1141 | if (!dev->irq_enabled) | 1155 | if (!dev->irq_enabled) |
1142 | return false; | 1156 | return false; |
1143 | 1157 | ||
1144 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 1158 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
1145 | if (ring->irq_refcount++ == 0) { | 1159 | if (ring->irq_refcount++ == 0) { |
1146 | if (HAS_L3_DPF(dev) && ring->id == RCS) { | 1160 | if (HAS_L3_DPF(dev) && ring->id == RCS) { |
1147 | I915_WRITE_IMR(ring, | 1161 | I915_WRITE_IMR(ring, |
1148 | ~(ring->irq_enable_mask | | 1162 | ~(ring->irq_enable_mask | |
1149 | GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); | 1163 | GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); |
1150 | } else { | 1164 | } else { |
1151 | I915_WRITE_IMR(ring, ~ring->irq_enable_mask); | 1165 | I915_WRITE_IMR(ring, ~ring->irq_enable_mask); |
1152 | } | 1166 | } |
1153 | POSTING_READ(RING_IMR(ring->mmio_base)); | 1167 | POSTING_READ(RING_IMR(ring->mmio_base)); |
1154 | } | 1168 | } |
1155 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 1169 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
1156 | 1170 | ||
1157 | return true; | 1171 | return true; |
1158 | } | 1172 | } |
1159 | 1173 | ||
1160 | static void | 1174 | static void |
1161 | gen8_ring_put_irq(struct intel_ring_buffer *ring) | 1175 | gen8_ring_put_irq(struct intel_ring_buffer *ring) |
1162 | { | 1176 | { |
1163 | struct drm_device *dev = ring->dev; | 1177 | struct drm_device *dev = ring->dev; |
1164 | struct drm_i915_private *dev_priv = dev->dev_private; | 1178 | struct drm_i915_private *dev_priv = dev->dev_private; |
1165 | unsigned long flags; | 1179 | unsigned long flags; |
1166 | 1180 | ||
1167 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 1181 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
1168 | if (--ring->irq_refcount == 0) { | 1182 | if (--ring->irq_refcount == 0) { |
1169 | if (HAS_L3_DPF(dev) && ring->id == RCS) { | 1183 | if (HAS_L3_DPF(dev) && ring->id == RCS) { |
1170 | I915_WRITE_IMR(ring, | 1184 | I915_WRITE_IMR(ring, |
1171 | ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); | 1185 | ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); |
1172 | } else { | 1186 | } else { |
1173 | I915_WRITE_IMR(ring, ~0); | 1187 | I915_WRITE_IMR(ring, ~0); |
1174 | } | 1188 | } |
1175 | POSTING_READ(RING_IMR(ring->mmio_base)); | 1189 | POSTING_READ(RING_IMR(ring->mmio_base)); |
1176 | } | 1190 | } |
1177 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 1191 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
1178 | } | 1192 | } |
1179 | 1193 | ||
1180 | static int | 1194 | static int |
1181 | i965_dispatch_execbuffer(struct intel_ring_buffer *ring, | 1195 | i965_dispatch_execbuffer(struct intel_ring_buffer *ring, |
1182 | u32 offset, u32 length, | 1196 | u32 offset, u32 length, |
1183 | unsigned flags) | 1197 | unsigned flags) |
1184 | { | 1198 | { |
1185 | int ret; | 1199 | int ret; |
1186 | 1200 | ||
1187 | ret = intel_ring_begin(ring, 2); | 1201 | ret = intel_ring_begin(ring, 2); |
1188 | if (ret) | 1202 | if (ret) |
1189 | return ret; | 1203 | return ret; |
1190 | 1204 | ||
1191 | intel_ring_emit(ring, | 1205 | intel_ring_emit(ring, |
1192 | MI_BATCH_BUFFER_START | | 1206 | MI_BATCH_BUFFER_START | |
1193 | MI_BATCH_GTT | | 1207 | MI_BATCH_GTT | |
1194 | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); | 1208 | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); |
1195 | intel_ring_emit(ring, offset); | 1209 | intel_ring_emit(ring, offset); |
1196 | intel_ring_advance(ring); | 1210 | intel_ring_advance(ring); |
1197 | 1211 | ||
1198 | return 0; | 1212 | return 0; |
1199 | } | 1213 | } |
1200 | 1214 | ||
1201 | /* Just userspace ABI convention to limit the wa batch bo to a resonable size */ | 1215 | /* Just userspace ABI convention to limit the wa batch bo to a resonable size */ |
1202 | #define I830_BATCH_LIMIT (256*1024) | 1216 | #define I830_BATCH_LIMIT (256*1024) |
1203 | static int | 1217 | static int |
1204 | i830_dispatch_execbuffer(struct intel_ring_buffer *ring, | 1218 | i830_dispatch_execbuffer(struct intel_ring_buffer *ring, |
1205 | u32 offset, u32 len, | 1219 | u32 offset, u32 len, |
1206 | unsigned flags) | 1220 | unsigned flags) |
1207 | { | 1221 | { |
1208 | int ret; | 1222 | int ret; |
1209 | 1223 | ||
1210 | if (flags & I915_DISPATCH_PINNED) { | 1224 | if (flags & I915_DISPATCH_PINNED) { |
1211 | ret = intel_ring_begin(ring, 4); | 1225 | ret = intel_ring_begin(ring, 4); |
1212 | if (ret) | 1226 | if (ret) |
1213 | return ret; | 1227 | return ret; |
1214 | 1228 | ||
1215 | intel_ring_emit(ring, MI_BATCH_BUFFER); | 1229 | intel_ring_emit(ring, MI_BATCH_BUFFER); |
1216 | intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); | 1230 | intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); |
1217 | intel_ring_emit(ring, offset + len - 8); | 1231 | intel_ring_emit(ring, offset + len - 8); |
1218 | intel_ring_emit(ring, MI_NOOP); | 1232 | intel_ring_emit(ring, MI_NOOP); |
1219 | intel_ring_advance(ring); | 1233 | intel_ring_advance(ring); |
1220 | } else { | 1234 | } else { |
1221 | u32 cs_offset = ring->scratch.gtt_offset; | 1235 | u32 cs_offset = ring->scratch.gtt_offset; |
1222 | 1236 | ||
1223 | if (len > I830_BATCH_LIMIT) | 1237 | if (len > I830_BATCH_LIMIT) |
1224 | return -ENOSPC; | 1238 | return -ENOSPC; |
1225 | 1239 | ||
1226 | ret = intel_ring_begin(ring, 9+3); | 1240 | ret = intel_ring_begin(ring, 9+3); |
1227 | if (ret) | 1241 | if (ret) |
1228 | return ret; | 1242 | return ret; |
1229 | /* Blit the batch (which has now all relocs applied) to the stable batch | 1243 | /* Blit the batch (which has now all relocs applied) to the stable batch |
1230 | * scratch bo area (so that the CS never stumbles over its tlb | 1244 | * scratch bo area (so that the CS never stumbles over its tlb |
1231 | * invalidation bug) ... */ | 1245 | * invalidation bug) ... */ |
1232 | intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | | 1246 | intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | |
1233 | XY_SRC_COPY_BLT_WRITE_ALPHA | | 1247 | XY_SRC_COPY_BLT_WRITE_ALPHA | |
1234 | XY_SRC_COPY_BLT_WRITE_RGB); | 1248 | XY_SRC_COPY_BLT_WRITE_RGB); |
1235 | intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); | 1249 | intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); |
1236 | intel_ring_emit(ring, 0); | 1250 | intel_ring_emit(ring, 0); |
1237 | intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024); | 1251 | intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024); |
1238 | intel_ring_emit(ring, cs_offset); | 1252 | intel_ring_emit(ring, cs_offset); |
1239 | intel_ring_emit(ring, 0); | 1253 | intel_ring_emit(ring, 0); |
1240 | intel_ring_emit(ring, 4096); | 1254 | intel_ring_emit(ring, 4096); |
1241 | intel_ring_emit(ring, offset); | 1255 | intel_ring_emit(ring, offset); |
1242 | intel_ring_emit(ring, MI_FLUSH); | 1256 | intel_ring_emit(ring, MI_FLUSH); |
1243 | 1257 | ||
1244 | /* ... and execute it. */ | 1258 | /* ... and execute it. */ |
1245 | intel_ring_emit(ring, MI_BATCH_BUFFER); | 1259 | intel_ring_emit(ring, MI_BATCH_BUFFER); |
1246 | intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); | 1260 | intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); |
1247 | intel_ring_emit(ring, cs_offset + len - 8); | 1261 | intel_ring_emit(ring, cs_offset + len - 8); |
1248 | intel_ring_advance(ring); | 1262 | intel_ring_advance(ring); |
1249 | } | 1263 | } |
1250 | 1264 | ||
1251 | return 0; | 1265 | return 0; |
1252 | } | 1266 | } |
1253 | 1267 | ||
1254 | static int | 1268 | static int |
1255 | i915_dispatch_execbuffer(struct intel_ring_buffer *ring, | 1269 | i915_dispatch_execbuffer(struct intel_ring_buffer *ring, |
1256 | u32 offset, u32 len, | 1270 | u32 offset, u32 len, |
1257 | unsigned flags) | 1271 | unsigned flags) |
1258 | { | 1272 | { |
1259 | int ret; | 1273 | int ret; |
1260 | 1274 | ||
1261 | ret = intel_ring_begin(ring, 2); | 1275 | ret = intel_ring_begin(ring, 2); |
1262 | if (ret) | 1276 | if (ret) |
1263 | return ret; | 1277 | return ret; |
1264 | 1278 | ||
1265 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); | 1279 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); |
1266 | intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); | 1280 | intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); |
1267 | intel_ring_advance(ring); | 1281 | intel_ring_advance(ring); |
1268 | 1282 | ||
1269 | return 0; | 1283 | return 0; |
1270 | } | 1284 | } |
1271 | 1285 | ||
1272 | static void cleanup_status_page(struct intel_ring_buffer *ring) | 1286 | static void cleanup_status_page(struct intel_ring_buffer *ring) |
1273 | { | 1287 | { |
1274 | struct drm_i915_gem_object *obj; | 1288 | struct drm_i915_gem_object *obj; |
1275 | 1289 | ||
1276 | obj = ring->status_page.obj; | 1290 | obj = ring->status_page.obj; |
1277 | if (obj == NULL) | 1291 | if (obj == NULL) |
1278 | return; | 1292 | return; |
1279 | 1293 | ||
1280 | kunmap(sg_page(obj->pages->sgl)); | 1294 | kunmap(sg_page(obj->pages->sgl)); |
1281 | i915_gem_object_ggtt_unpin(obj); | 1295 | i915_gem_object_ggtt_unpin(obj); |
1282 | drm_gem_object_unreference(&obj->base); | 1296 | drm_gem_object_unreference(&obj->base); |
1283 | ring->status_page.obj = NULL; | 1297 | ring->status_page.obj = NULL; |
1284 | } | 1298 | } |
1285 | 1299 | ||
1286 | static int init_status_page(struct intel_ring_buffer *ring) | 1300 | static int init_status_page(struct intel_ring_buffer *ring) |
1287 | { | 1301 | { |
1288 | struct drm_device *dev = ring->dev; | 1302 | struct drm_device *dev = ring->dev; |
1289 | struct drm_i915_gem_object *obj; | 1303 | struct drm_i915_gem_object *obj; |
1290 | int ret; | 1304 | int ret; |
1291 | 1305 | ||
1292 | obj = i915_gem_alloc_object(dev, 4096); | 1306 | obj = i915_gem_alloc_object(dev, 4096); |
1293 | if (obj == NULL) { | 1307 | if (obj == NULL) { |
1294 | DRM_ERROR("Failed to allocate status page\n"); | 1308 | DRM_ERROR("Failed to allocate status page\n"); |
1295 | ret = -ENOMEM; | 1309 | ret = -ENOMEM; |
1296 | goto err; | 1310 | goto err; |
1297 | } | 1311 | } |
1298 | 1312 | ||
1299 | ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); | 1313 | ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); |
1300 | if (ret) | 1314 | if (ret) |
1301 | goto err_unref; | 1315 | goto err_unref; |
1302 | 1316 | ||
1303 | ret = i915_gem_obj_ggtt_pin(obj, 4096, 0); | 1317 | ret = i915_gem_obj_ggtt_pin(obj, 4096, 0); |
1304 | if (ret) | 1318 | if (ret) |
1305 | goto err_unref; | 1319 | goto err_unref; |
1306 | 1320 | ||
1307 | ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); | 1321 | ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); |
1308 | ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); | 1322 | ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); |
1309 | if (ring->status_page.page_addr == NULL) { | 1323 | if (ring->status_page.page_addr == NULL) { |
1310 | ret = -ENOMEM; | 1324 | ret = -ENOMEM; |
1311 | goto err_unpin; | 1325 | goto err_unpin; |
1312 | } | 1326 | } |
1313 | ring->status_page.obj = obj; | 1327 | ring->status_page.obj = obj; |
1314 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | 1328 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); |
1315 | 1329 | ||
1316 | DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", | 1330 | DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", |
1317 | ring->name, ring->status_page.gfx_addr); | 1331 | ring->name, ring->status_page.gfx_addr); |
1318 | 1332 | ||
1319 | return 0; | 1333 | return 0; |
1320 | 1334 | ||
1321 | err_unpin: | 1335 | err_unpin: |
1322 | i915_gem_object_ggtt_unpin(obj); | 1336 | i915_gem_object_ggtt_unpin(obj); |
1323 | err_unref: | 1337 | err_unref: |
1324 | drm_gem_object_unreference(&obj->base); | 1338 | drm_gem_object_unreference(&obj->base); |
1325 | err: | 1339 | err: |
1326 | return ret; | 1340 | return ret; |
1327 | } | 1341 | } |
1328 | 1342 | ||
1329 | static int init_phys_status_page(struct intel_ring_buffer *ring) | 1343 | static int init_phys_status_page(struct intel_ring_buffer *ring) |
1330 | { | 1344 | { |
1331 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 1345 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
1332 | 1346 | ||
1333 | if (!dev_priv->status_page_dmah) { | 1347 | if (!dev_priv->status_page_dmah) { |
1334 | dev_priv->status_page_dmah = | 1348 | dev_priv->status_page_dmah = |
1335 | drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); | 1349 | drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); |
1336 | if (!dev_priv->status_page_dmah) | 1350 | if (!dev_priv->status_page_dmah) |
1337 | return -ENOMEM; | 1351 | return -ENOMEM; |
1338 | } | 1352 | } |
1339 | 1353 | ||
1340 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; | 1354 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; |
1341 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | 1355 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); |
1342 | 1356 | ||
1343 | return 0; | 1357 | return 0; |
1344 | } | 1358 | } |
1345 | 1359 | ||
/* Common one-time initialisation for an engine command ring: software
 * state, hardware status page, ring buffer object allocation, GGTT pin,
 * write-combined CPU mapping and finally the engine-specific ->init()
 * hook.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released through the goto-unwind chain at the
 * bottom.
 */
static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	ring->size = 32 * PAGE_SIZE;
	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));

	init_waitqueue_head(&ring->irq_queue);

	/* The status page lives either in the GTT or, on chipsets that
	 * cannot do that, in a physically addressed DMA page (render
	 * ring only, hence the BUG_ON below). */
	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	} else {
		BUG_ON(ring->id != RCS);
		ret = init_phys_status_page(ring);
		if (ret)
			return ret;
	}

	/* Prefer stolen memory for the ring object on machines without
	 * an LLC; fall back to a regular GEM object either way. */
	obj = NULL;
	if (!HAS_LLC(dev))
		obj = i915_gem_object_create_stolen(dev, ring->size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	/* Pin into the mappable aperture so the CPU can write commands
	 * through the WC mapping created below. */
	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
	if (ret)
		goto err_unref;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto err_unpin;

	ring->virtual_start =
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   ring->size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	/* Engine-specific hardware initialisation. */
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	i915_cmd_parser_init_ring(ring);

	return 0;

err_unmap:
	iounmap(ring->virtual_start);
err_unpin:
	i915_gem_object_ggtt_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}
1429 | 1443 | ||
/* Tear down a ring set up by intel_init_ring_buffer(): quiesce the
 * engine, disable the ring in hardware, then release the CPU mapping,
 * the GGTT pin, the ring object, the lazy request state and finally
 * the status page. Safe to call on a ring that was never initialised
 * (ring->obj == NULL). */
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_ring_idle(ring);
	/* A failure to idle is only worth complaining about when the
	 * GPU is not already being reset. */
	if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	iounmap(ring->virtual_start);

	i915_gem_object_ggtt_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	/* Optional engine-specific teardown hook. */
	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}
1460 | 1474 | ||
/* Try to free at least @n bytes of ring space by retiring completed
 * requests, and failing that by waiting on the oldest queued request
 * whose retirement would release enough space.
 *
 * Returns 0 (with ring->head/space updated) on success, -ENOSPC if no
 * queued request would free enough space, or a negative errno from the
 * seqno wait. */
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
	struct drm_i915_gem_request *request;
	u32 seqno = 0, tail;
	int ret;

	/* First consume any space already released by retirement. */
	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;

		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	/* Walk oldest-to-newest for the first request whose completion
	 * would leave at least @n bytes free. */
	list_for_each_entry(request, &ring->request_list, list) {
		int space;

		/* Already consumed by a previous invocation (below). */
		if (request->tail == -1)
			continue;

		/* Free space once this request retires, modulo the ring
		 * wrap and the always-reserved I915_RING_FREE_SPACE. */
		space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
		if (space < 0)
			space += ring->size;
		if (space >= n) {
			seqno = request->seqno;
			tail = request->tail;
			break;
		}

		/* Consume this request in case we need more space than
		 * is available and so need to prevent a race between
		 * updating last_retired_head and direct reads of
		 * I915_RING_HEAD. It also provides a nice sanity check.
		 */
		request->tail = -1;
	}

	/* seqno == 0 means no suitable request was found. */
	if (seqno == 0)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	ring->head = tail;
	ring->space = ring_space(ring);
	if (WARN_ON(ring->space < n))
		return -ENOSPC;

	return 0;
}
1513 | 1527 | ||
/* Wait until at least @n bytes are free in the ring: first by retiring
 * queued requests, then by polling the hardware HEAD pointer for up to
 * 60 seconds.
 *
 * Returns 0 on success, -EBUSY on timeout, or a negative errno (e.g.
 * wedged GPU) from i915_gem_check_wedge(). */
static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	/* Cheap path: make room by waiting on queued requests. */
	ret = intel_ring_wait_request(ring, n);
	if (ret != -ENOSPC)
		return ret;

	/* force the tail write in case we have been skipping them */
	__intel_ring_advance(ring);

	trace_i915_ring_wait_begin(ring);
	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	do {
		/* Re-sample the hardware HEAD; the GPU frees space as
		 * it consumes commands. */
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		/* Legacy (non-KMS) path: flag the busy-wait in the
		 * sarea for userspace performance accounting. */
		if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
		    dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);

		/* Abort the wait if a GPU reset is pending. */
		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			return ret;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}
1561 | 1575 | ||
1562 | static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) | 1576 | static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) |
1563 | { | 1577 | { |
1564 | uint32_t __iomem *virt; | 1578 | uint32_t __iomem *virt; |
1565 | int rem = ring->size - ring->tail; | 1579 | int rem = ring->size - ring->tail; |
1566 | 1580 | ||
1567 | if (ring->space < rem) { | 1581 | if (ring->space < rem) { |
1568 | int ret = ring_wait_for_space(ring, rem); | 1582 | int ret = ring_wait_for_space(ring, rem); |
1569 | if (ret) | 1583 | if (ret) |
1570 | return ret; | 1584 | return ret; |
1571 | } | 1585 | } |
1572 | 1586 | ||
1573 | virt = ring->virtual_start + ring->tail; | 1587 | virt = ring->virtual_start + ring->tail; |
1574 | rem /= 4; | 1588 | rem /= 4; |
1575 | while (rem--) | 1589 | while (rem--) |
1576 | iowrite32(MI_NOOP, virt++); | 1590 | iowrite32(MI_NOOP, virt++); |
1577 | 1591 | ||
1578 | ring->tail = 0; | 1592 | ring->tail = 0; |
1579 | ring->space = ring_space(ring); | 1593 | ring->space = ring_space(ring); |
1580 | 1594 | ||
1581 | return 0; | 1595 | return 0; |
1582 | } | 1596 | } |
1583 | 1597 | ||
1584 | int intel_ring_idle(struct intel_ring_buffer *ring) | 1598 | int intel_ring_idle(struct intel_ring_buffer *ring) |
1585 | { | 1599 | { |
1586 | u32 seqno; | 1600 | u32 seqno; |
1587 | int ret; | 1601 | int ret; |
1588 | 1602 | ||
1589 | /* We need to add any requests required to flush the objects and ring */ | 1603 | /* We need to add any requests required to flush the objects and ring */ |
1590 | if (ring->outstanding_lazy_seqno) { | 1604 | if (ring->outstanding_lazy_seqno) { |
1591 | ret = i915_add_request(ring, NULL); | 1605 | ret = i915_add_request(ring, NULL); |
1592 | if (ret) | 1606 | if (ret) |
1593 | return ret; | 1607 | return ret; |
1594 | } | 1608 | } |
1595 | 1609 | ||
1596 | /* Wait upon the last request to be completed */ | 1610 | /* Wait upon the last request to be completed */ |
1597 | if (list_empty(&ring->request_list)) | 1611 | if (list_empty(&ring->request_list)) |
1598 | return 0; | 1612 | return 0; |
1599 | 1613 | ||
1600 | seqno = list_entry(ring->request_list.prev, | 1614 | seqno = list_entry(ring->request_list.prev, |
1601 | struct drm_i915_gem_request, | 1615 | struct drm_i915_gem_request, |
1602 | list)->seqno; | 1616 | list)->seqno; |
1603 | 1617 | ||
1604 | return i915_wait_seqno(ring, seqno); | 1618 | return i915_wait_seqno(ring, seqno); |
1605 | } | 1619 | } |
1606 | 1620 | ||
1607 | static int | 1621 | static int |
1608 | intel_ring_alloc_seqno(struct intel_ring_buffer *ring) | 1622 | intel_ring_alloc_seqno(struct intel_ring_buffer *ring) |
1609 | { | 1623 | { |
1610 | if (ring->outstanding_lazy_seqno) | 1624 | if (ring->outstanding_lazy_seqno) |
1611 | return 0; | 1625 | return 0; |
1612 | 1626 | ||
1613 | if (ring->preallocated_lazy_request == NULL) { | 1627 | if (ring->preallocated_lazy_request == NULL) { |
1614 | struct drm_i915_gem_request *request; | 1628 | struct drm_i915_gem_request *request; |
1615 | 1629 | ||
1616 | request = kmalloc(sizeof(*request), GFP_KERNEL); | 1630 | request = kmalloc(sizeof(*request), GFP_KERNEL); |
1617 | if (request == NULL) | 1631 | if (request == NULL) |
1618 | return -ENOMEM; | 1632 | return -ENOMEM; |
1619 | 1633 | ||
1620 | ring->preallocated_lazy_request = request; | 1634 | ring->preallocated_lazy_request = request; |
1621 | } | 1635 | } |
1622 | 1636 | ||
1623 | return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); | 1637 | return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); |
1624 | } | 1638 | } |
1625 | 1639 | ||
1626 | static int __intel_ring_prepare(struct intel_ring_buffer *ring, | 1640 | static int __intel_ring_prepare(struct intel_ring_buffer *ring, |
1627 | int bytes) | 1641 | int bytes) |
1628 | { | 1642 | { |
1629 | int ret; | 1643 | int ret; |
1630 | 1644 | ||
1631 | if (unlikely(ring->tail + bytes > ring->effective_size)) { | 1645 | if (unlikely(ring->tail + bytes > ring->effective_size)) { |
1632 | ret = intel_wrap_ring_buffer(ring); | 1646 | ret = intel_wrap_ring_buffer(ring); |
1633 | if (unlikely(ret)) | 1647 | if (unlikely(ret)) |
1634 | return ret; | 1648 | return ret; |
1635 | } | 1649 | } |
1636 | 1650 | ||
1637 | if (unlikely(ring->space < bytes)) { | 1651 | if (unlikely(ring->space < bytes)) { |
1638 | ret = ring_wait_for_space(ring, bytes); | 1652 | ret = ring_wait_for_space(ring, bytes); |
1639 | if (unlikely(ret)) | 1653 | if (unlikely(ret)) |
1640 | return ret; | 1654 | return ret; |
1641 | } | 1655 | } |
1642 | 1656 | ||
1643 | return 0; | 1657 | return 0; |
1644 | } | 1658 | } |
1645 | 1659 | ||
1646 | int intel_ring_begin(struct intel_ring_buffer *ring, | 1660 | int intel_ring_begin(struct intel_ring_buffer *ring, |
1647 | int num_dwords) | 1661 | int num_dwords) |
1648 | { | 1662 | { |
1649 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 1663 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
1650 | int ret; | 1664 | int ret; |
1651 | 1665 | ||
1652 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, | 1666 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, |
1653 | dev_priv->mm.interruptible); | 1667 | dev_priv->mm.interruptible); |
1654 | if (ret) | 1668 | if (ret) |
1655 | return ret; | 1669 | return ret; |
1656 | 1670 | ||
1657 | ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t)); | 1671 | ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t)); |
1658 | if (ret) | 1672 | if (ret) |
1659 | return ret; | 1673 | return ret; |
1660 | 1674 | ||
1661 | /* Preallocate the olr before touching the ring */ | 1675 | /* Preallocate the olr before touching the ring */ |
1662 | ret = intel_ring_alloc_seqno(ring); | 1676 | ret = intel_ring_alloc_seqno(ring); |
1663 | if (ret) | 1677 | if (ret) |
1664 | return ret; | 1678 | return ret; |
1665 | 1679 | ||
1666 | ring->space -= num_dwords * sizeof(uint32_t); | 1680 | ring->space -= num_dwords * sizeof(uint32_t); |
1667 | return 0; | 1681 | return 0; |
1668 | } | 1682 | } |
1669 | 1683 | ||
1670 | /* Align the ring tail to a cacheline boundary */ | 1684 | /* Align the ring tail to a cacheline boundary */ |
1671 | int intel_ring_cacheline_align(struct intel_ring_buffer *ring) | 1685 | int intel_ring_cacheline_align(struct intel_ring_buffer *ring) |
1672 | { | 1686 | { |
1673 | int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t); | 1687 | int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t); |
1674 | int ret; | 1688 | int ret; |
1675 | 1689 | ||
1676 | if (num_dwords == 0) | 1690 | if (num_dwords == 0) |
1677 | return 0; | 1691 | return 0; |
1678 | 1692 | ||
1679 | ret = intel_ring_begin(ring, num_dwords); | 1693 | ret = intel_ring_begin(ring, num_dwords); |
1680 | if (ret) | 1694 | if (ret) |
1681 | return ret; | 1695 | return ret; |
1682 | 1696 | ||
1683 | while (num_dwords--) | 1697 | while (num_dwords--) |
1684 | intel_ring_emit(ring, MI_NOOP); | 1698 | intel_ring_emit(ring, MI_NOOP); |
1685 | 1699 | ||
1686 | intel_ring_advance(ring); | 1700 | intel_ring_advance(ring); |
1687 | 1701 | ||
1688 | return 0; | 1702 | return 0; |
1689 | } | 1703 | } |
1690 | 1704 | ||
/* Program a new current seqno into the ring. Must not be called while
 * a lazy seqno is still outstanding (BUG_ON). On gen6+ the inter-ring
 * semaphore sync registers are cleared so stale values cannot satisfy
 * future semaphore waits against the new seqno space. */
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	BUG_ON(ring->outstanding_lazy_seqno);

	if (INTEL_INFO(ring->dev)->gen >= 6) {
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
		/* The third sync register only exists on parts with a
		 * video enhancement (VEBOX) engine. */
		if (HAS_VEBOX(ring->dev))
			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
	}

	ring->set_seqno(ring, seqno);
	/* Keep hangcheck in step so it does not see a bogus jump. */
	ring->hangcheck.seqno = seqno;
}
1707 | 1721 | ||
/* Update the BSD (video) ring TAIL register on gen6. The engine may be
 * asleep in rc6, so the write must follow the documented sequence:
 * force the engine awake, wait for it to wake up, write TAIL, then
 * allow it to sleep again. */
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_TAIL(ring, value);
	POSTING_READ(RING_TAIL(ring->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
1740 | 1754 | ||
/* Emit an MI_FLUSH_DW on the BSD (video) ring. When GPU domains are
 * being invalidated, also request a TLB invalidation, which per the
 * Bspec quote below requires a post-sync store-dword operation (aimed
 * at the HWS scratch slot).
 *
 * Returns 0 on success or a negative errno from intel_ring_begin(). */
static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
			       u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	/* gen8 takes one extra dword (the upper address bits), which is
	 * encoded in the command length field. */
	if (INTEL_INFO(ring->dev)->gen >= 8)
		cmd += 1;
	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);
	return 0;
}
1775 | 1789 | ||
/* Kick off execution of a batch buffer on gen8 via
 * MI_BATCH_BUFFER_START_GEN8. Bit 8 selects the PPGTT address space
 * when aliasing PPGTT is active and the batch is not secure.
 *
 * Returns 0 on success or a negative errno from intel_ring_begin(). */
static int
gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len,
			      unsigned flags)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
		!(flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
	intel_ring_emit(ring, offset);
	intel_ring_emit(ring, 0); /* presumably the upper address bits - confirm vs Bspec */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
1799 | 1813 | ||
/* Kick off execution of a batch buffer on Haswell. Always requests the
 * HSW PPGTT address space; the non-secure bit is set unless the caller
 * asked for a secure (privileged) dispatch.
 *
 * Returns 0 on success or a negative errno from intel_ring_begin(). */
static int
hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			     u32 offset, u32 len,
			     unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}
1820 | 1834 | ||
/* Kick off execution of a batch buffer on gen6/gen7 via
 * MI_BATCH_BUFFER_START. The non-secure bit is set unless the caller
 * asked for a secure (privileged) dispatch.
 *
 * Returns 0 on success or a negative errno from intel_ring_begin(). */
static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len,
			      unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}
1841 | 1855 | ||
1842 | /* Blitter support (SandyBridge+) */ | 1856 | /* Blitter support (SandyBridge+) */ |
1843 | 1857 | ||
/* Emit an MI_FLUSH_DW on the blitter ring. When the render domain is
 * being invalidated, also request a TLB invalidation, which per the
 * Bspec quote below requires a post-sync store-dword operation. On
 * gen7, a pure flush (no invalidation) additionally nudges FBC to
 * re-compress via gen7_ring_fbc_flush().
 *
 * Returns 0 on success or a negative errno. */
static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	struct drm_device *dev = ring->dev;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	/* gen8 takes one extra dword (the upper address bits), which is
	 * encoded in the command length field. */
	if (INTEL_INFO(ring->dev)->gen >= 8)
		cmd += 1;
	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
			MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);

	if (IS_GEN7(dev) && !invalidate && flush)
		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);

	return 0;
}
1883 | 1897 | ||
1884 | int intel_init_render_ring_buffer(struct drm_device *dev) | 1898 | int intel_init_render_ring_buffer(struct drm_device *dev) |
1885 | { | 1899 | { |
1886 | struct drm_i915_private *dev_priv = dev->dev_private; | 1900 | struct drm_i915_private *dev_priv = dev->dev_private; |
1887 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | 1901 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; |
1888 | 1902 | ||
1889 | ring->name = "render ring"; | 1903 | ring->name = "render ring"; |
1890 | ring->id = RCS; | 1904 | ring->id = RCS; |
1891 | ring->mmio_base = RENDER_RING_BASE; | 1905 | ring->mmio_base = RENDER_RING_BASE; |
1892 | 1906 | ||
1893 | if (INTEL_INFO(dev)->gen >= 6) { | 1907 | if (INTEL_INFO(dev)->gen >= 6) { |
1894 | ring->add_request = gen6_add_request; | 1908 | ring->add_request = gen6_add_request; |
1895 | ring->flush = gen7_render_ring_flush; | 1909 | ring->flush = gen7_render_ring_flush; |
1896 | if (INTEL_INFO(dev)->gen == 6) | 1910 | if (INTEL_INFO(dev)->gen == 6) |
1897 | ring->flush = gen6_render_ring_flush; | 1911 | ring->flush = gen6_render_ring_flush; |
1898 | if (INTEL_INFO(dev)->gen >= 8) { | 1912 | if (INTEL_INFO(dev)->gen >= 8) { |
1899 | ring->flush = gen8_render_ring_flush; | 1913 | ring->flush = gen8_render_ring_flush; |
1900 | ring->irq_get = gen8_ring_get_irq; | 1914 | ring->irq_get = gen8_ring_get_irq; |
1901 | ring->irq_put = gen8_ring_put_irq; | 1915 | ring->irq_put = gen8_ring_put_irq; |
1902 | } else { | 1916 | } else { |
1903 | ring->irq_get = gen6_ring_get_irq; | 1917 | ring->irq_get = gen6_ring_get_irq; |
1904 | ring->irq_put = gen6_ring_put_irq; | 1918 | ring->irq_put = gen6_ring_put_irq; |
1905 | } | 1919 | } |
1906 | ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; | 1920 | ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; |
1907 | ring->get_seqno = gen6_ring_get_seqno; | 1921 | ring->get_seqno = gen6_ring_get_seqno; |
1908 | ring->set_seqno = ring_set_seqno; | 1922 | ring->set_seqno = ring_set_seqno; |
1909 | ring->sync_to = gen6_ring_sync; | 1923 | ring->sync_to = gen6_ring_sync; |
1910 | ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID; | 1924 | ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID; |
1911 | ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV; | 1925 | ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV; |
1912 | ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB; | 1926 | ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB; |
1913 | ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE; | 1927 | ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE; |
1914 | ring->signal_mbox[RCS] = GEN6_NOSYNC; | 1928 | ring->signal_mbox[RCS] = GEN6_NOSYNC; |
1915 | ring->signal_mbox[VCS] = GEN6_VRSYNC; | 1929 | ring->signal_mbox[VCS] = GEN6_VRSYNC; |
1916 | ring->signal_mbox[BCS] = GEN6_BRSYNC; | 1930 | ring->signal_mbox[BCS] = GEN6_BRSYNC; |
1917 | ring->signal_mbox[VECS] = GEN6_VERSYNC; | 1931 | ring->signal_mbox[VECS] = GEN6_VERSYNC; |
1918 | } else if (IS_GEN5(dev)) { | 1932 | } else if (IS_GEN5(dev)) { |
1919 | ring->add_request = pc_render_add_request; | 1933 | ring->add_request = pc_render_add_request; |
1920 | ring->flush = gen4_render_ring_flush; | 1934 | ring->flush = gen4_render_ring_flush; |
1921 | ring->get_seqno = pc_render_get_seqno; | 1935 | ring->get_seqno = pc_render_get_seqno; |
1922 | ring->set_seqno = pc_render_set_seqno; | 1936 | ring->set_seqno = pc_render_set_seqno; |
1923 | ring->irq_get = gen5_ring_get_irq; | 1937 | ring->irq_get = gen5_ring_get_irq; |
1924 | ring->irq_put = gen5_ring_put_irq; | 1938 | ring->irq_put = gen5_ring_put_irq; |
1925 | ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT | | 1939 | ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT | |
1926 | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; | 1940 | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; |
1927 | } else { | 1941 | } else { |
1928 | ring->add_request = i9xx_add_request; | 1942 | ring->add_request = i9xx_add_request; |
1929 | if (INTEL_INFO(dev)->gen < 4) | 1943 | if (INTEL_INFO(dev)->gen < 4) |
1930 | ring->flush = gen2_render_ring_flush; | 1944 | ring->flush = gen2_render_ring_flush; |
1931 | else | 1945 | else |
1932 | ring->flush = gen4_render_ring_flush; | 1946 | ring->flush = gen4_render_ring_flush; |
1933 | ring->get_seqno = ring_get_seqno; | 1947 | ring->get_seqno = ring_get_seqno; |
1934 | ring->set_seqno = ring_set_seqno; | 1948 | ring->set_seqno = ring_set_seqno; |
1935 | if (IS_GEN2(dev)) { | 1949 | if (IS_GEN2(dev)) { |
1936 | ring->irq_get = i8xx_ring_get_irq; | 1950 | ring->irq_get = i8xx_ring_get_irq; |
1937 | ring->irq_put = i8xx_ring_put_irq; | 1951 | ring->irq_put = i8xx_ring_put_irq; |
1938 | } else { | 1952 | } else { |
1939 | ring->irq_get = i9xx_ring_get_irq; | 1953 | ring->irq_get = i9xx_ring_get_irq; |
1940 | ring->irq_put = i9xx_ring_put_irq; | 1954 | ring->irq_put = i9xx_ring_put_irq; |
1941 | } | 1955 | } |
1942 | ring->irq_enable_mask = I915_USER_INTERRUPT; | 1956 | ring->irq_enable_mask = I915_USER_INTERRUPT; |
1943 | } | 1957 | } |
1944 | ring->write_tail = ring_write_tail; | 1958 | ring->write_tail = ring_write_tail; |
1945 | if (IS_HASWELL(dev)) | 1959 | if (IS_HASWELL(dev)) |
1946 | ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; | 1960 | ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; |
1947 | else if (IS_GEN8(dev)) | 1961 | else if (IS_GEN8(dev)) |
1948 | ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; | 1962 | ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; |
1949 | else if (INTEL_INFO(dev)->gen >= 6) | 1963 | else if (INTEL_INFO(dev)->gen >= 6) |
1950 | ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; | 1964 | ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; |
1951 | else if (INTEL_INFO(dev)->gen >= 4) | 1965 | else if (INTEL_INFO(dev)->gen >= 4) |
1952 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; | 1966 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; |
1953 | else if (IS_I830(dev) || IS_845G(dev)) | 1967 | else if (IS_I830(dev) || IS_845G(dev)) |
1954 | ring->dispatch_execbuffer = i830_dispatch_execbuffer; | 1968 | ring->dispatch_execbuffer = i830_dispatch_execbuffer; |
1955 | else | 1969 | else |
1956 | ring->dispatch_execbuffer = i915_dispatch_execbuffer; | 1970 | ring->dispatch_execbuffer = i915_dispatch_execbuffer; |
1957 | ring->init = init_render_ring; | 1971 | ring->init = init_render_ring; |
1958 | ring->cleanup = render_ring_cleanup; | 1972 | ring->cleanup = render_ring_cleanup; |
1959 | 1973 | ||
1960 | /* Workaround batchbuffer to combat CS tlb bug. */ | 1974 | /* Workaround batchbuffer to combat CS tlb bug. */ |
1961 | if (HAS_BROKEN_CS_TLB(dev)) { | 1975 | if (HAS_BROKEN_CS_TLB(dev)) { |
1962 | struct drm_i915_gem_object *obj; | 1976 | struct drm_i915_gem_object *obj; |
1963 | int ret; | 1977 | int ret; |
1964 | 1978 | ||
1965 | obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); | 1979 | obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); |
1966 | if (obj == NULL) { | 1980 | if (obj == NULL) { |
1967 | DRM_ERROR("Failed to allocate batch bo\n"); | 1981 | DRM_ERROR("Failed to allocate batch bo\n"); |
1968 | return -ENOMEM; | 1982 | return -ENOMEM; |
1969 | } | 1983 | } |
1970 | 1984 | ||
1971 | ret = i915_gem_obj_ggtt_pin(obj, 0, 0); | 1985 | ret = i915_gem_obj_ggtt_pin(obj, 0, 0); |
1972 | if (ret != 0) { | 1986 | if (ret != 0) { |
1973 | drm_gem_object_unreference(&obj->base); | 1987 | drm_gem_object_unreference(&obj->base); |
1974 | DRM_ERROR("Failed to ping batch bo\n"); | 1988 | DRM_ERROR("Failed to ping batch bo\n"); |
1975 | return ret; | 1989 | return ret; |
1976 | } | 1990 | } |
1977 | 1991 | ||
1978 | ring->scratch.obj = obj; | 1992 | ring->scratch.obj = obj; |
1979 | ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); | 1993 | ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); |
1980 | } | 1994 | } |
1981 | 1995 | ||
1982 | return intel_init_ring_buffer(dev, ring); | 1996 | return intel_init_ring_buffer(dev, ring); |
1983 | } | 1997 | } |
1984 | 1998 | ||
1985 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) | 1999 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) |
1986 | { | 2000 | { |
1987 | struct drm_i915_private *dev_priv = dev->dev_private; | 2001 | struct drm_i915_private *dev_priv = dev->dev_private; |
1988 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | 2002 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; |
1989 | int ret; | 2003 | int ret; |
1990 | 2004 | ||
1991 | ring->name = "render ring"; | 2005 | ring->name = "render ring"; |
1992 | ring->id = RCS; | 2006 | ring->id = RCS; |
1993 | ring->mmio_base = RENDER_RING_BASE; | 2007 | ring->mmio_base = RENDER_RING_BASE; |
1994 | 2008 | ||
1995 | if (INTEL_INFO(dev)->gen >= 6) { | 2009 | if (INTEL_INFO(dev)->gen >= 6) { |
1996 | /* non-kms not supported on gen6+ */ | 2010 | /* non-kms not supported on gen6+ */ |
1997 | return -ENODEV; | 2011 | return -ENODEV; |
1998 | } | 2012 | } |
1999 | 2013 | ||
2000 | /* Note: gem is not supported on gen5/ilk without kms (the corresponding | 2014 | /* Note: gem is not supported on gen5/ilk without kms (the corresponding |
2001 | * gem_init ioctl returns with -ENODEV). Hence we do not need to set up | 2015 | * gem_init ioctl returns with -ENODEV). Hence we do not need to set up |
2002 | * the special gen5 functions. */ | 2016 | * the special gen5 functions. */ |
2003 | ring->add_request = i9xx_add_request; | 2017 | ring->add_request = i9xx_add_request; |
2004 | if (INTEL_INFO(dev)->gen < 4) | 2018 | if (INTEL_INFO(dev)->gen < 4) |
2005 | ring->flush = gen2_render_ring_flush; | 2019 | ring->flush = gen2_render_ring_flush; |
2006 | else | 2020 | else |
2007 | ring->flush = gen4_render_ring_flush; | 2021 | ring->flush = gen4_render_ring_flush; |
2008 | ring->get_seqno = ring_get_seqno; | 2022 | ring->get_seqno = ring_get_seqno; |
2009 | ring->set_seqno = ring_set_seqno; | 2023 | ring->set_seqno = ring_set_seqno; |
2010 | if (IS_GEN2(dev)) { | 2024 | if (IS_GEN2(dev)) { |
2011 | ring->irq_get = i8xx_ring_get_irq; | 2025 | ring->irq_get = i8xx_ring_get_irq; |
2012 | ring->irq_put = i8xx_ring_put_irq; | 2026 | ring->irq_put = i8xx_ring_put_irq; |
2013 | } else { | 2027 | } else { |
2014 | ring->irq_get = i9xx_ring_get_irq; | 2028 | ring->irq_get = i9xx_ring_get_irq; |
2015 | ring->irq_put = i9xx_ring_put_irq; | 2029 | ring->irq_put = i9xx_ring_put_irq; |
2016 | } | 2030 | } |
2017 | ring->irq_enable_mask = I915_USER_INTERRUPT; | 2031 | ring->irq_enable_mask = I915_USER_INTERRUPT; |
2018 | ring->write_tail = ring_write_tail; | 2032 | ring->write_tail = ring_write_tail; |
2019 | if (INTEL_INFO(dev)->gen >= 4) | 2033 | if (INTEL_INFO(dev)->gen >= 4) |
2020 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; | 2034 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; |
2021 | else if (IS_I830(dev) || IS_845G(dev)) | 2035 | else if (IS_I830(dev) || IS_845G(dev)) |
2022 | ring->dispatch_execbuffer = i830_dispatch_execbuffer; | 2036 | ring->dispatch_execbuffer = i830_dispatch_execbuffer; |
2023 | else | 2037 | else |
2024 | ring->dispatch_execbuffer = i915_dispatch_execbuffer; | 2038 | ring->dispatch_execbuffer = i915_dispatch_execbuffer; |
2025 | ring->init = init_render_ring; | 2039 | ring->init = init_render_ring; |
2026 | ring->cleanup = render_ring_cleanup; | 2040 | ring->cleanup = render_ring_cleanup; |
2027 | 2041 | ||
2028 | ring->dev = dev; | 2042 | ring->dev = dev; |
2029 | INIT_LIST_HEAD(&ring->active_list); | 2043 | INIT_LIST_HEAD(&ring->active_list); |
2030 | INIT_LIST_HEAD(&ring->request_list); | 2044 | INIT_LIST_HEAD(&ring->request_list); |
2031 | 2045 | ||
2032 | ring->size = size; | 2046 | ring->size = size; |
2033 | ring->effective_size = ring->size; | 2047 | ring->effective_size = ring->size; |
2034 | if (IS_I830(ring->dev) || IS_845G(ring->dev)) | 2048 | if (IS_I830(ring->dev) || IS_845G(ring->dev)) |
2035 | ring->effective_size -= 128; | 2049 | ring->effective_size -= 128; |
2036 | 2050 | ||
2037 | ring->virtual_start = ioremap_wc(start, size); | 2051 | ring->virtual_start = ioremap_wc(start, size); |
2038 | if (ring->virtual_start == NULL) { | 2052 | if (ring->virtual_start == NULL) { |
2039 | DRM_ERROR("can not ioremap virtual address for" | 2053 | DRM_ERROR("can not ioremap virtual address for" |
2040 | " ring buffer\n"); | 2054 | " ring buffer\n"); |
2041 | return -ENOMEM; | 2055 | return -ENOMEM; |
2042 | } | 2056 | } |
2043 | 2057 | ||
2044 | if (!I915_NEED_GFX_HWS(dev)) { | 2058 | if (!I915_NEED_GFX_HWS(dev)) { |
2045 | ret = init_phys_status_page(ring); | 2059 | ret = init_phys_status_page(ring); |
2046 | if (ret) | 2060 | if (ret) |
2047 | return ret; | 2061 | return ret; |
2048 | } | 2062 | } |
2049 | 2063 | ||
2050 | return 0; | 2064 | return 0; |
2051 | } | 2065 | } |
2052 | 2066 | ||
2053 | int intel_init_bsd_ring_buffer(struct drm_device *dev) | 2067 | int intel_init_bsd_ring_buffer(struct drm_device *dev) |
2054 | { | 2068 | { |
2055 | struct drm_i915_private *dev_priv = dev->dev_private; | 2069 | struct drm_i915_private *dev_priv = dev->dev_private; |
2056 | struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; | 2070 | struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; |
2057 | 2071 | ||
2058 | ring->name = "bsd ring"; | 2072 | ring->name = "bsd ring"; |
2059 | ring->id = VCS; | 2073 | ring->id = VCS; |
2060 | 2074 | ||
2061 | ring->write_tail = ring_write_tail; | 2075 | ring->write_tail = ring_write_tail; |
2062 | if (INTEL_INFO(dev)->gen >= 6) { | 2076 | if (INTEL_INFO(dev)->gen >= 6) { |
2063 | ring->mmio_base = GEN6_BSD_RING_BASE; | 2077 | ring->mmio_base = GEN6_BSD_RING_BASE; |
2064 | /* gen6 bsd needs a special wa for tail updates */ | 2078 | /* gen6 bsd needs a special wa for tail updates */ |
2065 | if (IS_GEN6(dev)) | 2079 | if (IS_GEN6(dev)) |
2066 | ring->write_tail = gen6_bsd_ring_write_tail; | 2080 | ring->write_tail = gen6_bsd_ring_write_tail; |
2067 | ring->flush = gen6_bsd_ring_flush; | 2081 | ring->flush = gen6_bsd_ring_flush; |
2068 | ring->add_request = gen6_add_request; | 2082 | ring->add_request = gen6_add_request; |
2069 | ring->get_seqno = gen6_ring_get_seqno; | 2083 | ring->get_seqno = gen6_ring_get_seqno; |
2070 | ring->set_seqno = ring_set_seqno; | 2084 | ring->set_seqno = ring_set_seqno; |
2071 | if (INTEL_INFO(dev)->gen >= 8) { | 2085 | if (INTEL_INFO(dev)->gen >= 8) { |
2072 | ring->irq_enable_mask = | 2086 | ring->irq_enable_mask = |
2073 | GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; | 2087 | GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; |
2074 | ring->irq_get = gen8_ring_get_irq; | 2088 | ring->irq_get = gen8_ring_get_irq; |
2075 | ring->irq_put = gen8_ring_put_irq; | 2089 | ring->irq_put = gen8_ring_put_irq; |
2076 | ring->dispatch_execbuffer = | 2090 | ring->dispatch_execbuffer = |
2077 | gen8_ring_dispatch_execbuffer; | 2091 | gen8_ring_dispatch_execbuffer; |
2078 | } else { | 2092 | } else { |
2079 | ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; | 2093 | ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; |
2080 | ring->irq_get = gen6_ring_get_irq; | 2094 | ring->irq_get = gen6_ring_get_irq; |
2081 | ring->irq_put = gen6_ring_put_irq; | 2095 | ring->irq_put = gen6_ring_put_irq; |
2082 | ring->dispatch_execbuffer = | 2096 | ring->dispatch_execbuffer = |
2083 | gen6_ring_dispatch_execbuffer; | 2097 | gen6_ring_dispatch_execbuffer; |
2084 | } | 2098 | } |
2085 | ring->sync_to = gen6_ring_sync; | 2099 | ring->sync_to = gen6_ring_sync; |
2086 | ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR; | 2100 | ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR; |
2087 | ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID; | 2101 | ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID; |
2088 | ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB; | 2102 | ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB; |
2089 | ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE; | 2103 | ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE; |
2090 | ring->signal_mbox[RCS] = GEN6_RVSYNC; | 2104 | ring->signal_mbox[RCS] = GEN6_RVSYNC; |
2091 | ring->signal_mbox[VCS] = GEN6_NOSYNC; | 2105 | ring->signal_mbox[VCS] = GEN6_NOSYNC; |
2092 | ring->signal_mbox[BCS] = GEN6_BVSYNC; | 2106 | ring->signal_mbox[BCS] = GEN6_BVSYNC; |
2093 | ring->signal_mbox[VECS] = GEN6_VEVSYNC; | 2107 | ring->signal_mbox[VECS] = GEN6_VEVSYNC; |
2094 | } else { | 2108 | } else { |
2095 | ring->mmio_base = BSD_RING_BASE; | 2109 | ring->mmio_base = BSD_RING_BASE; |
2096 | ring->flush = bsd_ring_flush; | 2110 | ring->flush = bsd_ring_flush; |
2097 | ring->add_request = i9xx_add_request; | 2111 | ring->add_request = i9xx_add_request; |
2098 | ring->get_seqno = ring_get_seqno; | 2112 | ring->get_seqno = ring_get_seqno; |
2099 | ring->set_seqno = ring_set_seqno; | 2113 | ring->set_seqno = ring_set_seqno; |
2100 | if (IS_GEN5(dev)) { | 2114 | if (IS_GEN5(dev)) { |
2101 | ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT; | 2115 | ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT; |
2102 | ring->irq_get = gen5_ring_get_irq; | 2116 | ring->irq_get = gen5_ring_get_irq; |
2103 | ring->irq_put = gen5_ring_put_irq; | 2117 | ring->irq_put = gen5_ring_put_irq; |
2104 | } else { | 2118 | } else { |
2105 | ring->irq_enable_mask = I915_BSD_USER_INTERRUPT; | 2119 | ring->irq_enable_mask = I915_BSD_USER_INTERRUPT; |
2106 | ring->irq_get = i9xx_ring_get_irq; | 2120 | ring->irq_get = i9xx_ring_get_irq; |
2107 | ring->irq_put = i9xx_ring_put_irq; | 2121 | ring->irq_put = i9xx_ring_put_irq; |
2108 | } | 2122 | } |
2109 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; | 2123 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; |
2110 | } | 2124 | } |
2111 | ring->init = init_ring_common; | 2125 | ring->init = init_ring_common; |
2112 | 2126 | ||
2113 | return intel_init_ring_buffer(dev, ring); | 2127 | return intel_init_ring_buffer(dev, ring); |
2114 | } | 2128 | } |
2115 | 2129 | ||
2116 | int intel_init_blt_ring_buffer(struct drm_device *dev) | 2130 | int intel_init_blt_ring_buffer(struct drm_device *dev) |
2117 | { | 2131 | { |
2118 | struct drm_i915_private *dev_priv = dev->dev_private; | 2132 | struct drm_i915_private *dev_priv = dev->dev_private; |
2119 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; | 2133 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; |
2120 | 2134 | ||
2121 | ring->name = "blitter ring"; | 2135 | ring->name = "blitter ring"; |
2122 | ring->id = BCS; | 2136 | ring->id = BCS; |
2123 | 2137 | ||
2124 | ring->mmio_base = BLT_RING_BASE; | 2138 | ring->mmio_base = BLT_RING_BASE; |
2125 | ring->write_tail = ring_write_tail; | 2139 | ring->write_tail = ring_write_tail; |
2126 | ring->flush = gen6_ring_flush; | 2140 | ring->flush = gen6_ring_flush; |
2127 | ring->add_request = gen6_add_request; | 2141 | ring->add_request = gen6_add_request; |
2128 | ring->get_seqno = gen6_ring_get_seqno; | 2142 | ring->get_seqno = gen6_ring_get_seqno; |
2129 | ring->set_seqno = ring_set_seqno; | 2143 | ring->set_seqno = ring_set_seqno; |
2130 | if (INTEL_INFO(dev)->gen >= 8) { | 2144 | if (INTEL_INFO(dev)->gen >= 8) { |
2131 | ring->irq_enable_mask = | 2145 | ring->irq_enable_mask = |
2132 | GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; | 2146 | GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; |
2133 | ring->irq_get = gen8_ring_get_irq; | 2147 | ring->irq_get = gen8_ring_get_irq; |
2134 | ring->irq_put = gen8_ring_put_irq; | 2148 | ring->irq_put = gen8_ring_put_irq; |
2135 | ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; | 2149 | ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; |
2136 | } else { | 2150 | } else { |
2137 | ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; | 2151 | ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; |
2138 | ring->irq_get = gen6_ring_get_irq; | 2152 | ring->irq_get = gen6_ring_get_irq; |
2139 | ring->irq_put = gen6_ring_put_irq; | 2153 | ring->irq_put = gen6_ring_put_irq; |
2140 | ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; | 2154 | ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; |
2141 | } | 2155 | } |
2142 | ring->sync_to = gen6_ring_sync; | 2156 | ring->sync_to = gen6_ring_sync; |
2143 | ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR; | 2157 | ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR; |
2144 | ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV; | 2158 | ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV; |
2145 | ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID; | 2159 | ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID; |
2146 | ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE; | 2160 | ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE; |
2147 | ring->signal_mbox[RCS] = GEN6_RBSYNC; | 2161 | ring->signal_mbox[RCS] = GEN6_RBSYNC; |
2148 | ring->signal_mbox[VCS] = GEN6_VBSYNC; | 2162 | ring->signal_mbox[VCS] = GEN6_VBSYNC; |
2149 | ring->signal_mbox[BCS] = GEN6_NOSYNC; | 2163 | ring->signal_mbox[BCS] = GEN6_NOSYNC; |
2150 | ring->signal_mbox[VECS] = GEN6_VEBSYNC; | 2164 | ring->signal_mbox[VECS] = GEN6_VEBSYNC; |
2151 | ring->init = init_ring_common; | 2165 | ring->init = init_ring_common; |
2152 | 2166 | ||
2153 | return intel_init_ring_buffer(dev, ring); | 2167 | return intel_init_ring_buffer(dev, ring); |
2154 | } | 2168 | } |
2155 | 2169 | ||
2156 | int intel_init_vebox_ring_buffer(struct drm_device *dev) | 2170 | int intel_init_vebox_ring_buffer(struct drm_device *dev) |
2157 | { | 2171 | { |
2158 | struct drm_i915_private *dev_priv = dev->dev_private; | 2172 | struct drm_i915_private *dev_priv = dev->dev_private; |
2159 | struct intel_ring_buffer *ring = &dev_priv->ring[VECS]; | 2173 | struct intel_ring_buffer *ring = &dev_priv->ring[VECS]; |
2160 | 2174 | ||
2161 | ring->name = "video enhancement ring"; | 2175 | ring->name = "video enhancement ring"; |
2162 | ring->id = VECS; | 2176 | ring->id = VECS; |
2163 | 2177 | ||
2164 | ring->mmio_base = VEBOX_RING_BASE; | 2178 | ring->mmio_base = VEBOX_RING_BASE; |
2165 | ring->write_tail = ring_write_tail; | 2179 | ring->write_tail = ring_write_tail; |
2166 | ring->flush = gen6_ring_flush; | 2180 | ring->flush = gen6_ring_flush; |
2167 | ring->add_request = gen6_add_request; | 2181 | ring->add_request = gen6_add_request; |
2168 | ring->get_seqno = gen6_ring_get_seqno; | 2182 | ring->get_seqno = gen6_ring_get_seqno; |
2169 | ring->set_seqno = ring_set_seqno; | 2183 | ring->set_seqno = ring_set_seqno; |
2170 | 2184 | ||
2171 | if (INTEL_INFO(dev)->gen >= 8) { | 2185 | if (INTEL_INFO(dev)->gen >= 8) { |
2172 | ring->irq_enable_mask = | 2186 | ring->irq_enable_mask = |
2173 | GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; | 2187 | GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; |
2174 | ring->irq_get = gen8_ring_get_irq; | 2188 | ring->irq_get = gen8_ring_get_irq; |
2175 | ring->irq_put = gen8_ring_put_irq; | 2189 | ring->irq_put = gen8_ring_put_irq; |
2176 | ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; | 2190 | ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; |
2177 | } else { | 2191 | } else { |
2178 | ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; | 2192 | ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; |
2179 | ring->irq_get = hsw_vebox_get_irq; | 2193 | ring->irq_get = hsw_vebox_get_irq; |
2180 | ring->irq_put = hsw_vebox_put_irq; | 2194 | ring->irq_put = hsw_vebox_put_irq; |
2181 | ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; | 2195 | ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; |
2182 | } | 2196 | } |
2183 | ring->sync_to = gen6_ring_sync; | 2197 | ring->sync_to = gen6_ring_sync; |
2184 | ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER; | 2198 | ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER; |
2185 | ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV; | 2199 | ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV; |
2186 | ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB; | 2200 | ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB; |
2187 | ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID; | 2201 | ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID; |
2188 | ring->signal_mbox[RCS] = GEN6_RVESYNC; | 2202 | ring->signal_mbox[RCS] = GEN6_RVESYNC; |
2189 | ring->signal_mbox[VCS] = GEN6_VVESYNC; | 2203 | ring->signal_mbox[VCS] = GEN6_VVESYNC; |
2190 | ring->signal_mbox[BCS] = GEN6_BVESYNC; | 2204 | ring->signal_mbox[BCS] = GEN6_BVESYNC; |
2191 | ring->signal_mbox[VECS] = GEN6_NOSYNC; | 2205 | ring->signal_mbox[VECS] = GEN6_NOSYNC; |
2192 | ring->init = init_ring_common; | 2206 | ring->init = init_ring_common; |
2193 | 2207 | ||
2194 | return intel_init_ring_buffer(dev, ring); | 2208 | return intel_init_ring_buffer(dev, ring); |
2195 | } | 2209 | } |
2196 | 2210 | ||
2197 | int | 2211 | int |
2198 | intel_ring_flush_all_caches(struct intel_ring_buffer *ring) | 2212 | intel_ring_flush_all_caches(struct intel_ring_buffer *ring) |
2199 | { | 2213 | { |
2200 | int ret; | 2214 | int ret; |
2201 | 2215 | ||
2202 | if (!ring->gpu_caches_dirty) | 2216 | if (!ring->gpu_caches_dirty) |
2203 | return 0; | 2217 | return 0; |
2204 | 2218 | ||
2205 | ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS); | 2219 | ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS); |
2206 | if (ret) | 2220 | if (ret) |
2207 | return ret; | 2221 | return ret; |
2208 | 2222 | ||
2209 | trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS); | 2223 | trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS); |
2210 | 2224 | ||
2211 | ring->gpu_caches_dirty = false; | 2225 | ring->gpu_caches_dirty = false; |
2212 | return 0; | 2226 | return 0; |
2213 | } | 2227 | } |
2214 | 2228 | ||
2215 | int | 2229 | int |
2216 | intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring) | 2230 | intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring) |
2217 | { | 2231 | { |
2218 | uint32_t flush_domains; | 2232 | uint32_t flush_domains; |
2219 | int ret; | 2233 | int ret; |
2220 | 2234 | ||
2221 | flush_domains = 0; | 2235 | flush_domains = 0; |
2222 | if (ring->gpu_caches_dirty) | 2236 | if (ring->gpu_caches_dirty) |
2223 | flush_domains = I915_GEM_GPU_DOMAINS; | 2237 | flush_domains = I915_GEM_GPU_DOMAINS; |
2224 | 2238 | ||
2225 | ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); | 2239 | ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); |
drivers/gpu/drm/i915/intel_ringbuffer.h
1 | #ifndef _INTEL_RINGBUFFER_H_ | 1 | #ifndef _INTEL_RINGBUFFER_H_ |
2 | #define _INTEL_RINGBUFFER_H_ | 2 | #define _INTEL_RINGBUFFER_H_ |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use" | 5 | * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use" |
6 | * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use" | 6 | * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use" |
7 | * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use" | 7 | * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use" |
8 | * | 8 | * |
9 | * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same | 9 | * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same |
10 | * cacheline, the Head Pointer must not be greater than the Tail | 10 | * cacheline, the Head Pointer must not be greater than the Tail |
11 | * Pointer." | 11 | * Pointer." |
12 | */ | 12 | */ |
13 | #define I915_RING_FREE_SPACE 64 | 13 | #define I915_RING_FREE_SPACE 64 |
14 | 14 | ||
15 | struct intel_hw_status_page { | 15 | struct intel_hw_status_page { |
16 | u32 *page_addr; | 16 | u32 *page_addr; |
17 | unsigned int gfx_addr; | 17 | unsigned int gfx_addr; |
18 | struct drm_i915_gem_object *obj; | 18 | struct drm_i915_gem_object *obj; |
19 | }; | 19 | }; |
20 | 20 | ||
21 | #define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base)) | 21 | #define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base)) |
22 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) | 22 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) |
23 | 23 | ||
24 | #define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base)) | 24 | #define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base)) |
25 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) | 25 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) |
26 | 26 | ||
27 | #define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base)) | 27 | #define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base)) |
28 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) | 28 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) |
29 | 29 | ||
30 | #define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base)) | 30 | #define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base)) |
31 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) | 31 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) |
32 | 32 | ||
33 | #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) | 33 | #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) |
34 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) | 34 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) |
35 | 35 | ||
36 | #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) | 36 | #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) |
37 | #define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val) | ||
37 | 38 | ||
38 | enum intel_ring_hangcheck_action { | 39 | enum intel_ring_hangcheck_action { |
39 | HANGCHECK_IDLE = 0, | 40 | HANGCHECK_IDLE = 0, |
40 | HANGCHECK_WAIT, | 41 | HANGCHECK_WAIT, |
41 | HANGCHECK_ACTIVE, | 42 | HANGCHECK_ACTIVE, |
42 | HANGCHECK_KICK, | 43 | HANGCHECK_KICK, |
43 | HANGCHECK_HUNG, | 44 | HANGCHECK_HUNG, |
44 | }; | 45 | }; |
45 | 46 | ||
46 | #define HANGCHECK_SCORE_RING_HUNG 31 | 47 | #define HANGCHECK_SCORE_RING_HUNG 31 |
47 | 48 | ||
/* Per-ring state tracked between hangcheck samples. */
struct intel_ring_hangcheck {
	u64 acthd;	/* last sampled active head address */
	u32 seqno;	/* last sampled seqno */
	int score;	/* accumulated badness; compared against HANGCHECK_SCORE_RING_HUNG */
	enum intel_ring_hangcheck_action action;
	bool deadlock;
};
55 | 56 | ||
/*
 * State for one hardware command ring (engine): the ring buffer object and
 * its CPU mapping, software head/tail bookkeeping, interrupt state, and the
 * function pointers that abstract per-generation differences in how the
 * engine is driven.
 */
struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,	/* render command streamer */
		VCS,		/* video (BSD) */
		BCS,		/* blitter */
		VECS,		/* video enhancement */
	} id;
#define I915_NUM_RINGS 4
	u32		mmio_base;	/* base of this ring's register block */
	void		__iomem *virtual_start; /* CPU mapping of the ring buffer */
	struct drm_device *dev;
	struct drm_i915_gem_object *obj; /* GEM object backing the ring */

	u32		head;	/* software copy of the ring head, in bytes */
	u32		tail;	/* byte offset where the next dword is emitted */
	int		space;	/* bytes free before the tail would hit the head */
	int		size;	/* total ring size in bytes (power of two) */
	int		effective_size; /* usable portion of size */
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32		last_retired_head;

	unsigned	irq_refcount;	/* protected by dev_priv->irq_lock */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_ring_buffer *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_ring_buffer *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	/* our mbox written by others */
	u32		semaphore_register[I915_NUM_RINGS];
	/* mboxes this ring signals to */
	u32		signal_mbox[I915_NUM_RINGS];

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	/* per-ring scratch object and its GTT/CPU views */
	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	/*
	 * Tables of commands the command parser needs to know about
	 * for this ring.
	 */
	const struct drm_i915_cmd_table *cmd_tables;
	int cmd_table_count;

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const u32 *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
202 | 203 | ||
203 | static inline bool | 204 | static inline bool |
204 | intel_ring_initialized(struct intel_ring_buffer *ring) | 205 | intel_ring_initialized(struct intel_ring_buffer *ring) |
205 | { | 206 | { |
206 | return ring->obj != NULL; | 207 | return ring->obj != NULL; |
207 | } | 208 | } |
208 | 209 | ||
209 | static inline unsigned | 210 | static inline unsigned |
210 | intel_ring_flag(struct intel_ring_buffer *ring) | 211 | intel_ring_flag(struct intel_ring_buffer *ring) |
211 | { | 212 | { |
212 | return 1 << ring->id; | 213 | return 1 << ring->id; |
213 | } | 214 | } |
214 | 215 | ||
215 | static inline u32 | 216 | static inline u32 |
216 | intel_ring_sync_index(struct intel_ring_buffer *ring, | 217 | intel_ring_sync_index(struct intel_ring_buffer *ring, |
217 | struct intel_ring_buffer *other) | 218 | struct intel_ring_buffer *other) |
218 | { | 219 | { |
219 | int idx; | 220 | int idx; |
220 | 221 | ||
221 | /* | 222 | /* |
222 | * cs -> 0 = vcs, 1 = bcs | 223 | * cs -> 0 = vcs, 1 = bcs |
223 | * vcs -> 0 = bcs, 1 = cs, | 224 | * vcs -> 0 = bcs, 1 = cs, |
224 | * bcs -> 0 = cs, 1 = vcs. | 225 | * bcs -> 0 = cs, 1 = vcs. |
225 | */ | 226 | */ |
226 | 227 | ||
227 | idx = (other - ring) - 1; | 228 | idx = (other - ring) - 1; |
228 | if (idx < 0) | 229 | if (idx < 0) |
229 | idx += I915_NUM_RINGS; | 230 | idx += I915_NUM_RINGS; |
230 | 231 | ||
231 | return idx; | 232 | return idx; |
232 | } | 233 | } |
233 | 234 | ||
/*
 * Read one dword from the ring's hardware status page. The barrier stops
 * the compiler from caching/reordering the load, since the page is written
 * by the GPU behind the compiler's back.
 */
static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}
242 | 243 | ||
/* Write one dword into the ring's hardware status page. */
static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}
249 | 250 | ||
/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
/* dword index where the driver stores the breadcrumb seqno */
#define I915_GEM_HWS_INDEX		0x20
/* dword index of the driver scratch slot, and its byte address form */
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
268 | 269 | ||
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
/* Emit one dword at the current tail and advance the tail by one dword.
 * Callers must have reserved space with intel_ring_begin() first. */
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
/* Wrap the software tail back into the ring (size is a power of two). */
static inline void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
}
void __intel_ring_advance(struct intel_ring_buffer *ring);
284 | 285 | ||
/* Ring lifecycle, flushing and per-engine init entry points
 * (implemented in intel_ringbuffer.c). */
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
/* Current software tail offset of the ring, in bytes. */
static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}
302 | 303 | ||
/* Seqno reserved for the request currently being built.
 * BUGs if no lazy seqno has been allocated yet — callers must have
 * set up outstanding_lazy_seqno first. */
static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_seqno == 0);
	return ring->outstanding_lazy_seqno;
}
308 | 309 | ||
/* Arm a tracepoint reference on the ring's user interrupt: only takes the
 * irq reference (and records @seqno) if none is pending and irq_get()
 * succeeds — note the short-circuit && orders the refcount grab. */
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}
314 | 315 | ||
/* DRI warts: legacy (UMS/DRI1) render ring init path. */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */
319 | 320 |