Commit 0a03726ca982129b1e054f0e8c34ca7eea348acd
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull the intel i915 hibernation memory corruption fix from Dave Airlie:
 "I tracked down the misc memory corruption after i915 hibernate to the
  blinking fbcon cursor, and realised the i915 driver wasn't doing the
  fbdev suspend/resume calls at all. nouveau and radeon have done these
  calls for a long time. This has been fairly well tested and is
  definitely the main culprit in hibernate not working."

Yay.

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/i915: suspend fbdev device around suspend/hibernate
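For quick reference, the sketch below condenses the two hunks this pull adds to i915_drm_freeze() and i915_drm_thaw() in the diff that follows. It is not a standalone implementation: everything outside the console_lock()/intel_fbdev_set_suspend()/console_unlock() lines is elided and marked with comments, and all names come straight from the diff. The console lock is held around the call because fbcon's framebuffer access is serialised under it.

	#include <linux/console.h>	/* console_lock()/console_unlock() */

	/* Condensed from the i915_drm_freeze()/i915_drm_thaw() hunks shown
	 * in full in the diff below; only the three-line fbdev suspend and
	 * resume calls are new. */
	static int i915_drm_freeze(struct drm_device *dev)
	{
		/* ... existing KMS leavevt/save-state path unchanged ... */

		/* Park fbcon so its blinking cursor stops touching the
		 * framebuffer while state is saved for hibernation. */
		console_lock();
		intel_fbdev_set_suspend(dev, 1);
		console_unlock();

		return 0;
	}

	static int i915_drm_thaw(struct drm_device *dev)
	{
		int error = 0;

		/* ... existing restore-state/modeset path unchanged ... */

		/* Wake fbcon back up once the hardware is reinitialised. */
		console_lock();
		intel_fbdev_set_suspend(dev, 0);
		console_unlock();

		return error;
	}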
Showing 3 changed files
drivers/gpu/drm/i915/i915_drv.c
1 | /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- | 1 | /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- |
2 | */ | 2 | */ |
3 | /* | 3 | /* |
4 | * | 4 | * |
5 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. | 5 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
6 | * All Rights Reserved. | 6 | * All Rights Reserved. |
7 | * | 7 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 8 | * Permission is hereby granted, free of charge, to any person obtaining a |
9 | * copy of this software and associated documentation files (the | 9 | * copy of this software and associated documentation files (the |
10 | * "Software"), to deal in the Software without restriction, including | 10 | * "Software"), to deal in the Software without restriction, including |
11 | * without limitation the rights to use, copy, modify, merge, publish, | 11 | * without limitation the rights to use, copy, modify, merge, publish, |
12 | * distribute, sub license, and/or sell copies of the Software, and to | 12 | * distribute, sub license, and/or sell copies of the Software, and to |
13 | * permit persons to whom the Software is furnished to do so, subject to | 13 | * permit persons to whom the Software is furnished to do so, subject to |
14 | * the following conditions: | 14 | * the following conditions: |
15 | * | 15 | * |
16 | * The above copyright notice and this permission notice (including the | 16 | * The above copyright notice and this permission notice (including the |
17 | * next paragraph) shall be included in all copies or substantial portions | 17 | * next paragraph) shall be included in all copies or substantial portions |
18 | * of the Software. | 18 | * of the Software. |
19 | * | 19 | * |
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | 20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
21 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 21 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
22 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | 22 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
23 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | 23 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR |
24 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | 24 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
25 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | 25 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
26 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 26 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
27 | * | 27 | * |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
31 | #include "drmP.h" | 31 | #include "drmP.h" |
32 | #include "drm.h" | 32 | #include "drm.h" |
33 | #include "i915_drm.h" | 33 | #include "i915_drm.h" |
34 | #include "i915_drv.h" | 34 | #include "i915_drv.h" |
35 | #include "intel_drv.h" | 35 | #include "intel_drv.h" |
36 | 36 | ||
37 | #include <linux/console.h> | 37 | #include <linux/console.h> |
38 | #include <linux/module.h> | 38 | #include <linux/module.h> |
39 | #include "drm_crtc_helper.h" | 39 | #include "drm_crtc_helper.h" |
40 | 40 | ||
41 | static int i915_modeset __read_mostly = -1; | 41 | static int i915_modeset __read_mostly = -1; |
42 | module_param_named(modeset, i915_modeset, int, 0400); | 42 | module_param_named(modeset, i915_modeset, int, 0400); |
43 | MODULE_PARM_DESC(modeset, | 43 | MODULE_PARM_DESC(modeset, |
44 | "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, " | 44 | "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, " |
45 | "1=on, -1=force vga console preference [default])"); | 45 | "1=on, -1=force vga console preference [default])"); |
46 | 46 | ||
47 | unsigned int i915_fbpercrtc __always_unused = 0; | 47 | unsigned int i915_fbpercrtc __always_unused = 0; |
48 | module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); | 48 | module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); |
49 | 49 | ||
50 | int i915_panel_ignore_lid __read_mostly = 0; | 50 | int i915_panel_ignore_lid __read_mostly = 0; |
51 | module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); | 51 | module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); |
52 | MODULE_PARM_DESC(panel_ignore_lid, | 52 | MODULE_PARM_DESC(panel_ignore_lid, |
53 | "Override lid status (0=autodetect [default], 1=lid open, " | 53 | "Override lid status (0=autodetect [default], 1=lid open, " |
54 | "-1=lid closed)"); | 54 | "-1=lid closed)"); |
55 | 55 | ||
56 | unsigned int i915_powersave __read_mostly = 1; | 56 | unsigned int i915_powersave __read_mostly = 1; |
57 | module_param_named(powersave, i915_powersave, int, 0600); | 57 | module_param_named(powersave, i915_powersave, int, 0600); |
58 | MODULE_PARM_DESC(powersave, | 58 | MODULE_PARM_DESC(powersave, |
59 | "Enable powersavings, fbc, downclocking, etc. (default: true)"); | 59 | "Enable powersavings, fbc, downclocking, etc. (default: true)"); |
60 | 60 | ||
61 | int i915_semaphores __read_mostly = -1; | 61 | int i915_semaphores __read_mostly = -1; |
62 | module_param_named(semaphores, i915_semaphores, int, 0600); | 62 | module_param_named(semaphores, i915_semaphores, int, 0600); |
63 | MODULE_PARM_DESC(semaphores, | 63 | MODULE_PARM_DESC(semaphores, |
64 | "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))"); | 64 | "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))"); |
65 | 65 | ||
66 | int i915_enable_rc6 __read_mostly = -1; | 66 | int i915_enable_rc6 __read_mostly = -1; |
67 | module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); | 67 | module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); |
68 | MODULE_PARM_DESC(i915_enable_rc6, | 68 | MODULE_PARM_DESC(i915_enable_rc6, |
69 | "Enable power-saving render C-state 6 (default: -1 (use per-chip default)"); | 69 | "Enable power-saving render C-state 6 (default: -1 (use per-chip default)"); |
70 | 70 | ||
71 | int i915_enable_fbc __read_mostly = -1; | 71 | int i915_enable_fbc __read_mostly = -1; |
72 | module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); | 72 | module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); |
73 | MODULE_PARM_DESC(i915_enable_fbc, | 73 | MODULE_PARM_DESC(i915_enable_fbc, |
74 | "Enable frame buffer compression for power savings " | 74 | "Enable frame buffer compression for power savings " |
75 | "(default: -1 (use per-chip default))"); | 75 | "(default: -1 (use per-chip default))"); |
76 | 76 | ||
77 | unsigned int i915_lvds_downclock __read_mostly = 0; | 77 | unsigned int i915_lvds_downclock __read_mostly = 0; |
78 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); | 78 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); |
79 | MODULE_PARM_DESC(lvds_downclock, | 79 | MODULE_PARM_DESC(lvds_downclock, |
80 | "Use panel (LVDS/eDP) downclocking for power savings " | 80 | "Use panel (LVDS/eDP) downclocking for power savings " |
81 | "(default: false)"); | 81 | "(default: false)"); |
82 | 82 | ||
83 | int i915_panel_use_ssc __read_mostly = -1; | 83 | int i915_panel_use_ssc __read_mostly = -1; |
84 | module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); | 84 | module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); |
85 | MODULE_PARM_DESC(lvds_use_ssc, | 85 | MODULE_PARM_DESC(lvds_use_ssc, |
86 | "Use Spread Spectrum Clock with panels [LVDS/eDP] " | 86 | "Use Spread Spectrum Clock with panels [LVDS/eDP] " |
87 | "(default: auto from VBT)"); | 87 | "(default: auto from VBT)"); |
88 | 88 | ||
89 | int i915_vbt_sdvo_panel_type __read_mostly = -1; | 89 | int i915_vbt_sdvo_panel_type __read_mostly = -1; |
90 | module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); | 90 | module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); |
91 | MODULE_PARM_DESC(vbt_sdvo_panel_type, | 91 | MODULE_PARM_DESC(vbt_sdvo_panel_type, |
92 | "Override selection of SDVO panel mode in the VBT " | 92 | "Override selection of SDVO panel mode in the VBT " |
93 | "(default: auto)"); | 93 | "(default: auto)"); |
94 | 94 | ||
95 | static bool i915_try_reset __read_mostly = true; | 95 | static bool i915_try_reset __read_mostly = true; |
96 | module_param_named(reset, i915_try_reset, bool, 0600); | 96 | module_param_named(reset, i915_try_reset, bool, 0600); |
97 | MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); | 97 | MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); |
98 | 98 | ||
99 | bool i915_enable_hangcheck __read_mostly = true; | 99 | bool i915_enable_hangcheck __read_mostly = true; |
100 | module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644); | 100 | module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644); |
101 | MODULE_PARM_DESC(enable_hangcheck, | 101 | MODULE_PARM_DESC(enable_hangcheck, |
102 | "Periodically check GPU activity for detecting hangs. " | 102 | "Periodically check GPU activity for detecting hangs. " |
103 | "WARNING: Disabling this can cause system wide hangs. " | 103 | "WARNING: Disabling this can cause system wide hangs. " |
104 | "(default: true)"); | 104 | "(default: true)"); |
105 | 105 | ||
106 | bool i915_enable_ppgtt __read_mostly = 1; | 106 | bool i915_enable_ppgtt __read_mostly = 1; |
107 | module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600); | 107 | module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600); |
108 | MODULE_PARM_DESC(i915_enable_ppgtt, | 108 | MODULE_PARM_DESC(i915_enable_ppgtt, |
109 | "Enable PPGTT (default: true)"); | 109 | "Enable PPGTT (default: true)"); |
110 | 110 | ||
111 | static struct drm_driver driver; | 111 | static struct drm_driver driver; |
112 | extern int intel_agp_enabled; | 112 | extern int intel_agp_enabled; |
113 | 113 | ||
114 | #define INTEL_VGA_DEVICE(id, info) { \ | 114 | #define INTEL_VGA_DEVICE(id, info) { \ |
115 | .class = PCI_BASE_CLASS_DISPLAY << 16, \ | 115 | .class = PCI_BASE_CLASS_DISPLAY << 16, \ |
116 | .class_mask = 0xff0000, \ | 116 | .class_mask = 0xff0000, \ |
117 | .vendor = 0x8086, \ | 117 | .vendor = 0x8086, \ |
118 | .device = id, \ | 118 | .device = id, \ |
119 | .subvendor = PCI_ANY_ID, \ | 119 | .subvendor = PCI_ANY_ID, \ |
120 | .subdevice = PCI_ANY_ID, \ | 120 | .subdevice = PCI_ANY_ID, \ |
121 | .driver_data = (unsigned long) info } | 121 | .driver_data = (unsigned long) info } |
122 | 122 | ||
123 | static const struct intel_device_info intel_i830_info = { | 123 | static const struct intel_device_info intel_i830_info = { |
124 | .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, | 124 | .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, |
125 | .has_overlay = 1, .overlay_needs_physical = 1, | 125 | .has_overlay = 1, .overlay_needs_physical = 1, |
126 | }; | 126 | }; |
127 | 127 | ||
128 | static const struct intel_device_info intel_845g_info = { | 128 | static const struct intel_device_info intel_845g_info = { |
129 | .gen = 2, | 129 | .gen = 2, |
130 | .has_overlay = 1, .overlay_needs_physical = 1, | 130 | .has_overlay = 1, .overlay_needs_physical = 1, |
131 | }; | 131 | }; |
132 | 132 | ||
133 | static const struct intel_device_info intel_i85x_info = { | 133 | static const struct intel_device_info intel_i85x_info = { |
134 | .gen = 2, .is_i85x = 1, .is_mobile = 1, | 134 | .gen = 2, .is_i85x = 1, .is_mobile = 1, |
135 | .cursor_needs_physical = 1, | 135 | .cursor_needs_physical = 1, |
136 | .has_overlay = 1, .overlay_needs_physical = 1, | 136 | .has_overlay = 1, .overlay_needs_physical = 1, |
137 | }; | 137 | }; |
138 | 138 | ||
139 | static const struct intel_device_info intel_i865g_info = { | 139 | static const struct intel_device_info intel_i865g_info = { |
140 | .gen = 2, | 140 | .gen = 2, |
141 | .has_overlay = 1, .overlay_needs_physical = 1, | 141 | .has_overlay = 1, .overlay_needs_physical = 1, |
142 | }; | 142 | }; |
143 | 143 | ||
144 | static const struct intel_device_info intel_i915g_info = { | 144 | static const struct intel_device_info intel_i915g_info = { |
145 | .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, | 145 | .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, |
146 | .has_overlay = 1, .overlay_needs_physical = 1, | 146 | .has_overlay = 1, .overlay_needs_physical = 1, |
147 | }; | 147 | }; |
148 | static const struct intel_device_info intel_i915gm_info = { | 148 | static const struct intel_device_info intel_i915gm_info = { |
149 | .gen = 3, .is_mobile = 1, | 149 | .gen = 3, .is_mobile = 1, |
150 | .cursor_needs_physical = 1, | 150 | .cursor_needs_physical = 1, |
151 | .has_overlay = 1, .overlay_needs_physical = 1, | 151 | .has_overlay = 1, .overlay_needs_physical = 1, |
152 | .supports_tv = 1, | 152 | .supports_tv = 1, |
153 | }; | 153 | }; |
154 | static const struct intel_device_info intel_i945g_info = { | 154 | static const struct intel_device_info intel_i945g_info = { |
155 | .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, | 155 | .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, |
156 | .has_overlay = 1, .overlay_needs_physical = 1, | 156 | .has_overlay = 1, .overlay_needs_physical = 1, |
157 | }; | 157 | }; |
158 | static const struct intel_device_info intel_i945gm_info = { | 158 | static const struct intel_device_info intel_i945gm_info = { |
159 | .gen = 3, .is_i945gm = 1, .is_mobile = 1, | 159 | .gen = 3, .is_i945gm = 1, .is_mobile = 1, |
160 | .has_hotplug = 1, .cursor_needs_physical = 1, | 160 | .has_hotplug = 1, .cursor_needs_physical = 1, |
161 | .has_overlay = 1, .overlay_needs_physical = 1, | 161 | .has_overlay = 1, .overlay_needs_physical = 1, |
162 | .supports_tv = 1, | 162 | .supports_tv = 1, |
163 | }; | 163 | }; |
164 | 164 | ||
165 | static const struct intel_device_info intel_i965g_info = { | 165 | static const struct intel_device_info intel_i965g_info = { |
166 | .gen = 4, .is_broadwater = 1, | 166 | .gen = 4, .is_broadwater = 1, |
167 | .has_hotplug = 1, | 167 | .has_hotplug = 1, |
168 | .has_overlay = 1, | 168 | .has_overlay = 1, |
169 | }; | 169 | }; |
170 | 170 | ||
171 | static const struct intel_device_info intel_i965gm_info = { | 171 | static const struct intel_device_info intel_i965gm_info = { |
172 | .gen = 4, .is_crestline = 1, | 172 | .gen = 4, .is_crestline = 1, |
173 | .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, | 173 | .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, |
174 | .has_overlay = 1, | 174 | .has_overlay = 1, |
175 | .supports_tv = 1, | 175 | .supports_tv = 1, |
176 | }; | 176 | }; |
177 | 177 | ||
178 | static const struct intel_device_info intel_g33_info = { | 178 | static const struct intel_device_info intel_g33_info = { |
179 | .gen = 3, .is_g33 = 1, | 179 | .gen = 3, .is_g33 = 1, |
180 | .need_gfx_hws = 1, .has_hotplug = 1, | 180 | .need_gfx_hws = 1, .has_hotplug = 1, |
181 | .has_overlay = 1, | 181 | .has_overlay = 1, |
182 | }; | 182 | }; |
183 | 183 | ||
184 | static const struct intel_device_info intel_g45_info = { | 184 | static const struct intel_device_info intel_g45_info = { |
185 | .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, | 185 | .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, |
186 | .has_pipe_cxsr = 1, .has_hotplug = 1, | 186 | .has_pipe_cxsr = 1, .has_hotplug = 1, |
187 | .has_bsd_ring = 1, | 187 | .has_bsd_ring = 1, |
188 | }; | 188 | }; |
189 | 189 | ||
190 | static const struct intel_device_info intel_gm45_info = { | 190 | static const struct intel_device_info intel_gm45_info = { |
191 | .gen = 4, .is_g4x = 1, | 191 | .gen = 4, .is_g4x = 1, |
192 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, | 192 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, |
193 | .has_pipe_cxsr = 1, .has_hotplug = 1, | 193 | .has_pipe_cxsr = 1, .has_hotplug = 1, |
194 | .supports_tv = 1, | 194 | .supports_tv = 1, |
195 | .has_bsd_ring = 1, | 195 | .has_bsd_ring = 1, |
196 | }; | 196 | }; |
197 | 197 | ||
198 | static const struct intel_device_info intel_pineview_info = { | 198 | static const struct intel_device_info intel_pineview_info = { |
199 | .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, | 199 | .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, |
200 | .need_gfx_hws = 1, .has_hotplug = 1, | 200 | .need_gfx_hws = 1, .has_hotplug = 1, |
201 | .has_overlay = 1, | 201 | .has_overlay = 1, |
202 | }; | 202 | }; |
203 | 203 | ||
204 | static const struct intel_device_info intel_ironlake_d_info = { | 204 | static const struct intel_device_info intel_ironlake_d_info = { |
205 | .gen = 5, | 205 | .gen = 5, |
206 | .need_gfx_hws = 1, .has_hotplug = 1, | 206 | .need_gfx_hws = 1, .has_hotplug = 1, |
207 | .has_bsd_ring = 1, | 207 | .has_bsd_ring = 1, |
208 | }; | 208 | }; |
209 | 209 | ||
210 | static const struct intel_device_info intel_ironlake_m_info = { | 210 | static const struct intel_device_info intel_ironlake_m_info = { |
211 | .gen = 5, .is_mobile = 1, | 211 | .gen = 5, .is_mobile = 1, |
212 | .need_gfx_hws = 1, .has_hotplug = 1, | 212 | .need_gfx_hws = 1, .has_hotplug = 1, |
213 | .has_fbc = 1, | 213 | .has_fbc = 1, |
214 | .has_bsd_ring = 1, | 214 | .has_bsd_ring = 1, |
215 | }; | 215 | }; |
216 | 216 | ||
217 | static const struct intel_device_info intel_sandybridge_d_info = { | 217 | static const struct intel_device_info intel_sandybridge_d_info = { |
218 | .gen = 6, | 218 | .gen = 6, |
219 | .need_gfx_hws = 1, .has_hotplug = 1, | 219 | .need_gfx_hws = 1, .has_hotplug = 1, |
220 | .has_bsd_ring = 1, | 220 | .has_bsd_ring = 1, |
221 | .has_blt_ring = 1, | 221 | .has_blt_ring = 1, |
222 | .has_llc = 1, | 222 | .has_llc = 1, |
223 | }; | 223 | }; |
224 | 224 | ||
225 | static const struct intel_device_info intel_sandybridge_m_info = { | 225 | static const struct intel_device_info intel_sandybridge_m_info = { |
226 | .gen = 6, .is_mobile = 1, | 226 | .gen = 6, .is_mobile = 1, |
227 | .need_gfx_hws = 1, .has_hotplug = 1, | 227 | .need_gfx_hws = 1, .has_hotplug = 1, |
228 | .has_fbc = 1, | 228 | .has_fbc = 1, |
229 | .has_bsd_ring = 1, | 229 | .has_bsd_ring = 1, |
230 | .has_blt_ring = 1, | 230 | .has_blt_ring = 1, |
231 | .has_llc = 1, | 231 | .has_llc = 1, |
232 | }; | 232 | }; |
233 | 233 | ||
234 | static const struct intel_device_info intel_ivybridge_d_info = { | 234 | static const struct intel_device_info intel_ivybridge_d_info = { |
235 | .is_ivybridge = 1, .gen = 7, | 235 | .is_ivybridge = 1, .gen = 7, |
236 | .need_gfx_hws = 1, .has_hotplug = 1, | 236 | .need_gfx_hws = 1, .has_hotplug = 1, |
237 | .has_bsd_ring = 1, | 237 | .has_bsd_ring = 1, |
238 | .has_blt_ring = 1, | 238 | .has_blt_ring = 1, |
239 | .has_llc = 1, | 239 | .has_llc = 1, |
240 | }; | 240 | }; |
241 | 241 | ||
242 | static const struct intel_device_info intel_ivybridge_m_info = { | 242 | static const struct intel_device_info intel_ivybridge_m_info = { |
243 | .is_ivybridge = 1, .gen = 7, .is_mobile = 1, | 243 | .is_ivybridge = 1, .gen = 7, .is_mobile = 1, |
244 | .need_gfx_hws = 1, .has_hotplug = 1, | 244 | .need_gfx_hws = 1, .has_hotplug = 1, |
245 | .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */ | 245 | .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */ |
246 | .has_bsd_ring = 1, | 246 | .has_bsd_ring = 1, |
247 | .has_blt_ring = 1, | 247 | .has_blt_ring = 1, |
248 | .has_llc = 1, | 248 | .has_llc = 1, |
249 | }; | 249 | }; |
250 | 250 | ||
251 | static const struct pci_device_id pciidlist[] = { /* aka */ | 251 | static const struct pci_device_id pciidlist[] = { /* aka */ |
252 | INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ | 252 | INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ |
253 | INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ | 253 | INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ |
254 | INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */ | 254 | INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */ |
255 | INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), | 255 | INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), |
256 | INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */ | 256 | INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */ |
257 | INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */ | 257 | INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */ |
258 | INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */ | 258 | INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */ |
259 | INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */ | 259 | INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */ |
260 | INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */ | 260 | INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */ |
261 | INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */ | 261 | INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */ |
262 | INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */ | 262 | INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */ |
263 | INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */ | 263 | INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */ |
264 | INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */ | 264 | INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */ |
265 | INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */ | 265 | INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */ |
266 | INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */ | 266 | INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */ |
267 | INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */ | 267 | INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */ |
268 | INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */ | 268 | INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */ |
269 | INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */ | 269 | INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */ |
270 | INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */ | 270 | INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */ |
271 | INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */ | 271 | INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */ |
272 | INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */ | 272 | INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */ |
273 | INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */ | 273 | INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */ |
274 | INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */ | 274 | INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */ |
275 | INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ | 275 | INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ |
276 | INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ | 276 | INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ |
277 | INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ | 277 | INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ |
278 | INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ | 278 | INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ |
279 | INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), | 279 | INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), |
280 | INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), | 280 | INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), |
281 | INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), | 281 | INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), |
282 | INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), | 282 | INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), |
283 | INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), | 283 | INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), |
284 | INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), | 284 | INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), |
285 | INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), | 285 | INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), |
286 | INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), | 286 | INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), |
287 | INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), | 287 | INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), |
288 | INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), | 288 | INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), |
289 | INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), | 289 | INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), |
290 | INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ | 290 | INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ |
291 | INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ | 291 | INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ |
292 | INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ | 292 | INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ |
293 | INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ | 293 | INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ |
294 | INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ | 294 | INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ |
295 | {0, 0, 0} | 295 | {0, 0, 0} |
296 | }; | 296 | }; |
297 | 297 | ||
298 | #if defined(CONFIG_DRM_I915_KMS) | 298 | #if defined(CONFIG_DRM_I915_KMS) |
299 | MODULE_DEVICE_TABLE(pci, pciidlist); | 299 | MODULE_DEVICE_TABLE(pci, pciidlist); |
300 | #endif | 300 | #endif |
301 | 301 | ||
302 | #define INTEL_PCH_DEVICE_ID_MASK 0xff00 | 302 | #define INTEL_PCH_DEVICE_ID_MASK 0xff00 |
303 | #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 | 303 | #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 |
304 | #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 | 304 | #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 |
305 | #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 | 305 | #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 |
306 | 306 | ||
307 | void intel_detect_pch(struct drm_device *dev) | 307 | void intel_detect_pch(struct drm_device *dev) |
308 | { | 308 | { |
309 | struct drm_i915_private *dev_priv = dev->dev_private; | 309 | struct drm_i915_private *dev_priv = dev->dev_private; |
310 | struct pci_dev *pch; | 310 | struct pci_dev *pch; |
311 | 311 | ||
312 | /* | 312 | /* |
313 | * The reason to probe ISA bridge instead of Dev31:Fun0 is to | 313 | * The reason to probe ISA bridge instead of Dev31:Fun0 is to |
314 | * make graphics device passthrough work easy for VMM, that only | 314 | * make graphics device passthrough work easy for VMM, that only |
315 | * need to expose ISA bridge to let driver know the real hardware | 315 | * need to expose ISA bridge to let driver know the real hardware |
316 | * underneath. This is a requirement from virtualization team. | 316 | * underneath. This is a requirement from virtualization team. |
317 | */ | 317 | */ |
318 | pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); | 318 | pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); |
319 | if (pch) { | 319 | if (pch) { |
320 | if (pch->vendor == PCI_VENDOR_ID_INTEL) { | 320 | if (pch->vendor == PCI_VENDOR_ID_INTEL) { |
321 | int id; | 321 | int id; |
322 | id = pch->device & INTEL_PCH_DEVICE_ID_MASK; | 322 | id = pch->device & INTEL_PCH_DEVICE_ID_MASK; |
323 | 323 | ||
324 | if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { | 324 | if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { |
325 | dev_priv->pch_type = PCH_IBX; | 325 | dev_priv->pch_type = PCH_IBX; |
326 | DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); | 326 | DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); |
327 | } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { | 327 | } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { |
328 | dev_priv->pch_type = PCH_CPT; | 328 | dev_priv->pch_type = PCH_CPT; |
329 | DRM_DEBUG_KMS("Found CougarPoint PCH\n"); | 329 | DRM_DEBUG_KMS("Found CougarPoint PCH\n"); |
330 | } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { | 330 | } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { |
331 | /* PantherPoint is CPT compatible */ | 331 | /* PantherPoint is CPT compatible */ |
332 | dev_priv->pch_type = PCH_CPT; | 332 | dev_priv->pch_type = PCH_CPT; |
333 | DRM_DEBUG_KMS("Found PatherPoint PCH\n"); | 333 | DRM_DEBUG_KMS("Found PatherPoint PCH\n"); |
334 | } | 334 | } |
335 | } | 335 | } |
336 | pci_dev_put(pch); | 336 | pci_dev_put(pch); |
337 | } | 337 | } |
338 | } | 338 | } |
339 | 339 | ||
340 | void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | 340 | void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) |
341 | { | 341 | { |
342 | int count; | 342 | int count; |
343 | 343 | ||
344 | count = 0; | 344 | count = 0; |
345 | while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) | 345 | while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) |
346 | udelay(10); | 346 | udelay(10); |
347 | 347 | ||
348 | I915_WRITE_NOTRACE(FORCEWAKE, 1); | 348 | I915_WRITE_NOTRACE(FORCEWAKE, 1); |
349 | POSTING_READ(FORCEWAKE); | 349 | POSTING_READ(FORCEWAKE); |
350 | 350 | ||
351 | count = 0; | 351 | count = 0; |
352 | while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0) | 352 | while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0) |
353 | udelay(10); | 353 | udelay(10); |
354 | } | 354 | } |
355 | 355 | ||
356 | void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) | 356 | void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) |
357 | { | 357 | { |
358 | int count; | 358 | int count; |
359 | 359 | ||
360 | count = 0; | 360 | count = 0; |
361 | while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1)) | 361 | while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1)) |
362 | udelay(10); | 362 | udelay(10); |
363 | 363 | ||
364 | I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1); | 364 | I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1); |
365 | POSTING_READ(FORCEWAKE_MT); | 365 | POSTING_READ(FORCEWAKE_MT); |
366 | 366 | ||
367 | count = 0; | 367 | count = 0; |
368 | while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0) | 368 | while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0) |
369 | udelay(10); | 369 | udelay(10); |
370 | } | 370 | } |
371 | 371 | ||
372 | /* | 372 | /* |
373 | * Generally this is called implicitly by the register read function. However, | 373 | * Generally this is called implicitly by the register read function. However, |
374 | * if some sequence requires the GT to not power down then this function should | 374 | * if some sequence requires the GT to not power down then this function should |
375 | * be called at the beginning of the sequence followed by a call to | 375 | * be called at the beginning of the sequence followed by a call to |
376 | * gen6_gt_force_wake_put() at the end of the sequence. | 376 | * gen6_gt_force_wake_put() at the end of the sequence. |
377 | */ | 377 | */ |
378 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | 378 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) |
379 | { | 379 | { |
380 | unsigned long irqflags; | 380 | unsigned long irqflags; |
381 | 381 | ||
382 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); | 382 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); |
383 | if (dev_priv->forcewake_count++ == 0) | 383 | if (dev_priv->forcewake_count++ == 0) |
384 | dev_priv->display.force_wake_get(dev_priv); | 384 | dev_priv->display.force_wake_get(dev_priv); |
385 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); | 385 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); |
386 | } | 386 | } |
387 | 387 | ||
388 | static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) | 388 | static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) |
389 | { | 389 | { |
390 | u32 gtfifodbg; | 390 | u32 gtfifodbg; |
391 | gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); | 391 | gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); |
392 | if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, | 392 | if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, |
393 | "MMIO read or write has been dropped %x\n", gtfifodbg)) | 393 | "MMIO read or write has been dropped %x\n", gtfifodbg)) |
394 | I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); | 394 | I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); |
395 | } | 395 | } |
396 | 396 | ||
397 | void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | 397 | void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) |
398 | { | 398 | { |
399 | I915_WRITE_NOTRACE(FORCEWAKE, 0); | 399 | I915_WRITE_NOTRACE(FORCEWAKE, 0); |
400 | /* The below doubles as a POSTING_READ */ | 400 | /* The below doubles as a POSTING_READ */ |
401 | gen6_gt_check_fifodbg(dev_priv); | 401 | gen6_gt_check_fifodbg(dev_priv); |
402 | } | 402 | } |
403 | 403 | ||
404 | void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) | 404 | void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) |
405 | { | 405 | { |
406 | I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0); | 406 | I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0); |
407 | /* The below doubles as a POSTING_READ */ | 407 | /* The below doubles as a POSTING_READ */ |
408 | gen6_gt_check_fifodbg(dev_priv); | 408 | gen6_gt_check_fifodbg(dev_priv); |
409 | } | 409 | } |
410 | 410 | ||
411 | /* | 411 | /* |
412 | * see gen6_gt_force_wake_get() | 412 | * see gen6_gt_force_wake_get() |
413 | */ | 413 | */ |
414 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | 414 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) |
415 | { | 415 | { |
416 | unsigned long irqflags; | 416 | unsigned long irqflags; |
417 | 417 | ||
418 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); | 418 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); |
419 | if (--dev_priv->forcewake_count == 0) | 419 | if (--dev_priv->forcewake_count == 0) |
420 | dev_priv->display.force_wake_put(dev_priv); | 420 | dev_priv->display.force_wake_put(dev_priv); |
421 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); | 421 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); |
422 | } | 422 | } |
423 | 423 | ||
424 | int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | 424 | int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) |
425 | { | 425 | { |
426 | int ret = 0; | 426 | int ret = 0; |
427 | 427 | ||
428 | if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { | 428 | if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { |
429 | int loop = 500; | 429 | int loop = 500; |
430 | u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | 430 | u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); |
431 | while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { | 431 | while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { |
432 | udelay(10); | 432 | udelay(10); |
433 | fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | 433 | fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); |
434 | } | 434 | } |
435 | if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) | 435 | if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) |
436 | ++ret; | 436 | ++ret; |
437 | dev_priv->gt_fifo_count = fifo; | 437 | dev_priv->gt_fifo_count = fifo; |
438 | } | 438 | } |
439 | dev_priv->gt_fifo_count--; | 439 | dev_priv->gt_fifo_count--; |
440 | 440 | ||
441 | return ret; | 441 | return ret; |
442 | } | 442 | } |
443 | 443 | ||
444 | static int i915_drm_freeze(struct drm_device *dev) | 444 | static int i915_drm_freeze(struct drm_device *dev) |
445 | { | 445 | { |
446 | struct drm_i915_private *dev_priv = dev->dev_private; | 446 | struct drm_i915_private *dev_priv = dev->dev_private; |
447 | 447 | ||
448 | drm_kms_helper_poll_disable(dev); | 448 | drm_kms_helper_poll_disable(dev); |
449 | 449 | ||
450 | pci_save_state(dev->pdev); | 450 | pci_save_state(dev->pdev); |
451 | 451 | ||
452 | /* If KMS is active, we do the leavevt stuff here */ | 452 | /* If KMS is active, we do the leavevt stuff here */ |
453 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 453 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
454 | int error = i915_gem_idle(dev); | 454 | int error = i915_gem_idle(dev); |
455 | if (error) { | 455 | if (error) { |
456 | dev_err(&dev->pdev->dev, | 456 | dev_err(&dev->pdev->dev, |
457 | "GEM idle failed, resume might fail\n"); | 457 | "GEM idle failed, resume might fail\n"); |
458 | return error; | 458 | return error; |
459 | } | 459 | } |
460 | drm_irq_uninstall(dev); | 460 | drm_irq_uninstall(dev); |
461 | } | 461 | } |
462 | 462 | ||
463 | i915_save_state(dev); | 463 | i915_save_state(dev); |
464 | 464 | ||
465 | intel_opregion_fini(dev); | 465 | intel_opregion_fini(dev); |
466 | 466 | ||
467 | /* Modeset on resume, not lid events */ | 467 | /* Modeset on resume, not lid events */ |
468 | dev_priv->modeset_on_lid = 0; | 468 | dev_priv->modeset_on_lid = 0; |
469 | 469 | ||
470 | console_lock(); | ||
471 | intel_fbdev_set_suspend(dev, 1); | ||
472 | console_unlock(); | ||
473 | |||
470 | return 0; | 474 | return 0; |
471 | } | 475 | } |
472 | 476 | ||
473 | int i915_suspend(struct drm_device *dev, pm_message_t state) | 477 | int i915_suspend(struct drm_device *dev, pm_message_t state) |
474 | { | 478 | { |
475 | int error; | 479 | int error; |
476 | 480 | ||
477 | if (!dev || !dev->dev_private) { | 481 | if (!dev || !dev->dev_private) { |
478 | DRM_ERROR("dev: %p\n", dev); | 482 | DRM_ERROR("dev: %p\n", dev); |
479 | DRM_ERROR("DRM not initialized, aborting suspend.\n"); | 483 | DRM_ERROR("DRM not initialized, aborting suspend.\n"); |
480 | return -ENODEV; | 484 | return -ENODEV; |
481 | } | 485 | } |
482 | 486 | ||
483 | if (state.event == PM_EVENT_PRETHAW) | 487 | if (state.event == PM_EVENT_PRETHAW) |
484 | return 0; | 488 | return 0; |
485 | 489 | ||
486 | 490 | ||
487 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | 491 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
488 | return 0; | 492 | return 0; |
489 | 493 | ||
490 | error = i915_drm_freeze(dev); | 494 | error = i915_drm_freeze(dev); |
491 | if (error) | 495 | if (error) |
492 | return error; | 496 | return error; |
493 | 497 | ||
494 | if (state.event == PM_EVENT_SUSPEND) { | 498 | if (state.event == PM_EVENT_SUSPEND) { |
495 | /* Shut down the device */ | 499 | /* Shut down the device */ |
496 | pci_disable_device(dev->pdev); | 500 | pci_disable_device(dev->pdev); |
497 | pci_set_power_state(dev->pdev, PCI_D3hot); | 501 | pci_set_power_state(dev->pdev, PCI_D3hot); |
498 | } | 502 | } |
499 | 503 | ||
500 | return 0; | 504 | return 0; |
501 | } | 505 | } |
502 | 506 | ||
503 | static int i915_drm_thaw(struct drm_device *dev) | 507 | static int i915_drm_thaw(struct drm_device *dev) |
504 | { | 508 | { |
505 | struct drm_i915_private *dev_priv = dev->dev_private; | 509 | struct drm_i915_private *dev_priv = dev->dev_private; |
506 | int error = 0; | 510 | int error = 0; |
507 | 511 | ||
508 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 512 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
509 | mutex_lock(&dev->struct_mutex); | 513 | mutex_lock(&dev->struct_mutex); |
510 | i915_gem_restore_gtt_mappings(dev); | 514 | i915_gem_restore_gtt_mappings(dev); |
511 | mutex_unlock(&dev->struct_mutex); | 515 | mutex_unlock(&dev->struct_mutex); |
512 | } | 516 | } |
513 | 517 | ||
514 | i915_restore_state(dev); | 518 | i915_restore_state(dev); |
515 | intel_opregion_setup(dev); | 519 | intel_opregion_setup(dev); |
516 | 520 | ||
517 | /* KMS EnterVT equivalent */ | 521 | /* KMS EnterVT equivalent */ |
518 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 522 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
519 | mutex_lock(&dev->struct_mutex); | 523 | mutex_lock(&dev->struct_mutex); |
520 | dev_priv->mm.suspended = 0; | 524 | dev_priv->mm.suspended = 0; |
521 | 525 | ||
522 | error = i915_gem_init_hw(dev); | 526 | error = i915_gem_init_hw(dev); |
523 | mutex_unlock(&dev->struct_mutex); | 527 | mutex_unlock(&dev->struct_mutex); |
524 | 528 | ||
525 | if (HAS_PCH_SPLIT(dev)) | 529 | if (HAS_PCH_SPLIT(dev)) |
526 | ironlake_init_pch_refclk(dev); | 530 | ironlake_init_pch_refclk(dev); |
527 | 531 | ||
528 | drm_mode_config_reset(dev); | 532 | drm_mode_config_reset(dev); |
529 | drm_irq_install(dev); | 533 | drm_irq_install(dev); |
530 | 534 | ||
531 | /* Resume the modeset for every activated CRTC */ | 535 | /* Resume the modeset for every activated CRTC */ |
532 | drm_helper_resume_force_mode(dev); | 536 | drm_helper_resume_force_mode(dev); |
533 | 537 | ||
534 | if (IS_IRONLAKE_M(dev)) | 538 | if (IS_IRONLAKE_M(dev)) |
535 | ironlake_enable_rc6(dev); | 539 | ironlake_enable_rc6(dev); |
536 | } | 540 | } |
537 | 541 | ||
538 | intel_opregion_init(dev); | 542 | intel_opregion_init(dev); |
539 | 543 | ||
540 | dev_priv->modeset_on_lid = 0; | 544 | dev_priv->modeset_on_lid = 0; |
541 | 545 | ||
546 | console_lock(); | ||
547 | intel_fbdev_set_suspend(dev, 0); | ||
548 | console_unlock(); | ||
542 | return error; | 549 | return error; |
543 | } | 550 | } |
544 | 551 | ||
545 | int i915_resume(struct drm_device *dev) | 552 | int i915_resume(struct drm_device *dev) |
546 | { | 553 | { |
547 | int ret; | 554 | int ret; |
548 | 555 | ||
549 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | 556 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
550 | return 0; | 557 | return 0; |
551 | 558 | ||
552 | if (pci_enable_device(dev->pdev)) | 559 | if (pci_enable_device(dev->pdev)) |
553 | return -EIO; | 560 | return -EIO; |
554 | 561 | ||
555 | pci_set_master(dev->pdev); | 562 | pci_set_master(dev->pdev); |
556 | 563 | ||
557 | ret = i915_drm_thaw(dev); | 564 | ret = i915_drm_thaw(dev); |
558 | if (ret) | 565 | if (ret) |
559 | return ret; | 566 | return ret; |
560 | 567 | ||
561 | drm_kms_helper_poll_enable(dev); | 568 | drm_kms_helper_poll_enable(dev); |
562 | return 0; | 569 | return 0; |
563 | } | 570 | } |
564 | 571 | ||
565 | static int i8xx_do_reset(struct drm_device *dev, u8 flags) | 572 | static int i8xx_do_reset(struct drm_device *dev, u8 flags) |
566 | { | 573 | { |
567 | struct drm_i915_private *dev_priv = dev->dev_private; | 574 | struct drm_i915_private *dev_priv = dev->dev_private; |
568 | 575 | ||
569 | if (IS_I85X(dev)) | 576 | if (IS_I85X(dev)) |
570 | return -ENODEV; | 577 | return -ENODEV; |
571 | 578 | ||
572 | I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); | 579 | I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); |
573 | POSTING_READ(D_STATE); | 580 | POSTING_READ(D_STATE); |
574 | 581 | ||
575 | if (IS_I830(dev) || IS_845G(dev)) { | 582 | if (IS_I830(dev) || IS_845G(dev)) { |
576 | I915_WRITE(DEBUG_RESET_I830, | 583 | I915_WRITE(DEBUG_RESET_I830, |
577 | DEBUG_RESET_DISPLAY | | 584 | DEBUG_RESET_DISPLAY | |
578 | DEBUG_RESET_RENDER | | 585 | DEBUG_RESET_RENDER | |
579 | DEBUG_RESET_FULL); | 586 | DEBUG_RESET_FULL); |
580 | POSTING_READ(DEBUG_RESET_I830); | 587 | POSTING_READ(DEBUG_RESET_I830); |
581 | msleep(1); | 588 | msleep(1); |
582 | 589 | ||
583 | I915_WRITE(DEBUG_RESET_I830, 0); | 590 | I915_WRITE(DEBUG_RESET_I830, 0); |
584 | POSTING_READ(DEBUG_RESET_I830); | 591 | POSTING_READ(DEBUG_RESET_I830); |
585 | } | 592 | } |
586 | 593 | ||
587 | msleep(1); | 594 | msleep(1); |
588 | 595 | ||
589 | I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); | 596 | I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); |
590 | POSTING_READ(D_STATE); | 597 | POSTING_READ(D_STATE); |
591 | 598 | ||
592 | return 0; | 599 | return 0; |
593 | } | 600 | } |
594 | 601 | ||
595 | static int i965_reset_complete(struct drm_device *dev) | 602 | static int i965_reset_complete(struct drm_device *dev) |
596 | { | 603 | { |
597 | u8 gdrst; | 604 | u8 gdrst; |
598 | pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); | 605 | pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); |
599 | return gdrst & 0x1; | 606 | return gdrst & 0x1; |
600 | } | 607 | } |
601 | 608 | ||
602 | static int i965_do_reset(struct drm_device *dev, u8 flags) | 609 | static int i965_do_reset(struct drm_device *dev, u8 flags) |
603 | { | 610 | { |
604 | u8 gdrst; | 611 | u8 gdrst; |
605 | 612 | ||
606 | /* | 613 | /* |
607 | * Set the domains we want to reset (GRDOM/bits 2 and 3) as | 614 | * Set the domains we want to reset (GRDOM/bits 2 and 3) as |
608 | * well as the reset bit (GR/bit 0). Setting the GR bit | 615 | * well as the reset bit (GR/bit 0). Setting the GR bit |
609 | * triggers the reset; when done, the hardware will clear it. | 616 | * triggers the reset; when done, the hardware will clear it. |
610 | */ | 617 | */ |
611 | pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); | 618 | pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); |
612 | pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1); | 619 | pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1); |
613 | 620 | ||
614 | return wait_for(i965_reset_complete(dev), 500); | 621 | return wait_for(i965_reset_complete(dev), 500); |
615 | } | 622 | } |
616 | 623 | ||
617 | static int ironlake_do_reset(struct drm_device *dev, u8 flags) | 624 | static int ironlake_do_reset(struct drm_device *dev, u8 flags) |
618 | { | 625 | { |
619 | struct drm_i915_private *dev_priv = dev->dev_private; | 626 | struct drm_i915_private *dev_priv = dev->dev_private; |
620 | u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); | 627 | u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); |
621 | I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1); | 628 | I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1); |
622 | return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); | 629 | return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); |
623 | } | 630 | } |
624 | 631 | ||
625 | static int gen6_do_reset(struct drm_device *dev, u8 flags) | 632 | static int gen6_do_reset(struct drm_device *dev, u8 flags) |
626 | { | 633 | { |
627 | struct drm_i915_private *dev_priv = dev->dev_private; | 634 | struct drm_i915_private *dev_priv = dev->dev_private; |
628 | int ret; | 635 | int ret; |
629 | unsigned long irqflags; | 636 | unsigned long irqflags; |
630 | 637 | ||
631 | /* Hold gt_lock across reset to prevent any register access | 638 | /* Hold gt_lock across reset to prevent any register access |
632 | * with forcewake not set correctly | 639 | * with forcewake not set correctly |
633 | */ | 640 | */ |
634 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); | 641 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); |
635 | 642 | ||
636 | /* Reset the chip */ | 643 | /* Reset the chip */ |
637 | 644 | ||
638 | /* GEN6_GDRST is not in the gt power well, no need to check | 645 | /* GEN6_GDRST is not in the gt power well, no need to check |
639 | * for fifo space for the write or forcewake the chip for | 646 | * for fifo space for the write or forcewake the chip for |
640 | * the read | 647 | * the read |
641 | */ | 648 | */ |
642 | I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL); | 649 | I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL); |
643 | 650 | ||
644 | /* Spin waiting for the device to ack the reset request */ | 651 | /* Spin waiting for the device to ack the reset request */ |
645 | ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); | 652 | ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); |
646 | 653 | ||
647 | /* If reset with a user forcewake, try to restore, otherwise turn it off */ | 654 | /* If reset with a user forcewake, try to restore, otherwise turn it off */ |
648 | if (dev_priv->forcewake_count) | 655 | if (dev_priv->forcewake_count) |
649 | dev_priv->display.force_wake_get(dev_priv); | 656 | dev_priv->display.force_wake_get(dev_priv); |
650 | else | 657 | else |
651 | dev_priv->display.force_wake_put(dev_priv); | 658 | dev_priv->display.force_wake_put(dev_priv); |
652 | 659 | ||
653 | /* Restore fifo count */ | 660 | /* Restore fifo count */ |
654 | dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | 661 | dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); |
655 | 662 | ||
656 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); | 663 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); |
657 | return ret; | 664 | return ret; |
658 | } | 665 | } |
659 | 666 | ||
660 | /** | 667 | /** |
661 | * i915_reset - reset chip after a hang | 668 | * i915_reset - reset chip after a hang |
662 | * @dev: drm device to reset | 669 | * @dev: drm device to reset |
663 | * @flags: reset domains | 670 | * @flags: reset domains |
664 | * | 671 | * |
665 | * Reset the chip. Useful if a hang is detected. Returns zero on successful | 672 | * Reset the chip. Useful if a hang is detected. Returns zero on successful |
666 | * reset or otherwise an error code. | 673 | * reset or otherwise an error code. |
667 | * | 674 | * |
668 | * Procedure is fairly simple: | 675 | * Procedure is fairly simple: |
669 | * - reset the chip using the reset reg | 676 | * - reset the chip using the reset reg |
670 | * - re-init context state | 677 | * - re-init context state |
671 | * - re-init hardware status page | 678 | * - re-init hardware status page |
672 | * - re-init ring buffer | 679 | * - re-init ring buffer |
673 | * - re-init interrupt state | 680 | * - re-init interrupt state |
674 | * - re-init display | 681 | * - re-init display |
675 | */ | 682 | */ |
676 | int i915_reset(struct drm_device *dev, u8 flags) | 683 | int i915_reset(struct drm_device *dev, u8 flags) |
677 | { | 684 | { |
678 | drm_i915_private_t *dev_priv = dev->dev_private; | 685 | drm_i915_private_t *dev_priv = dev->dev_private; |
679 | /* | 686 | /* |
680 | * We really should only reset the display subsystem if we actually | 687 | * We really should only reset the display subsystem if we actually |
681 | * need to | 688 | * need to |
682 | */ | 689 | */ |
683 | bool need_display = true; | 690 | bool need_display = true; |
684 | int ret; | 691 | int ret; |
685 | 692 | ||
686 | if (!i915_try_reset) | 693 | if (!i915_try_reset) |
687 | return 0; | 694 | return 0; |
688 | 695 | ||
689 | if (!mutex_trylock(&dev->struct_mutex)) | 696 | if (!mutex_trylock(&dev->struct_mutex)) |
690 | return -EBUSY; | 697 | return -EBUSY; |
691 | 698 | ||
692 | i915_gem_reset(dev); | 699 | i915_gem_reset(dev); |
693 | 700 | ||
694 | ret = -ENODEV; | 701 | ret = -ENODEV; |
695 | if (get_seconds() - dev_priv->last_gpu_reset < 5) { | 702 | if (get_seconds() - dev_priv->last_gpu_reset < 5) { |
696 | DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); | 703 | DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); |
697 | } else switch (INTEL_INFO(dev)->gen) { | 704 | } else switch (INTEL_INFO(dev)->gen) { |
698 | case 7: | 705 | case 7: |
699 | case 6: | 706 | case 6: |
700 | ret = gen6_do_reset(dev, flags); | 707 | ret = gen6_do_reset(dev, flags); |
701 | break; | 708 | break; |
702 | case 5: | 709 | case 5: |
703 | ret = ironlake_do_reset(dev, flags); | 710 | ret = ironlake_do_reset(dev, flags); |
704 | break; | 711 | break; |
705 | case 4: | 712 | case 4: |
706 | ret = i965_do_reset(dev, flags); | 713 | ret = i965_do_reset(dev, flags); |
707 | break; | 714 | break; |
708 | case 2: | 715 | case 2: |
709 | ret = i8xx_do_reset(dev, flags); | 716 | ret = i8xx_do_reset(dev, flags); |
710 | break; | 717 | break; |
711 | } | 718 | } |
712 | dev_priv->last_gpu_reset = get_seconds(); | 719 | dev_priv->last_gpu_reset = get_seconds(); |
713 | if (ret) { | 720 | if (ret) { |
714 | DRM_ERROR("Failed to reset chip.\n"); | 721 | DRM_ERROR("Failed to reset chip.\n"); |
715 | mutex_unlock(&dev->struct_mutex); | 722 | mutex_unlock(&dev->struct_mutex); |
716 | return ret; | 723 | return ret; |
717 | } | 724 | } |
718 | 725 | ||
719 | /* Ok, now get things going again... */ | 726 | /* Ok, now get things going again... */ |
720 | 727 | ||
721 | /* | 728 | /* |
722 | * Everything depends on having the GTT running, so we need to start | 729 | * Everything depends on having the GTT running, so we need to start |
723 | * there. Fortunately we don't need to do this unless we reset the | 730 | * there. Fortunately we don't need to do this unless we reset the |
724 | * chip at a PCI level. | 731 | * chip at a PCI level. |
725 | * | 732 | * |
726 | * Next we need to restore the context, but we don't use those | 733 | * Next we need to restore the context, but we don't use those |
727 | * yet either... | 734 | * yet either... |
728 | * | 735 | * |
729 | * Ring buffer needs to be re-initialized in the KMS case, or if X | 736 | * Ring buffer needs to be re-initialized in the KMS case, or if X |
730 | * was running at the time of the reset (i.e. we weren't VT | 737 | * was running at the time of the reset (i.e. we weren't VT |
731 | * switched away). | 738 | * switched away). |
732 | */ | 739 | */ |
733 | if (drm_core_check_feature(dev, DRIVER_MODESET) || | 740 | if (drm_core_check_feature(dev, DRIVER_MODESET) || |
734 | !dev_priv->mm.suspended) { | 741 | !dev_priv->mm.suspended) { |
735 | dev_priv->mm.suspended = 0; | 742 | dev_priv->mm.suspended = 0; |
736 | 743 | ||
737 | i915_gem_init_swizzling(dev); | 744 | i915_gem_init_swizzling(dev); |
738 | 745 | ||
739 | dev_priv->ring[RCS].init(&dev_priv->ring[RCS]); | 746 | dev_priv->ring[RCS].init(&dev_priv->ring[RCS]); |
740 | if (HAS_BSD(dev)) | 747 | if (HAS_BSD(dev)) |
741 | dev_priv->ring[VCS].init(&dev_priv->ring[VCS]); | 748 | dev_priv->ring[VCS].init(&dev_priv->ring[VCS]); |
742 | if (HAS_BLT(dev)) | 749 | if (HAS_BLT(dev)) |
743 | dev_priv->ring[BCS].init(&dev_priv->ring[BCS]); | 750 | dev_priv->ring[BCS].init(&dev_priv->ring[BCS]); |
744 | 751 | ||
745 | i915_gem_init_ppgtt(dev); | 752 | i915_gem_init_ppgtt(dev); |
746 | 753 | ||
747 | mutex_unlock(&dev->struct_mutex); | 754 | mutex_unlock(&dev->struct_mutex); |
748 | drm_irq_uninstall(dev); | 755 | drm_irq_uninstall(dev); |
749 | drm_mode_config_reset(dev); | 756 | drm_mode_config_reset(dev); |
750 | drm_irq_install(dev); | 757 | drm_irq_install(dev); |
751 | mutex_lock(&dev->struct_mutex); | 758 | mutex_lock(&dev->struct_mutex); |
752 | } | 759 | } |
753 | 760 | ||
754 | mutex_unlock(&dev->struct_mutex); | 761 | mutex_unlock(&dev->struct_mutex); |
755 | 762 | ||
756 | /* | 763 | /* |
757 | * Perform a full modeset as on later generations, e.g. Ironlake, we may | 764 | * Perform a full modeset as on later generations, e.g. Ironlake, we may |
758 | * need to retrain the display link and cannot just restore the register | 765 | * need to retrain the display link and cannot just restore the register |
759 | * values. | 766 | * values. |
760 | */ | 767 | */ |
761 | if (need_display) { | 768 | if (need_display) { |
762 | mutex_lock(&dev->mode_config.mutex); | 769 | mutex_lock(&dev->mode_config.mutex); |
763 | drm_helper_resume_force_mode(dev); | 770 | drm_helper_resume_force_mode(dev); |
764 | mutex_unlock(&dev->mode_config.mutex); | 771 | mutex_unlock(&dev->mode_config.mutex); |
765 | } | 772 | } |
766 | 773 | ||
767 | return 0; | 774 | return 0; |
768 | } | 775 | } |
769 | 776 | ||
770 | 777 | ||
771 | static int __devinit | 778 | static int __devinit |
772 | i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 779 | i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
773 | { | 780 | { |
774 | /* Only bind to function 0 of the device. Early generations | 781 | /* Only bind to function 0 of the device. Early generations |
775 | * used function 1 as a placeholder for multi-head. This causes | 782 | * used function 1 as a placeholder for multi-head. This causes |
776 | * us confusion instead, especially on the systems where both | 783 | * us confusion instead, especially on the systems where both |
777 | * functions have the same PCI-ID! | 784 | * functions have the same PCI-ID! |
778 | */ | 785 | */ |
779 | if (PCI_FUNC(pdev->devfn)) | 786 | if (PCI_FUNC(pdev->devfn)) |
780 | return -ENODEV; | 787 | return -ENODEV; |
781 | 788 | ||
782 | return drm_get_pci_dev(pdev, ent, &driver); | 789 | return drm_get_pci_dev(pdev, ent, &driver); |
783 | } | 790 | } |
784 | 791 | ||
785 | static void | 792 | static void |
786 | i915_pci_remove(struct pci_dev *pdev) | 793 | i915_pci_remove(struct pci_dev *pdev) |
787 | { | 794 | { |
788 | struct drm_device *dev = pci_get_drvdata(pdev); | 795 | struct drm_device *dev = pci_get_drvdata(pdev); |
789 | 796 | ||
790 | drm_put_dev(dev); | 797 | drm_put_dev(dev); |
791 | } | 798 | } |
792 | 799 | ||
793 | static int i915_pm_suspend(struct device *dev) | 800 | static int i915_pm_suspend(struct device *dev) |
794 | { | 801 | { |
795 | struct pci_dev *pdev = to_pci_dev(dev); | 802 | struct pci_dev *pdev = to_pci_dev(dev); |
796 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 803 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
797 | int error; | 804 | int error; |
798 | 805 | ||
799 | if (!drm_dev || !drm_dev->dev_private) { | 806 | if (!drm_dev || !drm_dev->dev_private) { |
800 | dev_err(dev, "DRM not initialized, aborting suspend.\n"); | 807 | dev_err(dev, "DRM not initialized, aborting suspend.\n"); |
801 | return -ENODEV; | 808 | return -ENODEV; |
802 | } | 809 | } |
803 | 810 | ||
804 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | 811 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
805 | return 0; | 812 | return 0; |
806 | 813 | ||
807 | error = i915_drm_freeze(drm_dev); | 814 | error = i915_drm_freeze(drm_dev); |
808 | if (error) | 815 | if (error) |
809 | return error; | 816 | return error; |
810 | 817 | ||
811 | pci_disable_device(pdev); | 818 | pci_disable_device(pdev); |
812 | pci_set_power_state(pdev, PCI_D3hot); | 819 | pci_set_power_state(pdev, PCI_D3hot); |
813 | 820 | ||
814 | return 0; | 821 | return 0; |
815 | } | 822 | } |
816 | 823 | ||
817 | static int i915_pm_resume(struct device *dev) | 824 | static int i915_pm_resume(struct device *dev) |
818 | { | 825 | { |
819 | struct pci_dev *pdev = to_pci_dev(dev); | 826 | struct pci_dev *pdev = to_pci_dev(dev); |
820 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 827 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
821 | 828 | ||
822 | return i915_resume(drm_dev); | 829 | return i915_resume(drm_dev); |
823 | } | 830 | } |
824 | 831 | ||
825 | static int i915_pm_freeze(struct device *dev) | 832 | static int i915_pm_freeze(struct device *dev) |
826 | { | 833 | { |
827 | struct pci_dev *pdev = to_pci_dev(dev); | 834 | struct pci_dev *pdev = to_pci_dev(dev); |
828 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 835 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
829 | 836 | ||
830 | if (!drm_dev || !drm_dev->dev_private) { | 837 | if (!drm_dev || !drm_dev->dev_private) { |
831 | dev_err(dev, "DRM not initialized, aborting suspend.\n"); | 838 | dev_err(dev, "DRM not initialized, aborting suspend.\n"); |
832 | return -ENODEV; | 839 | return -ENODEV; |
833 | } | 840 | } |
834 | 841 | ||
835 | return i915_drm_freeze(drm_dev); | 842 | return i915_drm_freeze(drm_dev); |
836 | } | 843 | } |
837 | 844 | ||
838 | static int i915_pm_thaw(struct device *dev) | 845 | static int i915_pm_thaw(struct device *dev) |
839 | { | 846 | { |
840 | struct pci_dev *pdev = to_pci_dev(dev); | 847 | struct pci_dev *pdev = to_pci_dev(dev); |
841 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 848 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
842 | 849 | ||
843 | return i915_drm_thaw(drm_dev); | 850 | return i915_drm_thaw(drm_dev); |
844 | } | 851 | } |
845 | 852 | ||
846 | static int i915_pm_poweroff(struct device *dev) | 853 | static int i915_pm_poweroff(struct device *dev) |
847 | { | 854 | { |
848 | struct pci_dev *pdev = to_pci_dev(dev); | 855 | struct pci_dev *pdev = to_pci_dev(dev); |
849 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 856 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
850 | 857 | ||
851 | return i915_drm_freeze(drm_dev); | 858 | return i915_drm_freeze(drm_dev); |
852 | } | 859 | } |
853 | 860 | ||
854 | static const struct dev_pm_ops i915_pm_ops = { | 861 | static const struct dev_pm_ops i915_pm_ops = { |
855 | .suspend = i915_pm_suspend, | 862 | .suspend = i915_pm_suspend, |
856 | .resume = i915_pm_resume, | 863 | .resume = i915_pm_resume, |
857 | .freeze = i915_pm_freeze, | 864 | .freeze = i915_pm_freeze, |
858 | .thaw = i915_pm_thaw, | 865 | .thaw = i915_pm_thaw, |
859 | .poweroff = i915_pm_poweroff, | 866 | .poweroff = i915_pm_poweroff, |
860 | .restore = i915_pm_resume, | 867 | .restore = i915_pm_resume, |
861 | }; | 868 | }; |
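
Editor's note: a minimal, hypothetical sketch of the alternative many drivers use when their suspend and hibernation handlers are the same function. It is not equivalent to the table above, since i915_pm_suspend() additionally disables the PCI device and enters D3hot, which must not happen on the freeze/poweroff paths; the explicit per-callback table is therefore deliberate.

/* Hypothetical sketch only -- NOT the driver's actual table. When the
 * suspend and freeze handlers are identical, the PM core helper fills
 * .suspend/.freeze/.poweroff and .resume/.thaw/.restore in one go. */
static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(i915_pm_freeze, i915_pm_resume)
};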
862 | 869 | ||
863 | static struct vm_operations_struct i915_gem_vm_ops = { | 870 | static struct vm_operations_struct i915_gem_vm_ops = { |
864 | .fault = i915_gem_fault, | 871 | .fault = i915_gem_fault, |
865 | .open = drm_gem_vm_open, | 872 | .open = drm_gem_vm_open, |
866 | .close = drm_gem_vm_close, | 873 | .close = drm_gem_vm_close, |
867 | }; | 874 | }; |
868 | 875 | ||
869 | static const struct file_operations i915_driver_fops = { | 876 | static const struct file_operations i915_driver_fops = { |
870 | .owner = THIS_MODULE, | 877 | .owner = THIS_MODULE, |
871 | .open = drm_open, | 878 | .open = drm_open, |
872 | .release = drm_release, | 879 | .release = drm_release, |
873 | .unlocked_ioctl = drm_ioctl, | 880 | .unlocked_ioctl = drm_ioctl, |
874 | .mmap = drm_gem_mmap, | 881 | .mmap = drm_gem_mmap, |
875 | .poll = drm_poll, | 882 | .poll = drm_poll, |
876 | .fasync = drm_fasync, | 883 | .fasync = drm_fasync, |
877 | .read = drm_read, | 884 | .read = drm_read, |
878 | #ifdef CONFIG_COMPAT | 885 | #ifdef CONFIG_COMPAT |
879 | .compat_ioctl = i915_compat_ioctl, | 886 | .compat_ioctl = i915_compat_ioctl, |
880 | #endif | 887 | #endif |
881 | .llseek = noop_llseek, | 888 | .llseek = noop_llseek, |
882 | }; | 889 | }; |
883 | 890 | ||
884 | static struct drm_driver driver = { | 891 | static struct drm_driver driver = { |
885 | /* Don't use MTRRs here; the Xserver or userspace app should | 892 | /* Don't use MTRRs here; the Xserver or userspace app should |
886 | * deal with them for Intel hardware. | 893 | * deal with them for Intel hardware. |
887 | */ | 894 | */ |
888 | .driver_features = | 895 | .driver_features = |
889 | DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ | 896 | DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ |
890 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, | 897 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, |
891 | .load = i915_driver_load, | 898 | .load = i915_driver_load, |
892 | .unload = i915_driver_unload, | 899 | .unload = i915_driver_unload, |
893 | .open = i915_driver_open, | 900 | .open = i915_driver_open, |
894 | .lastclose = i915_driver_lastclose, | 901 | .lastclose = i915_driver_lastclose, |
895 | .preclose = i915_driver_preclose, | 902 | .preclose = i915_driver_preclose, |
896 | .postclose = i915_driver_postclose, | 903 | .postclose = i915_driver_postclose, |
897 | 904 | ||
898 | /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ | 905 | /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ |
899 | .suspend = i915_suspend, | 906 | .suspend = i915_suspend, |
900 | .resume = i915_resume, | 907 | .resume = i915_resume, |
901 | 908 | ||
902 | .device_is_agp = i915_driver_device_is_agp, | 909 | .device_is_agp = i915_driver_device_is_agp, |
903 | .reclaim_buffers = drm_core_reclaim_buffers, | 910 | .reclaim_buffers = drm_core_reclaim_buffers, |
904 | .master_create = i915_master_create, | 911 | .master_create = i915_master_create, |
905 | .master_destroy = i915_master_destroy, | 912 | .master_destroy = i915_master_destroy, |
906 | #if defined(CONFIG_DEBUG_FS) | 913 | #if defined(CONFIG_DEBUG_FS) |
907 | .debugfs_init = i915_debugfs_init, | 914 | .debugfs_init = i915_debugfs_init, |
908 | .debugfs_cleanup = i915_debugfs_cleanup, | 915 | .debugfs_cleanup = i915_debugfs_cleanup, |
909 | #endif | 916 | #endif |
910 | .gem_init_object = i915_gem_init_object, | 917 | .gem_init_object = i915_gem_init_object, |
911 | .gem_free_object = i915_gem_free_object, | 918 | .gem_free_object = i915_gem_free_object, |
912 | .gem_vm_ops = &i915_gem_vm_ops, | 919 | .gem_vm_ops = &i915_gem_vm_ops, |
913 | .dumb_create = i915_gem_dumb_create, | 920 | .dumb_create = i915_gem_dumb_create, |
914 | .dumb_map_offset = i915_gem_mmap_gtt, | 921 | .dumb_map_offset = i915_gem_mmap_gtt, |
915 | .dumb_destroy = i915_gem_dumb_destroy, | 922 | .dumb_destroy = i915_gem_dumb_destroy, |
916 | .ioctls = i915_ioctls, | 923 | .ioctls = i915_ioctls, |
917 | .fops = &i915_driver_fops, | 924 | .fops = &i915_driver_fops, |
918 | .name = DRIVER_NAME, | 925 | .name = DRIVER_NAME, |
919 | .desc = DRIVER_DESC, | 926 | .desc = DRIVER_DESC, |
920 | .date = DRIVER_DATE, | 927 | .date = DRIVER_DATE, |
921 | .major = DRIVER_MAJOR, | 928 | .major = DRIVER_MAJOR, |
922 | .minor = DRIVER_MINOR, | 929 | .minor = DRIVER_MINOR, |
923 | .patchlevel = DRIVER_PATCHLEVEL, | 930 | .patchlevel = DRIVER_PATCHLEVEL, |
924 | }; | 931 | }; |
925 | 932 | ||
926 | static struct pci_driver i915_pci_driver = { | 933 | static struct pci_driver i915_pci_driver = { |
927 | .name = DRIVER_NAME, | 934 | .name = DRIVER_NAME, |
928 | .id_table = pciidlist, | 935 | .id_table = pciidlist, |
929 | .probe = i915_pci_probe, | 936 | .probe = i915_pci_probe, |
930 | .remove = i915_pci_remove, | 937 | .remove = i915_pci_remove, |
931 | .driver.pm = &i915_pm_ops, | 938 | .driver.pm = &i915_pm_ops, |
932 | }; | 939 | }; |
933 | 940 | ||
934 | static int __init i915_init(void) | 941 | static int __init i915_init(void) |
935 | { | 942 | { |
936 | if (!intel_agp_enabled) { | 943 | if (!intel_agp_enabled) { |
937 | DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); | 944 | DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); |
938 | return -ENODEV; | 945 | return -ENODEV; |
939 | } | 946 | } |
940 | 947 | ||
941 | driver.num_ioctls = i915_max_ioctl; | 948 | driver.num_ioctls = i915_max_ioctl; |
942 | 949 | ||
943 | /* | 950 | /* |
944 | * If CONFIG_DRM_I915_KMS is set, default to KMS unless | 951 | * If CONFIG_DRM_I915_KMS is set, default to KMS unless |
945 | * explicitly disabled with the module parameter. | 952 | * explicitly disabled with the module parameter. |
946 | * | 953 | * |
947 | * Otherwise, just follow the parameter (defaulting to off). | 954 | * Otherwise, just follow the parameter (defaulting to off). |
948 | * | 955 | * |
949 | * Allow optional vga_text_mode_force boot option to override | 956 | * Allow optional vga_text_mode_force boot option to override |
950 | * the default behavior. | 957 | * the default behavior. |
951 | */ | 958 | */ |
952 | #if defined(CONFIG_DRM_I915_KMS) | 959 | #if defined(CONFIG_DRM_I915_KMS) |
953 | if (i915_modeset != 0) | 960 | if (i915_modeset != 0) |
954 | driver.driver_features |= DRIVER_MODESET; | 961 | driver.driver_features |= DRIVER_MODESET; |
955 | #endif | 962 | #endif |
956 | if (i915_modeset == 1) | 963 | if (i915_modeset == 1) |
957 | driver.driver_features |= DRIVER_MODESET; | 964 | driver.driver_features |= DRIVER_MODESET; |
958 | 965 | ||
959 | #ifdef CONFIG_VGA_CONSOLE | 966 | #ifdef CONFIG_VGA_CONSOLE |
960 | if (vgacon_text_force() && i915_modeset == -1) | 967 | if (vgacon_text_force() && i915_modeset == -1) |
961 | driver.driver_features &= ~DRIVER_MODESET; | 968 | driver.driver_features &= ~DRIVER_MODESET; |
962 | #endif | 969 | #endif |
963 | 970 | ||
964 | if (!(driver.driver_features & DRIVER_MODESET)) | 971 | if (!(driver.driver_features & DRIVER_MODESET)) |
965 | driver.get_vblank_timestamp = NULL; | 972 | driver.get_vblank_timestamp = NULL; |
966 | 973 | ||
967 | return drm_pci_init(&driver, &i915_pci_driver); | 974 | return drm_pci_init(&driver, &i915_pci_driver); |
968 | } | 975 | } |
969 | 976 | ||
970 | static void __exit i915_exit(void) | 977 | static void __exit i915_exit(void) |
971 | { | 978 | { |
972 | drm_pci_exit(&driver, &i915_pci_driver); | 979 | drm_pci_exit(&driver, &i915_pci_driver); |
973 | } | 980 | } |
974 | 981 | ||
975 | module_init(i915_init); | 982 | module_init(i915_init); |
976 | module_exit(i915_exit); | 983 | module_exit(i915_exit); |
977 | 984 | ||
978 | MODULE_AUTHOR(DRIVER_AUTHOR); | 985 | MODULE_AUTHOR(DRIVER_AUTHOR); |
979 | MODULE_DESCRIPTION(DRIVER_DESC); | 986 | MODULE_DESCRIPTION(DRIVER_DESC); |
980 | MODULE_LICENSE("GPL and additional rights"); | 987 | MODULE_LICENSE("GPL and additional rights"); |
981 | 988 | ||
982 | #define __i915_read(x, y) \ | 989 | #define __i915_read(x, y) \ |
983 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | 990 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ |
984 | u##x val = 0; \ | 991 | u##x val = 0; \ |
985 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 992 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
986 | unsigned long irqflags; \ | 993 | unsigned long irqflags; \ |
987 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ | 994 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ |
988 | if (dev_priv->forcewake_count == 0) \ | 995 | if (dev_priv->forcewake_count == 0) \ |
989 | dev_priv->display.force_wake_get(dev_priv); \ | 996 | dev_priv->display.force_wake_get(dev_priv); \ |
990 | val = read##y(dev_priv->regs + reg); \ | 997 | val = read##y(dev_priv->regs + reg); \ |
991 | if (dev_priv->forcewake_count == 0) \ | 998 | if (dev_priv->forcewake_count == 0) \ |
992 | dev_priv->display.force_wake_put(dev_priv); \ | 999 | dev_priv->display.force_wake_put(dev_priv); \ |
993 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ | 1000 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ |
994 | } else { \ | 1001 | } else { \ |
995 | val = read##y(dev_priv->regs + reg); \ | 1002 | val = read##y(dev_priv->regs + reg); \ |
996 | } \ | 1003 | } \ |
997 | trace_i915_reg_rw(false, reg, val, sizeof(val)); \ | 1004 | trace_i915_reg_rw(false, reg, val, sizeof(val)); \ |
998 | return val; \ | 1005 | return val; \ |
999 | } | 1006 | } |
1000 | 1007 | ||
1001 | __i915_read(8, b) | 1008 | __i915_read(8, b) |
1002 | __i915_read(16, w) | 1009 | __i915_read(16, w) |
1003 | __i915_read(32, l) | 1010 | __i915_read(32, l) |
1004 | __i915_read(64, q) | 1011 | __i915_read(64, q) |
1005 | #undef __i915_read | 1012 | #undef __i915_read |
1006 | 1013 | ||
1007 | #define __i915_write(x, y) \ | 1014 | #define __i915_write(x, y) \ |
1008 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | 1015 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ |
1009 | u32 __fifo_ret = 0; \ | 1016 | u32 __fifo_ret = 0; \ |
1010 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ | 1017 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ |
1011 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 1018 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
1012 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ | 1019 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ |
1013 | } \ | 1020 | } \ |
1014 | write##y(val, dev_priv->regs + reg); \ | 1021 | write##y(val, dev_priv->regs + reg); \ |
1015 | if (unlikely(__fifo_ret)) { \ | 1022 | if (unlikely(__fifo_ret)) { \ |
1016 | gen6_gt_check_fifodbg(dev_priv); \ | 1023 | gen6_gt_check_fifodbg(dev_priv); \ |
1017 | } \ | 1024 | } \ |
1018 | } | 1025 | } |
1019 | __i915_write(8, b) | 1026 | __i915_write(8, b) |
1020 | __i915_write(16, w) | 1027 | __i915_write(16, w) |
1021 | __i915_write(32, l) | 1028 | __i915_write(32, l) |
1022 | __i915_write(64, q) | 1029 | __i915_write(64, q) |
1023 | #undef __i915_write | 1030 | #undef __i915_write |
1024 | 1031 |
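
Editor's note: a brief, hypothetical sketch of how the accessors generated by the macros above are used; "reg" stands for any MMIO register offset and is not a specific i915 register.

/* Illustrative only: read-modify-write through the forcewake-aware
 * accessors. Reads in the forcewake range acquire and release forcewake
 * around the MMIO access; writes first wait for GT FIFO space on gen6. */
static void example_rmw_register(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 tmp = i915_read32(dev_priv, reg);

	i915_write32(dev_priv, reg, tmp | 0x1);	/* set an arbitrary bit */
}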
drivers/gpu/drm/i915/intel_drv.h
1 | /* | 1 | /* |
2 | * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> | 2 | * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> |
3 | * Copyright (c) 2007-2008 Intel Corporation | 3 | * Copyright (c) 2007-2008 Intel Corporation |
4 | * Jesse Barnes <jesse.barnes@intel.com> | 4 | * Jesse Barnes <jesse.barnes@intel.com> |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), | 7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation | 8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the | 10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: | 11 | * Software is furnished to do so, subject to the following conditions: |
12 | * | 12 | * |
13 | * The above copyright notice and this permission notice (including the next | 13 | * The above copyright notice and this permission notice (including the next |
14 | * paragraph) shall be included in all copies or substantial portions of the | 14 | * paragraph) shall be included in all copies or substantial portions of the |
15 | * Software. | 15 | * Software. |
16 | * | 16 | * |
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | 20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | 21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
22 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | 22 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
23 | * IN THE SOFTWARE. | 23 | * IN THE SOFTWARE. |
24 | */ | 24 | */ |
25 | #ifndef __INTEL_DRV_H__ | 25 | #ifndef __INTEL_DRV_H__ |
26 | #define __INTEL_DRV_H__ | 26 | #define __INTEL_DRV_H__ |
27 | 27 | ||
28 | #include <linux/i2c.h> | 28 | #include <linux/i2c.h> |
29 | #include "i915_drm.h" | 29 | #include "i915_drm.h" |
30 | #include "i915_drv.h" | 30 | #include "i915_drv.h" |
31 | #include "drm_crtc.h" | 31 | #include "drm_crtc.h" |
32 | #include "drm_crtc_helper.h" | 32 | #include "drm_crtc_helper.h" |
33 | #include "drm_fb_helper.h" | 33 | #include "drm_fb_helper.h" |
34 | 34 | ||
35 | #define _wait_for(COND, MS, W) ({ \ | 35 | #define _wait_for(COND, MS, W) ({ \ |
36 | unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ | 36 | unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ |
37 | int ret__ = 0; \ | 37 | int ret__ = 0; \ |
38 | while (!(COND)) { \ | 38 | while (!(COND)) { \ |
39 | if (time_after(jiffies, timeout__)) { \ | 39 | if (time_after(jiffies, timeout__)) { \ |
40 | ret__ = -ETIMEDOUT; \ | 40 | ret__ = -ETIMEDOUT; \ |
41 | break; \ | 41 | break; \ |
42 | } \ | 42 | } \ |
43 | if (W && drm_can_sleep()) msleep(W); \ | 43 | if (W && drm_can_sleep()) msleep(W); \ |
44 | } \ | 44 | } \ |
45 | ret__; \ | 45 | ret__; \ |
46 | }) | 46 | }) |
47 | 47 | ||
48 | #define wait_for(COND, MS) _wait_for(COND, MS, 1) | 48 | #define wait_for(COND, MS) _wait_for(COND, MS, 1) |
49 | #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) | 49 | #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) |
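
Editor's note: a hypothetical usage sketch of the polling helpers above; EXAMPLE_STATUS_REG and EXAMPLE_READY_BIT are placeholders, not real i915 registers.

#define EXAMPLE_STATUS_REG	0x0		/* placeholder offset */
#define EXAMPLE_READY_BIT	(1 << 0)	/* placeholder bit */

/* Poll for up to 50 ms; wait_for() sleeps ~1 ms between reads, whereas
 * wait_for_atomic() busy-waits and is usable in atomic context. */
static int example_wait_ready(struct drm_i915_private *dev_priv)
{
	if (wait_for(i915_read32(dev_priv, EXAMPLE_STATUS_REG) &
		     EXAMPLE_READY_BIT, 50))
		return -ETIMEDOUT;

	return 0;
}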
50 | 50 | ||
51 | #define KHz(x) (1000*x) | 51 | #define KHz(x) (1000*x) |
52 | #define MHz(x) KHz(1000*x) | 52 | #define MHz(x) KHz(1000*x) |
53 | 53 | ||
54 | /* | 54 | /* |
55 | * Display related stuff | 55 | * Display related stuff |
56 | */ | 56 | */ |
57 | 57 | ||
58 | /* store information about an Ixxx DVO */ | 58 | /* store information about an Ixxx DVO */ |
59 | /* The i830->i865 use multiple DVOs with multiple i2cs */ | 59 | /* The i830->i865 use multiple DVOs with multiple i2cs */ |
60 | /* the i915, i945 have a single sDVO i2c bus - which is different */ | 60 | /* the i915, i945 have a single sDVO i2c bus - which is different */ |
61 | #define MAX_OUTPUTS 6 | 61 | #define MAX_OUTPUTS 6 |
62 | /* maximum connectors per crtcs in the mode set */ | 62 | /* maximum connectors per crtcs in the mode set */ |
63 | #define INTELFB_CONN_LIMIT 4 | 63 | #define INTELFB_CONN_LIMIT 4 |
64 | 64 | ||
65 | #define INTEL_I2C_BUS_DVO 1 | 65 | #define INTEL_I2C_BUS_DVO 1 |
66 | #define INTEL_I2C_BUS_SDVO 2 | 66 | #define INTEL_I2C_BUS_SDVO 2 |
67 | 67 | ||
68 | /* these are outputs from the chip - integrated only | 68 | /* these are outputs from the chip - integrated only |
69 | external chips are via DVO or SDVO output */ | 69 | external chips are via DVO or SDVO output */ |
70 | #define INTEL_OUTPUT_UNUSED 0 | 70 | #define INTEL_OUTPUT_UNUSED 0 |
71 | #define INTEL_OUTPUT_ANALOG 1 | 71 | #define INTEL_OUTPUT_ANALOG 1 |
72 | #define INTEL_OUTPUT_DVO 2 | 72 | #define INTEL_OUTPUT_DVO 2 |
73 | #define INTEL_OUTPUT_SDVO 3 | 73 | #define INTEL_OUTPUT_SDVO 3 |
74 | #define INTEL_OUTPUT_LVDS 4 | 74 | #define INTEL_OUTPUT_LVDS 4 |
75 | #define INTEL_OUTPUT_TVOUT 5 | 75 | #define INTEL_OUTPUT_TVOUT 5 |
76 | #define INTEL_OUTPUT_HDMI 6 | 76 | #define INTEL_OUTPUT_HDMI 6 |
77 | #define INTEL_OUTPUT_DISPLAYPORT 7 | 77 | #define INTEL_OUTPUT_DISPLAYPORT 7 |
78 | #define INTEL_OUTPUT_EDP 8 | 78 | #define INTEL_OUTPUT_EDP 8 |
79 | 79 | ||
80 | /* Intel Pipe Clone Bit */ | 80 | /* Intel Pipe Clone Bit */ |
81 | #define INTEL_HDMIB_CLONE_BIT 1 | 81 | #define INTEL_HDMIB_CLONE_BIT 1 |
82 | #define INTEL_HDMIC_CLONE_BIT 2 | 82 | #define INTEL_HDMIC_CLONE_BIT 2 |
83 | #define INTEL_HDMID_CLONE_BIT 3 | 83 | #define INTEL_HDMID_CLONE_BIT 3 |
84 | #define INTEL_HDMIE_CLONE_BIT 4 | 84 | #define INTEL_HDMIE_CLONE_BIT 4 |
85 | #define INTEL_HDMIF_CLONE_BIT 5 | 85 | #define INTEL_HDMIF_CLONE_BIT 5 |
86 | #define INTEL_SDVO_NON_TV_CLONE_BIT 6 | 86 | #define INTEL_SDVO_NON_TV_CLONE_BIT 6 |
87 | #define INTEL_SDVO_TV_CLONE_BIT 7 | 87 | #define INTEL_SDVO_TV_CLONE_BIT 7 |
88 | #define INTEL_SDVO_LVDS_CLONE_BIT 8 | 88 | #define INTEL_SDVO_LVDS_CLONE_BIT 8 |
89 | #define INTEL_ANALOG_CLONE_BIT 9 | 89 | #define INTEL_ANALOG_CLONE_BIT 9 |
90 | #define INTEL_TV_CLONE_BIT 10 | 90 | #define INTEL_TV_CLONE_BIT 10 |
91 | #define INTEL_DP_B_CLONE_BIT 11 | 91 | #define INTEL_DP_B_CLONE_BIT 11 |
92 | #define INTEL_DP_C_CLONE_BIT 12 | 92 | #define INTEL_DP_C_CLONE_BIT 12 |
93 | #define INTEL_DP_D_CLONE_BIT 13 | 93 | #define INTEL_DP_D_CLONE_BIT 13 |
94 | #define INTEL_LVDS_CLONE_BIT 14 | 94 | #define INTEL_LVDS_CLONE_BIT 14 |
95 | #define INTEL_DVO_TMDS_CLONE_BIT 15 | 95 | #define INTEL_DVO_TMDS_CLONE_BIT 15 |
96 | #define INTEL_DVO_LVDS_CLONE_BIT 16 | 96 | #define INTEL_DVO_LVDS_CLONE_BIT 16 |
97 | #define INTEL_EDP_CLONE_BIT 17 | 97 | #define INTEL_EDP_CLONE_BIT 17 |
98 | 98 | ||
99 | #define INTEL_DVO_CHIP_NONE 0 | 99 | #define INTEL_DVO_CHIP_NONE 0 |
100 | #define INTEL_DVO_CHIP_LVDS 1 | 100 | #define INTEL_DVO_CHIP_LVDS 1 |
101 | #define INTEL_DVO_CHIP_TMDS 2 | 101 | #define INTEL_DVO_CHIP_TMDS 2 |
102 | #define INTEL_DVO_CHIP_TVOUT 4 | 102 | #define INTEL_DVO_CHIP_TVOUT 4 |
103 | 103 | ||
104 | /* drm_display_mode->private_flags */ | 104 | /* drm_display_mode->private_flags */ |
105 | #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) | 105 | #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) |
106 | #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) | 106 | #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) |
107 | #define INTEL_MODE_DP_FORCE_6BPC (0x10) | 107 | #define INTEL_MODE_DP_FORCE_6BPC (0x10) |
108 | 108 | ||
109 | static inline void | 109 | static inline void |
110 | intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, | 110 | intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, |
111 | int multiplier) | 111 | int multiplier) |
112 | { | 112 | { |
113 | mode->clock *= multiplier; | 113 | mode->clock *= multiplier; |
114 | mode->private_flags |= multiplier; | 114 | mode->private_flags |= multiplier; |
115 | } | 115 | } |
116 | 116 | ||
117 | static inline int | 117 | static inline int |
118 | intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) | 118 | intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) |
119 | { | 119 | { |
120 | return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT; | 120 | return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT; |
121 | } | 121 | } |
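
Editor's note: a short hypothetical sketch of the intended round trip: an encoder fix-up would record a 2x multiplier with intel_mode_set_pixel_multiplier(adjusted_mode, 2), and the modeset code would later read it back when choosing the DPLL clock, as below.

/* Illustrative only: recover the multiplier recorded in the adjusted
 * mode, treating "nothing recorded" as 1x. */
static int example_dpll_multiplier(const struct drm_display_mode *adjusted_mode)
{
	int mult = intel_mode_get_pixel_multiplier(adjusted_mode);

	return mult ? mult : 1;
}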
122 | 122 | ||
123 | struct intel_framebuffer { | 123 | struct intel_framebuffer { |
124 | struct drm_framebuffer base; | 124 | struct drm_framebuffer base; |
125 | struct drm_i915_gem_object *obj; | 125 | struct drm_i915_gem_object *obj; |
126 | }; | 126 | }; |
127 | 127 | ||
128 | struct intel_fbdev { | 128 | struct intel_fbdev { |
129 | struct drm_fb_helper helper; | 129 | struct drm_fb_helper helper; |
130 | struct intel_framebuffer ifb; | 130 | struct intel_framebuffer ifb; |
131 | struct list_head fbdev_list; | 131 | struct list_head fbdev_list; |
132 | struct drm_display_mode *our_mode; | 132 | struct drm_display_mode *our_mode; |
133 | }; | 133 | }; |
134 | 134 | ||
135 | struct intel_encoder { | 135 | struct intel_encoder { |
136 | struct drm_encoder base; | 136 | struct drm_encoder base; |
137 | int type; | 137 | int type; |
138 | bool needs_tv_clock; | 138 | bool needs_tv_clock; |
139 | void (*hot_plug)(struct intel_encoder *); | 139 | void (*hot_plug)(struct intel_encoder *); |
140 | int crtc_mask; | 140 | int crtc_mask; |
141 | int clone_mask; | 141 | int clone_mask; |
142 | }; | 142 | }; |
143 | 143 | ||
144 | struct intel_connector { | 144 | struct intel_connector { |
145 | struct drm_connector base; | 145 | struct drm_connector base; |
146 | struct intel_encoder *encoder; | 146 | struct intel_encoder *encoder; |
147 | }; | 147 | }; |
148 | 148 | ||
149 | struct intel_crtc { | 149 | struct intel_crtc { |
150 | struct drm_crtc base; | 150 | struct drm_crtc base; |
151 | enum pipe pipe; | 151 | enum pipe pipe; |
152 | enum plane plane; | 152 | enum plane plane; |
153 | u8 lut_r[256], lut_g[256], lut_b[256]; | 153 | u8 lut_r[256], lut_g[256], lut_b[256]; |
154 | int dpms_mode; | 154 | int dpms_mode; |
155 | bool active; /* is the crtc on? independent of the dpms mode */ | 155 | bool active; /* is the crtc on? independent of the dpms mode */ |
156 | bool busy; /* is scanout buffer being updated frequently? */ | 156 | bool busy; /* is scanout buffer being updated frequently? */ |
157 | struct timer_list idle_timer; | 157 | struct timer_list idle_timer; |
158 | bool lowfreq_avail; | 158 | bool lowfreq_avail; |
159 | struct intel_overlay *overlay; | 159 | struct intel_overlay *overlay; |
160 | struct intel_unpin_work *unpin_work; | 160 | struct intel_unpin_work *unpin_work; |
161 | int fdi_lanes; | 161 | int fdi_lanes; |
162 | 162 | ||
163 | struct drm_i915_gem_object *cursor_bo; | 163 | struct drm_i915_gem_object *cursor_bo; |
164 | uint32_t cursor_addr; | 164 | uint32_t cursor_addr; |
165 | int16_t cursor_x, cursor_y; | 165 | int16_t cursor_x, cursor_y; |
166 | int16_t cursor_width, cursor_height; | 166 | int16_t cursor_width, cursor_height; |
167 | bool cursor_visible; | 167 | bool cursor_visible; |
168 | unsigned int bpp; | 168 | unsigned int bpp; |
169 | 169 | ||
170 | bool no_pll; /* tertiary pipe for IVB */ | 170 | bool no_pll; /* tertiary pipe for IVB */ |
171 | bool use_pll_a; | 171 | bool use_pll_a; |
172 | }; | 172 | }; |
173 | 173 | ||
174 | struct intel_plane { | 174 | struct intel_plane { |
175 | struct drm_plane base; | 175 | struct drm_plane base; |
176 | enum pipe pipe; | 176 | enum pipe pipe; |
177 | struct drm_i915_gem_object *obj; | 177 | struct drm_i915_gem_object *obj; |
178 | bool primary_disabled; | 178 | bool primary_disabled; |
179 | int max_downscale; | 179 | int max_downscale; |
180 | u32 lut_r[1024], lut_g[1024], lut_b[1024]; | 180 | u32 lut_r[1024], lut_g[1024], lut_b[1024]; |
181 | void (*update_plane)(struct drm_plane *plane, | 181 | void (*update_plane)(struct drm_plane *plane, |
182 | struct drm_framebuffer *fb, | 182 | struct drm_framebuffer *fb, |
183 | struct drm_i915_gem_object *obj, | 183 | struct drm_i915_gem_object *obj, |
184 | int crtc_x, int crtc_y, | 184 | int crtc_x, int crtc_y, |
185 | unsigned int crtc_w, unsigned int crtc_h, | 185 | unsigned int crtc_w, unsigned int crtc_h, |
186 | uint32_t x, uint32_t y, | 186 | uint32_t x, uint32_t y, |
187 | uint32_t src_w, uint32_t src_h); | 187 | uint32_t src_w, uint32_t src_h); |
188 | void (*disable_plane)(struct drm_plane *plane); | 188 | void (*disable_plane)(struct drm_plane *plane); |
189 | int (*update_colorkey)(struct drm_plane *plane, | 189 | int (*update_colorkey)(struct drm_plane *plane, |
190 | struct drm_intel_sprite_colorkey *key); | 190 | struct drm_intel_sprite_colorkey *key); |
191 | void (*get_colorkey)(struct drm_plane *plane, | 191 | void (*get_colorkey)(struct drm_plane *plane, |
192 | struct drm_intel_sprite_colorkey *key); | 192 | struct drm_intel_sprite_colorkey *key); |
193 | }; | 193 | }; |
194 | 194 | ||
195 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | 195 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
196 | #define to_intel_connector(x) container_of(x, struct intel_connector, base) | 196 | #define to_intel_connector(x) container_of(x, struct intel_connector, base) |
197 | #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) | 197 | #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) |
198 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) | 198 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) |
199 | #define to_intel_plane(x) container_of(x, struct intel_plane, base) | 199 | #define to_intel_plane(x) container_of(x, struct intel_plane, base) |
200 | 200 | ||
201 | #define DIP_HEADER_SIZE 5 | 201 | #define DIP_HEADER_SIZE 5 |
202 | 202 | ||
203 | #define DIP_TYPE_AVI 0x82 | 203 | #define DIP_TYPE_AVI 0x82 |
204 | #define DIP_VERSION_AVI 0x2 | 204 | #define DIP_VERSION_AVI 0x2 |
205 | #define DIP_LEN_AVI 13 | 205 | #define DIP_LEN_AVI 13 |
206 | 206 | ||
207 | #define DIP_TYPE_SPD 0x83 | 207 | #define DIP_TYPE_SPD 0x83 |
208 | #define DIP_VERSION_SPD 0x1 | 208 | #define DIP_VERSION_SPD 0x1 |
209 | #define DIP_LEN_SPD 25 | 209 | #define DIP_LEN_SPD 25 |
210 | #define DIP_SPD_UNKNOWN 0 | 210 | #define DIP_SPD_UNKNOWN 0 |
211 | #define DIP_SPD_DSTB 0x1 | 211 | #define DIP_SPD_DSTB 0x1 |
212 | #define DIP_SPD_DVDP 0x2 | 212 | #define DIP_SPD_DVDP 0x2 |
213 | #define DIP_SPD_DVHS 0x3 | 213 | #define DIP_SPD_DVHS 0x3 |
214 | #define DIP_SPD_HDDVR 0x4 | 214 | #define DIP_SPD_HDDVR 0x4 |
215 | #define DIP_SPD_DVC 0x5 | 215 | #define DIP_SPD_DVC 0x5 |
216 | #define DIP_SPD_DSC 0x6 | 216 | #define DIP_SPD_DSC 0x6 |
217 | #define DIP_SPD_VCD 0x7 | 217 | #define DIP_SPD_VCD 0x7 |
218 | #define DIP_SPD_GAME 0x8 | 218 | #define DIP_SPD_GAME 0x8 |
219 | #define DIP_SPD_PC 0x9 | 219 | #define DIP_SPD_PC 0x9 |
220 | #define DIP_SPD_BD 0xa | 220 | #define DIP_SPD_BD 0xa |
221 | #define DIP_SPD_SCD 0xb | 221 | #define DIP_SPD_SCD 0xb |
222 | 222 | ||
223 | struct dip_infoframe { | 223 | struct dip_infoframe { |
224 | uint8_t type; /* HB0 */ | 224 | uint8_t type; /* HB0 */ |
225 | uint8_t ver; /* HB1 */ | 225 | uint8_t ver; /* HB1 */ |
226 | uint8_t len; /* HB2 - body len, not including checksum */ | 226 | uint8_t len; /* HB2 - body len, not including checksum */ |
227 | uint8_t ecc; /* Header ECC */ | 227 | uint8_t ecc; /* Header ECC */ |
228 | uint8_t checksum; /* PB0 */ | 228 | uint8_t checksum; /* PB0 */ |
229 | union { | 229 | union { |
230 | struct { | 230 | struct { |
231 | /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */ | 231 | /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */ |
232 | uint8_t Y_A_B_S; | 232 | uint8_t Y_A_B_S; |
233 | /* PB2 - C 7:6, M 5:4, R 3:0 */ | 233 | /* PB2 - C 7:6, M 5:4, R 3:0 */ |
234 | uint8_t C_M_R; | 234 | uint8_t C_M_R; |
235 | /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */ | 235 | /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */ |
236 | uint8_t ITC_EC_Q_SC; | 236 | uint8_t ITC_EC_Q_SC; |
237 | /* PB4 - VIC 6:0 */ | 237 | /* PB4 - VIC 6:0 */ |
238 | uint8_t VIC; | 238 | uint8_t VIC; |
239 | /* PB5 - PR 3:0 */ | 239 | /* PB5 - PR 3:0 */ |
240 | uint8_t PR; | 240 | uint8_t PR; |
241 | /* PB6 to PB13 */ | 241 | /* PB6 to PB13 */ |
242 | uint16_t top_bar_end; | 242 | uint16_t top_bar_end; |
243 | uint16_t bottom_bar_start; | 243 | uint16_t bottom_bar_start; |
244 | uint16_t left_bar_end; | 244 | uint16_t left_bar_end; |
245 | uint16_t right_bar_start; | 245 | uint16_t right_bar_start; |
246 | } avi; | 246 | } avi; |
247 | struct { | 247 | struct { |
248 | uint8_t vn[8]; | 248 | uint8_t vn[8]; |
249 | uint8_t pd[16]; | 249 | uint8_t pd[16]; |
250 | uint8_t sdi; | 250 | uint8_t sdi; |
251 | } spd; | 251 | } spd; |
252 | uint8_t payload[27]; | 252 | uint8_t payload[27]; |
253 | } __attribute__ ((packed)) body; | 253 | } __attribute__ ((packed)) body; |
254 | } __attribute__((packed)); | 254 | } __attribute__((packed)); |
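
Editor's note: a minimal, hypothetical sketch of how the struct above is typically populated for an AVI frame; intel_dip_infoframe_csum(), declared further down in this header, fills in the checksum byte.

/* Illustrative only: header bytes for an AVI infoframe, payload left
 * zeroed, checksum filled in by the helper before the frame is written
 * to the DIP hardware. */
static void example_fill_avi_infoframe(struct dip_infoframe *frame)
{
	memset(frame, 0, sizeof(*frame));
	frame->type = DIP_TYPE_AVI;
	frame->ver = DIP_VERSION_AVI;
	frame->len = DIP_LEN_AVI;

	intel_dip_infoframe_csum(frame);
}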
255 | 255 | ||
256 | static inline struct drm_crtc * | 256 | static inline struct drm_crtc * |
257 | intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) | 257 | intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) |
258 | { | 258 | { |
259 | struct drm_i915_private *dev_priv = dev->dev_private; | 259 | struct drm_i915_private *dev_priv = dev->dev_private; |
260 | return dev_priv->pipe_to_crtc_mapping[pipe]; | 260 | return dev_priv->pipe_to_crtc_mapping[pipe]; |
261 | } | 261 | } |
262 | 262 | ||
263 | static inline struct drm_crtc * | 263 | static inline struct drm_crtc * |
264 | intel_get_crtc_for_plane(struct drm_device *dev, int plane) | 264 | intel_get_crtc_for_plane(struct drm_device *dev, int plane) |
265 | { | 265 | { |
266 | struct drm_i915_private *dev_priv = dev->dev_private; | 266 | struct drm_i915_private *dev_priv = dev->dev_private; |
267 | return dev_priv->plane_to_crtc_mapping[plane]; | 267 | return dev_priv->plane_to_crtc_mapping[plane]; |
268 | } | 268 | } |
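
Editor's note: a hypothetical convenience wrapper combining the lookup helper above with the to_intel_crtc() cast defined earlier in this header.

/* Illustrative only: resolve a pipe number to its intel_crtc, or NULL
 * if the pipe has no CRTC mapped yet. */
static struct intel_crtc *example_intel_crtc_for_pipe(struct drm_device *dev,
						      int pipe)
{
	struct drm_crtc *crtc = intel_get_crtc_for_pipe(dev, pipe);

	return crtc ? to_intel_crtc(crtc) : NULL;
}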
269 | 269 | ||
270 | struct intel_unpin_work { | 270 | struct intel_unpin_work { |
271 | struct work_struct work; | 271 | struct work_struct work; |
272 | struct drm_device *dev; | 272 | struct drm_device *dev; |
273 | struct drm_i915_gem_object *old_fb_obj; | 273 | struct drm_i915_gem_object *old_fb_obj; |
274 | struct drm_i915_gem_object *pending_flip_obj; | 274 | struct drm_i915_gem_object *pending_flip_obj; |
275 | struct drm_pending_vblank_event *event; | 275 | struct drm_pending_vblank_event *event; |
276 | int pending; | 276 | int pending; |
277 | bool enable_stall_check; | 277 | bool enable_stall_check; |
278 | }; | 278 | }; |
279 | 279 | ||
280 | struct intel_fbc_work { | 280 | struct intel_fbc_work { |
281 | struct delayed_work work; | 281 | struct delayed_work work; |
282 | struct drm_crtc *crtc; | 282 | struct drm_crtc *crtc; |
283 | struct drm_framebuffer *fb; | 283 | struct drm_framebuffer *fb; |
284 | int interval; | 284 | int interval; |
285 | }; | 285 | }; |
286 | 286 | ||
287 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); | 287 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); |
288 | extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); | 288 | extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); |
289 | 289 | ||
290 | extern void intel_attach_force_audio_property(struct drm_connector *connector); | 290 | extern void intel_attach_force_audio_property(struct drm_connector *connector); |
291 | extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); | 291 | extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); |
292 | 292 | ||
293 | extern void intel_crt_init(struct drm_device *dev); | 293 | extern void intel_crt_init(struct drm_device *dev); |
294 | extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); | 294 | extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); |
295 | void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); | 295 | void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); |
296 | extern bool intel_sdvo_init(struct drm_device *dev, int output_device); | 296 | extern bool intel_sdvo_init(struct drm_device *dev, int output_device); |
297 | extern void intel_dvo_init(struct drm_device *dev); | 297 | extern void intel_dvo_init(struct drm_device *dev); |
298 | extern void intel_tv_init(struct drm_device *dev); | 298 | extern void intel_tv_init(struct drm_device *dev); |
299 | extern void intel_mark_busy(struct drm_device *dev, | 299 | extern void intel_mark_busy(struct drm_device *dev, |
300 | struct drm_i915_gem_object *obj); | 300 | struct drm_i915_gem_object *obj); |
301 | extern bool intel_lvds_init(struct drm_device *dev); | 301 | extern bool intel_lvds_init(struct drm_device *dev); |
302 | extern void intel_dp_init(struct drm_device *dev, int dp_reg); | 302 | extern void intel_dp_init(struct drm_device *dev, int dp_reg); |
303 | void | 303 | void |
304 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 304 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
305 | struct drm_display_mode *adjusted_mode); | 305 | struct drm_display_mode *adjusted_mode); |
306 | extern bool intel_dpd_is_edp(struct drm_device *dev); | 306 | extern bool intel_dpd_is_edp(struct drm_device *dev); |
307 | extern void intel_edp_link_config(struct intel_encoder *, int *, int *); | 307 | extern void intel_edp_link_config(struct intel_encoder *, int *, int *); |
308 | extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); | 308 | extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); |
309 | extern int intel_plane_init(struct drm_device *dev, enum pipe pipe); | 309 | extern int intel_plane_init(struct drm_device *dev, enum pipe pipe); |
310 | 310 | ||
311 | /* intel_panel.c */ | 311 | /* intel_panel.c */ |
312 | extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | 312 | extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
313 | struct drm_display_mode *adjusted_mode); | 313 | struct drm_display_mode *adjusted_mode); |
314 | extern void intel_pch_panel_fitting(struct drm_device *dev, | 314 | extern void intel_pch_panel_fitting(struct drm_device *dev, |
315 | int fitting_mode, | 315 | int fitting_mode, |
316 | struct drm_display_mode *mode, | 316 | struct drm_display_mode *mode, |
317 | struct drm_display_mode *adjusted_mode); | 317 | struct drm_display_mode *adjusted_mode); |
318 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); | 318 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); |
319 | extern u32 intel_panel_get_backlight(struct drm_device *dev); | 319 | extern u32 intel_panel_get_backlight(struct drm_device *dev); |
320 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); | 320 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); |
321 | extern int intel_panel_setup_backlight(struct drm_device *dev); | 321 | extern int intel_panel_setup_backlight(struct drm_device *dev); |
322 | extern void intel_panel_enable_backlight(struct drm_device *dev); | 322 | extern void intel_panel_enable_backlight(struct drm_device *dev); |
323 | extern void intel_panel_disable_backlight(struct drm_device *dev); | 323 | extern void intel_panel_disable_backlight(struct drm_device *dev); |
324 | extern void intel_panel_destroy_backlight(struct drm_device *dev); | 324 | extern void intel_panel_destroy_backlight(struct drm_device *dev); |
325 | extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); | 325 | extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); |
326 | 326 | ||
327 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | 327 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
328 | extern void intel_encoder_prepare(struct drm_encoder *encoder); | 328 | extern void intel_encoder_prepare(struct drm_encoder *encoder); |
329 | extern void intel_encoder_commit(struct drm_encoder *encoder); | 329 | extern void intel_encoder_commit(struct drm_encoder *encoder); |
330 | extern void intel_encoder_destroy(struct drm_encoder *encoder); | 330 | extern void intel_encoder_destroy(struct drm_encoder *encoder); |
331 | 331 | ||
332 | static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) | 332 | static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) |
333 | { | 333 | { |
334 | return to_intel_connector(connector)->encoder; | 334 | return to_intel_connector(connector)->encoder; |
335 | } | 335 | } |
336 | 336 | ||
337 | extern void intel_connector_attach_encoder(struct intel_connector *connector, | 337 | extern void intel_connector_attach_encoder(struct intel_connector *connector, |
338 | struct intel_encoder *encoder); | 338 | struct intel_encoder *encoder); |
339 | extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); | 339 | extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); |
340 | 340 | ||
341 | extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | 341 | extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, |
342 | struct drm_crtc *crtc); | 342 | struct drm_crtc *crtc); |
343 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 343 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
344 | struct drm_file *file_priv); | 344 | struct drm_file *file_priv); |
345 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); | 345 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); |
346 | extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); | 346 | extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); |
347 | 347 | ||
348 | struct intel_load_detect_pipe { | 348 | struct intel_load_detect_pipe { |
349 | struct drm_framebuffer *release_fb; | 349 | struct drm_framebuffer *release_fb; |
350 | bool load_detect_temp; | 350 | bool load_detect_temp; |
351 | int dpms_mode; | 351 | int dpms_mode; |
352 | }; | 352 | }; |
353 | extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | 353 | extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
354 | struct drm_connector *connector, | 354 | struct drm_connector *connector, |
355 | struct drm_display_mode *mode, | 355 | struct drm_display_mode *mode, |
356 | struct intel_load_detect_pipe *old); | 356 | struct intel_load_detect_pipe *old); |
357 | extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, | 357 | extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, |
358 | struct drm_connector *connector, | 358 | struct drm_connector *connector, |
359 | struct intel_load_detect_pipe *old); | 359 | struct intel_load_detect_pipe *old); |
360 | 360 | ||
361 | extern void intelfb_restore(void); | 361 | extern void intelfb_restore(void); |
362 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 362 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
363 | u16 blue, int regno); | 363 | u16 blue, int regno); |
364 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | 364 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, |
365 | u16 *blue, int regno); | 365 | u16 *blue, int regno); |
366 | extern void intel_enable_clock_gating(struct drm_device *dev); | 366 | extern void intel_enable_clock_gating(struct drm_device *dev); |
367 | extern void ironlake_enable_drps(struct drm_device *dev); | 367 | extern void ironlake_enable_drps(struct drm_device *dev); |
368 | extern void ironlake_disable_drps(struct drm_device *dev); | 368 | extern void ironlake_disable_drps(struct drm_device *dev); |
369 | extern void gen6_enable_rps(struct drm_i915_private *dev_priv); | 369 | extern void gen6_enable_rps(struct drm_i915_private *dev_priv); |
370 | extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv); | 370 | extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv); |
371 | extern void gen6_disable_rps(struct drm_device *dev); | 371 | extern void gen6_disable_rps(struct drm_device *dev); |
372 | extern void intel_init_emon(struct drm_device *dev); | 372 | extern void intel_init_emon(struct drm_device *dev); |
373 | 373 | ||
374 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, | 374 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, |
375 | struct drm_i915_gem_object *obj, | 375 | struct drm_i915_gem_object *obj, |
376 | struct intel_ring_buffer *pipelined); | 376 | struct intel_ring_buffer *pipelined); |
377 | extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj); | 377 | extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj); |
378 | 378 | ||
379 | extern int intel_framebuffer_init(struct drm_device *dev, | 379 | extern int intel_framebuffer_init(struct drm_device *dev, |
380 | struct intel_framebuffer *ifb, | 380 | struct intel_framebuffer *ifb, |
381 | struct drm_mode_fb_cmd2 *mode_cmd, | 381 | struct drm_mode_fb_cmd2 *mode_cmd, |
382 | struct drm_i915_gem_object *obj); | 382 | struct drm_i915_gem_object *obj); |
383 | extern int intel_fbdev_init(struct drm_device *dev); | 383 | extern int intel_fbdev_init(struct drm_device *dev); |
384 | extern void intel_fbdev_fini(struct drm_device *dev); | 384 | extern void intel_fbdev_fini(struct drm_device *dev); |
385 | 385 | extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); | |
386 | extern void intel_prepare_page_flip(struct drm_device *dev, int plane); | 386 | extern void intel_prepare_page_flip(struct drm_device *dev, int plane); |
387 | extern void intel_finish_page_flip(struct drm_device *dev, int pipe); | 387 | extern void intel_finish_page_flip(struct drm_device *dev, int pipe); |
388 | extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); | 388 | extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); |
389 | 389 | ||
390 | extern void intel_setup_overlay(struct drm_device *dev); | 390 | extern void intel_setup_overlay(struct drm_device *dev); |
391 | extern void intel_cleanup_overlay(struct drm_device *dev); | 391 | extern void intel_cleanup_overlay(struct drm_device *dev); |
392 | extern int intel_overlay_switch_off(struct intel_overlay *overlay); | 392 | extern int intel_overlay_switch_off(struct intel_overlay *overlay); |
393 | extern int intel_overlay_put_image(struct drm_device *dev, void *data, | 393 | extern int intel_overlay_put_image(struct drm_device *dev, void *data, |
394 | struct drm_file *file_priv); | 394 | struct drm_file *file_priv); |
395 | extern int intel_overlay_attrs(struct drm_device *dev, void *data, | 395 | extern int intel_overlay_attrs(struct drm_device *dev, void *data, |
396 | struct drm_file *file_priv); | 396 | struct drm_file *file_priv); |
397 | 397 | ||
398 | extern void intel_fb_output_poll_changed(struct drm_device *dev); | 398 | extern void intel_fb_output_poll_changed(struct drm_device *dev); |
399 | extern void intel_fb_restore_mode(struct drm_device *dev); | 399 | extern void intel_fb_restore_mode(struct drm_device *dev); |
400 | 400 | ||
401 | extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, | 401 | extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, |
402 | bool state); | 402 | bool state); |
403 | #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) | 403 | #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) |
404 | #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) | 404 | #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) |
405 | 405 | ||
406 | extern void intel_init_clock_gating(struct drm_device *dev); | 406 | extern void intel_init_clock_gating(struct drm_device *dev); |
407 | extern void intel_write_eld(struct drm_encoder *encoder, | 407 | extern void intel_write_eld(struct drm_encoder *encoder, |
408 | struct drm_display_mode *mode); | 408 | struct drm_display_mode *mode); |
409 | extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe); | 409 | extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe); |
410 | 410 | ||
411 | /* For use by IVB LP watermark workaround in intel_sprite.c */ | 411 | /* For use by IVB LP watermark workaround in intel_sprite.c */ |
412 | extern void sandybridge_update_wm(struct drm_device *dev); | 412 | extern void sandybridge_update_wm(struct drm_device *dev); |
413 | extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, | 413 | extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, |
414 | uint32_t sprite_width, | 414 | uint32_t sprite_width, |
415 | int pixel_size); | 415 | int pixel_size); |
416 | 416 | ||
417 | extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, | 417 | extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, |
418 | struct drm_file *file_priv); | 418 | struct drm_file *file_priv); |
419 | extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, | 419 | extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, |
420 | struct drm_file *file_priv); | 420 | struct drm_file *file_priv); |
421 | 421 | ||
422 | #endif /* __INTEL_DRV_H__ */ | 422 | #endif /* __INTEL_DRV_H__ */ |
423 | 423 |
drivers/gpu/drm/i915/intel_fb.c
1 | /* | 1 | /* |
2 | * Copyright © 2007 David Airlie | 2 | * Copyright © 2007 David Airlie |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation | 6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the | 8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: | 9 | * Software is furnished to do so, subject to the following conditions: |
10 | * | 10 | * |
11 | * The above copyright notice and this permission notice (including the next | 11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the | 12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. | 13 | * Software. |
14 | * | 14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
21 | * DEALINGS IN THE SOFTWARE. | 21 | * DEALINGS IN THE SOFTWARE. |
22 | * | 22 | * |
23 | * Authors: | 23 | * Authors: |
24 | * David Airlie | 24 | * David Airlie |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <linux/errno.h> | 29 | #include <linux/errno.h> |
30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
31 | #include <linux/mm.h> | 31 | #include <linux/mm.h> |
32 | #include <linux/tty.h> | 32 | #include <linux/tty.h> |
33 | #include <linux/sysrq.h> | 33 | #include <linux/sysrq.h> |
34 | #include <linux/delay.h> | 34 | #include <linux/delay.h> |
35 | #include <linux/fb.h> | 35 | #include <linux/fb.h> |
36 | #include <linux/init.h> | 36 | #include <linux/init.h> |
37 | #include <linux/vga_switcheroo.h> | 37 | #include <linux/vga_switcheroo.h> |
38 | 38 | ||
39 | #include "drmP.h" | 39 | #include "drmP.h" |
40 | #include "drm.h" | 40 | #include "drm.h" |
41 | #include "drm_crtc.h" | 41 | #include "drm_crtc.h" |
42 | #include "drm_fb_helper.h" | 42 | #include "drm_fb_helper.h" |
43 | #include "intel_drv.h" | 43 | #include "intel_drv.h" |
44 | #include "i915_drm.h" | 44 | #include "i915_drm.h" |
45 | #include "i915_drv.h" | 45 | #include "i915_drv.h" |
46 | 46 | ||
47 | static struct fb_ops intelfb_ops = { | 47 | static struct fb_ops intelfb_ops = { |
48 | .owner = THIS_MODULE, | 48 | .owner = THIS_MODULE, |
49 | .fb_check_var = drm_fb_helper_check_var, | 49 | .fb_check_var = drm_fb_helper_check_var, |
50 | .fb_set_par = drm_fb_helper_set_par, | 50 | .fb_set_par = drm_fb_helper_set_par, |
51 | .fb_fillrect = cfb_fillrect, | 51 | .fb_fillrect = cfb_fillrect, |
52 | .fb_copyarea = cfb_copyarea, | 52 | .fb_copyarea = cfb_copyarea, |
53 | .fb_imageblit = cfb_imageblit, | 53 | .fb_imageblit = cfb_imageblit, |
54 | .fb_pan_display = drm_fb_helper_pan_display, | 54 | .fb_pan_display = drm_fb_helper_pan_display, |
55 | .fb_blank = drm_fb_helper_blank, | 55 | .fb_blank = drm_fb_helper_blank, |
56 | .fb_setcmap = drm_fb_helper_setcmap, | 56 | .fb_setcmap = drm_fb_helper_setcmap, |
57 | .fb_debug_enter = drm_fb_helper_debug_enter, | 57 | .fb_debug_enter = drm_fb_helper_debug_enter, |
58 | .fb_debug_leave = drm_fb_helper_debug_leave, | 58 | .fb_debug_leave = drm_fb_helper_debug_leave, |
59 | }; | 59 | }; |
60 | 60 | ||
61 | static int intelfb_create(struct intel_fbdev *ifbdev, | 61 | static int intelfb_create(struct intel_fbdev *ifbdev, |
62 | struct drm_fb_helper_surface_size *sizes) | 62 | struct drm_fb_helper_surface_size *sizes) |
63 | { | 63 | { |
64 | struct drm_device *dev = ifbdev->helper.dev; | 64 | struct drm_device *dev = ifbdev->helper.dev; |
65 | struct drm_i915_private *dev_priv = dev->dev_private; | 65 | struct drm_i915_private *dev_priv = dev->dev_private; |
66 | struct fb_info *info; | 66 | struct fb_info *info; |
67 | struct drm_framebuffer *fb; | 67 | struct drm_framebuffer *fb; |
68 | struct drm_mode_fb_cmd2 mode_cmd; | 68 | struct drm_mode_fb_cmd2 mode_cmd; |
69 | struct drm_i915_gem_object *obj; | 69 | struct drm_i915_gem_object *obj; |
70 | struct device *device = &dev->pdev->dev; | 70 | struct device *device = &dev->pdev->dev; |
71 | int size, ret; | 71 | int size, ret; |
72 | 72 | ||
73 | /* we don't do packed 24bpp */ | 73 | /* we don't do packed 24bpp */ |
74 | if (sizes->surface_bpp == 24) | 74 | if (sizes->surface_bpp == 24) |
75 | sizes->surface_bpp = 32; | 75 | sizes->surface_bpp = 32; |
76 | 76 | ||
77 | mode_cmd.width = sizes->surface_width; | 77 | mode_cmd.width = sizes->surface_width; |
78 | mode_cmd.height = sizes->surface_height; | 78 | mode_cmd.height = sizes->surface_height; |
79 | 79 | ||
80 | mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) / | 80 | mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) / |
81 | 8), 64); | 81 | 8), 64); |
82 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, | 82 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, |
83 | sizes->surface_depth); | 83 | sizes->surface_depth); |
84 | 84 | ||
85 | size = mode_cmd.pitches[0] * mode_cmd.height; | 85 | size = mode_cmd.pitches[0] * mode_cmd.height; |
86 | size = ALIGN(size, PAGE_SIZE); | 86 | size = ALIGN(size, PAGE_SIZE); |
87 | obj = i915_gem_alloc_object(dev, size); | 87 | obj = i915_gem_alloc_object(dev, size); |
88 | if (!obj) { | 88 | if (!obj) { |
89 | DRM_ERROR("failed to allocate framebuffer\n"); | 89 | DRM_ERROR("failed to allocate framebuffer\n"); |
90 | ret = -ENOMEM; | 90 | ret = -ENOMEM; |
91 | goto out; | 91 | goto out; |
92 | } | 92 | } |
93 | 93 | ||
94 | mutex_lock(&dev->struct_mutex); | 94 | mutex_lock(&dev->struct_mutex); |
95 | 95 | ||
96 | /* Flush everything out, we'll be doing GTT only from now on */ | 96 | /* Flush everything out, we'll be doing GTT only from now on */ |
97 | ret = intel_pin_and_fence_fb_obj(dev, obj, false); | 97 | ret = intel_pin_and_fence_fb_obj(dev, obj, false); |
98 | if (ret) { | 98 | if (ret) { |
99 | DRM_ERROR("failed to pin fb: %d\n", ret); | 99 | DRM_ERROR("failed to pin fb: %d\n", ret); |
100 | goto out_unref; | 100 | goto out_unref; |
101 | } | 101 | } |
102 | 102 | ||
103 | info = framebuffer_alloc(0, device); | 103 | info = framebuffer_alloc(0, device); |
104 | if (!info) { | 104 | if (!info) { |
105 | ret = -ENOMEM; | 105 | ret = -ENOMEM; |
106 | goto out_unpin; | 106 | goto out_unpin; |
107 | } | 107 | } |
108 | 108 | ||
109 | info->par = ifbdev; | 109 | info->par = ifbdev; |
110 | 110 | ||
111 | ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); | 111 | ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); |
112 | if (ret) | 112 | if (ret) |
113 | goto out_unpin; | 113 | goto out_unpin; |
114 | 114 | ||
115 | fb = &ifbdev->ifb.base; | 115 | fb = &ifbdev->ifb.base; |
116 | 116 | ||
117 | ifbdev->helper.fb = fb; | 117 | ifbdev->helper.fb = fb; |
118 | ifbdev->helper.fbdev = info; | 118 | ifbdev->helper.fbdev = info; |
119 | 119 | ||
120 | strcpy(info->fix.id, "inteldrmfb"); | 120 | strcpy(info->fix.id, "inteldrmfb"); |
121 | 121 | ||
122 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; | 122 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; |
123 | info->fbops = &intelfb_ops; | 123 | info->fbops = &intelfb_ops; |
124 | 124 | ||
125 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | 125 | ret = fb_alloc_cmap(&info->cmap, 256, 0); |
126 | if (ret) { | 126 | if (ret) { |
127 | ret = -ENOMEM; | 127 | ret = -ENOMEM; |
128 | goto out_unpin; | 128 | goto out_unpin; |
129 | } | 129 | } |
130 | /* setup aperture base/size for vesafb takeover */ | 130 | /* setup aperture base/size for vesafb takeover */ |
131 | info->apertures = alloc_apertures(1); | 131 | info->apertures = alloc_apertures(1); |
132 | if (!info->apertures) { | 132 | if (!info->apertures) { |
133 | ret = -ENOMEM; | 133 | ret = -ENOMEM; |
134 | goto out_unpin; | 134 | goto out_unpin; |
135 | } | 135 | } |
136 | info->apertures->ranges[0].base = dev->mode_config.fb_base; | 136 | info->apertures->ranges[0].base = dev->mode_config.fb_base; |
137 | info->apertures->ranges[0].size = | 137 | info->apertures->ranges[0].size = |
138 | dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | 138 | dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; |
139 | 139 | ||
140 | info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; | 140 | info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; |
141 | info->fix.smem_len = size; | 141 | info->fix.smem_len = size; |
142 | 142 | ||
143 | info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size); | 143 | info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size); |
144 | if (!info->screen_base) { | 144 | if (!info->screen_base) { |
145 | ret = -ENOSPC; | 145 | ret = -ENOSPC; |
146 | goto out_unpin; | 146 | goto out_unpin; |
147 | } | 147 | } |
148 | info->screen_size = size; | 148 | info->screen_size = size; |
149 | 149 | ||
150 | // memset(info->screen_base, 0, size); | 150 | // memset(info->screen_base, 0, size); |
151 | 151 | ||
152 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); | 152 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); |
153 | drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); | 153 | drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); |
154 | 154 | ||
155 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ | 155 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ |
156 | 156 | ||
157 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", | 157 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", |
158 | fb->width, fb->height, | 158 | fb->width, fb->height, |
159 | obj->gtt_offset, obj); | 159 | obj->gtt_offset, obj); |
160 | 160 | ||
161 | 161 | ||
162 | mutex_unlock(&dev->struct_mutex); | 162 | mutex_unlock(&dev->struct_mutex); |
163 | vga_switcheroo_client_fb_set(dev->pdev, info); | 163 | vga_switcheroo_client_fb_set(dev->pdev, info); |
164 | return 0; | 164 | return 0; |
165 | 165 | ||
166 | out_unpin: | 166 | out_unpin: |
167 | i915_gem_object_unpin(obj); | 167 | i915_gem_object_unpin(obj); |
168 | out_unref: | 168 | out_unref: |
169 | drm_gem_object_unreference(&obj->base); | 169 | drm_gem_object_unreference(&obj->base); |
170 | mutex_unlock(&dev->struct_mutex); | 170 | mutex_unlock(&dev->struct_mutex); |
171 | out: | 171 | out: |
172 | return ret; | 172 | return ret; |
173 | } | 173 | } |
174 | 174 | ||
175 | static int intel_fb_find_or_create_single(struct drm_fb_helper *helper, | 175 | static int intel_fb_find_or_create_single(struct drm_fb_helper *helper, |
176 | struct drm_fb_helper_surface_size *sizes) | 176 | struct drm_fb_helper_surface_size *sizes) |
177 | { | 177 | { |
178 | struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper; | 178 | struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper; |
179 | int new_fb = 0; | 179 | int new_fb = 0; |
180 | int ret; | 180 | int ret; |
181 | 181 | ||
182 | if (!helper->fb) { | 182 | if (!helper->fb) { |
183 | ret = intelfb_create(ifbdev, sizes); | 183 | ret = intelfb_create(ifbdev, sizes); |
184 | if (ret) | 184 | if (ret) |
185 | return ret; | 185 | return ret; |
186 | new_fb = 1; | 186 | new_fb = 1; |
187 | } | 187 | } |
188 | return new_fb; | 188 | return new_fb; |
189 | } | 189 | } |
190 | 190 | ||
191 | static struct drm_fb_helper_funcs intel_fb_helper_funcs = { | 191 | static struct drm_fb_helper_funcs intel_fb_helper_funcs = { |
192 | .gamma_set = intel_crtc_fb_gamma_set, | 192 | .gamma_set = intel_crtc_fb_gamma_set, |
193 | .gamma_get = intel_crtc_fb_gamma_get, | 193 | .gamma_get = intel_crtc_fb_gamma_get, |
194 | .fb_probe = intel_fb_find_or_create_single, | 194 | .fb_probe = intel_fb_find_or_create_single, |
195 | }; | 195 | }; |
196 | 196 | ||
197 | static void intel_fbdev_destroy(struct drm_device *dev, | 197 | static void intel_fbdev_destroy(struct drm_device *dev, |
198 | struct intel_fbdev *ifbdev) | 198 | struct intel_fbdev *ifbdev) |
199 | { | 199 | { |
200 | struct fb_info *info; | 200 | struct fb_info *info; |
201 | struct intel_framebuffer *ifb = &ifbdev->ifb; | 201 | struct intel_framebuffer *ifb = &ifbdev->ifb; |
202 | 202 | ||
203 | if (ifbdev->helper.fbdev) { | 203 | if (ifbdev->helper.fbdev) { |
204 | info = ifbdev->helper.fbdev; | 204 | info = ifbdev->helper.fbdev; |
205 | unregister_framebuffer(info); | 205 | unregister_framebuffer(info); |
206 | iounmap(info->screen_base); | 206 | iounmap(info->screen_base); |
207 | if (info->cmap.len) | 207 | if (info->cmap.len) |
208 | fb_dealloc_cmap(&info->cmap); | 208 | fb_dealloc_cmap(&info->cmap); |
209 | framebuffer_release(info); | 209 | framebuffer_release(info); |
210 | } | 210 | } |
211 | 211 | ||
212 | drm_fb_helper_fini(&ifbdev->helper); | 212 | drm_fb_helper_fini(&ifbdev->helper); |
213 | 213 | ||
214 | drm_framebuffer_cleanup(&ifb->base); | 214 | drm_framebuffer_cleanup(&ifb->base); |
215 | if (ifb->obj) { | 215 | if (ifb->obj) { |
216 | drm_gem_object_unreference_unlocked(&ifb->obj->base); | 216 | drm_gem_object_unreference_unlocked(&ifb->obj->base); |
217 | ifb->obj = NULL; | 217 | ifb->obj = NULL; |
218 | } | 218 | } |
219 | } | 219 | } |
220 | 220 | ||
221 | int intel_fbdev_init(struct drm_device *dev) | 221 | int intel_fbdev_init(struct drm_device *dev) |
222 | { | 222 | { |
223 | struct intel_fbdev *ifbdev; | 223 | struct intel_fbdev *ifbdev; |
224 | drm_i915_private_t *dev_priv = dev->dev_private; | 224 | drm_i915_private_t *dev_priv = dev->dev_private; |
225 | int ret; | 225 | int ret; |
226 | 226 | ||
227 | ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); | 227 | ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); |
228 | if (!ifbdev) | 228 | if (!ifbdev) |
229 | return -ENOMEM; | 229 | return -ENOMEM; |
230 | 230 | ||
231 | dev_priv->fbdev = ifbdev; | 231 | dev_priv->fbdev = ifbdev; |
232 | ifbdev->helper.funcs = &intel_fb_helper_funcs; | 232 | ifbdev->helper.funcs = &intel_fb_helper_funcs; |
233 | 233 | ||
234 | ret = drm_fb_helper_init(dev, &ifbdev->helper, | 234 | ret = drm_fb_helper_init(dev, &ifbdev->helper, |
235 | dev_priv->num_pipe, | 235 | dev_priv->num_pipe, |
236 | INTELFB_CONN_LIMIT); | 236 | INTELFB_CONN_LIMIT); |
237 | if (ret) { | 237 | if (ret) { |
238 | kfree(ifbdev); | 238 | kfree(ifbdev); |
239 | return ret; | 239 | return ret; |
240 | } | 240 | } |
241 | 241 | ||
242 | drm_fb_helper_single_add_all_connectors(&ifbdev->helper); | 242 | drm_fb_helper_single_add_all_connectors(&ifbdev->helper); |
243 | drm_fb_helper_initial_config(&ifbdev->helper, 32); | 243 | drm_fb_helper_initial_config(&ifbdev->helper, 32); |
244 | return 0; | 244 | return 0; |
245 | } | 245 | } |
246 | 246 | ||
247 | void intel_fbdev_fini(struct drm_device *dev) | 247 | void intel_fbdev_fini(struct drm_device *dev) |
248 | { | 248 | { |
249 | drm_i915_private_t *dev_priv = dev->dev_private; | 249 | drm_i915_private_t *dev_priv = dev->dev_private; |
250 | if (!dev_priv->fbdev) | 250 | if (!dev_priv->fbdev) |
251 | return; | 251 | return; |
252 | 252 | ||
253 | intel_fbdev_destroy(dev, dev_priv->fbdev); | 253 | intel_fbdev_destroy(dev, dev_priv->fbdev); |
254 | kfree(dev_priv->fbdev); | 254 | kfree(dev_priv->fbdev); |
255 | dev_priv->fbdev = NULL; | 255 | dev_priv->fbdev = NULL; |
256 | } | 256 | } |
257 | |||
258 | void intel_fbdev_set_suspend(struct drm_device *dev, int state) | ||
259 | { | ||
260 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
261 | if (!dev_priv->fbdev) | ||
262 | return; | ||
263 | |||
264 | fb_set_suspend(dev_priv->fbdev->helper.fbdev, state); | ||
265 | } | ||
266 | |||
257 | MODULE_LICENSE("GPL and additional rights"); | 267 | MODULE_LICENSE("GPL and additional rights"); |
258 | 268 | ||
259 | void intel_fb_output_poll_changed(struct drm_device *dev) | 269 | void intel_fb_output_poll_changed(struct drm_device *dev) |
260 | { | 270 | { |
261 | drm_i915_private_t *dev_priv = dev->dev_private; | 271 | drm_i915_private_t *dev_priv = dev->dev_private; |
262 | drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); | 272 | drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); |
263 | } | 273 | } |
264 | 274 | ||
265 | void intel_fb_restore_mode(struct drm_device *dev) | 275 | void intel_fb_restore_mode(struct drm_device *dev) |
266 | { | 276 | { |
267 | int ret; | 277 | int ret; |
268 | drm_i915_private_t *dev_priv = dev->dev_private; | 278 | drm_i915_private_t *dev_priv = dev->dev_private; |
269 | struct drm_mode_config *config = &dev->mode_config; | 279 | struct drm_mode_config *config = &dev->mode_config; |
270 | struct drm_plane *plane; | 280 | struct drm_plane *plane; |
271 | 281 | ||
272 | ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); | 282 | ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); |
273 | if (ret) | 283 | if (ret) |
274 | DRM_DEBUG("failed to restore crtc mode\n"); | 284 | DRM_DEBUG("failed to restore crtc mode\n"); |
275 | 285 | ||
276 | /* Be sure to shut off any planes that may be active */ | 286 | /* Be sure to shut off any planes that may be active */ |
277 | list_for_each_entry(plane, &config->plane_list, head) | 287 | list_for_each_entry(plane, &config->plane_list, head) |
278 | plane->funcs->disable_plane(plane); | 288 | plane->funcs->disable_plane(plane); |
279 | } | 289 | } |
280 | 290 | ||
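
The helper added above (new lines 258-265) is a thin wrapper: it returns early if the fbdev emulation was never set up, and otherwise forwards to the fbdev core's fb_set_suspend(), which marks the emulated framebuffer as suspended (state != 0) or running again (state == 0) so that fbcon stops or resumes drawing through it. The call sites that bracket the driver's suspend/resume paths are not part of this hunk. As a rough, illustrative sketch only (the wrapper function names below are hypothetical, not the real i915 entry points, and the helper's prototype is assumed to be visible via a driver header not shown here), a driver would be expected to use it roughly like this:

/*
 * Illustrative sketch only -- not part of the commit. The freeze/thaw
 * function names are made up; only intel_fbdev_set_suspend() and the
 * fb_set_suspend() state convention (non-zero = suspend, 0 = resume)
 * come from the code above.
 */
static int example_driver_freeze(struct drm_device *dev)
{
	/* Keep fbcon from writing through the emulated framebuffer
	 * while the device is being powered down. */
	intel_fbdev_set_suspend(dev, 1);

	/* ... save device state and quiesce the hardware ... */

	return 0;
}

static int example_driver_thaw(struct drm_device *dev)
{
	/* ... restore hardware state and the display configuration ... */

	/* Let fbcon render again now that the device is back. */
	intel_fbdev_set_suspend(dev, 0);

	return 0;
}

Because intel_fbdev_set_suspend() checks dev_priv->fbdev itself, callers do not need to special-case configurations in which the fbdev emulation is absent.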