Commit 7a1619b97e978bb9c05fa4bbe64171068bd5bf85

Authored by Michel Dänzer
Committed by Dave Airlie
1 parent 471dd2ef37

drm/radeon: Make sure CS mutex is held across GPU reset.

This was only the case if the GPU reset was triggered from the CS ioctl;
otherwise, other processes could happily enter the CS ioctl and wreak havoc
during the GPU reset.

This is a little complicated because the GPU reset can be triggered from the
CS ioctl, in which case we're already holding the mutex, or from other call
paths, in which case we need to lock the mutex. AFAICT the mutex API doesn't
allow recursive locking or finding out the mutex owner, so we handle this
with helper functions that allow recursive locking from the same process.
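
A minimal sketch of the kind of helper this describes: a wrapper that records
the owning task plus a recursion depth, so a re-lock from the same process is
a no-op and only the outermost unlock releases the mutex. Names and details
are illustrative of the approach, not necessarily the exact code added by
this commit.

/* Sketch: mutex wrapper allowing recursive locking from the same process. */
struct radeon_mutex {
        struct mutex mutex;
        struct task_struct *owner;
        int level;
};

static inline void radeon_mutex_init(struct radeon_mutex *mutex)
{
        mutex_init(&mutex->mutex);
        mutex->owner = NULL;
        mutex->level = 0;
}

static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
{
        if (mutex_trylock(&mutex->mutex)) {
                /* The mutex was unlocked before, so it's ours now */
                mutex->owner = current;
        } else if (mutex->owner != current) {
                /* Another process locked the mutex, take it */
                mutex_lock(&mutex->mutex);
                mutex->owner = current;
        }
        /* Otherwise the mutex was already locked by this process */

        mutex->level++;
}

static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
{
        /* Only the outermost unlock from the owner releases the mutex */
        if (--mutex->level > 0)
                return;

        mutex->owner = NULL;
        mutex_unlock(&mutex->mutex);
}

Call paths that can reach the GPU reset outside the CS ioctl would then
bracket the reset with radeon_mutex_lock()/radeon_mutex_unlock(), while the
CS ioctl path, which already owns the lock, can re-enter it without
deadlocking.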

Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>

Showing 3 changed files with 62 additions and 12 deletions

drivers/gpu/drm/radeon/radeon.h
1 /* 1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc. 2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc. 3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse. 4 * Copyright 2009 Jerome Glisse.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation 8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the 10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions: 11 * Software is furnished to do so, subject to the following conditions:
12 * 12 *
13 * The above copyright notice and this permission notice shall be included in 13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software. 14 * all copies or substantial portions of the Software.
15 * 15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE. 22 * OTHER DEALINGS IN THE SOFTWARE.
23 * 23 *
24 * Authors: Dave Airlie 24 * Authors: Dave Airlie
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28 #ifndef __RADEON_H__ 28 #ifndef __RADEON_H__
29 #define __RADEON_H__ 29 #define __RADEON_H__
30 30
31 /* TODO: Here are things that needs to be done : 31 /* TODO: Here are things that needs to be done :
32 * - surface allocator & initializer : (bit like scratch reg) should 32 * - surface allocator & initializer : (bit like scratch reg) should
33 * initialize HDP_ stuff on RS600, R600, R700 hw, well anythings 33 * initialize HDP_ stuff on RS600, R600, R700 hw, well anythings
34 * related to surface 34 * related to surface
35 * - WB : write back stuff (do it bit like scratch reg things) 35 * - WB : write back stuff (do it bit like scratch reg things)
36 * - Vblank : look at Jesse's rework and what we should do 36 * - Vblank : look at Jesse's rework and what we should do
37 * - r600/r700: gart & cp 37 * - r600/r700: gart & cp
38 * - cs : clean cs ioctl use bitmap & things like that. 38 * - cs : clean cs ioctl use bitmap & things like that.
39 * - power management stuff 39 * - power management stuff
40 * - Barrier in gart code 40 * - Barrier in gart code
41 * - Unmappabled vram ? 41 * - Unmappabled vram ?
42 * - TESTING, TESTING, TESTING 42 * - TESTING, TESTING, TESTING
43 */ 43 */
44 44
45 /* Initialization path: 45 /* Initialization path:
46 * We expect that acceleration initialization might fail for various 46 * We expect that acceleration initialization might fail for various
47 * reasons even thought we work hard to make it works on most 47 * reasons even thought we work hard to make it works on most
48 * configurations. In order to still have a working userspace in such 48 * configurations. In order to still have a working userspace in such
49 * situation the init path must succeed up to the memory controller 49 * situation the init path must succeed up to the memory controller
50 * initialization point. Failure before this point are considered as 50 * initialization point. Failure before this point are considered as
51 * fatal error. Here is the init callchain : 51 * fatal error. Here is the init callchain :
52 * radeon_device_init perform common structure, mutex initialization 52 * radeon_device_init perform common structure, mutex initialization
53 * asic_init setup the GPU memory layout and perform all 53 * asic_init setup the GPU memory layout and perform all
54 * one time initialization (failure in this 54 * one time initialization (failure in this
55 * function are considered fatal) 55 * function are considered fatal)
56 * asic_startup setup the GPU acceleration, in order to 56 * asic_startup setup the GPU acceleration, in order to
57 * follow guideline the first thing this 57 * follow guideline the first thing this
58 * function should do is setting the GPU 58 * function should do is setting the GPU
59 * memory controller (only MC setup failure 59 * memory controller (only MC setup failure
60 * are considered as fatal) 60 * are considered as fatal)
61 */ 61 */
62 62
63 #include <linux/atomic.h> 63 #include <linux/atomic.h>
64 #include <linux/wait.h> 64 #include <linux/wait.h>
65 #include <linux/list.h> 65 #include <linux/list.h>
66 #include <linux/kref.h> 66 #include <linux/kref.h>
67 67
68 #include <ttm/ttm_bo_api.h> 68 #include <ttm/ttm_bo_api.h>
69 #include <ttm/ttm_bo_driver.h> 69 #include <ttm/ttm_bo_driver.h>
70 #include <ttm/ttm_placement.h> 70 #include <ttm/ttm_placement.h>
71 #include <ttm/ttm_module.h> 71 #include <ttm/ttm_module.h>
72 #include <ttm/ttm_execbuf_util.h> 72 #include <ttm/ttm_execbuf_util.h>
73 73
74 #include "radeon_family.h" 74 #include "radeon_family.h"
75 #include "radeon_mode.h" 75 #include "radeon_mode.h"
76 #include "radeon_reg.h" 76 #include "radeon_reg.h"
77 77
78 /* 78 /*
79 * Modules parameters. 79 * Modules parameters.
80 */ 80 */
81 extern int radeon_no_wb; 81 extern int radeon_no_wb;
82 extern int radeon_modeset; 82 extern int radeon_modeset;
83 extern int radeon_dynclks; 83 extern int radeon_dynclks;
84 extern int radeon_r4xx_atom; 84 extern int radeon_r4xx_atom;
85 extern int radeon_agpmode; 85 extern int radeon_agpmode;
86 extern int radeon_vram_limit; 86 extern int radeon_vram_limit;
87 extern int radeon_gart_size; 87 extern int radeon_gart_size;
88 extern int radeon_benchmarking; 88 extern int radeon_benchmarking;
89 extern int radeon_testing; 89 extern int radeon_testing;
90 extern int radeon_connector_table; 90 extern int radeon_connector_table;
91 extern int radeon_tv; 91 extern int radeon_tv;
92 extern int radeon_audio; 92 extern int radeon_audio;
93 extern int radeon_disp_priority; 93 extern int radeon_disp_priority;
94 extern int radeon_hw_i2c; 94 extern int radeon_hw_i2c;
95 extern int radeon_pcie_gen2; 95 extern int radeon_pcie_gen2;
96 extern int radeon_msi; 96 extern int radeon_msi;
97 97
98 /* 98 /*
99 * Copy from radeon_drv.h so we don't have to include both and have conflicting 99 * Copy from radeon_drv.h so we don't have to include both and have conflicting
100 * symbol; 100 * symbol;
101 */ 101 */
102 #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 102 #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
103 #define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2) 103 #define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
104 /* RADEON_IB_POOL_SIZE must be a power of 2 */ 104 /* RADEON_IB_POOL_SIZE must be a power of 2 */
105 #define RADEON_IB_POOL_SIZE 16 105 #define RADEON_IB_POOL_SIZE 16
106 #define RADEON_DEBUGFS_MAX_COMPONENTS 32 106 #define RADEON_DEBUGFS_MAX_COMPONENTS 32
107 #define RADEONFB_CONN_LIMIT 4 107 #define RADEONFB_CONN_LIMIT 4
108 #define RADEON_BIOS_NUM_SCRATCH 8 108 #define RADEON_BIOS_NUM_SCRATCH 8
109 109
110 /* 110 /*
111 * Errata workarounds. 111 * Errata workarounds.
112 */ 112 */
113 enum radeon_pll_errata { 113 enum radeon_pll_errata {
114 CHIP_ERRATA_R300_CG = 0x00000001, 114 CHIP_ERRATA_R300_CG = 0x00000001,
115 CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002, 115 CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002,
116 CHIP_ERRATA_PLL_DELAY = 0x00000004 116 CHIP_ERRATA_PLL_DELAY = 0x00000004
117 }; 117 };
118 118
119 119
120 struct radeon_device; 120 struct radeon_device;
121 121
122 122
123 /* 123 /*
124 * BIOS. 124 * BIOS.
125 */ 125 */
126 #define ATRM_BIOS_PAGE 4096 126 #define ATRM_BIOS_PAGE 4096
127 127
128 #if defined(CONFIG_VGA_SWITCHEROO) 128 #if defined(CONFIG_VGA_SWITCHEROO)
129 bool radeon_atrm_supported(struct pci_dev *pdev); 129 bool radeon_atrm_supported(struct pci_dev *pdev);
130 int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len); 130 int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
131 #else 131 #else
132 static inline bool radeon_atrm_supported(struct pci_dev *pdev) 132 static inline bool radeon_atrm_supported(struct pci_dev *pdev)
133 { 133 {
134 return false; 134 return false;
135 } 135 }
136 136
137 static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){ 137 static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
138 return -EINVAL; 138 return -EINVAL;
139 } 139 }
140 #endif 140 #endif
141 bool radeon_get_bios(struct radeon_device *rdev); 141 bool radeon_get_bios(struct radeon_device *rdev);
142 142
143 143
144 /* 144 /*
145 * Dummy page 145 * Dummy page
146 */ 146 */
147 struct radeon_dummy_page { 147 struct radeon_dummy_page {
148 struct page *page; 148 struct page *page;
149 dma_addr_t addr; 149 dma_addr_t addr;
150 }; 150 };
151 int radeon_dummy_page_init(struct radeon_device *rdev); 151 int radeon_dummy_page_init(struct radeon_device *rdev);
152 void radeon_dummy_page_fini(struct radeon_device *rdev); 152 void radeon_dummy_page_fini(struct radeon_device *rdev);
153 153
154 154
155 /* 155 /*
156 * Clocks 156 * Clocks
157 */ 157 */
158 struct radeon_clock { 158 struct radeon_clock {
159 struct radeon_pll p1pll; 159 struct radeon_pll p1pll;
160 struct radeon_pll p2pll; 160 struct radeon_pll p2pll;
161 struct radeon_pll dcpll; 161 struct radeon_pll dcpll;
162 struct radeon_pll spll; 162 struct radeon_pll spll;
163 struct radeon_pll mpll; 163 struct radeon_pll mpll;
164 /* 10 Khz units */ 164 /* 10 Khz units */
165 uint32_t default_mclk; 165 uint32_t default_mclk;
166 uint32_t default_sclk; 166 uint32_t default_sclk;
167 uint32_t default_dispclk; 167 uint32_t default_dispclk;
168 uint32_t dp_extclk; 168 uint32_t dp_extclk;
169 uint32_t max_pixel_clock; 169 uint32_t max_pixel_clock;
170 }; 170 };
171 171
172 /* 172 /*
173 * Power management 173 * Power management
174 */ 174 */
175 int radeon_pm_init(struct radeon_device *rdev); 175 int radeon_pm_init(struct radeon_device *rdev);
176 void radeon_pm_fini(struct radeon_device *rdev); 176 void radeon_pm_fini(struct radeon_device *rdev);
177 void radeon_pm_compute_clocks(struct radeon_device *rdev); 177 void radeon_pm_compute_clocks(struct radeon_device *rdev);
178 void radeon_pm_suspend(struct radeon_device *rdev); 178 void radeon_pm_suspend(struct radeon_device *rdev);
179 void radeon_pm_resume(struct radeon_device *rdev); 179 void radeon_pm_resume(struct radeon_device *rdev);
180 void radeon_combios_get_power_modes(struct radeon_device *rdev); 180 void radeon_combios_get_power_modes(struct radeon_device *rdev);
181 void radeon_atombios_get_power_modes(struct radeon_device *rdev); 181 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
182 void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); 182 void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
183 int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage); 183 int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage);
184 void rs690_pm_info(struct radeon_device *rdev); 184 void rs690_pm_info(struct radeon_device *rdev);
185 extern int rv6xx_get_temp(struct radeon_device *rdev); 185 extern int rv6xx_get_temp(struct radeon_device *rdev);
186 extern int rv770_get_temp(struct radeon_device *rdev); 186 extern int rv770_get_temp(struct radeon_device *rdev);
187 extern int evergreen_get_temp(struct radeon_device *rdev); 187 extern int evergreen_get_temp(struct radeon_device *rdev);
188 extern int sumo_get_temp(struct radeon_device *rdev); 188 extern int sumo_get_temp(struct radeon_device *rdev);
189 189
190 /* 190 /*
191 * Fences. 191 * Fences.
192 */ 192 */
193 struct radeon_fence_driver { 193 struct radeon_fence_driver {
194 uint32_t scratch_reg; 194 uint32_t scratch_reg;
195 atomic_t seq; 195 atomic_t seq;
196 uint32_t last_seq; 196 uint32_t last_seq;
197 unsigned long last_jiffies; 197 unsigned long last_jiffies;
198 unsigned long last_timeout; 198 unsigned long last_timeout;
199 wait_queue_head_t queue; 199 wait_queue_head_t queue;
200 rwlock_t lock; 200 rwlock_t lock;
201 struct list_head created; 201 struct list_head created;
202 struct list_head emited; 202 struct list_head emited;
203 struct list_head signaled; 203 struct list_head signaled;
204 bool initialized; 204 bool initialized;
205 }; 205 };
206 206
207 struct radeon_fence { 207 struct radeon_fence {
208 struct radeon_device *rdev; 208 struct radeon_device *rdev;
209 struct kref kref; 209 struct kref kref;
210 struct list_head list; 210 struct list_head list;
211 /* protected by radeon_fence.lock */ 211 /* protected by radeon_fence.lock */
212 uint32_t seq; 212 uint32_t seq;
213 bool emited; 213 bool emited;
214 bool signaled; 214 bool signaled;
215 }; 215 };
216 216
217 int radeon_fence_driver_init(struct radeon_device *rdev); 217 int radeon_fence_driver_init(struct radeon_device *rdev);
218 void radeon_fence_driver_fini(struct radeon_device *rdev); 218 void radeon_fence_driver_fini(struct radeon_device *rdev);
219 int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence); 219 int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
220 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence); 220 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
221 void radeon_fence_process(struct radeon_device *rdev); 221 void radeon_fence_process(struct radeon_device *rdev);
222 bool radeon_fence_signaled(struct radeon_fence *fence); 222 bool radeon_fence_signaled(struct radeon_fence *fence);
223 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); 223 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
224 int radeon_fence_wait_next(struct radeon_device *rdev); 224 int radeon_fence_wait_next(struct radeon_device *rdev);
225 int radeon_fence_wait_last(struct radeon_device *rdev); 225 int radeon_fence_wait_last(struct radeon_device *rdev);
226 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); 226 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
227 void radeon_fence_unref(struct radeon_fence **fence); 227 void radeon_fence_unref(struct radeon_fence **fence);
228 228
229 /* 229 /*
230 * Tiling registers 230 * Tiling registers
231 */ 231 */
232 struct radeon_surface_reg { 232 struct radeon_surface_reg {
233 struct radeon_bo *bo; 233 struct radeon_bo *bo;
234 }; 234 };
235 235
236 #define RADEON_GEM_MAX_SURFACES 8 236 #define RADEON_GEM_MAX_SURFACES 8
237 237
238 /* 238 /*
239 * TTM. 239 * TTM.
240 */ 240 */
241 struct radeon_mman { 241 struct radeon_mman {
242 struct ttm_bo_global_ref bo_global_ref; 242 struct ttm_bo_global_ref bo_global_ref;
243 struct drm_global_reference mem_global_ref; 243 struct drm_global_reference mem_global_ref;
244 struct ttm_bo_device bdev; 244 struct ttm_bo_device bdev;
245 bool mem_global_referenced; 245 bool mem_global_referenced;
246 bool initialized; 246 bool initialized;
247 }; 247 };
248 248
249 struct radeon_bo { 249 struct radeon_bo {
250 /* Protected by gem.mutex */ 250 /* Protected by gem.mutex */
251 struct list_head list; 251 struct list_head list;
252 /* Protected by tbo.reserved */ 252 /* Protected by tbo.reserved */
253 u32 placements[3]; 253 u32 placements[3];
254 struct ttm_placement placement; 254 struct ttm_placement placement;
255 struct ttm_buffer_object tbo; 255 struct ttm_buffer_object tbo;
256 struct ttm_bo_kmap_obj kmap; 256 struct ttm_bo_kmap_obj kmap;
257 unsigned pin_count; 257 unsigned pin_count;
258 void *kptr; 258 void *kptr;
259 u32 tiling_flags; 259 u32 tiling_flags;
260 u32 pitch; 260 u32 pitch;
261 int surface_reg; 261 int surface_reg;
262 /* Constant after initialization */ 262 /* Constant after initialization */
263 struct radeon_device *rdev; 263 struct radeon_device *rdev;
264 struct drm_gem_object gem_base; 264 struct drm_gem_object gem_base;
265 }; 265 };
266 #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) 266 #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
267 267
268 struct radeon_bo_list { 268 struct radeon_bo_list {
269 struct ttm_validate_buffer tv; 269 struct ttm_validate_buffer tv;
270 struct radeon_bo *bo; 270 struct radeon_bo *bo;
271 uint64_t gpu_offset; 271 uint64_t gpu_offset;
272 unsigned rdomain; 272 unsigned rdomain;
273 unsigned wdomain; 273 unsigned wdomain;
274 u32 tiling_flags; 274 u32 tiling_flags;
275 }; 275 };
276 276
277 /* 277 /*
278 * GEM objects. 278 * GEM objects.
279 */ 279 */
280 struct radeon_gem { 280 struct radeon_gem {
281 struct mutex mutex; 281 struct mutex mutex;
282 struct list_head objects; 282 struct list_head objects;
283 }; 283 };
284 284
285 int radeon_gem_init(struct radeon_device *rdev); 285 int radeon_gem_init(struct radeon_device *rdev);
286 void radeon_gem_fini(struct radeon_device *rdev); 286 void radeon_gem_fini(struct radeon_device *rdev);
287 int radeon_gem_object_create(struct radeon_device *rdev, int size, 287 int radeon_gem_object_create(struct radeon_device *rdev, int size,
288 int alignment, int initial_domain, 288 int alignment, int initial_domain,
289 bool discardable, bool kernel, 289 bool discardable, bool kernel,
290 struct drm_gem_object **obj); 290 struct drm_gem_object **obj);
291 int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, 291 int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
292 uint64_t *gpu_addr); 292 uint64_t *gpu_addr);
293 void radeon_gem_object_unpin(struct drm_gem_object *obj); 293 void radeon_gem_object_unpin(struct drm_gem_object *obj);
294 294
295 int radeon_mode_dumb_create(struct drm_file *file_priv, 295 int radeon_mode_dumb_create(struct drm_file *file_priv,
296 struct drm_device *dev, 296 struct drm_device *dev,
297 struct drm_mode_create_dumb *args); 297 struct drm_mode_create_dumb *args);
298 int radeon_mode_dumb_mmap(struct drm_file *filp, 298 int radeon_mode_dumb_mmap(struct drm_file *filp,
299 struct drm_device *dev, 299 struct drm_device *dev,
300 uint32_t handle, uint64_t *offset_p); 300 uint32_t handle, uint64_t *offset_p);
301 int radeon_mode_dumb_destroy(struct drm_file *file_priv, 301 int radeon_mode_dumb_destroy(struct drm_file *file_priv,
302 struct drm_device *dev, 302 struct drm_device *dev,
303 uint32_t handle); 303 uint32_t handle);
304 304
305 /* 305 /*
306 * GART structures, functions & helpers 306 * GART structures, functions & helpers
307 */ 307 */
308 struct radeon_mc; 308 struct radeon_mc;
309 309
310 #define RADEON_GPU_PAGE_SIZE 4096 310 #define RADEON_GPU_PAGE_SIZE 4096
311 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) 311 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
312 #define RADEON_GPU_PAGE_SHIFT 12 312 #define RADEON_GPU_PAGE_SHIFT 12
313 313
314 struct radeon_gart { 314 struct radeon_gart {
315 dma_addr_t table_addr; 315 dma_addr_t table_addr;
316 struct radeon_bo *robj; 316 struct radeon_bo *robj;
317 void *ptr; 317 void *ptr;
318 unsigned num_gpu_pages; 318 unsigned num_gpu_pages;
319 unsigned num_cpu_pages; 319 unsigned num_cpu_pages;
320 unsigned table_size; 320 unsigned table_size;
321 struct page **pages; 321 struct page **pages;
322 dma_addr_t *pages_addr; 322 dma_addr_t *pages_addr;
323 bool *ttm_alloced; 323 bool *ttm_alloced;
324 bool ready; 324 bool ready;
325 }; 325 };
326 326
327 int radeon_gart_table_ram_alloc(struct radeon_device *rdev); 327 int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
328 void radeon_gart_table_ram_free(struct radeon_device *rdev); 328 void radeon_gart_table_ram_free(struct radeon_device *rdev);
329 int radeon_gart_table_vram_alloc(struct radeon_device *rdev); 329 int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
330 void radeon_gart_table_vram_free(struct radeon_device *rdev); 330 void radeon_gart_table_vram_free(struct radeon_device *rdev);
331 int radeon_gart_table_vram_pin(struct radeon_device *rdev); 331 int radeon_gart_table_vram_pin(struct radeon_device *rdev);
332 void radeon_gart_table_vram_unpin(struct radeon_device *rdev); 332 void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
333 int radeon_gart_init(struct radeon_device *rdev); 333 int radeon_gart_init(struct radeon_device *rdev);
334 void radeon_gart_fini(struct radeon_device *rdev); 334 void radeon_gart_fini(struct radeon_device *rdev);
335 void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, 335 void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
336 int pages); 336 int pages);
337 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, 337 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
338 int pages, struct page **pagelist, 338 int pages, struct page **pagelist,
339 dma_addr_t *dma_addr); 339 dma_addr_t *dma_addr);
340 void radeon_gart_restore(struct radeon_device *rdev); 340 void radeon_gart_restore(struct radeon_device *rdev);
341 341
342 342
343 /* 343 /*
344 * GPU MC structures, functions & helpers 344 * GPU MC structures, functions & helpers
345 */ 345 */
346 struct radeon_mc { 346 struct radeon_mc {
347 resource_size_t aper_size; 347 resource_size_t aper_size;
348 resource_size_t aper_base; 348 resource_size_t aper_base;
349 resource_size_t agp_base; 349 resource_size_t agp_base;
350 /* for some chips with <= 32MB we need to lie 350 /* for some chips with <= 32MB we need to lie
351 * about vram size near mc fb location */ 351 * about vram size near mc fb location */
352 u64 mc_vram_size; 352 u64 mc_vram_size;
353 u64 visible_vram_size; 353 u64 visible_vram_size;
354 u64 gtt_size; 354 u64 gtt_size;
355 u64 gtt_start; 355 u64 gtt_start;
356 u64 gtt_end; 356 u64 gtt_end;
357 u64 vram_start; 357 u64 vram_start;
358 u64 vram_end; 358 u64 vram_end;
359 unsigned vram_width; 359 unsigned vram_width;
360 u64 real_vram_size; 360 u64 real_vram_size;
361 int vram_mtrr; 361 int vram_mtrr;
362 bool vram_is_ddr; 362 bool vram_is_ddr;
363 bool igp_sideport_enabled; 363 bool igp_sideport_enabled;
364 u64 gtt_base_align; 364 u64 gtt_base_align;
365 }; 365 };
366 366
367 bool radeon_combios_sideport_present(struct radeon_device *rdev); 367 bool radeon_combios_sideport_present(struct radeon_device *rdev);
368 bool radeon_atombios_sideport_present(struct radeon_device *rdev); 368 bool radeon_atombios_sideport_present(struct radeon_device *rdev);
369 369
370 /* 370 /*
371 * GPU scratch registers structures, functions & helpers 371 * GPU scratch registers structures, functions & helpers
372 */ 372 */
373 struct radeon_scratch { 373 struct radeon_scratch {
374 unsigned num_reg; 374 unsigned num_reg;
375 uint32_t reg_base; 375 uint32_t reg_base;
376 bool free[32]; 376 bool free[32];
377 uint32_t reg[32]; 377 uint32_t reg[32];
378 }; 378 };
379 379
380 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg); 380 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
381 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg); 381 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
382 382
383 383
384 /* 384 /*
385 * IRQS. 385 * IRQS.
386 */ 386 */
387 387
388 struct radeon_unpin_work { 388 struct radeon_unpin_work {
389 struct work_struct work; 389 struct work_struct work;
390 struct radeon_device *rdev; 390 struct radeon_device *rdev;
391 int crtc_id; 391 int crtc_id;
392 struct radeon_fence *fence; 392 struct radeon_fence *fence;
393 struct drm_pending_vblank_event *event; 393 struct drm_pending_vblank_event *event;
394 struct radeon_bo *old_rbo; 394 struct radeon_bo *old_rbo;
395 u64 new_crtc_base; 395 u64 new_crtc_base;
396 }; 396 };
397 397
398 struct r500_irq_stat_regs { 398 struct r500_irq_stat_regs {
399 u32 disp_int; 399 u32 disp_int;
400 }; 400 };
401 401
402 struct r600_irq_stat_regs { 402 struct r600_irq_stat_regs {
403 u32 disp_int; 403 u32 disp_int;
404 u32 disp_int_cont; 404 u32 disp_int_cont;
405 u32 disp_int_cont2; 405 u32 disp_int_cont2;
406 u32 d1grph_int; 406 u32 d1grph_int;
407 u32 d2grph_int; 407 u32 d2grph_int;
408 }; 408 };
409 409
410 struct evergreen_irq_stat_regs { 410 struct evergreen_irq_stat_regs {
411 u32 disp_int; 411 u32 disp_int;
412 u32 disp_int_cont; 412 u32 disp_int_cont;
413 u32 disp_int_cont2; 413 u32 disp_int_cont2;
414 u32 disp_int_cont3; 414 u32 disp_int_cont3;
415 u32 disp_int_cont4; 415 u32 disp_int_cont4;
416 u32 disp_int_cont5; 416 u32 disp_int_cont5;
417 u32 d1grph_int; 417 u32 d1grph_int;
418 u32 d2grph_int; 418 u32 d2grph_int;
419 u32 d3grph_int; 419 u32 d3grph_int;
420 u32 d4grph_int; 420 u32 d4grph_int;
421 u32 d5grph_int; 421 u32 d5grph_int;
422 u32 d6grph_int; 422 u32 d6grph_int;
423 }; 423 };
424 424
425 union radeon_irq_stat_regs { 425 union radeon_irq_stat_regs {
426 struct r500_irq_stat_regs r500; 426 struct r500_irq_stat_regs r500;
427 struct r600_irq_stat_regs r600; 427 struct r600_irq_stat_regs r600;
428 struct evergreen_irq_stat_regs evergreen; 428 struct evergreen_irq_stat_regs evergreen;
429 }; 429 };
430 430
431 #define RADEON_MAX_HPD_PINS 6 431 #define RADEON_MAX_HPD_PINS 6
432 #define RADEON_MAX_CRTCS 6 432 #define RADEON_MAX_CRTCS 6
433 #define RADEON_MAX_HDMI_BLOCKS 2 433 #define RADEON_MAX_HDMI_BLOCKS 2
434 434
435 struct radeon_irq { 435 struct radeon_irq {
436 bool installed; 436 bool installed;
437 bool sw_int; 437 bool sw_int;
438 bool crtc_vblank_int[RADEON_MAX_CRTCS]; 438 bool crtc_vblank_int[RADEON_MAX_CRTCS];
439 bool pflip[RADEON_MAX_CRTCS]; 439 bool pflip[RADEON_MAX_CRTCS];
440 wait_queue_head_t vblank_queue; 440 wait_queue_head_t vblank_queue;
441 bool hpd[RADEON_MAX_HPD_PINS]; 441 bool hpd[RADEON_MAX_HPD_PINS];
442 bool gui_idle; 442 bool gui_idle;
443 bool gui_idle_acked; 443 bool gui_idle_acked;
444 wait_queue_head_t idle_queue; 444 wait_queue_head_t idle_queue;
445 bool hdmi[RADEON_MAX_HDMI_BLOCKS]; 445 bool hdmi[RADEON_MAX_HDMI_BLOCKS];
446 spinlock_t sw_lock; 446 spinlock_t sw_lock;
447 int sw_refcount; 447 int sw_refcount;
448 union radeon_irq_stat_regs stat_regs; 448 union radeon_irq_stat_regs stat_regs;
449 spinlock_t pflip_lock[RADEON_MAX_CRTCS]; 449 spinlock_t pflip_lock[RADEON_MAX_CRTCS];
450 int pflip_refcount[RADEON_MAX_CRTCS]; 450 int pflip_refcount[RADEON_MAX_CRTCS];
451 }; 451 };
452 452
453 int radeon_irq_kms_init(struct radeon_device *rdev); 453 int radeon_irq_kms_init(struct radeon_device *rdev);
454 void radeon_irq_kms_fini(struct radeon_device *rdev); 454 void radeon_irq_kms_fini(struct radeon_device *rdev);
455 void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev); 455 void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
456 void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); 456 void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
457 void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); 457 void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
458 void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); 458 void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
459 459
460 /* 460 /*
461 * CP & ring. 461 * CP & ring.
462 */ 462 */
463 struct radeon_ib { 463 struct radeon_ib {
464 struct list_head list; 464 struct list_head list;
465 unsigned idx; 465 unsigned idx;
466 uint64_t gpu_addr; 466 uint64_t gpu_addr;
467 struct radeon_fence *fence; 467 struct radeon_fence *fence;
468 uint32_t *ptr; 468 uint32_t *ptr;
469 uint32_t length_dw; 469 uint32_t length_dw;
470 bool free; 470 bool free;
471 }; 471 };
472 472
473 /* 473 /*
474 * locking - 474 * locking -
475 * mutex protects scheduled_ibs, ready, alloc_bm 475 * mutex protects scheduled_ibs, ready, alloc_bm
476 */ 476 */
477 struct radeon_ib_pool { 477 struct radeon_ib_pool {
478 struct mutex mutex; 478 struct mutex mutex;
479 struct radeon_bo *robj; 479 struct radeon_bo *robj;
480 struct list_head bogus_ib; 480 struct list_head bogus_ib;
481 struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; 481 struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
482 bool ready; 482 bool ready;
483 unsigned head_id; 483 unsigned head_id;
484 }; 484 };
485 485
486 struct radeon_cp { 486 struct radeon_cp {
487 struct radeon_bo *ring_obj; 487 struct radeon_bo *ring_obj;
488 volatile uint32_t *ring; 488 volatile uint32_t *ring;
489 unsigned rptr; 489 unsigned rptr;
490 unsigned wptr; 490 unsigned wptr;
491 unsigned wptr_old; 491 unsigned wptr_old;
492 unsigned ring_size; 492 unsigned ring_size;
493 unsigned ring_free_dw; 493 unsigned ring_free_dw;
494 int count_dw; 494 int count_dw;
495 uint64_t gpu_addr; 495 uint64_t gpu_addr;
496 uint32_t align_mask; 496 uint32_t align_mask;
497 uint32_t ptr_mask; 497 uint32_t ptr_mask;
498 struct mutex mutex; 498 struct mutex mutex;
499 bool ready; 499 bool ready;
500 }; 500 };
501 501
502 /* 502 /*
503 * R6xx+ IH ring 503 * R6xx+ IH ring
504 */ 504 */
505 struct r600_ih { 505 struct r600_ih {
506 struct radeon_bo *ring_obj; 506 struct radeon_bo *ring_obj;
507 volatile uint32_t *ring; 507 volatile uint32_t *ring;
508 unsigned rptr; 508 unsigned rptr;
509 unsigned wptr; 509 unsigned wptr;
510 unsigned wptr_old; 510 unsigned wptr_old;
511 unsigned ring_size; 511 unsigned ring_size;
512 uint64_t gpu_addr; 512 uint64_t gpu_addr;
513 uint32_t ptr_mask; 513 uint32_t ptr_mask;
514 spinlock_t lock; 514 spinlock_t lock;
515 bool enabled; 515 bool enabled;
516 }; 516 };
517 517
518 struct r600_blit_cp_primitives { 518 struct r600_blit_cp_primitives {
519 void (*set_render_target)(struct radeon_device *rdev, int format, 519 void (*set_render_target)(struct radeon_device *rdev, int format,
520 int w, int h, u64 gpu_addr); 520 int w, int h, u64 gpu_addr);
521 void (*cp_set_surface_sync)(struct radeon_device *rdev, 521 void (*cp_set_surface_sync)(struct radeon_device *rdev,
522 u32 sync_type, u32 size, 522 u32 sync_type, u32 size,
523 u64 mc_addr); 523 u64 mc_addr);
524 void (*set_shaders)(struct radeon_device *rdev); 524 void (*set_shaders)(struct radeon_device *rdev);
525 void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr); 525 void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
526 void (*set_tex_resource)(struct radeon_device *rdev, 526 void (*set_tex_resource)(struct radeon_device *rdev,
527 int format, int w, int h, int pitch, 527 int format, int w, int h, int pitch,
528 u64 gpu_addr, u32 size); 528 u64 gpu_addr, u32 size);
529 void (*set_scissors)(struct radeon_device *rdev, int x1, int y1, 529 void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
530 int x2, int y2); 530 int x2, int y2);
531 void (*draw_auto)(struct radeon_device *rdev); 531 void (*draw_auto)(struct radeon_device *rdev);
532 void (*set_default_state)(struct radeon_device *rdev); 532 void (*set_default_state)(struct radeon_device *rdev);
533 }; 533 };
534 534
535 struct r600_blit { 535 struct r600_blit {
536 struct mutex mutex; 536 struct mutex mutex;
537 struct radeon_bo *shader_obj; 537 struct radeon_bo *shader_obj;
538 struct r600_blit_cp_primitives primitives; 538 struct r600_blit_cp_primitives primitives;
539 int max_dim; 539 int max_dim;
540 int ring_size_common; 540 int ring_size_common;
541 int ring_size_per_loop; 541 int ring_size_per_loop;
542 u64 shader_gpu_addr; 542 u64 shader_gpu_addr;
543 u32 vs_offset, ps_offset; 543 u32 vs_offset, ps_offset;
544 u32 state_offset; 544 u32 state_offset;
545 u32 state_len; 545 u32 state_len;
546 u32 vb_used, vb_total; 546 u32 vb_used, vb_total;
547 struct radeon_ib *vb_ib; 547 struct radeon_ib *vb_ib;
548 }; 548 };
549 549
550 void r600_blit_suspend(struct radeon_device *rdev); 550 void r600_blit_suspend(struct radeon_device *rdev);
551 551
552 int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib); 552 int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
553 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib); 553 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
554 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); 554 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
555 int radeon_ib_pool_init(struct radeon_device *rdev); 555 int radeon_ib_pool_init(struct radeon_device *rdev);
556 void radeon_ib_pool_fini(struct radeon_device *rdev); 556 void radeon_ib_pool_fini(struct radeon_device *rdev);
557 int radeon_ib_test(struct radeon_device *rdev); 557 int radeon_ib_test(struct radeon_device *rdev);
558 extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib); 558 extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
559 /* Ring access between begin & end cannot sleep */ 559 /* Ring access between begin & end cannot sleep */
560 void radeon_ring_free_size(struct radeon_device *rdev); 560 void radeon_ring_free_size(struct radeon_device *rdev);
561 int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw); 561 int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
562 int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw); 562 int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
563 void radeon_ring_commit(struct radeon_device *rdev); 563 void radeon_ring_commit(struct radeon_device *rdev);
564 void radeon_ring_unlock_commit(struct radeon_device *rdev); 564 void radeon_ring_unlock_commit(struct radeon_device *rdev);
565 void radeon_ring_unlock_undo(struct radeon_device *rdev); 565 void radeon_ring_unlock_undo(struct radeon_device *rdev);
566 int radeon_ring_test(struct radeon_device *rdev); 566 int radeon_ring_test(struct radeon_device *rdev);
567 int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size); 567 int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
568 void radeon_ring_fini(struct radeon_device *rdev); 568 void radeon_ring_fini(struct radeon_device *rdev);
569 569
570 570
571 /* 571 /*
572 * CS. 572 * CS.
573 */ 573 */
574 struct radeon_cs_reloc { 574 struct radeon_cs_reloc {
575 struct drm_gem_object *gobj; 575 struct drm_gem_object *gobj;
576 struct radeon_bo *robj; 576 struct radeon_bo *robj;
577 struct radeon_bo_list lobj; 577 struct radeon_bo_list lobj;
578 uint32_t handle; 578 uint32_t handle;
579 uint32_t flags; 579 uint32_t flags;
580 }; 580 };
581 581
582 struct radeon_cs_chunk { 582 struct radeon_cs_chunk {
583 uint32_t chunk_id; 583 uint32_t chunk_id;
584 uint32_t length_dw; 584 uint32_t length_dw;
585 int kpage_idx[2]; 585 int kpage_idx[2];
586 uint32_t *kpage[2]; 586 uint32_t *kpage[2];
587 uint32_t *kdata; 587 uint32_t *kdata;
588 void __user *user_ptr; 588 void __user *user_ptr;
589 int last_copied_page; 589 int last_copied_page;
590 int last_page_index; 590 int last_page_index;
591 }; 591 };
592 592
593 struct radeon_cs_parser { 593 struct radeon_cs_parser {
594 struct device *dev; 594 struct device *dev;
595 struct radeon_device *rdev; 595 struct radeon_device *rdev;
596 struct drm_file *filp; 596 struct drm_file *filp;
597 /* chunks */ 597 /* chunks */
598 unsigned nchunks; 598 unsigned nchunks;
599 struct radeon_cs_chunk *chunks; 599 struct radeon_cs_chunk *chunks;
600 uint64_t *chunks_array; 600 uint64_t *chunks_array;
601 /* IB */ 601 /* IB */
602 unsigned idx; 602 unsigned idx;
603 /* relocations */ 603 /* relocations */
604 unsigned nrelocs; 604 unsigned nrelocs;
605 struct radeon_cs_reloc *relocs; 605 struct radeon_cs_reloc *relocs;
606 struct radeon_cs_reloc **relocs_ptr; 606 struct radeon_cs_reloc **relocs_ptr;
607 struct list_head validated; 607 struct list_head validated;
608 /* indices of various chunks */ 608 /* indices of various chunks */
609 int chunk_ib_idx; 609 int chunk_ib_idx;
610 int chunk_relocs_idx; 610 int chunk_relocs_idx;
611 struct radeon_ib *ib; 611 struct radeon_ib *ib;
612 void *track; 612 void *track;
613 unsigned family; 613 unsigned family;
614 int parser_error; 614 int parser_error;
615 }; 615 };
616 616
617 extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); 617 extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
618 extern int radeon_cs_finish_pages(struct radeon_cs_parser *p); 618 extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
619 extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx); 619 extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
620 620
621 struct radeon_cs_packet { 621 struct radeon_cs_packet {
622 unsigned idx; 622 unsigned idx;
623 unsigned type; 623 unsigned type;
624 unsigned reg; 624 unsigned reg;
625 unsigned opcode; 625 unsigned opcode;
626 int count; 626 int count;
627 unsigned one_reg_wr; 627 unsigned one_reg_wr;
628 }; 628 };
629 629
630 typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p, 630 typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
631 struct radeon_cs_packet *pkt, 631 struct radeon_cs_packet *pkt,
632 unsigned idx, unsigned reg); 632 unsigned idx, unsigned reg);
633 typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p, 633 typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
634 struct radeon_cs_packet *pkt); 634 struct radeon_cs_packet *pkt);
635 635
636 636
637 /* 637 /*
638 * AGP 638 * AGP
639 */ 639 */
640 int radeon_agp_init(struct radeon_device *rdev); 640 int radeon_agp_init(struct radeon_device *rdev);
641 void radeon_agp_resume(struct radeon_device *rdev); 641 void radeon_agp_resume(struct radeon_device *rdev);
642 void radeon_agp_suspend(struct radeon_device *rdev); 642 void radeon_agp_suspend(struct radeon_device *rdev);
643 void radeon_agp_fini(struct radeon_device *rdev); 643 void radeon_agp_fini(struct radeon_device *rdev);
644 644
645 645
646 /* 646 /*
647 * Writeback 647 * Writeback
648 */ 648 */
649 struct radeon_wb { 649 struct radeon_wb {
650 struct radeon_bo *wb_obj; 650 struct radeon_bo *wb_obj;
651 volatile uint32_t *wb; 651 volatile uint32_t *wb;
652 uint64_t gpu_addr; 652 uint64_t gpu_addr;
653 bool enabled; 653 bool enabled;
654 bool use_event; 654 bool use_event;
655 }; 655 };
656 656
657 #define RADEON_WB_SCRATCH_OFFSET 0 657 #define RADEON_WB_SCRATCH_OFFSET 0
658 #define RADEON_WB_CP_RPTR_OFFSET 1024 658 #define RADEON_WB_CP_RPTR_OFFSET 1024
659 #define RADEON_WB_CP1_RPTR_OFFSET 1280 659 #define RADEON_WB_CP1_RPTR_OFFSET 1280
660 #define RADEON_WB_CP2_RPTR_OFFSET 1536 660 #define RADEON_WB_CP2_RPTR_OFFSET 1536
661 #define R600_WB_IH_WPTR_OFFSET 2048 661 #define R600_WB_IH_WPTR_OFFSET 2048
662 #define R600_WB_EVENT_OFFSET 3072 662 #define R600_WB_EVENT_OFFSET 3072
663 663
664 /** 664 /**
665 * struct radeon_pm - power management datas 665 * struct radeon_pm - power management datas
666 * @max_bandwidth: maximum bandwidth the gpu has (MByte/s) 666 * @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
667 * @igp_sideport_mclk: sideport memory clock Mhz (rs690,rs740,rs780,rs880) 667 * @igp_sideport_mclk: sideport memory clock Mhz (rs690,rs740,rs780,rs880)
668 * @igp_system_mclk: system clock Mhz (rs690,rs740,rs780,rs880) 668 * @igp_system_mclk: system clock Mhz (rs690,rs740,rs780,rs880)
669 * @igp_ht_link_clk: ht link clock Mhz (rs690,rs740,rs780,rs880) 669 * @igp_ht_link_clk: ht link clock Mhz (rs690,rs740,rs780,rs880)
670 * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880) 670 * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880)
671 * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP) 671 * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP)
672 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP) 672 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
673 * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP) 673 * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
674 * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP) 674 * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
675 * @sclk: GPU clock Mhz (core bandwidth depends of this clock) 675 * @sclk: GPU clock Mhz (core bandwidth depends of this clock)
676 * @needed_bandwidth: current bandwidth needs 676 * @needed_bandwidth: current bandwidth needs
677 * 677 *
678 * It keeps track of various data needed to take powermanagement decision. 678 * It keeps track of various data needed to take powermanagement decision.
679 * Bandwidth need is used to determine minimun clock of the GPU and memory. 679 * Bandwidth need is used to determine minimun clock of the GPU and memory.
680 * Equation between gpu/memory clock and available bandwidth is hw dependent 680 * Equation between gpu/memory clock and available bandwidth is hw dependent
681 * (type of memory, bus size, efficiency, ...) 681 * (type of memory, bus size, efficiency, ...)
682 */ 682 */
683 683
684 enum radeon_pm_method { 684 enum radeon_pm_method {
685 PM_METHOD_PROFILE, 685 PM_METHOD_PROFILE,
686 PM_METHOD_DYNPM, 686 PM_METHOD_DYNPM,
687 }; 687 };
688 688
689 enum radeon_dynpm_state { 689 enum radeon_dynpm_state {
690 DYNPM_STATE_DISABLED, 690 DYNPM_STATE_DISABLED,
691 DYNPM_STATE_MINIMUM, 691 DYNPM_STATE_MINIMUM,
692 DYNPM_STATE_PAUSED, 692 DYNPM_STATE_PAUSED,
693 DYNPM_STATE_ACTIVE, 693 DYNPM_STATE_ACTIVE,
694 DYNPM_STATE_SUSPENDED, 694 DYNPM_STATE_SUSPENDED,
695 }; 695 };
696 enum radeon_dynpm_action { 696 enum radeon_dynpm_action {
697 DYNPM_ACTION_NONE, 697 DYNPM_ACTION_NONE,
698 DYNPM_ACTION_MINIMUM, 698 DYNPM_ACTION_MINIMUM,
699 DYNPM_ACTION_DOWNCLOCK, 699 DYNPM_ACTION_DOWNCLOCK,
700 DYNPM_ACTION_UPCLOCK, 700 DYNPM_ACTION_UPCLOCK,
701 DYNPM_ACTION_DEFAULT 701 DYNPM_ACTION_DEFAULT
702 }; 702 };
703 703
704 enum radeon_voltage_type { 704 enum radeon_voltage_type {
705 VOLTAGE_NONE = 0, 705 VOLTAGE_NONE = 0,
706 VOLTAGE_GPIO, 706 VOLTAGE_GPIO,
707 VOLTAGE_VDDC, 707 VOLTAGE_VDDC,
708 VOLTAGE_SW 708 VOLTAGE_SW
709 }; 709 };
710 710
711 enum radeon_pm_state_type { 711 enum radeon_pm_state_type {
712 POWER_STATE_TYPE_DEFAULT, 712 POWER_STATE_TYPE_DEFAULT,
713 POWER_STATE_TYPE_POWERSAVE, 713 POWER_STATE_TYPE_POWERSAVE,
714 POWER_STATE_TYPE_BATTERY, 714 POWER_STATE_TYPE_BATTERY,
715 POWER_STATE_TYPE_BALANCED, 715 POWER_STATE_TYPE_BALANCED,
716 POWER_STATE_TYPE_PERFORMANCE, 716 POWER_STATE_TYPE_PERFORMANCE,
717 }; 717 };
718 718
719 enum radeon_pm_profile_type { 719 enum radeon_pm_profile_type {
720 PM_PROFILE_DEFAULT, 720 PM_PROFILE_DEFAULT,
721 PM_PROFILE_AUTO, 721 PM_PROFILE_AUTO,
722 PM_PROFILE_LOW, 722 PM_PROFILE_LOW,
723 PM_PROFILE_MID, 723 PM_PROFILE_MID,
724 PM_PROFILE_HIGH, 724 PM_PROFILE_HIGH,
725 }; 725 };
726 726
727 #define PM_PROFILE_DEFAULT_IDX 0 727 #define PM_PROFILE_DEFAULT_IDX 0
728 #define PM_PROFILE_LOW_SH_IDX 1 728 #define PM_PROFILE_LOW_SH_IDX 1
729 #define PM_PROFILE_MID_SH_IDX 2 729 #define PM_PROFILE_MID_SH_IDX 2
730 #define PM_PROFILE_HIGH_SH_IDX 3 730 #define PM_PROFILE_HIGH_SH_IDX 3
731 #define PM_PROFILE_LOW_MH_IDX 4 731 #define PM_PROFILE_LOW_MH_IDX 4
732 #define PM_PROFILE_MID_MH_IDX 5 732 #define PM_PROFILE_MID_MH_IDX 5
733 #define PM_PROFILE_HIGH_MH_IDX 6 733 #define PM_PROFILE_HIGH_MH_IDX 6
734 #define PM_PROFILE_MAX 7 734 #define PM_PROFILE_MAX 7
735 735
736 struct radeon_pm_profile { 736 struct radeon_pm_profile {
737 int dpms_off_ps_idx; 737 int dpms_off_ps_idx;
738 int dpms_on_ps_idx; 738 int dpms_on_ps_idx;
739 int dpms_off_cm_idx; 739 int dpms_off_cm_idx;
740 int dpms_on_cm_idx; 740 int dpms_on_cm_idx;
741 }; 741 };
742 742
743 enum radeon_int_thermal_type { 743 enum radeon_int_thermal_type {
744 THERMAL_TYPE_NONE, 744 THERMAL_TYPE_NONE,
745 THERMAL_TYPE_RV6XX, 745 THERMAL_TYPE_RV6XX,
746 THERMAL_TYPE_RV770, 746 THERMAL_TYPE_RV770,
747 THERMAL_TYPE_EVERGREEN, 747 THERMAL_TYPE_EVERGREEN,
748 THERMAL_TYPE_SUMO, 748 THERMAL_TYPE_SUMO,
749 THERMAL_TYPE_NI, 749 THERMAL_TYPE_NI,
750 }; 750 };
751 751
752 struct radeon_voltage { 752 struct radeon_voltage {
753 enum radeon_voltage_type type; 753 enum radeon_voltage_type type;
754 /* gpio voltage */ 754 /* gpio voltage */
755 struct radeon_gpio_rec gpio; 755 struct radeon_gpio_rec gpio;
756 u32 delay; /* delay in usec from voltage drop to sclk change */ 756 u32 delay; /* delay in usec from voltage drop to sclk change */
757 bool active_high; /* voltage drop is active when bit is high */ 757 bool active_high; /* voltage drop is active when bit is high */
758 /* VDDC voltage */ 758 /* VDDC voltage */
759 u8 vddc_id; /* index into vddc voltage table */ 759 u8 vddc_id; /* index into vddc voltage table */
760 u8 vddci_id; /* index into vddci voltage table */ 760 u8 vddci_id; /* index into vddci voltage table */
761 bool vddci_enabled; 761 bool vddci_enabled;
762 /* r6xx+ sw */ 762 /* r6xx+ sw */
763 u16 voltage; 763 u16 voltage;
764 /* evergreen+ vddci */ 764 /* evergreen+ vddci */
765 u16 vddci; 765 u16 vddci;
766 }; 766 };
767 767
768 /* clock mode flags */ 768 /* clock mode flags */
769 #define RADEON_PM_MODE_NO_DISPLAY (1 << 0) 769 #define RADEON_PM_MODE_NO_DISPLAY (1 << 0)
770 770
771 struct radeon_pm_clock_info { 771 struct radeon_pm_clock_info {
772 /* memory clock */ 772 /* memory clock */
773 u32 mclk; 773 u32 mclk;
774 /* engine clock */ 774 /* engine clock */
775 u32 sclk; 775 u32 sclk;
776 /* voltage info */ 776 /* voltage info */
777 struct radeon_voltage voltage; 777 struct radeon_voltage voltage;
778 /* standardized clock flags */ 778 /* standardized clock flags */
779 u32 flags; 779 u32 flags;
780 }; 780 };
781 781
782 /* state flags */ 782 /* state flags */
783 #define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0) 783 #define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
784 784
785 struct radeon_power_state { 785 struct radeon_power_state {
786 enum radeon_pm_state_type type; 786 enum radeon_pm_state_type type;
787 /* XXX: use a define for num clock modes */ 787 /* XXX: use a define for num clock modes */
788 struct radeon_pm_clock_info clock_info[8]; 788 struct radeon_pm_clock_info clock_info[8];
789 /* number of valid clock modes in this power state */ 789 /* number of valid clock modes in this power state */
790 int num_clock_modes; 790 int num_clock_modes;
791 struct radeon_pm_clock_info *default_clock_mode; 791 struct radeon_pm_clock_info *default_clock_mode;
792 /* standardized state flags */ 792 /* standardized state flags */
793 u32 flags; 793 u32 flags;
794 u32 misc; /* vbios specific flags */ 794 u32 misc; /* vbios specific flags */
795 u32 misc2; /* vbios specific flags */ 795 u32 misc2; /* vbios specific flags */
796 int pcie_lanes; /* pcie lanes */ 796 int pcie_lanes; /* pcie lanes */
797 }; 797 };
798 798
799 /* 799 /*
800 * Some modes are overclocked by very low value, accept them 800 * Some modes are overclocked by very low value, accept them
801 */ 801 */
802 #define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */ 802 #define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
803 803
804 struct radeon_pm { 804 struct radeon_pm {
805 struct mutex mutex; 805 struct mutex mutex;
806 u32 active_crtcs; 806 u32 active_crtcs;
807 int active_crtc_count; 807 int active_crtc_count;
808 int req_vblank; 808 int req_vblank;
809 bool vblank_sync; 809 bool vblank_sync;
810 bool gui_idle; 810 bool gui_idle;
811 fixed20_12 max_bandwidth; 811 fixed20_12 max_bandwidth;
812 fixed20_12 igp_sideport_mclk; 812 fixed20_12 igp_sideport_mclk;
813 fixed20_12 igp_system_mclk; 813 fixed20_12 igp_system_mclk;
814 fixed20_12 igp_ht_link_clk; 814 fixed20_12 igp_ht_link_clk;
815 fixed20_12 igp_ht_link_width; 815 fixed20_12 igp_ht_link_width;
816 fixed20_12 k8_bandwidth; 816 fixed20_12 k8_bandwidth;
817 fixed20_12 sideport_bandwidth; 817 fixed20_12 sideport_bandwidth;
818 fixed20_12 ht_bandwidth; 818 fixed20_12 ht_bandwidth;
819 fixed20_12 core_bandwidth; 819 fixed20_12 core_bandwidth;
820 fixed20_12 sclk; 820 fixed20_12 sclk;
821 fixed20_12 mclk; 821 fixed20_12 mclk;
822 fixed20_12 needed_bandwidth; 822 fixed20_12 needed_bandwidth;
823 struct radeon_power_state *power_state; 823 struct radeon_power_state *power_state;
824 /* number of valid power states */ 824 /* number of valid power states */
825 int num_power_states; 825 int num_power_states;
826 int current_power_state_index; 826 int current_power_state_index;
827 int current_clock_mode_index; 827 int current_clock_mode_index;
828 int requested_power_state_index; 828 int requested_power_state_index;
829 int requested_clock_mode_index; 829 int requested_clock_mode_index;
830 int default_power_state_index; 830 int default_power_state_index;
831 u32 current_sclk; 831 u32 current_sclk;
832 u32 current_mclk; 832 u32 current_mclk;
833 u16 current_vddc; 833 u16 current_vddc;
834 u16 current_vddci; 834 u16 current_vddci;
835 u32 default_sclk; 835 u32 default_sclk;
836 u32 default_mclk; 836 u32 default_mclk;
837 u16 default_vddc; 837 u16 default_vddc;
838 u16 default_vddci; 838 u16 default_vddci;
839 struct radeon_i2c_chan *i2c_bus; 839 struct radeon_i2c_chan *i2c_bus;
840 /* selected pm method */ 840 /* selected pm method */
841 enum radeon_pm_method pm_method; 841 enum radeon_pm_method pm_method;
842 /* dynpm power management */ 842 /* dynpm power management */
843 struct delayed_work dynpm_idle_work; 843 struct delayed_work dynpm_idle_work;
844 enum radeon_dynpm_state dynpm_state; 844 enum radeon_dynpm_state dynpm_state;
845 enum radeon_dynpm_action dynpm_planned_action; 845 enum radeon_dynpm_action dynpm_planned_action;
846 unsigned long dynpm_action_timeout; 846 unsigned long dynpm_action_timeout;
847 bool dynpm_can_upclock; 847 bool dynpm_can_upclock;
848 bool dynpm_can_downclock; 848 bool dynpm_can_downclock;
849 /* profile-based power management */ 849 /* profile-based power management */
850 enum radeon_pm_profile_type profile; 850 enum radeon_pm_profile_type profile;
851 int profile_index; 851 int profile_index;
852 struct radeon_pm_profile profiles[PM_PROFILE_MAX]; 852 struct radeon_pm_profile profiles[PM_PROFILE_MAX];
853 /* internal thermal controller on rv6xx+ */ 853 /* internal thermal controller on rv6xx+ */
854 enum radeon_int_thermal_type int_thermal_type; 854 enum radeon_int_thermal_type int_thermal_type;
855 struct device *int_hwmon_dev; 855 struct device *int_hwmon_dev;
856 }; 856 };
857 857
858 858
859 /* 859 /*
860 * Benchmarking 860 * Benchmarking
861 */ 861 */
862 void radeon_benchmark(struct radeon_device *rdev, int test_number); 862 void radeon_benchmark(struct radeon_device *rdev, int test_number);
863 863
864 864
865 /* 865 /*
866 * Testing 866 * Testing
867 */ 867 */
868 void radeon_test_moves(struct radeon_device *rdev); 868 void radeon_test_moves(struct radeon_device *rdev);
869 869
870 870
871 /* 871 /*
872 * Debugfs 872 * Debugfs
873 */ 873 */
874 int radeon_debugfs_add_files(struct radeon_device *rdev, 874 int radeon_debugfs_add_files(struct radeon_device *rdev,
875 struct drm_info_list *files, 875 struct drm_info_list *files,
876 unsigned nfiles); 876 unsigned nfiles);
877 int radeon_debugfs_fence_init(struct radeon_device *rdev); 877 int radeon_debugfs_fence_init(struct radeon_device *rdev);
878 878
879 879
880 /* 880 /*
881 * ASIC specific functions. 881 * ASIC specific functions.
882 */ 882 */
883 struct radeon_asic { 883 struct radeon_asic {
884 int (*init)(struct radeon_device *rdev); 884 int (*init)(struct radeon_device *rdev);
885 void (*fini)(struct radeon_device *rdev); 885 void (*fini)(struct radeon_device *rdev);
886 int (*resume)(struct radeon_device *rdev); 886 int (*resume)(struct radeon_device *rdev);
887 int (*suspend)(struct radeon_device *rdev); 887 int (*suspend)(struct radeon_device *rdev);
888 void (*vga_set_state)(struct radeon_device *rdev, bool state); 888 void (*vga_set_state)(struct radeon_device *rdev, bool state);
889 bool (*gpu_is_lockup)(struct radeon_device *rdev); 889 bool (*gpu_is_lockup)(struct radeon_device *rdev);
890 int (*asic_reset)(struct radeon_device *rdev); 890 int (*asic_reset)(struct radeon_device *rdev);
891 void (*gart_tlb_flush)(struct radeon_device *rdev); 891 void (*gart_tlb_flush)(struct radeon_device *rdev);
892 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); 892 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
893 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); 893 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
894 void (*cp_fini)(struct radeon_device *rdev); 894 void (*cp_fini)(struct radeon_device *rdev);
895 void (*cp_disable)(struct radeon_device *rdev); 895 void (*cp_disable)(struct radeon_device *rdev);
896 void (*cp_commit)(struct radeon_device *rdev); 896 void (*cp_commit)(struct radeon_device *rdev);
897 void (*ring_start)(struct radeon_device *rdev); 897 void (*ring_start)(struct radeon_device *rdev);
898 int (*ring_test)(struct radeon_device *rdev); 898 int (*ring_test)(struct radeon_device *rdev);
899 void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); 899 void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
900 int (*irq_set)(struct radeon_device *rdev); 900 int (*irq_set)(struct radeon_device *rdev);
901 int (*irq_process)(struct radeon_device *rdev); 901 int (*irq_process)(struct radeon_device *rdev);
902 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); 902 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
903 void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence); 903 void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
904 int (*cs_parse)(struct radeon_cs_parser *p); 904 int (*cs_parse)(struct radeon_cs_parser *p);
905 int (*copy_blit)(struct radeon_device *rdev, 905 int (*copy_blit)(struct radeon_device *rdev,
906 uint64_t src_offset, 906 uint64_t src_offset,
907 uint64_t dst_offset, 907 uint64_t dst_offset,
908 unsigned num_gpu_pages, 908 unsigned num_gpu_pages,
909 struct radeon_fence *fence); 909 struct radeon_fence *fence);
910 int (*copy_dma)(struct radeon_device *rdev, 910 int (*copy_dma)(struct radeon_device *rdev,
911 uint64_t src_offset, 911 uint64_t src_offset,
912 uint64_t dst_offset, 912 uint64_t dst_offset,
913 unsigned num_gpu_pages, 913 unsigned num_gpu_pages,
914 struct radeon_fence *fence); 914 struct radeon_fence *fence);
915 int (*copy)(struct radeon_device *rdev, 915 int (*copy)(struct radeon_device *rdev,
916 uint64_t src_offset, 916 uint64_t src_offset,
917 uint64_t dst_offset, 917 uint64_t dst_offset,
918 unsigned num_gpu_pages, 918 unsigned num_gpu_pages,
919 struct radeon_fence *fence); 919 struct radeon_fence *fence);
920 uint32_t (*get_engine_clock)(struct radeon_device *rdev); 920 uint32_t (*get_engine_clock)(struct radeon_device *rdev);
921 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); 921 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
922 uint32_t (*get_memory_clock)(struct radeon_device *rdev); 922 uint32_t (*get_memory_clock)(struct radeon_device *rdev);
923 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); 923 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
924 int (*get_pcie_lanes)(struct radeon_device *rdev); 924 int (*get_pcie_lanes)(struct radeon_device *rdev);
925 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); 925 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
926 void (*set_clock_gating)(struct radeon_device *rdev, int enable); 926 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
927 int (*set_surface_reg)(struct radeon_device *rdev, int reg, 927 int (*set_surface_reg)(struct radeon_device *rdev, int reg,
928 uint32_t tiling_flags, uint32_t pitch, 928 uint32_t tiling_flags, uint32_t pitch,
929 uint32_t offset, uint32_t obj_size); 929 uint32_t offset, uint32_t obj_size);
930 void (*clear_surface_reg)(struct radeon_device *rdev, int reg); 930 void (*clear_surface_reg)(struct radeon_device *rdev, int reg);
931 void (*bandwidth_update)(struct radeon_device *rdev); 931 void (*bandwidth_update)(struct radeon_device *rdev);
932 void (*hpd_init)(struct radeon_device *rdev); 932 void (*hpd_init)(struct radeon_device *rdev);
933 void (*hpd_fini)(struct radeon_device *rdev); 933 void (*hpd_fini)(struct radeon_device *rdev);
934 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 934 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
935 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 935 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
936 /* ioctl hw specific callback. Some hw might want to perform a special 936 /* ioctl hw specific callback. Some hw might want to perform a special
937 * operation on a specific ioctl. For instance, on wait idle some hw 937 * operation on a specific ioctl. For instance, on wait idle some hw
938 * might want to perform an HDP flush through MMIO, as it seems that 938 * might want to perform an HDP flush through MMIO, as it seems that
939 * some R6XX/R7XX hw doesn't take HDP flush into account if programmed 939 * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
940 * through ring. 940 * through ring.
941 */ 941 */
942 void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); 942 void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
943 bool (*gui_idle)(struct radeon_device *rdev); 943 bool (*gui_idle)(struct radeon_device *rdev);
944 /* power management */ 944 /* power management */
945 void (*pm_misc)(struct radeon_device *rdev); 945 void (*pm_misc)(struct radeon_device *rdev);
946 void (*pm_prepare)(struct radeon_device *rdev); 946 void (*pm_prepare)(struct radeon_device *rdev);
947 void (*pm_finish)(struct radeon_device *rdev); 947 void (*pm_finish)(struct radeon_device *rdev);
948 void (*pm_init_profile)(struct radeon_device *rdev); 948 void (*pm_init_profile)(struct radeon_device *rdev);
949 void (*pm_get_dynpm_state)(struct radeon_device *rdev); 949 void (*pm_get_dynpm_state)(struct radeon_device *rdev);
950 /* pageflipping */ 950 /* pageflipping */
951 void (*pre_page_flip)(struct radeon_device *rdev, int crtc); 951 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
952 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); 952 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
953 void (*post_page_flip)(struct radeon_device *rdev, int crtc); 953 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
954 }; 954 };
955 955
956 /* 956 /*
957 * Asic structures 957 * Asic structures
958 */ 958 */
959 struct r100_gpu_lockup { 959 struct r100_gpu_lockup {
960 unsigned long last_jiffies; 960 unsigned long last_jiffies;
961 u32 last_cp_rptr; 961 u32 last_cp_rptr;
962 }; 962 };
963 963
964 struct r100_asic { 964 struct r100_asic {
965 const unsigned *reg_safe_bm; 965 const unsigned *reg_safe_bm;
966 unsigned reg_safe_bm_size; 966 unsigned reg_safe_bm_size;
967 u32 hdp_cntl; 967 u32 hdp_cntl;
968 struct r100_gpu_lockup lockup; 968 struct r100_gpu_lockup lockup;
969 }; 969 };
970 970
971 struct r300_asic { 971 struct r300_asic {
972 const unsigned *reg_safe_bm; 972 const unsigned *reg_safe_bm;
973 unsigned reg_safe_bm_size; 973 unsigned reg_safe_bm_size;
974 u32 resync_scratch; 974 u32 resync_scratch;
975 u32 hdp_cntl; 975 u32 hdp_cntl;
976 struct r100_gpu_lockup lockup; 976 struct r100_gpu_lockup lockup;
977 }; 977 };
978 978
979 struct r600_asic { 979 struct r600_asic {
980 unsigned max_pipes; 980 unsigned max_pipes;
981 unsigned max_tile_pipes; 981 unsigned max_tile_pipes;
982 unsigned max_simds; 982 unsigned max_simds;
983 unsigned max_backends; 983 unsigned max_backends;
984 unsigned max_gprs; 984 unsigned max_gprs;
985 unsigned max_threads; 985 unsigned max_threads;
986 unsigned max_stack_entries; 986 unsigned max_stack_entries;
987 unsigned max_hw_contexts; 987 unsigned max_hw_contexts;
988 unsigned max_gs_threads; 988 unsigned max_gs_threads;
989 unsigned sx_max_export_size; 989 unsigned sx_max_export_size;
990 unsigned sx_max_export_pos_size; 990 unsigned sx_max_export_pos_size;
991 unsigned sx_max_export_smx_size; 991 unsigned sx_max_export_smx_size;
992 unsigned sq_num_cf_insts; 992 unsigned sq_num_cf_insts;
993 unsigned tiling_nbanks; 993 unsigned tiling_nbanks;
994 unsigned tiling_npipes; 994 unsigned tiling_npipes;
995 unsigned tiling_group_size; 995 unsigned tiling_group_size;
996 unsigned tile_config; 996 unsigned tile_config;
997 unsigned backend_map; 997 unsigned backend_map;
998 struct r100_gpu_lockup lockup; 998 struct r100_gpu_lockup lockup;
999 }; 999 };
1000 1000
1001 struct rv770_asic { 1001 struct rv770_asic {
1002 unsigned max_pipes; 1002 unsigned max_pipes;
1003 unsigned max_tile_pipes; 1003 unsigned max_tile_pipes;
1004 unsigned max_simds; 1004 unsigned max_simds;
1005 unsigned max_backends; 1005 unsigned max_backends;
1006 unsigned max_gprs; 1006 unsigned max_gprs;
1007 unsigned max_threads; 1007 unsigned max_threads;
1008 unsigned max_stack_entries; 1008 unsigned max_stack_entries;
1009 unsigned max_hw_contexts; 1009 unsigned max_hw_contexts;
1010 unsigned max_gs_threads; 1010 unsigned max_gs_threads;
1011 unsigned sx_max_export_size; 1011 unsigned sx_max_export_size;
1012 unsigned sx_max_export_pos_size; 1012 unsigned sx_max_export_pos_size;
1013 unsigned sx_max_export_smx_size; 1013 unsigned sx_max_export_smx_size;
1014 unsigned sq_num_cf_insts; 1014 unsigned sq_num_cf_insts;
1015 unsigned sx_num_of_sets; 1015 unsigned sx_num_of_sets;
1016 unsigned sc_prim_fifo_size; 1016 unsigned sc_prim_fifo_size;
1017 unsigned sc_hiz_tile_fifo_size; 1017 unsigned sc_hiz_tile_fifo_size;
1018 unsigned sc_earlyz_tile_fifo_size; 1018 unsigned sc_earlyz_tile_fifo_size;
1019 unsigned tiling_nbanks; 1019 unsigned tiling_nbanks;
1020 unsigned tiling_npipes; 1020 unsigned tiling_npipes;
1021 unsigned tiling_group_size; 1021 unsigned tiling_group_size;
1022 unsigned tile_config; 1022 unsigned tile_config;
1023 unsigned backend_map; 1023 unsigned backend_map;
1024 struct r100_gpu_lockup lockup; 1024 struct r100_gpu_lockup lockup;
1025 }; 1025 };
1026 1026
1027 struct evergreen_asic { 1027 struct evergreen_asic {
1028 unsigned num_ses; 1028 unsigned num_ses;
1029 unsigned max_pipes; 1029 unsigned max_pipes;
1030 unsigned max_tile_pipes; 1030 unsigned max_tile_pipes;
1031 unsigned max_simds; 1031 unsigned max_simds;
1032 unsigned max_backends; 1032 unsigned max_backends;
1033 unsigned max_gprs; 1033 unsigned max_gprs;
1034 unsigned max_threads; 1034 unsigned max_threads;
1035 unsigned max_stack_entries; 1035 unsigned max_stack_entries;
1036 unsigned max_hw_contexts; 1036 unsigned max_hw_contexts;
1037 unsigned max_gs_threads; 1037 unsigned max_gs_threads;
1038 unsigned sx_max_export_size; 1038 unsigned sx_max_export_size;
1039 unsigned sx_max_export_pos_size; 1039 unsigned sx_max_export_pos_size;
1040 unsigned sx_max_export_smx_size; 1040 unsigned sx_max_export_smx_size;
1041 unsigned sq_num_cf_insts; 1041 unsigned sq_num_cf_insts;
1042 unsigned sx_num_of_sets; 1042 unsigned sx_num_of_sets;
1043 unsigned sc_prim_fifo_size; 1043 unsigned sc_prim_fifo_size;
1044 unsigned sc_hiz_tile_fifo_size; 1044 unsigned sc_hiz_tile_fifo_size;
1045 unsigned sc_earlyz_tile_fifo_size; 1045 unsigned sc_earlyz_tile_fifo_size;
1046 unsigned tiling_nbanks; 1046 unsigned tiling_nbanks;
1047 unsigned tiling_npipes; 1047 unsigned tiling_npipes;
1048 unsigned tiling_group_size; 1048 unsigned tiling_group_size;
1049 unsigned tile_config; 1049 unsigned tile_config;
1050 unsigned backend_map; 1050 unsigned backend_map;
1051 struct r100_gpu_lockup lockup; 1051 struct r100_gpu_lockup lockup;
1052 }; 1052 };
1053 1053
1054 struct cayman_asic { 1054 struct cayman_asic {
1055 unsigned max_shader_engines; 1055 unsigned max_shader_engines;
1056 unsigned max_pipes_per_simd; 1056 unsigned max_pipes_per_simd;
1057 unsigned max_tile_pipes; 1057 unsigned max_tile_pipes;
1058 unsigned max_simds_per_se; 1058 unsigned max_simds_per_se;
1059 unsigned max_backends_per_se; 1059 unsigned max_backends_per_se;
1060 unsigned max_texture_channel_caches; 1060 unsigned max_texture_channel_caches;
1061 unsigned max_gprs; 1061 unsigned max_gprs;
1062 unsigned max_threads; 1062 unsigned max_threads;
1063 unsigned max_gs_threads; 1063 unsigned max_gs_threads;
1064 unsigned max_stack_entries; 1064 unsigned max_stack_entries;
1065 unsigned sx_num_of_sets; 1065 unsigned sx_num_of_sets;
1066 unsigned sx_max_export_size; 1066 unsigned sx_max_export_size;
1067 unsigned sx_max_export_pos_size; 1067 unsigned sx_max_export_pos_size;
1068 unsigned sx_max_export_smx_size; 1068 unsigned sx_max_export_smx_size;
1069 unsigned max_hw_contexts; 1069 unsigned max_hw_contexts;
1070 unsigned sq_num_cf_insts; 1070 unsigned sq_num_cf_insts;
1071 unsigned sc_prim_fifo_size; 1071 unsigned sc_prim_fifo_size;
1072 unsigned sc_hiz_tile_fifo_size; 1072 unsigned sc_hiz_tile_fifo_size;
1073 unsigned sc_earlyz_tile_fifo_size; 1073 unsigned sc_earlyz_tile_fifo_size;
1074 1074
1075 unsigned num_shader_engines; 1075 unsigned num_shader_engines;
1076 unsigned num_shader_pipes_per_simd; 1076 unsigned num_shader_pipes_per_simd;
1077 unsigned num_tile_pipes; 1077 unsigned num_tile_pipes;
1078 unsigned num_simds_per_se; 1078 unsigned num_simds_per_se;
1079 unsigned num_backends_per_se; 1079 unsigned num_backends_per_se;
1080 unsigned backend_disable_mask_per_asic; 1080 unsigned backend_disable_mask_per_asic;
1081 unsigned backend_map; 1081 unsigned backend_map;
1082 unsigned num_texture_channel_caches; 1082 unsigned num_texture_channel_caches;
1083 unsigned mem_max_burst_length_bytes; 1083 unsigned mem_max_burst_length_bytes;
1084 unsigned mem_row_size_in_kb; 1084 unsigned mem_row_size_in_kb;
1085 unsigned shader_engine_tile_size; 1085 unsigned shader_engine_tile_size;
1086 unsigned num_gpus; 1086 unsigned num_gpus;
1087 unsigned multi_gpu_tile_size; 1087 unsigned multi_gpu_tile_size;
1088 1088
1089 unsigned tile_config; 1089 unsigned tile_config;
1090 struct r100_gpu_lockup lockup; 1090 struct r100_gpu_lockup lockup;
1091 }; 1091 };
1092 1092
1093 union radeon_asic_config { 1093 union radeon_asic_config {
1094 struct r300_asic r300; 1094 struct r300_asic r300;
1095 struct r100_asic r100; 1095 struct r100_asic r100;
1096 struct r600_asic r600; 1096 struct r600_asic r600;
1097 struct rv770_asic rv770; 1097 struct rv770_asic rv770;
1098 struct evergreen_asic evergreen; 1098 struct evergreen_asic evergreen;
1099 struct cayman_asic cayman; 1099 struct cayman_asic cayman;
1100 }; 1100 };
1101 1101
1102 /* 1102 /*
1103 * asic initialization from radeon_asic.c 1103 * asic initialization from radeon_asic.c
1104 */ 1104 */
1105 void radeon_agp_disable(struct radeon_device *rdev); 1105 void radeon_agp_disable(struct radeon_device *rdev);
1106 int radeon_asic_init(struct radeon_device *rdev); 1106 int radeon_asic_init(struct radeon_device *rdev);
1107 1107
1108 1108
1109 /* 1109 /*
1110 * IOCTL. 1110 * IOCTL.
1111 */ 1111 */
1112 int radeon_gem_info_ioctl(struct drm_device *dev, void *data, 1112 int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
1113 struct drm_file *filp); 1113 struct drm_file *filp);
1114 int radeon_gem_create_ioctl(struct drm_device *dev, void *data, 1114 int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
1115 struct drm_file *filp); 1115 struct drm_file *filp);
1116 int radeon_gem_pin_ioctl(struct drm_device *dev, void *data, 1116 int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
1117 struct drm_file *file_priv); 1117 struct drm_file *file_priv);
1118 int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data, 1118 int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
1119 struct drm_file *file_priv); 1119 struct drm_file *file_priv);
1120 int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1120 int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1121 struct drm_file *file_priv); 1121 struct drm_file *file_priv);
1122 int radeon_gem_pread_ioctl(struct drm_device *dev, void *data, 1122 int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
1123 struct drm_file *file_priv); 1123 struct drm_file *file_priv);
1124 int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1124 int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1125 struct drm_file *filp); 1125 struct drm_file *filp);
1126 int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, 1126 int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
1127 struct drm_file *filp); 1127 struct drm_file *filp);
1128 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, 1128 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
1129 struct drm_file *filp); 1129 struct drm_file *filp);
1130 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, 1130 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
1131 struct drm_file *filp); 1131 struct drm_file *filp);
1132 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 1132 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1133 int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, 1133 int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
1134 struct drm_file *filp); 1134 struct drm_file *filp);
1135 int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, 1135 int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
1136 struct drm_file *filp); 1136 struct drm_file *filp);
1137 1137
1138 /* VRAM scratch page for HDP bug, default vram page */ 1138 /* VRAM scratch page for HDP bug, default vram page */
1139 struct r600_vram_scratch { 1139 struct r600_vram_scratch {
1140 struct radeon_bo *robj; 1140 struct radeon_bo *robj;
1141 volatile uint32_t *ptr; 1141 volatile uint32_t *ptr;
1142 u64 gpu_addr; 1142 u64 gpu_addr;
1143 }; 1143 };
1144 1144
1145
1145 /* 1146 /*
1147 * Mutex which allows recursive locking from the same process.
1148 */
1149 struct radeon_mutex {
1150 struct mutex mutex;
1151 struct task_struct *owner;
1152 int level;
1153 };
1154
1155 static inline void radeon_mutex_init(struct radeon_mutex *mutex)
1156 {
1157 mutex_init(&mutex->mutex);
1158 mutex->owner = NULL;
1159 mutex->level = 0;
1160 }
1161
1162 static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
1163 {
1164 if (mutex_trylock(&mutex->mutex)) {
1165 /* The mutex was unlocked before, so it's ours now */
1166 mutex->owner = current;
1167 } else if (mutex->owner != current) {
1168 /* Another process holds the mutex, block until it is ours */
1169 mutex_lock(&mutex->mutex);
1170 mutex->owner = current;
1171 }
1172 /* Otherwise the mutex was already locked by this process */
1173
1174 mutex->level++;
1175 }
1176
1177 static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
1178 {
1179 if (--mutex->level > 0)
1180 return;
1181
1182 mutex->owner = NULL;
1183 mutex_unlock(&mutex->mutex);
1184 }
1185
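A minimal sketch of how the new helpers nest when the same process takes the lock twice, e.g. when a GPU reset is triggered from inside the CS ioctl while cs_mutex is already held (illustrative only; rdev is assumed to point at a valid struct radeon_device):

    radeon_mutex_lock(&rdev->cs_mutex);   /* trylock succeeds: owner = current, level = 1 */
    radeon_mutex_lock(&rdev->cs_mutex);   /* trylock fails, but owner == current: level = 2, no deadlock */
    radeon_mutex_unlock(&rdev->cs_mutex); /* level drops to 1, mutex still held */
    radeon_mutex_unlock(&rdev->cs_mutex); /* level reaches 0: owner cleared, mutex released */

Any other process calling radeon_mutex_lock() meanwhile blocks in mutex_lock() until the final unlock above.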
1186
1187 /*
1146 * Core structure, functions and helpers. 1188 * Core structure, functions and helpers.
1147 */ 1189 */
1148 typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t); 1190 typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
1149 typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t); 1191 typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
1150 1192
1151 struct radeon_device { 1193 struct radeon_device {
1152 struct device *dev; 1194 struct device *dev;
1153 struct drm_device *ddev; 1195 struct drm_device *ddev;
1154 struct pci_dev *pdev; 1196 struct pci_dev *pdev;
1155 /* ASIC */ 1197 /* ASIC */
1156 union radeon_asic_config config; 1198 union radeon_asic_config config;
1157 enum radeon_family family; 1199 enum radeon_family family;
1158 unsigned long flags; 1200 unsigned long flags;
1159 int usec_timeout; 1201 int usec_timeout;
1160 enum radeon_pll_errata pll_errata; 1202 enum radeon_pll_errata pll_errata;
1161 int num_gb_pipes; 1203 int num_gb_pipes;
1162 int num_z_pipes; 1204 int num_z_pipes;
1163 int disp_priority; 1205 int disp_priority;
1164 /* BIOS */ 1206 /* BIOS */
1165 uint8_t *bios; 1207 uint8_t *bios;
1166 bool is_atom_bios; 1208 bool is_atom_bios;
1167 uint16_t bios_header_start; 1209 uint16_t bios_header_start;
1168 struct radeon_bo *stollen_vga_memory; 1210 struct radeon_bo *stollen_vga_memory;
1169 /* Register mmio */ 1211 /* Register mmio */
1170 resource_size_t rmmio_base; 1212 resource_size_t rmmio_base;
1171 resource_size_t rmmio_size; 1213 resource_size_t rmmio_size;
1172 void __iomem *rmmio; 1214 void __iomem *rmmio;
1173 radeon_rreg_t mc_rreg; 1215 radeon_rreg_t mc_rreg;
1174 radeon_wreg_t mc_wreg; 1216 radeon_wreg_t mc_wreg;
1175 radeon_rreg_t pll_rreg; 1217 radeon_rreg_t pll_rreg;
1176 radeon_wreg_t pll_wreg; 1218 radeon_wreg_t pll_wreg;
1177 uint32_t pcie_reg_mask; 1219 uint32_t pcie_reg_mask;
1178 radeon_rreg_t pciep_rreg; 1220 radeon_rreg_t pciep_rreg;
1179 radeon_wreg_t pciep_wreg; 1221 radeon_wreg_t pciep_wreg;
1180 /* io port */ 1222 /* io port */
1181 void __iomem *rio_mem; 1223 void __iomem *rio_mem;
1182 resource_size_t rio_mem_size; 1224 resource_size_t rio_mem_size;
1183 struct radeon_clock clock; 1225 struct radeon_clock clock;
1184 struct radeon_mc mc; 1226 struct radeon_mc mc;
1185 struct radeon_gart gart; 1227 struct radeon_gart gart;
1186 struct radeon_mode_info mode_info; 1228 struct radeon_mode_info mode_info;
1187 struct radeon_scratch scratch; 1229 struct radeon_scratch scratch;
1188 struct radeon_mman mman; 1230 struct radeon_mman mman;
1189 struct radeon_fence_driver fence_drv; 1231 struct radeon_fence_driver fence_drv;
1190 struct radeon_cp cp; 1232 struct radeon_cp cp;
1191 /* cayman compute rings */ 1233 /* cayman compute rings */
1192 struct radeon_cp cp1; 1234 struct radeon_cp cp1;
1193 struct radeon_cp cp2; 1235 struct radeon_cp cp2;
1194 struct radeon_ib_pool ib_pool; 1236 struct radeon_ib_pool ib_pool;
1195 struct radeon_irq irq; 1237 struct radeon_irq irq;
1196 struct radeon_asic *asic; 1238 struct radeon_asic *asic;
1197 struct radeon_gem gem; 1239 struct radeon_gem gem;
1198 struct radeon_pm pm; 1240 struct radeon_pm pm;
1199 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; 1241 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
1200 struct mutex cs_mutex; 1242 struct radeon_mutex cs_mutex;
1201 struct radeon_wb wb; 1243 struct radeon_wb wb;
1202 struct radeon_dummy_page dummy_page; 1244 struct radeon_dummy_page dummy_page;
1203 bool gpu_lockup; 1245 bool gpu_lockup;
1204 bool shutdown; 1246 bool shutdown;
1205 bool suspend; 1247 bool suspend;
1206 bool need_dma32; 1248 bool need_dma32;
1207 bool accel_working; 1249 bool accel_working;
1208 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; 1250 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
1209 const struct firmware *me_fw; /* all family ME firmware */ 1251 const struct firmware *me_fw; /* all family ME firmware */
1210 const struct firmware *pfp_fw; /* r6/700 PFP firmware */ 1252 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
1211 const struct firmware *rlc_fw; /* r6/700 RLC firmware */ 1253 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
1212 const struct firmware *mc_fw; /* NI MC firmware */ 1254 const struct firmware *mc_fw; /* NI MC firmware */
1213 struct r600_blit r600_blit; 1255 struct r600_blit r600_blit;
1214 struct r600_vram_scratch vram_scratch; 1256 struct r600_vram_scratch vram_scratch;
1215 int msi_enabled; /* msi enabled */ 1257 int msi_enabled; /* msi enabled */
1216 struct r600_ih ih; /* r6/700 interrupt ring */ 1258 struct r600_ih ih; /* r6/700 interrupt ring */
1217 struct work_struct hotplug_work; 1259 struct work_struct hotplug_work;
1218 int num_crtc; /* number of crtcs */ 1260 int num_crtc; /* number of crtcs */
1219 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ 1261 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
1220 struct mutex vram_mutex; 1262 struct mutex vram_mutex;
1221 1263
1222 /* audio stuff */ 1264 /* audio stuff */
1223 bool audio_enabled; 1265 bool audio_enabled;
1224 struct timer_list audio_timer; 1266 struct timer_list audio_timer;
1225 int audio_channels; 1267 int audio_channels;
1226 int audio_rate; 1268 int audio_rate;
1227 int audio_bits_per_sample; 1269 int audio_bits_per_sample;
1228 uint8_t audio_status_bits; 1270 uint8_t audio_status_bits;
1229 uint8_t audio_category_code; 1271 uint8_t audio_category_code;
1230 1272
1231 struct notifier_block acpi_nb; 1273 struct notifier_block acpi_nb;
1232 /* only one userspace can use Hyperz features or CMASK at a time */ 1274 /* only one userspace can use Hyperz features or CMASK at a time */
1233 struct drm_file *hyperz_filp; 1275 struct drm_file *hyperz_filp;
1234 struct drm_file *cmask_filp; 1276 struct drm_file *cmask_filp;
1235 /* i2c buses */ 1277 /* i2c buses */
1236 struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS]; 1278 struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
1237 }; 1279 };
1238 1280
1239 int radeon_device_init(struct radeon_device *rdev, 1281 int radeon_device_init(struct radeon_device *rdev,
1240 struct drm_device *ddev, 1282 struct drm_device *ddev,
1241 struct pci_dev *pdev, 1283 struct pci_dev *pdev,
1242 uint32_t flags); 1284 uint32_t flags);
1243 void radeon_device_fini(struct radeon_device *rdev); 1285 void radeon_device_fini(struct radeon_device *rdev);
1244 int radeon_gpu_wait_for_idle(struct radeon_device *rdev); 1286 int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
1245 1287
1246 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); 1288 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
1247 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 1289 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
1248 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); 1290 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
1249 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); 1291 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1250 1292
1251 /* 1293 /*
1252 * Cast helper 1294 * Cast helper
1253 */ 1295 */
1254 #define to_radeon_fence(p) ((struct radeon_fence *)(p)) 1296 #define to_radeon_fence(p) ((struct radeon_fence *)(p))
1255 1297
1256 /* 1298 /*
1257 * Registers read & write functions. 1299 * Registers read & write functions.
1258 */ 1300 */
1259 #define RREG8(reg) readb((rdev->rmmio) + (reg)) 1301 #define RREG8(reg) readb((rdev->rmmio) + (reg))
1260 #define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg)) 1302 #define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
1261 #define RREG16(reg) readw((rdev->rmmio) + (reg)) 1303 #define RREG16(reg) readw((rdev->rmmio) + (reg))
1262 #define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg)) 1304 #define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
1263 #define RREG32(reg) r100_mm_rreg(rdev, (reg)) 1305 #define RREG32(reg) r100_mm_rreg(rdev, (reg))
1264 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) 1306 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
1265 #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) 1307 #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
1266 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 1308 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1267 #define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT) 1309 #define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
1268 #define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg)) 1310 #define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
1269 #define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v)) 1311 #define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
1270 #define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg)) 1312 #define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
1271 #define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v)) 1313 #define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
1272 #define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg)) 1314 #define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
1273 #define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v)) 1315 #define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
1274 #define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg)) 1316 #define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
1275 #define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v)) 1317 #define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
1276 #define WREG32_P(reg, val, mask) \ 1318 #define WREG32_P(reg, val, mask) \
1277 do { \ 1319 do { \
1278 uint32_t tmp_ = RREG32(reg); \ 1320 uint32_t tmp_ = RREG32(reg); \
1279 tmp_ &= (mask); \ 1321 tmp_ &= (mask); \
1280 tmp_ |= ((val) & ~(mask)); \ 1322 tmp_ |= ((val) & ~(mask)); \
1281 WREG32(reg, tmp_); \ 1323 WREG32(reg, tmp_); \
1282 } while (0) 1324 } while (0)
1283 #define WREG32_PLL_P(reg, val, mask) \ 1325 #define WREG32_PLL_P(reg, val, mask) \
1284 do { \ 1326 do { \
1285 uint32_t tmp_ = RREG32_PLL(reg); \ 1327 uint32_t tmp_ = RREG32_PLL(reg); \
1286 tmp_ &= (mask); \ 1328 tmp_ &= (mask); \
1287 tmp_ |= ((val) & ~(mask)); \ 1329 tmp_ |= ((val) & ~(mask)); \
1288 WREG32_PLL(reg, tmp_); \ 1330 WREG32_PLL(reg, tmp_); \
1289 } while (0) 1331 } while (0)
1290 #define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg))) 1332 #define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
1291 #define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) 1333 #define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
1292 #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) 1334 #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
1293 1335
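Note that in WREG32_P and WREG32_PLL_P the mask argument selects the bits to preserve from the current register value; only the bits cleared in the mask come from val. A hedged example with a hypothetical register name (not from this file):

    /* replace only the low byte of HYPOTHETICAL_REG, keeping all other bits */
    WREG32_P(HYPOTHETICAL_REG, 0x42, ~0xff);
    /* expands to: tmp = (RREG32(HYPOTHETICAL_REG) & ~0xff) | (0x42 & 0xff) */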
1294 /* 1336 /*
1295 * Indirect register accessors 1337 * Indirect register accessors
1296 */ 1338 */
1297 static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) 1339 static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
1298 { 1340 {
1299 uint32_t r; 1341 uint32_t r;
1300 1342
1301 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 1343 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
1302 r = RREG32(RADEON_PCIE_DATA); 1344 r = RREG32(RADEON_PCIE_DATA);
1303 return r; 1345 return r;
1304 } 1346 }
1305 1347
1306 static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 1348 static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1307 { 1349 {
1308 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 1350 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
1309 WREG32(RADEON_PCIE_DATA, (v)); 1351 WREG32(RADEON_PCIE_DATA, (v));
1310 } 1352 }
1311 1353
1312 void r100_pll_errata_after_index(struct radeon_device *rdev); 1354 void r100_pll_errata_after_index(struct radeon_device *rdev);
1313 1355
1314 1356
1315 /* 1357 /*
1316 * ASICs helpers. 1358 * ASICs helpers.
1317 */ 1359 */
1318 #define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \ 1360 #define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
1319 (rdev->pdev->device == 0x5969)) 1361 (rdev->pdev->device == 0x5969))
1320 #define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \ 1362 #define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
1321 (rdev->family == CHIP_RV200) || \ 1363 (rdev->family == CHIP_RV200) || \
1322 (rdev->family == CHIP_RS100) || \ 1364 (rdev->family == CHIP_RS100) || \
1323 (rdev->family == CHIP_RS200) || \ 1365 (rdev->family == CHIP_RS200) || \
1324 (rdev->family == CHIP_RV250) || \ 1366 (rdev->family == CHIP_RV250) || \
1325 (rdev->family == CHIP_RV280) || \ 1367 (rdev->family == CHIP_RV280) || \
1326 (rdev->family == CHIP_RS300)) 1368 (rdev->family == CHIP_RS300))
1327 #define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \ 1369 #define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \
1328 (rdev->family == CHIP_RV350) || \ 1370 (rdev->family == CHIP_RV350) || \
1329 (rdev->family == CHIP_R350) || \ 1371 (rdev->family == CHIP_R350) || \
1330 (rdev->family == CHIP_RV380) || \ 1372 (rdev->family == CHIP_RV380) || \
1331 (rdev->family == CHIP_R420) || \ 1373 (rdev->family == CHIP_R420) || \
1332 (rdev->family == CHIP_R423) || \ 1374 (rdev->family == CHIP_R423) || \
1333 (rdev->family == CHIP_RV410) || \ 1375 (rdev->family == CHIP_RV410) || \
1334 (rdev->family == CHIP_RS400) || \ 1376 (rdev->family == CHIP_RS400) || \
1335 (rdev->family == CHIP_RS480)) 1377 (rdev->family == CHIP_RS480))
1336 #define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \ 1378 #define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
1337 (rdev->ddev->pdev->device == 0x9443) || \ 1379 (rdev->ddev->pdev->device == 0x9443) || \
1338 (rdev->ddev->pdev->device == 0x944B) || \ 1380 (rdev->ddev->pdev->device == 0x944B) || \
1339 (rdev->ddev->pdev->device == 0x9506) || \ 1381 (rdev->ddev->pdev->device == 0x9506) || \
1340 (rdev->ddev->pdev->device == 0x9509) || \ 1382 (rdev->ddev->pdev->device == 0x9509) || \
1341 (rdev->ddev->pdev->device == 0x950F) || \ 1383 (rdev->ddev->pdev->device == 0x950F) || \
1342 (rdev->ddev->pdev->device == 0x689C) || \ 1384 (rdev->ddev->pdev->device == 0x689C) || \
1343 (rdev->ddev->pdev->device == 0x689D)) 1385 (rdev->ddev->pdev->device == 0x689D))
1344 #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) 1386 #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
1345 #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \ 1387 #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
1346 (rdev->family == CHIP_RS690) || \ 1388 (rdev->family == CHIP_RS690) || \
1347 (rdev->family == CHIP_RS740) || \ 1389 (rdev->family == CHIP_RS740) || \
1348 (rdev->family >= CHIP_R600)) 1390 (rdev->family >= CHIP_R600))
1349 #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) 1391 #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
1350 #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) 1392 #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
1351 #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) 1393 #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
1352 #define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \ 1394 #define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
1353 (rdev->flags & RADEON_IS_IGP)) 1395 (rdev->flags & RADEON_IS_IGP))
1354 #define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS)) 1396 #define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
1355 1397
1356 /* 1398 /*
1357 * BIOS helpers. 1399 * BIOS helpers.
1358 */ 1400 */
1359 #define RBIOS8(i) (rdev->bios[i]) 1401 #define RBIOS8(i) (rdev->bios[i])
1360 #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8)) 1402 #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
1361 #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16)) 1403 #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
1362 1404
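The RBIOS16/RBIOS32 helpers assemble little-endian multi-byte values from the BIOS image one byte at a time; they pick up rdev from the surrounding scope. For example, with assumed byte values (not from a real BIOS):

    /* if rdev->bios[0] == 0x34 and rdev->bios[1] == 0x12 ... */
    uint16_t v = RBIOS16(0);   /* 0x34 | (0x12 << 8) == 0x1234 */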
1363 int radeon_combios_init(struct radeon_device *rdev); 1405 int radeon_combios_init(struct radeon_device *rdev);
1364 void radeon_combios_fini(struct radeon_device *rdev); 1406 void radeon_combios_fini(struct radeon_device *rdev);
1365 int radeon_atombios_init(struct radeon_device *rdev); 1407 int radeon_atombios_init(struct radeon_device *rdev);
1366 void radeon_atombios_fini(struct radeon_device *rdev); 1408 void radeon_atombios_fini(struct radeon_device *rdev);
1367 1409
1368 1410
1369 /* 1411 /*
1370 * RING helpers. 1412 * RING helpers.
1371 */ 1413 */
1372 1414
1373 #if DRM_DEBUG_CODE == 0 1415 #if DRM_DEBUG_CODE == 0
1374 static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) 1416 static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1375 { 1417 {
1376 rdev->cp.ring[rdev->cp.wptr++] = v; 1418 rdev->cp.ring[rdev->cp.wptr++] = v;
1377 rdev->cp.wptr &= rdev->cp.ptr_mask; 1419 rdev->cp.wptr &= rdev->cp.ptr_mask;
1378 rdev->cp.count_dw--; 1420 rdev->cp.count_dw--;
1379 rdev->cp.ring_free_dw--; 1421 rdev->cp.ring_free_dw--;
1380 } 1422 }
1381 #else 1423 #else
1382 /* With debugging this is just too big to inline */ 1424 /* With debugging this is just too big to inline */
1383 void radeon_ring_write(struct radeon_device *rdev, uint32_t v); 1425 void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
1384 #endif 1426 #endif
1385 1427
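radeon_ring_write() relies on the ring size being a power of two so that masking with ptr_mask wraps the write pointer. A worked example with assumed numbers (not taken from this file):

    uint32_t wptr = 16383;        /* last slot of a 16384-dword ring, ptr_mask = 0x3fff */
    wptr = (wptr + 1) & 0x3fff;   /* advancing past the end wraps wptr back to 0 */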
1386 /* 1428 /*
1387 * ASICs macro. 1429 * ASICs macro.
1388 */ 1430 */
1389 #define radeon_init(rdev) (rdev)->asic->init((rdev)) 1431 #define radeon_init(rdev) (rdev)->asic->init((rdev))
1390 #define radeon_fini(rdev) (rdev)->asic->fini((rdev)) 1432 #define radeon_fini(rdev) (rdev)->asic->fini((rdev))
1391 #define radeon_resume(rdev) (rdev)->asic->resume((rdev)) 1433 #define radeon_resume(rdev) (rdev)->asic->resume((rdev))
1392 #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) 1434 #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
1393 #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) 1435 #define radeon_cs_parse(p) rdev->asic->cs_parse((p))
1394 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) 1436 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
1395 #define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev)) 1437 #define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
1396 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) 1438 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
1397 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) 1439 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
1398 #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) 1440 #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
1399 #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) 1441 #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
1400 #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) 1442 #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
1401 #define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) 1443 #define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
1402 #define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) 1444 #define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
1403 #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) 1445 #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
1404 #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) 1446 #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
1405 #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) 1447 #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
1406 #define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence)) 1448 #define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
1407 #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f)) 1449 #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
1408 #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f)) 1450 #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
1409 #define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f)) 1451 #define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
1410 #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev)) 1452 #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
1411 #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) 1453 #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
1412 #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) 1454 #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
1413 #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e)) 1455 #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
1414 #define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev)) 1456 #define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev))
1415 #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) 1457 #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
1416 #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) 1458 #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
1417 #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) 1459 #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
1418 #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) 1460 #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
1419 #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) 1461 #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
1420 #define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev)) 1462 #define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
1421 #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) 1463 #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
1422 #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) 1464 #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
1423 #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) 1465 #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
1424 #define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev)) 1466 #define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
1425 #define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev)) 1467 #define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev))
1426 #define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev)) 1468 #define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev))
1427 #define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) 1469 #define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
1428 #define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) 1470 #define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
1429 #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) 1471 #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
1430 #define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc)) 1472 #define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
1431 #define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base)) 1473 #define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
1432 #define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc)) 1474 #define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
1433 1475
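The macros above dispatch through the per-family function pointers of struct radeon_asic. A sketch of how a caller might combine the lockup and reset hooks (illustrative only, with error handling reduced to a log message):

    if (radeon_gpu_is_lockup(rdev)) {
        /* dispatches to rdev->asic->asic_reset() for this chip family */
        int r = radeon_asic_reset(rdev);
        if (r)
            dev_err(rdev->dev, "GPU reset failed\n");
    }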
1434 /* Common functions */ 1476 /* Common functions */
1435 /* AGP */ 1477 /* AGP */
1436 extern int radeon_gpu_reset(struct radeon_device *rdev); 1478 extern int radeon_gpu_reset(struct radeon_device *rdev);
1437 extern void radeon_agp_disable(struct radeon_device *rdev); 1479 extern void radeon_agp_disable(struct radeon_device *rdev);
1438 extern int radeon_modeset_init(struct radeon_device *rdev); 1480 extern int radeon_modeset_init(struct radeon_device *rdev);
1439 extern void radeon_modeset_fini(struct radeon_device *rdev); 1481 extern void radeon_modeset_fini(struct radeon_device *rdev);
1440 extern bool radeon_card_posted(struct radeon_device *rdev); 1482 extern bool radeon_card_posted(struct radeon_device *rdev);
1441 extern void radeon_update_bandwidth_info(struct radeon_device *rdev); 1483 extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
1442 extern void radeon_update_display_priority(struct radeon_device *rdev); 1484 extern void radeon_update_display_priority(struct radeon_device *rdev);
1443 extern bool radeon_boot_test_post_card(struct radeon_device *rdev); 1485 extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
1444 extern void radeon_scratch_init(struct radeon_device *rdev); 1486 extern void radeon_scratch_init(struct radeon_device *rdev);
1445 extern void radeon_wb_fini(struct radeon_device *rdev); 1487 extern void radeon_wb_fini(struct radeon_device *rdev);
1446 extern int radeon_wb_init(struct radeon_device *rdev); 1488 extern int radeon_wb_init(struct radeon_device *rdev);
1447 extern void radeon_wb_disable(struct radeon_device *rdev); 1489 extern void radeon_wb_disable(struct radeon_device *rdev);
1448 extern void radeon_surface_init(struct radeon_device *rdev); 1490 extern void radeon_surface_init(struct radeon_device *rdev);
1449 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); 1491 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
1450 extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); 1492 extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
1451 extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); 1493 extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
1452 extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); 1494 extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
1453 extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); 1495 extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
1454 extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base); 1496 extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
1455 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 1497 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
1456 extern int radeon_resume_kms(struct drm_device *dev); 1498 extern int radeon_resume_kms(struct drm_device *dev);
1457 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 1499 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1458 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); 1500 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
1459 1501
1460 /* 1502 /*
1461 * R600 vram scratch functions 1503 * R600 vram scratch functions
1462 */ 1504 */
1463 int r600_vram_scratch_init(struct radeon_device *rdev); 1505 int r600_vram_scratch_init(struct radeon_device *rdev);
1464 void r600_vram_scratch_fini(struct radeon_device *rdev); 1506 void r600_vram_scratch_fini(struct radeon_device *rdev);
1465 1507
1466 /* 1508 /*
1467 * r600 functions used by radeon_encoder.c 1509 * r600 functions used by radeon_encoder.c
1468 */ 1510 */
1469 extern void r600_hdmi_enable(struct drm_encoder *encoder); 1511 extern void r600_hdmi_enable(struct drm_encoder *encoder);
1470 extern void r600_hdmi_disable(struct drm_encoder *encoder); 1512 extern void r600_hdmi_disable(struct drm_encoder *encoder);
1471 extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); 1513 extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1472 1514
1473 extern int ni_init_microcode(struct radeon_device *rdev); 1515 extern int ni_init_microcode(struct radeon_device *rdev);
1474 extern int ni_mc_load_microcode(struct radeon_device *rdev); 1516 extern int ni_mc_load_microcode(struct radeon_device *rdev);
1475 1517
1476 /* radeon_acpi.c */ 1518 /* radeon_acpi.c */
1477 #if defined(CONFIG_ACPI) 1519 #if defined(CONFIG_ACPI)
1478 extern int radeon_acpi_init(struct radeon_device *rdev); 1520 extern int radeon_acpi_init(struct radeon_device *rdev);
1479 #else 1521 #else
1480 static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; } 1522 static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
1481 #endif 1523 #endif
1482 1524
1483 #include "radeon_object.h" 1525 #include "radeon_object.h"
1484 1526
1485 #endif 1527 #endif
1486 1528
drivers/gpu/drm/radeon/radeon_cs.c
1 /* 1 /*
2 * Copyright 2008 Jerome Glisse. 2 * Copyright 2008 Jerome Glisse.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a 5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"), 6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation 7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the 9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions: 10 * Software is furnished to do so, subject to the following conditions:
11 * 11 *
12 * The above copyright notice and this permission notice (including the next 12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the 13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software. 14 * Software.
15 * 15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE. 22 * DEALINGS IN THE SOFTWARE.
23 * 23 *
24 * Authors: 24 * Authors:
25 * Jerome Glisse <glisse@freedesktop.org> 25 * Jerome Glisse <glisse@freedesktop.org>
26 */ 26 */
27 #include "drmP.h" 27 #include "drmP.h"
28 #include "radeon_drm.h" 28 #include "radeon_drm.h"
29 #include "radeon_reg.h" 29 #include "radeon_reg.h"
30 #include "radeon.h" 30 #include "radeon.h"
31 31
32 void r100_cs_dump_packet(struct radeon_cs_parser *p, 32 void r100_cs_dump_packet(struct radeon_cs_parser *p,
33 struct radeon_cs_packet *pkt); 33 struct radeon_cs_packet *pkt);
34 34
35 int radeon_cs_parser_relocs(struct radeon_cs_parser *p) 35 int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
36 { 36 {
37 struct drm_device *ddev = p->rdev->ddev; 37 struct drm_device *ddev = p->rdev->ddev;
38 struct radeon_cs_chunk *chunk; 38 struct radeon_cs_chunk *chunk;
39 unsigned i, j; 39 unsigned i, j;
40 bool duplicate; 40 bool duplicate;
41 41
42 if (p->chunk_relocs_idx == -1) { 42 if (p->chunk_relocs_idx == -1) {
43 return 0; 43 return 0;
44 } 44 }
45 chunk = &p->chunks[p->chunk_relocs_idx]; 45 chunk = &p->chunks[p->chunk_relocs_idx];
46 /* FIXME: we assume that each reloc uses 4 dwords */ 46 /* FIXME: we assume that each reloc uses 4 dwords */
47 p->nrelocs = chunk->length_dw / 4; 47 p->nrelocs = chunk->length_dw / 4;
48 p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL); 48 p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
49 if (p->relocs_ptr == NULL) { 49 if (p->relocs_ptr == NULL) {
50 return -ENOMEM; 50 return -ENOMEM;
51 } 51 }
52 p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL); 52 p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
53 if (p->relocs == NULL) { 53 if (p->relocs == NULL) {
54 return -ENOMEM; 54 return -ENOMEM;
55 } 55 }
56 for (i = 0; i < p->nrelocs; i++) { 56 for (i = 0; i < p->nrelocs; i++) {
57 struct drm_radeon_cs_reloc *r; 57 struct drm_radeon_cs_reloc *r;
58 58
59 duplicate = false; 59 duplicate = false;
60 r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4]; 60 r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
61 for (j = 0; j < p->nrelocs; j++) { 61 for (j = 0; j < p->nrelocs; j++) {
62 if (r->handle == p->relocs[j].handle) { 62 if (r->handle == p->relocs[j].handle) {
63 p->relocs_ptr[i] = &p->relocs[j]; 63 p->relocs_ptr[i] = &p->relocs[j];
64 duplicate = true; 64 duplicate = true;
65 break; 65 break;
66 } 66 }
67 } 67 }
68 if (!duplicate) { 68 if (!duplicate) {
69 p->relocs[i].gobj = drm_gem_object_lookup(ddev, 69 p->relocs[i].gobj = drm_gem_object_lookup(ddev,
70 p->filp, 70 p->filp,
71 r->handle); 71 r->handle);
72 if (p->relocs[i].gobj == NULL) { 72 if (p->relocs[i].gobj == NULL) {
73 DRM_ERROR("gem object lookup failed 0x%x\n", 73 DRM_ERROR("gem object lookup failed 0x%x\n",
74 r->handle); 74 r->handle);
75 return -ENOENT; 75 return -ENOENT;
76 } 76 }
77 p->relocs_ptr[i] = &p->relocs[i]; 77 p->relocs_ptr[i] = &p->relocs[i];
78 p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj); 78 p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
79 p->relocs[i].lobj.bo = p->relocs[i].robj; 79 p->relocs[i].lobj.bo = p->relocs[i].robj;
80 p->relocs[i].lobj.wdomain = r->write_domain; 80 p->relocs[i].lobj.wdomain = r->write_domain;
81 p->relocs[i].lobj.rdomain = r->read_domains; 81 p->relocs[i].lobj.rdomain = r->read_domains;
82 p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo; 82 p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
83 p->relocs[i].handle = r->handle; 83 p->relocs[i].handle = r->handle;
84 p->relocs[i].flags = r->flags; 84 p->relocs[i].flags = r->flags;
85 radeon_bo_list_add_object(&p->relocs[i].lobj, 85 radeon_bo_list_add_object(&p->relocs[i].lobj,
86 &p->validated); 86 &p->validated);
87 } 87 }
88 } 88 }
89 return radeon_bo_list_validate(&p->validated); 89 return radeon_bo_list_validate(&p->validated);
90 } 90 }
91 91
92 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) 92 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
93 { 93 {
94 struct drm_radeon_cs *cs = data; 94 struct drm_radeon_cs *cs = data;
95 uint64_t *chunk_array_ptr; 95 uint64_t *chunk_array_ptr;
96 unsigned size, i; 96 unsigned size, i;
97 97
98 if (!cs->num_chunks) { 98 if (!cs->num_chunks) {
99 return 0; 99 return 0;
100 } 100 }
101 /* get chunks */ 101 /* get chunks */
102 INIT_LIST_HEAD(&p->validated); 102 INIT_LIST_HEAD(&p->validated);
103 p->idx = 0; 103 p->idx = 0;
104 p->chunk_ib_idx = -1; 104 p->chunk_ib_idx = -1;
105 p->chunk_relocs_idx = -1; 105 p->chunk_relocs_idx = -1;
106 p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL); 106 p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
107 if (p->chunks_array == NULL) { 107 if (p->chunks_array == NULL) {
108 return -ENOMEM; 108 return -ENOMEM;
109 } 109 }
110 chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks); 110 chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
111 if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr, 111 if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
112 sizeof(uint64_t)*cs->num_chunks)) { 112 sizeof(uint64_t)*cs->num_chunks)) {
113 return -EFAULT; 113 return -EFAULT;
114 } 114 }
115 p->nchunks = cs->num_chunks; 115 p->nchunks = cs->num_chunks;
116 p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL); 116 p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
117 if (p->chunks == NULL) { 117 if (p->chunks == NULL) {
118 return -ENOMEM; 118 return -ENOMEM;
119 } 119 }
120 for (i = 0; i < p->nchunks; i++) { 120 for (i = 0; i < p->nchunks; i++) {
121 struct drm_radeon_cs_chunk __user **chunk_ptr = NULL; 121 struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
122 struct drm_radeon_cs_chunk user_chunk; 122 struct drm_radeon_cs_chunk user_chunk;
123 uint32_t __user *cdata; 123 uint32_t __user *cdata;
124 124
125 chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i]; 125 chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
126 if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr, 126 if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
127 sizeof(struct drm_radeon_cs_chunk))) { 127 sizeof(struct drm_radeon_cs_chunk))) {
128 return -EFAULT; 128 return -EFAULT;
129 } 129 }
130 p->chunks[i].length_dw = user_chunk.length_dw; 130 p->chunks[i].length_dw = user_chunk.length_dw;
131 p->chunks[i].kdata = NULL; 131 p->chunks[i].kdata = NULL;
132 p->chunks[i].chunk_id = user_chunk.chunk_id; 132 p->chunks[i].chunk_id = user_chunk.chunk_id;
133 133
134 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { 134 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
135 p->chunk_relocs_idx = i; 135 p->chunk_relocs_idx = i;
136 } 136 }
137 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { 137 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
138 p->chunk_ib_idx = i; 138 p->chunk_ib_idx = i;
139 /* zero length IB isn't useful */ 139 /* zero length IB isn't useful */
140 if (p->chunks[i].length_dw == 0) 140 if (p->chunks[i].length_dw == 0)
141 return -EINVAL; 141 return -EINVAL;
142 } 142 }
143 143
144 p->chunks[i].length_dw = user_chunk.length_dw; 144 p->chunks[i].length_dw = user_chunk.length_dw;
145 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; 145 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
146 146
147 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; 147 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
148 if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) { 148 if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
149 size = p->chunks[i].length_dw * sizeof(uint32_t); 149 size = p->chunks[i].length_dw * sizeof(uint32_t);
150 p->chunks[i].kdata = kmalloc(size, GFP_KERNEL); 150 p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
151 if (p->chunks[i].kdata == NULL) { 151 if (p->chunks[i].kdata == NULL) {
152 return -ENOMEM; 152 return -ENOMEM;
153 } 153 }
154 if (DRM_COPY_FROM_USER(p->chunks[i].kdata, 154 if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
155 p->chunks[i].user_ptr, size)) { 155 p->chunks[i].user_ptr, size)) {
156 return -EFAULT; 156 return -EFAULT;
157 } 157 }
158 } else { 158 } else {
159 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); 159 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
160 p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); 160 p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
161 if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) { 161 if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
162 kfree(p->chunks[i].kpage[0]); 162 kfree(p->chunks[i].kpage[0]);
163 kfree(p->chunks[i].kpage[1]); 163 kfree(p->chunks[i].kpage[1]);
164 return -ENOMEM; 164 return -ENOMEM;
165 } 165 }
166 p->chunks[i].kpage_idx[0] = -1; 166 p->chunks[i].kpage_idx[0] = -1;
167 p->chunks[i].kpage_idx[1] = -1; 167 p->chunks[i].kpage_idx[1] = -1;
168 p->chunks[i].last_copied_page = -1; 168 p->chunks[i].last_copied_page = -1;
169 p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE; 169 p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
170 } 170 }
171 } 171 }
172 if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) { 172 if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
173 DRM_ERROR("cs IB too big: %d\n", 173 DRM_ERROR("cs IB too big: %d\n",
174 p->chunks[p->chunk_ib_idx].length_dw); 174 p->chunks[p->chunk_ib_idx].length_dw);
175 return -EINVAL; 175 return -EINVAL;
176 } 176 }
177 return 0; 177 return 0;
178 } 178 }
179 179
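For orientation, this is the userspace side of the layout radeon_cs_parser_init() walks: cs->chunks points at an array of user pointers, each of which points at a struct drm_radeon_cs_chunk describing one chunk. A minimal sketch, assuming the radeon_drm.h structure definitions of this era; fill_cs_args() and its parameter names are hypothetical, not part of any real library:

/* Hypothetical userspace sketch of the chunk layout consumed above. */
#include <stdint.h>
#include <string.h>
#include <drm/radeon_drm.h>

static void fill_cs_args(struct drm_radeon_cs *cs,
			 struct drm_radeon_cs_chunk chunk[2],
			 uint64_t chunk_ptrs[2],
			 uint32_t *ib, uint32_t ib_dw,
			 struct drm_radeon_cs_reloc *relocs, uint32_t nrelocs)
{
	/* IB chunk: must be non-empty and at most 16k dwords (checked above) */
	chunk[0].chunk_id = RADEON_CHUNK_ID_IB;
	chunk[0].length_dw = ib_dw;
	chunk[0].chunk_data = (uint64_t)(uintptr_t)ib;

	/* reloc chunk: length is in dwords, one reloc = 4 dwords */
	chunk[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
	chunk[1].length_dw = nrelocs * (sizeof(*relocs) / 4);
	chunk[1].chunk_data = (uint64_t)(uintptr_t)relocs;

	chunk_ptrs[0] = (uint64_t)(uintptr_t)&chunk[0];
	chunk_ptrs[1] = (uint64_t)(uintptr_t)&chunk[1];

	memset(cs, 0, sizeof(*cs));
	cs->num_chunks = 2;
	cs->chunks = (uint64_t)(uintptr_t)chunk_ptrs;
}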
180 /** 180 /**
181 * radeon_cs_parser_fini() - clean parser states 181 * radeon_cs_parser_fini() - clean parser states
182 * @parser: parser structure holding parsing context. 182 * @parser: parser structure holding parsing context.
183 * @error: error number 183 * @error: error number
184 * 184 *
185 * If error is set then unvalidate the buffers, otherwise just free memory 185 * If error is set then unvalidate the buffers, otherwise just free memory
186 * used by the parsing context. 186 * used by the parsing context.
187 **/ 187 **/
188 static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) 188 static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
189 { 189 {
190 unsigned i; 190 unsigned i;
191 191
192 192
193 if (!error && parser->ib) 193 if (!error && parser->ib)
194 ttm_eu_fence_buffer_objects(&parser->validated, 194 ttm_eu_fence_buffer_objects(&parser->validated,
195 parser->ib->fence); 195 parser->ib->fence);
196 else 196 else
197 ttm_eu_backoff_reservation(&parser->validated); 197 ttm_eu_backoff_reservation(&parser->validated);
198 198
199 if (parser->relocs != NULL) { 199 if (parser->relocs != NULL) {
200 for (i = 0; i < parser->nrelocs; i++) { 200 for (i = 0; i < parser->nrelocs; i++) {
201 if (parser->relocs[i].gobj) 201 if (parser->relocs[i].gobj)
202 drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); 202 drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
203 } 203 }
204 } 204 }
205 kfree(parser->track); 205 kfree(parser->track);
206 kfree(parser->relocs); 206 kfree(parser->relocs);
207 kfree(parser->relocs_ptr); 207 kfree(parser->relocs_ptr);
208 for (i = 0; i < parser->nchunks; i++) { 208 for (i = 0; i < parser->nchunks; i++) {
209 kfree(parser->chunks[i].kdata); 209 kfree(parser->chunks[i].kdata);
210 kfree(parser->chunks[i].kpage[0]); 210 kfree(parser->chunks[i].kpage[0]);
211 kfree(parser->chunks[i].kpage[1]); 211 kfree(parser->chunks[i].kpage[1]);
212 } 212 }
213 kfree(parser->chunks); 213 kfree(parser->chunks);
214 kfree(parser->chunks_array); 214 kfree(parser->chunks_array);
215 radeon_ib_free(parser->rdev, &parser->ib); 215 radeon_ib_free(parser->rdev, &parser->ib);
216 } 216 }
217 217
218 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 218 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
219 { 219 {
220 struct radeon_device *rdev = dev->dev_private; 220 struct radeon_device *rdev = dev->dev_private;
221 struct radeon_cs_parser parser; 221 struct radeon_cs_parser parser;
222 struct radeon_cs_chunk *ib_chunk; 222 struct radeon_cs_chunk *ib_chunk;
223 int r; 223 int r;
224 224
225 mutex_lock(&rdev->cs_mutex); 225 radeon_mutex_lock(&rdev->cs_mutex);
226 /* initialize parser */ 226 /* initialize parser */
227 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 227 memset(&parser, 0, sizeof(struct radeon_cs_parser));
228 parser.filp = filp; 228 parser.filp = filp;
229 parser.rdev = rdev; 229 parser.rdev = rdev;
230 parser.dev = rdev->dev; 230 parser.dev = rdev->dev;
231 parser.family = rdev->family; 231 parser.family = rdev->family;
232 r = radeon_cs_parser_init(&parser, data); 232 r = radeon_cs_parser_init(&parser, data);
233 if (r) { 233 if (r) {
234 DRM_ERROR("Failed to initialize parser !\n"); 234 DRM_ERROR("Failed to initialize parser !\n");
235 radeon_cs_parser_fini(&parser, r); 235 radeon_cs_parser_fini(&parser, r);
236 mutex_unlock(&rdev->cs_mutex); 236 radeon_mutex_unlock(&rdev->cs_mutex);
237 return r; 237 return r;
238 } 238 }
239 r = radeon_ib_get(rdev, &parser.ib); 239 r = radeon_ib_get(rdev, &parser.ib);
240 if (r) { 240 if (r) {
241 DRM_ERROR("Failed to get ib !\n"); 241 DRM_ERROR("Failed to get ib !\n");
242 radeon_cs_parser_fini(&parser, r); 242 radeon_cs_parser_fini(&parser, r);
243 mutex_unlock(&rdev->cs_mutex); 243 radeon_mutex_unlock(&rdev->cs_mutex);
244 return r; 244 return r;
245 } 245 }
246 r = radeon_cs_parser_relocs(&parser); 246 r = radeon_cs_parser_relocs(&parser);
247 if (r) { 247 if (r) {
248 if (r != -ERESTARTSYS) 248 if (r != -ERESTARTSYS)
249 DRM_ERROR("Failed to parse relocation %d!\n", r); 249 DRM_ERROR("Failed to parse relocation %d!\n", r);
250 radeon_cs_parser_fini(&parser, r); 250 radeon_cs_parser_fini(&parser, r);
251 mutex_unlock(&rdev->cs_mutex); 251 radeon_mutex_unlock(&rdev->cs_mutex);
252 return r; 252 return r;
253 } 253 }
254 /* Copy the packet into the IB; the parser will read from the 254 /* Copy the packet into the IB; the parser will read from the
255 * input memory (cached) and write to the IB (which can be 255 * input memory (cached) and write to the IB (which can be
256 * uncached). */ 256 * uncached). */
257 ib_chunk = &parser.chunks[parser.chunk_ib_idx]; 257 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
258 parser.ib->length_dw = ib_chunk->length_dw; 258 parser.ib->length_dw = ib_chunk->length_dw;
259 r = radeon_cs_parse(&parser); 259 r = radeon_cs_parse(&parser);
260 if (r || parser.parser_error) { 260 if (r || parser.parser_error) {
261 DRM_ERROR("Invalid command stream !\n"); 261 DRM_ERROR("Invalid command stream !\n");
262 radeon_cs_parser_fini(&parser, r); 262 radeon_cs_parser_fini(&parser, r);
263 mutex_unlock(&rdev->cs_mutex); 263 radeon_mutex_unlock(&rdev->cs_mutex);
264 return r; 264 return r;
265 } 265 }
266 r = radeon_cs_finish_pages(&parser); 266 r = radeon_cs_finish_pages(&parser);
267 if (r) { 267 if (r) {
268 DRM_ERROR("Invalid command stream !\n"); 268 DRM_ERROR("Invalid command stream !\n");
269 radeon_cs_parser_fini(&parser, r); 269 radeon_cs_parser_fini(&parser, r);
270 mutex_unlock(&rdev->cs_mutex); 270 radeon_mutex_unlock(&rdev->cs_mutex);
271 return r; 271 return r;
272 } 272 }
273 r = radeon_ib_schedule(rdev, parser.ib); 273 r = radeon_ib_schedule(rdev, parser.ib);
274 if (r) { 274 if (r) {
275 DRM_ERROR("Failed to schedule IB !\n"); 275 DRM_ERROR("Failed to schedule IB !\n");
276 } 276 }
277 radeon_cs_parser_fini(&parser, r); 277 radeon_cs_parser_fini(&parser, r);
278 mutex_unlock(&rdev->cs_mutex); 278 radeon_mutex_unlock(&rdev->cs_mutex);
279 return r; 279 return r;
280 } 280 }
281 281
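Every path through radeon_cs_ioctl() now brackets its work with radeon_mutex_lock()/radeon_mutex_unlock() instead of the plain mutex calls. The wrapper comes from the radeon.h hunk of this commit; the sketch below shows the shape of such an owner-tracking recursive lock (treat it as a paraphrase of the idea, not the verbatim hunk):

/* Sketch of a recursive mutex wrapper: a nesting counter plus an owner
 * pointer let the same task re-enter while other tasks block normally.
 * Reading ->owner without the lock is safe only because a non-owner can
 * never observe itself there. */
struct radeon_mutex {
	struct mutex		mutex;
	struct task_struct	*owner;
	int			level;
};

static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
{
	if (mutex_trylock(&mutex->mutex)) {
		/* The mutex was unlocked before, so it's ours now */
		mutex->owner = current;
	} else if (mutex->owner != current) {
		/* Another process locked the mutex, take it */
		mutex_lock(&mutex->mutex);
		mutex->owner = current;
	}
	/* Otherwise the mutex was already locked by this process */
	mutex->level++;
}

static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
{
	if (--mutex->level > 0)
		return;
	mutex->owner = NULL;
	mutex_unlock(&mutex->mutex);
}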
282 int radeon_cs_finish_pages(struct radeon_cs_parser *p) 282 int radeon_cs_finish_pages(struct radeon_cs_parser *p)
283 { 283 {
284 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; 284 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
285 int i; 285 int i;
286 int size = PAGE_SIZE; 286 int size = PAGE_SIZE;
287 287
288 for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) { 288 for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
289 if (i == ibc->last_page_index) { 289 if (i == ibc->last_page_index) {
290 size = (ibc->length_dw * 4) % PAGE_SIZE; 290 size = (ibc->length_dw * 4) % PAGE_SIZE;
291 if (size == 0) 291 if (size == 0)
292 size = PAGE_SIZE; 292 size = PAGE_SIZE;
293 } 293 }
294 294
295 if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)), 295 if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
296 ibc->user_ptr + (i * PAGE_SIZE), 296 ibc->user_ptr + (i * PAGE_SIZE),
297 size)) 297 size))
298 return -EFAULT; 298 return -EFAULT;
299 } 299 }
300 return 0; 300 return 0;
301 } 301 }
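Worked example for the tail-size logic above, assuming 4 KiB pages: an IB with length_dw = 1500 occupies 6000 bytes, so last_page_index = (6000 - 1) / 4096 = 1; the final iteration copies size = 6000 % 4096 = 1904 bytes, and the modulo only falls back to a full PAGE_SIZE when the IB ends exactly on a page boundary.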
302 302
303 int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx) 303 int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
304 { 304 {
305 int new_page; 305 int new_page;
306 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; 306 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
307 int i; 307 int i;
308 int size = PAGE_SIZE; 308 int size = PAGE_SIZE;
309 309
310 for (i = ibc->last_copied_page + 1; i < pg_idx; i++) { 310 for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
311 if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)), 311 if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
312 ibc->user_ptr + (i * PAGE_SIZE), 312 ibc->user_ptr + (i * PAGE_SIZE),
313 PAGE_SIZE)) { 313 PAGE_SIZE)) {
314 p->parser_error = -EFAULT; 314 p->parser_error = -EFAULT;
315 return 0; 315 return 0;
316 } 316 }
317 } 317 }
318 318
319 new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1; 319 new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
320 320
321 if (pg_idx == ibc->last_page_index) { 321 if (pg_idx == ibc->last_page_index) {
322 size = (ibc->length_dw * 4) % PAGE_SIZE; 322 size = (ibc->length_dw * 4) % PAGE_SIZE;
323 if (size == 0) 323 if (size == 0)
324 size = PAGE_SIZE; 324 size = PAGE_SIZE;
325 } 325 }
326 326
327 if (DRM_COPY_FROM_USER(ibc->kpage[new_page], 327 if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
328 ibc->user_ptr + (pg_idx * PAGE_SIZE), 328 ibc->user_ptr + (pg_idx * PAGE_SIZE),
329 size)) { 329 size)) {
330 p->parser_error = -EFAULT; 330 p->parser_error = -EFAULT;
331 return 0; 331 return 0;
332 } 332 }
333 333
334 /* copy to IB here */ 334 /* copy to IB here */
335 memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size); 335 memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
336 336
337 ibc->last_copied_page = pg_idx; 337 ibc->last_copied_page = pg_idx;
338 ibc->kpage_idx[new_page] = pg_idx; 338 ibc->kpage_idx[new_page] = pg_idx;
339 339
340 return new_page; 340 return new_page;
341 } 341 }
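radeon_cs_update_pages() backfills any pages skipped since the last copy, then refills whichever of the two cached kernel pages was least recently used (the kpage_idx comparison). A hedged sketch of a consumer, modeled on the radeon_get_ib_value() helper from radeon.h of this era; the name cs_ib_dword() here is made up:

/* Sketch: fetch dword 'idx' of the user IB through the two-page cache.
 * On a copy failure radeon_cs_update_pages() sets p->parser_error, which
 * the parse loop checks, so the possibly-stale read here is harmless. */
static u32 cs_ib_dword(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx = (idx * 4) / PAGE_SIZE;
	u32 pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset / 4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset / 4];
	/* miss: radeon_cs_update_pages() returns the slot it refilled */
	return ibc->kpage[radeon_cs_update_pages(p, pg_idx)][pg_offset / 4];
}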
342 342
drivers/gpu/drm/radeon/radeon_device.c
1 /* 1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc. 2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc. 3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse. 4 * Copyright 2009 Jerome Glisse.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation 8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the 10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions: 11 * Software is furnished to do so, subject to the following conditions:
12 * 12 *
13 * The above copyright notice and this permission notice shall be included in 13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software. 14 * all copies or substantial portions of the Software.
15 * 15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE. 22 * OTHER DEALINGS IN THE SOFTWARE.
23 * 23 *
24 * Authors: Dave Airlie 24 * Authors: Dave Airlie
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28 #include <linux/console.h> 28 #include <linux/console.h>
29 #include <linux/slab.h> 29 #include <linux/slab.h>
30 #include <drm/drmP.h> 30 #include <drm/drmP.h>
31 #include <drm/drm_crtc_helper.h> 31 #include <drm/drm_crtc_helper.h>
32 #include <drm/radeon_drm.h> 32 #include <drm/radeon_drm.h>
33 #include <linux/vgaarb.h> 33 #include <linux/vgaarb.h>
34 #include <linux/vga_switcheroo.h> 34 #include <linux/vga_switcheroo.h>
35 #include <linux/efi.h> 35 #include <linux/efi.h>
36 #include "radeon_reg.h" 36 #include "radeon_reg.h"
37 #include "radeon.h" 37 #include "radeon.h"
38 #include "atom.h" 38 #include "atom.h"
39 39
40 static const char radeon_family_name[][16] = { 40 static const char radeon_family_name[][16] = {
41 "R100", 41 "R100",
42 "RV100", 42 "RV100",
43 "RS100", 43 "RS100",
44 "RV200", 44 "RV200",
45 "RS200", 45 "RS200",
46 "R200", 46 "R200",
47 "RV250", 47 "RV250",
48 "RS300", 48 "RS300",
49 "RV280", 49 "RV280",
50 "R300", 50 "R300",
51 "R350", 51 "R350",
52 "RV350", 52 "RV350",
53 "RV380", 53 "RV380",
54 "R420", 54 "R420",
55 "R423", 55 "R423",
56 "RV410", 56 "RV410",
57 "RS400", 57 "RS400",
58 "RS480", 58 "RS480",
59 "RS600", 59 "RS600",
60 "RS690", 60 "RS690",
61 "RS740", 61 "RS740",
62 "RV515", 62 "RV515",
63 "R520", 63 "R520",
64 "RV530", 64 "RV530",
65 "RV560", 65 "RV560",
66 "RV570", 66 "RV570",
67 "R580", 67 "R580",
68 "R600", 68 "R600",
69 "RV610", 69 "RV610",
70 "RV630", 70 "RV630",
71 "RV670", 71 "RV670",
72 "RV620", 72 "RV620",
73 "RV635", 73 "RV635",
74 "RS780", 74 "RS780",
75 "RS880", 75 "RS880",
76 "RV770", 76 "RV770",
77 "RV730", 77 "RV730",
78 "RV710", 78 "RV710",
79 "RV740", 79 "RV740",
80 "CEDAR", 80 "CEDAR",
81 "REDWOOD", 81 "REDWOOD",
82 "JUNIPER", 82 "JUNIPER",
83 "CYPRESS", 83 "CYPRESS",
84 "HEMLOCK", 84 "HEMLOCK",
85 "PALM", 85 "PALM",
86 "SUMO", 86 "SUMO",
87 "SUMO2", 87 "SUMO2",
88 "BARTS", 88 "BARTS",
89 "TURKS", 89 "TURKS",
90 "CAICOS", 90 "CAICOS",
91 "CAYMAN", 91 "CAYMAN",
92 "LAST", 92 "LAST",
93 }; 93 };
94 94
95 /* 95 /*
96 * Clear GPU surface registers. 96 * Clear GPU surface registers.
97 */ 97 */
98 void radeon_surface_init(struct radeon_device *rdev) 98 void radeon_surface_init(struct radeon_device *rdev)
99 { 99 {
100 /* FIXME: check this out */ 100 /* FIXME: check this out */
101 if (rdev->family < CHIP_R600) { 101 if (rdev->family < CHIP_R600) {
102 int i; 102 int i;
103 103
104 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { 104 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
105 if (rdev->surface_regs[i].bo) 105 if (rdev->surface_regs[i].bo)
106 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo); 106 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
107 else 107 else
108 radeon_clear_surface_reg(rdev, i); 108 radeon_clear_surface_reg(rdev, i);
109 } 109 }
110 /* enable surfaces */ 110 /* enable surfaces */
111 WREG32(RADEON_SURFACE_CNTL, 0); 111 WREG32(RADEON_SURFACE_CNTL, 0);
112 } 112 }
113 } 113 }
114 114
115 /* 115 /*
116 * GPU scratch register helper functions. 116 * GPU scratch register helper functions.
117 */ 117 */
118 void radeon_scratch_init(struct radeon_device *rdev) 118 void radeon_scratch_init(struct radeon_device *rdev)
119 { 119 {
120 int i; 120 int i;
121 121
122 /* FIXME: check this out */ 122 /* FIXME: check this out */
123 if (rdev->family < CHIP_R300) { 123 if (rdev->family < CHIP_R300) {
124 rdev->scratch.num_reg = 5; 124 rdev->scratch.num_reg = 5;
125 } else { 125 } else {
126 rdev->scratch.num_reg = 7; 126 rdev->scratch.num_reg = 7;
127 } 127 }
128 rdev->scratch.reg_base = RADEON_SCRATCH_REG0; 128 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
129 for (i = 0; i < rdev->scratch.num_reg; i++) { 129 for (i = 0; i < rdev->scratch.num_reg; i++) {
130 rdev->scratch.free[i] = true; 130 rdev->scratch.free[i] = true;
131 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); 131 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
132 } 132 }
133 } 133 }
134 134
135 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg) 135 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
136 { 136 {
137 int i; 137 int i;
138 138
139 for (i = 0; i < rdev->scratch.num_reg; i++) { 139 for (i = 0; i < rdev->scratch.num_reg; i++) {
140 if (rdev->scratch.free[i]) { 140 if (rdev->scratch.free[i]) {
141 rdev->scratch.free[i] = false; 141 rdev->scratch.free[i] = false;
142 *reg = rdev->scratch.reg[i]; 142 *reg = rdev->scratch.reg[i];
143 return 0; 143 return 0;
144 } 144 }
145 } 145 }
146 return -EINVAL; 146 return -EINVAL;
147 } 147 }
148 148
149 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) 149 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
150 { 150 {
151 int i; 151 int i;
152 152
153 for (i = 0; i < rdev->scratch.num_reg; i++) { 153 for (i = 0; i < rdev->scratch.num_reg; i++) {
154 if (rdev->scratch.reg[i] == reg) { 154 if (rdev->scratch.reg[i] == reg) {
155 rdev->scratch.free[i] = true; 155 rdev->scratch.free[i] = true;
156 return; 156 return;
157 } 157 }
158 } 158 }
159 } 159 }
160 160
161 void radeon_wb_disable(struct radeon_device *rdev) 161 void radeon_wb_disable(struct radeon_device *rdev)
162 { 162 {
163 int r; 163 int r;
164 164
165 if (rdev->wb.wb_obj) { 165 if (rdev->wb.wb_obj) {
166 r = radeon_bo_reserve(rdev->wb.wb_obj, false); 166 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
167 if (unlikely(r != 0)) 167 if (unlikely(r != 0))
168 return; 168 return;
169 radeon_bo_kunmap(rdev->wb.wb_obj); 169 radeon_bo_kunmap(rdev->wb.wb_obj);
170 radeon_bo_unpin(rdev->wb.wb_obj); 170 radeon_bo_unpin(rdev->wb.wb_obj);
171 radeon_bo_unreserve(rdev->wb.wb_obj); 171 radeon_bo_unreserve(rdev->wb.wb_obj);
172 } 172 }
173 rdev->wb.enabled = false; 173 rdev->wb.enabled = false;
174 } 174 }
175 175
176 void radeon_wb_fini(struct radeon_device *rdev) 176 void radeon_wb_fini(struct radeon_device *rdev)
177 { 177 {
178 radeon_wb_disable(rdev); 178 radeon_wb_disable(rdev);
179 if (rdev->wb.wb_obj) { 179 if (rdev->wb.wb_obj) {
180 radeon_bo_unref(&rdev->wb.wb_obj); 180 radeon_bo_unref(&rdev->wb.wb_obj);
181 rdev->wb.wb = NULL; 181 rdev->wb.wb = NULL;
182 rdev->wb.wb_obj = NULL; 182 rdev->wb.wb_obj = NULL;
183 } 183 }
184 } 184 }
185 185
186 int radeon_wb_init(struct radeon_device *rdev) 186 int radeon_wb_init(struct radeon_device *rdev)
187 { 187 {
188 int r; 188 int r;
189 189
190 if (rdev->wb.wb_obj == NULL) { 190 if (rdev->wb.wb_obj == NULL) {
191 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, 191 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
192 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); 192 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
193 if (r) { 193 if (r) {
194 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); 194 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
195 return r; 195 return r;
196 } 196 }
197 } 197 }
198 r = radeon_bo_reserve(rdev->wb.wb_obj, false); 198 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
199 if (unlikely(r != 0)) { 199 if (unlikely(r != 0)) {
200 radeon_wb_fini(rdev); 200 radeon_wb_fini(rdev);
201 return r; 201 return r;
202 } 202 }
203 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, 203 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
204 &rdev->wb.gpu_addr); 204 &rdev->wb.gpu_addr);
205 if (r) { 205 if (r) {
206 radeon_bo_unreserve(rdev->wb.wb_obj); 206 radeon_bo_unreserve(rdev->wb.wb_obj);
207 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); 207 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
208 radeon_wb_fini(rdev); 208 radeon_wb_fini(rdev);
209 return r; 209 return r;
210 } 210 }
211 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 211 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
212 radeon_bo_unreserve(rdev->wb.wb_obj); 212 radeon_bo_unreserve(rdev->wb.wb_obj);
213 if (r) { 213 if (r) {
214 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); 214 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
215 radeon_wb_fini(rdev); 215 radeon_wb_fini(rdev);
216 return r; 216 return r;
217 } 217 }
218 218
219 /* clear wb memory */ 219 /* clear wb memory */
220 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE); 220 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
221 /* disable event_write fences */ 221 /* disable event_write fences */
222 rdev->wb.use_event = false; 222 rdev->wb.use_event = false;
223 /* disabled via module param */ 223 /* disabled via module param */
224 if (radeon_no_wb == 1) 224 if (radeon_no_wb == 1)
225 rdev->wb.enabled = false; 225 rdev->wb.enabled = false;
226 else { 226 else {
227 /* often unreliable on AGP */ 227 /* often unreliable on AGP */
228 if (rdev->flags & RADEON_IS_AGP) { 228 if (rdev->flags & RADEON_IS_AGP) {
229 rdev->wb.enabled = false; 229 rdev->wb.enabled = false;
230 } else { 230 } else {
231 rdev->wb.enabled = true; 231 rdev->wb.enabled = true;
232 /* event_write fences are only available on r600+ */ 232 /* event_write fences are only available on r600+ */
233 if (rdev->family >= CHIP_R600) 233 if (rdev->family >= CHIP_R600)
234 rdev->wb.use_event = true; 234 rdev->wb.use_event = true;
235 } 235 }
236 } 236 }
237 /* always use writeback/events on NI */ 237 /* always use writeback/events on NI */
238 if (ASIC_IS_DCE5(rdev)) { 238 if (ASIC_IS_DCE5(rdev)) {
239 rdev->wb.enabled = true; 239 rdev->wb.enabled = true;
240 rdev->wb.use_event = true; 240 rdev->wb.use_event = true;
241 } 241 }
242 242
243 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis"); 243 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
244 244
245 return 0; 245 return 0;
246 } 246 }
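Once enabled, fences and IRQ status are read from this page with a cheap cached load instead of an MMIO register read. A minimal sketch; the helper name and byte-offset convention are assumptions, while rdev->wb.wb is the kmap set up above:

/* Hypothetical helper: read back a dword the GPU has DMA-written into
 * the writeback page. Offsets are in bytes within the 4 KiB page. */
static inline u32 radeon_wb_read_dword(struct radeon_device *rdev,
				       unsigned byte_offset)
{
	return rdev->wb.wb[byte_offset / 4];
}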
247 247
248 /** 248 /**
249 * radeon_vram_location - try to find VRAM location 249 * radeon_vram_location - try to find VRAM location
250 * @rdev: radeon device structure holding all necessary information 250 * @rdev: radeon device structure holding all necessary information
251 * @mc: memory controller structure holding memory information 251 * @mc: memory controller structure holding memory information
252 * @base: base address at which to put VRAM 252 * @base: base address at which to put VRAM
253 * 253 *
254 * Function will try to place VRAM at the base address provided 254 * Function will try to place VRAM at the base address provided
255 * as a parameter (which is so far either the PCI aperture address or, 255 * as a parameter (which is so far either the PCI aperture address or,
256 * for IGP, the TOM base address). 256 * for IGP, the TOM base address).
257 * 257 *
258 * If there is not enough space to fit the invisible VRAM in the 32-bit 258 * If there is not enough space to fit the invisible VRAM in the 32-bit
259 * address space then we limit the VRAM size to the aperture. 259 * address space then we limit the VRAM size to the aperture.
260 * 260 *
261 * If we are using AGP and if the AGP aperture doesn't allow us to have 261 * If we are using AGP and if the AGP aperture doesn't allow us to have
262 * room for all the VRAM then we restrict the VRAM to the PCI aperture 262 * room for all the VRAM then we restrict the VRAM to the PCI aperture
263 * size and print a warning. 263 * size and print a warning.
264 * 264 *
265 * This function never fails; the worst case is limiting VRAM. 265 * This function never fails; the worst case is limiting VRAM.
266 * 266 *
267 * Note: GTT start, end, size should be initialized before calling this 267 * Note: GTT start, end, size should be initialized before calling this
268 * function on AGP platform. 268 * function on AGP platform.
269 * 269 *
270 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size, 270 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
271 * this shouldn't be a problem as we are using the PCI aperture as a reference. 271 * this shouldn't be a problem as we are using the PCI aperture as a reference.
272 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but 272 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
273 * not IGP. 273 * not IGP.
274 * 274 *
275 * Note: we use mc_vram_size as on some boards we need to program the mc to 275 * Note: we use mc_vram_size as on some boards we need to program the mc to
276 * cover the whole aperture even if VRAM size is smaller than the aperture 276 * cover the whole aperture even if VRAM size is smaller than the aperture
277 * size (Novell bug 204882, along with lots of Ubuntu ones) 277 * size (Novell bug 204882, along with lots of Ubuntu ones)
278 * 278 *
279 * Note: when limiting vram it's safe to overwrite real_vram_size because 279 * Note: when limiting vram it's safe to overwrite real_vram_size because
280 * we are not in the case where real_vram_size is smaller than mc_vram_size 280 * we are not in the case where real_vram_size is smaller than mc_vram_size
281 * (i.e. not affected by the bogus hw of Novell bug 204882 and lots of 281 * (i.e. not affected by the bogus hw of Novell bug 204882 and lots of
282 * Ubuntu ones) 282 * Ubuntu ones)
283 * 283 *
284 * Note: IGP TOM addr should be the same as the aperture addr, we don't 284 * Note: IGP TOM addr should be the same as the aperture addr, we don't
285 * explicitly check for that though. 285 * explicitly check for that though.
286 * 286 *
287 * FIXME: when reducing VRAM size align new size on power of 2. 287 * FIXME: when reducing VRAM size align new size on power of 2.
288 */ 288 */
289 void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base) 289 void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
290 { 290 {
291 mc->vram_start = base; 291 mc->vram_start = base;
292 if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) { 292 if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
293 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); 293 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
294 mc->real_vram_size = mc->aper_size; 294 mc->real_vram_size = mc->aper_size;
295 mc->mc_vram_size = mc->aper_size; 295 mc->mc_vram_size = mc->aper_size;
296 } 296 }
297 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 297 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
298 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) { 298 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
299 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); 299 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
300 mc->real_vram_size = mc->aper_size; 300 mc->real_vram_size = mc->aper_size;
301 mc->mc_vram_size = mc->aper_size; 301 mc->mc_vram_size = mc->aper_size;
302 } 302 }
303 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 303 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
304 if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size) 304 if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
305 mc->real_vram_size = radeon_vram_limit; 305 mc->real_vram_size = radeon_vram_limit;
306 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", 306 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
307 mc->mc_vram_size >> 20, mc->vram_start, 307 mc->mc_vram_size >> 20, mc->vram_start,
308 mc->vram_end, mc->real_vram_size >> 20); 308 mc->vram_end, mc->real_vram_size >> 20);
309 } 309 }
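Worked example with hypothetical numbers: base = 0xE0000000, mc_vram_size = 1 GiB, aper_size = 256 MiB. The first check trips, since 1 GiB exceeds the 0x20000000 bytes left above the base in the 32-bit space, so both sizes are clamped to the 256 MiB aperture and vram_end lands at 0xEFFFFFFF.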
310 310
311 /** 311 /**
312 * radeon_gtt_location - try to find GTT location 312 * radeon_gtt_location - try to find GTT location
313 * @rdev: radeon device structure holding all necessary information 313 * @rdev: radeon device structure holding all necessary information
314 * @mc: memory controller structure holding memory information 314 * @mc: memory controller structure holding memory information
315 * 315 *
316 * Function will try to place GTT before or after VRAM. 316 * Function will try to place GTT before or after VRAM.
317 * 317 *
318 * If the GTT size is bigger than the space left then we adjust the GTT 318 * If the GTT size is bigger than the space left then we adjust the GTT
319 * size. Thus this function never fails. 319 * size. Thus this function never fails.
320 * 320 *
321 * FIXME: when reducing GTT size align new size on power of 2. 321 * FIXME: when reducing GTT size align new size on power of 2.
322 */ 322 */
323 void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) 323 void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
324 { 324 {
325 u64 size_af, size_bf; 325 u64 size_af, size_bf;
326 326
327 size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; 327 size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
328 size_bf = mc->vram_start & ~mc->gtt_base_align; 328 size_bf = mc->vram_start & ~mc->gtt_base_align;
329 if (size_bf > size_af) { 329 if (size_bf > size_af) {
330 if (mc->gtt_size > size_bf) { 330 if (mc->gtt_size > size_bf) {
331 dev_warn(rdev->dev, "limiting GTT\n"); 331 dev_warn(rdev->dev, "limiting GTT\n");
332 mc->gtt_size = size_bf; 332 mc->gtt_size = size_bf;
333 } 333 }
334 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size; 334 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
335 } else { 335 } else {
336 if (mc->gtt_size > size_af) { 336 if (mc->gtt_size > size_af) {
337 dev_warn(rdev->dev, "limiting GTT\n"); 337 dev_warn(rdev->dev, "limiting GTT\n");
338 mc->gtt_size = size_af; 338 mc->gtt_size = size_af;
339 } 339 }
340 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; 340 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
341 } 341 }
342 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; 342 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
343 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", 343 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
344 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); 344 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
345 } 345 }
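Worked example with hypothetical numbers (gtt_base_align = 0): with VRAM at 0x00000000 - 0x0FFFFFFF, size_bf = 0 and size_af = 0xF0000000, so the GTT goes after VRAM; a 512 MiB GTT gets gtt_start = 0x10000000 and gtt_end = 0x2FFFFFFF.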
346 346
347 /* 347 /*
348 * GPU helper functions. 348 * GPU helper functions.
349 */ 349 */
350 bool radeon_card_posted(struct radeon_device *rdev) 350 bool radeon_card_posted(struct radeon_device *rdev)
351 { 351 {
352 uint32_t reg; 352 uint32_t reg;
353 353
354 if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) 354 if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
355 return false; 355 return false;
356 356
357 /* first check CRTCs */ 357 /* first check CRTCs */
358 if (ASIC_IS_DCE41(rdev)) { 358 if (ASIC_IS_DCE41(rdev)) {
359 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | 359 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
360 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); 360 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
361 if (reg & EVERGREEN_CRTC_MASTER_EN) 361 if (reg & EVERGREEN_CRTC_MASTER_EN)
362 return true; 362 return true;
363 } else if (ASIC_IS_DCE4(rdev)) { 363 } else if (ASIC_IS_DCE4(rdev)) {
364 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | 364 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
365 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | 365 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
366 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | 366 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
367 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | 367 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
368 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | 368 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
369 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); 369 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
370 if (reg & EVERGREEN_CRTC_MASTER_EN) 370 if (reg & EVERGREEN_CRTC_MASTER_EN)
371 return true; 371 return true;
372 } else if (ASIC_IS_AVIVO(rdev)) { 372 } else if (ASIC_IS_AVIVO(rdev)) {
373 reg = RREG32(AVIVO_D1CRTC_CONTROL) | 373 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
374 RREG32(AVIVO_D2CRTC_CONTROL); 374 RREG32(AVIVO_D2CRTC_CONTROL);
375 if (reg & AVIVO_CRTC_EN) { 375 if (reg & AVIVO_CRTC_EN) {
376 return true; 376 return true;
377 } 377 }
378 } else { 378 } else {
379 reg = RREG32(RADEON_CRTC_GEN_CNTL) | 379 reg = RREG32(RADEON_CRTC_GEN_CNTL) |
380 RREG32(RADEON_CRTC2_GEN_CNTL); 380 RREG32(RADEON_CRTC2_GEN_CNTL);
381 if (reg & RADEON_CRTC_EN) { 381 if (reg & RADEON_CRTC_EN) {
382 return true; 382 return true;
383 } 383 }
384 } 384 }
385 385
386 /* then check MEM_SIZE, in case the crtcs are off */ 386 /* then check MEM_SIZE, in case the crtcs are off */
387 if (rdev->family >= CHIP_R600) 387 if (rdev->family >= CHIP_R600)
388 reg = RREG32(R600_CONFIG_MEMSIZE); 388 reg = RREG32(R600_CONFIG_MEMSIZE);
389 else 389 else
390 reg = RREG32(RADEON_CONFIG_MEMSIZE); 390 reg = RREG32(RADEON_CONFIG_MEMSIZE);
391 391
392 if (reg) 392 if (reg)
393 return true; 393 return true;
394 394
395 return false; 395 return false;
396 396
397 } 397 }
398 398
399 void radeon_update_bandwidth_info(struct radeon_device *rdev) 399 void radeon_update_bandwidth_info(struct radeon_device *rdev)
400 { 400 {
401 fixed20_12 a; 401 fixed20_12 a;
402 u32 sclk = rdev->pm.current_sclk; 402 u32 sclk = rdev->pm.current_sclk;
403 u32 mclk = rdev->pm.current_mclk; 403 u32 mclk = rdev->pm.current_mclk;
404 404
405 /* sclk/mclk in MHz */ 405 /* sclk/mclk in MHz */
406 a.full = dfixed_const(100); 406 a.full = dfixed_const(100);
407 rdev->pm.sclk.full = dfixed_const(sclk); 407 rdev->pm.sclk.full = dfixed_const(sclk);
408 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); 408 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
409 rdev->pm.mclk.full = dfixed_const(mclk); 409 rdev->pm.mclk.full = dfixed_const(mclk);
410 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); 410 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
411 411
412 if (rdev->flags & RADEON_IS_IGP) { 412 if (rdev->flags & RADEON_IS_IGP) {
413 a.full = dfixed_const(16); 413 a.full = dfixed_const(16);
414 /* core_bandwidth = sclk(MHz) * 16 */ 414 /* core_bandwidth = sclk(MHz) * 16 */
415 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); 415 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
416 } 416 }
417 } 417 }
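For example, assuming current_sclk is kept in 10 kHz units as elsewhere in the driver, a 400 MHz engine clock arrives as 40000, and dfixed_const(40000) divided by dfixed_const(100) stores 400.0 in the 20.12 fixed-point rdev->pm.sclk.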
418 418
419 bool radeon_boot_test_post_card(struct radeon_device *rdev) 419 bool radeon_boot_test_post_card(struct radeon_device *rdev)
420 { 420 {
421 if (radeon_card_posted(rdev)) 421 if (radeon_card_posted(rdev))
422 return true; 422 return true;
423 423
424 if (rdev->bios) { 424 if (rdev->bios) {
425 DRM_INFO("GPU not posted. posting now...\n"); 425 DRM_INFO("GPU not posted. posting now...\n");
426 if (rdev->is_atom_bios) 426 if (rdev->is_atom_bios)
427 atom_asic_init(rdev->mode_info.atom_context); 427 atom_asic_init(rdev->mode_info.atom_context);
428 else 428 else
429 radeon_combios_asic_init(rdev->ddev); 429 radeon_combios_asic_init(rdev->ddev);
430 return true; 430 return true;
431 } else { 431 } else {
432 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 432 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
433 return false; 433 return false;
434 } 434 }
435 } 435 }
436 436
437 int radeon_dummy_page_init(struct radeon_device *rdev) 437 int radeon_dummy_page_init(struct radeon_device *rdev)
438 { 438 {
439 if (rdev->dummy_page.page) 439 if (rdev->dummy_page.page)
440 return 0; 440 return 0;
441 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); 441 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
442 if (rdev->dummy_page.page == NULL) 442 if (rdev->dummy_page.page == NULL)
443 return -ENOMEM; 443 return -ENOMEM;
444 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page, 444 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
445 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 445 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
446 if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) { 446 if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
447 dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n"); 447 dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
448 __free_page(rdev->dummy_page.page); 448 __free_page(rdev->dummy_page.page);
449 rdev->dummy_page.page = NULL; 449 rdev->dummy_page.page = NULL;
450 return -ENOMEM; 450 return -ENOMEM;
451 } 451 }
452 return 0; 452 return 0;
453 } 453 }
454 454
455 void radeon_dummy_page_fini(struct radeon_device *rdev) 455 void radeon_dummy_page_fini(struct radeon_device *rdev)
456 { 456 {
457 if (rdev->dummy_page.page == NULL) 457 if (rdev->dummy_page.page == NULL)
458 return; 458 return;
459 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr, 459 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
460 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 460 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
461 __free_page(rdev->dummy_page.page); 461 __free_page(rdev->dummy_page.page);
462 rdev->dummy_page.page = NULL; 462 rdev->dummy_page.page = NULL;
463 } 463 }
464 464
465 465
466 /* ATOM accessor methods */ 466 /* ATOM accessor methods */
467 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) 467 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
468 { 468 {
469 struct radeon_device *rdev = info->dev->dev_private; 469 struct radeon_device *rdev = info->dev->dev_private;
470 uint32_t r; 470 uint32_t r;
471 471
472 r = rdev->pll_rreg(rdev, reg); 472 r = rdev->pll_rreg(rdev, reg);
473 return r; 473 return r;
474 } 474 }
475 475
476 static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val) 476 static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
477 { 477 {
478 struct radeon_device *rdev = info->dev->dev_private; 478 struct radeon_device *rdev = info->dev->dev_private;
479 479
480 rdev->pll_wreg(rdev, reg, val); 480 rdev->pll_wreg(rdev, reg, val);
481 } 481 }
482 482
483 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg) 483 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
484 { 484 {
485 struct radeon_device *rdev = info->dev->dev_private; 485 struct radeon_device *rdev = info->dev->dev_private;
486 uint32_t r; 486 uint32_t r;
487 487
488 r = rdev->mc_rreg(rdev, reg); 488 r = rdev->mc_rreg(rdev, reg);
489 return r; 489 return r;
490 } 490 }
491 491
492 static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) 492 static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
493 { 493 {
494 struct radeon_device *rdev = info->dev->dev_private; 494 struct radeon_device *rdev = info->dev->dev_private;
495 495
496 rdev->mc_wreg(rdev, reg, val); 496 rdev->mc_wreg(rdev, reg, val);
497 } 497 }
498 498
499 static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) 499 static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
500 { 500 {
501 struct radeon_device *rdev = info->dev->dev_private; 501 struct radeon_device *rdev = info->dev->dev_private;
502 502
503 WREG32(reg*4, val); 503 WREG32(reg*4, val);
504 } 504 }
505 505
506 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) 506 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
507 { 507 {
508 struct radeon_device *rdev = info->dev->dev_private; 508 struct radeon_device *rdev = info->dev->dev_private;
509 uint32_t r; 509 uint32_t r;
510 510
511 r = RREG32(reg*4); 511 r = RREG32(reg*4);
512 return r; 512 return r;
513 } 513 }
514 514
515 static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) 515 static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
516 { 516 {
517 struct radeon_device *rdev = info->dev->dev_private; 517 struct radeon_device *rdev = info->dev->dev_private;
518 518
519 WREG32_IO(reg*4, val); 519 WREG32_IO(reg*4, val);
520 } 520 }
521 521
522 static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) 522 static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
523 { 523 {
524 struct radeon_device *rdev = info->dev->dev_private; 524 struct radeon_device *rdev = info->dev->dev_private;
525 uint32_t r; 525 uint32_t r;
526 526
527 r = RREG32_IO(reg*4); 527 r = RREG32_IO(reg*4);
528 return r; 528 return r;
529 } 529 }
530 530
531 int radeon_atombios_init(struct radeon_device *rdev) 531 int radeon_atombios_init(struct radeon_device *rdev)
532 { 532 {
533 struct card_info *atom_card_info = 533 struct card_info *atom_card_info =
534 kzalloc(sizeof(struct card_info), GFP_KERNEL); 534 kzalloc(sizeof(struct card_info), GFP_KERNEL);
535 535
536 if (!atom_card_info) 536 if (!atom_card_info)
537 return -ENOMEM; 537 return -ENOMEM;
538 538
539 rdev->mode_info.atom_card_info = atom_card_info; 539 rdev->mode_info.atom_card_info = atom_card_info;
540 atom_card_info->dev = rdev->ddev; 540 atom_card_info->dev = rdev->ddev;
541 atom_card_info->reg_read = cail_reg_read; 541 atom_card_info->reg_read = cail_reg_read;
542 atom_card_info->reg_write = cail_reg_write; 542 atom_card_info->reg_write = cail_reg_write;
543 /* needed for iio ops */ 543 /* needed for iio ops */
544 if (rdev->rio_mem) { 544 if (rdev->rio_mem) {
545 atom_card_info->ioreg_read = cail_ioreg_read; 545 atom_card_info->ioreg_read = cail_ioreg_read;
546 atom_card_info->ioreg_write = cail_ioreg_write; 546 atom_card_info->ioreg_write = cail_ioreg_write;
547 } else { 547 } else {
548 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n"); 548 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
549 atom_card_info->ioreg_read = cail_reg_read; 549 atom_card_info->ioreg_read = cail_reg_read;
550 atom_card_info->ioreg_write = cail_reg_write; 550 atom_card_info->ioreg_write = cail_reg_write;
551 } 551 }
552 atom_card_info->mc_read = cail_mc_read; 552 atom_card_info->mc_read = cail_mc_read;
553 atom_card_info->mc_write = cail_mc_write; 553 atom_card_info->mc_write = cail_mc_write;
554 atom_card_info->pll_read = cail_pll_read; 554 atom_card_info->pll_read = cail_pll_read;
555 atom_card_info->pll_write = cail_pll_write; 555 atom_card_info->pll_write = cail_pll_write;
556 556
557 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); 557 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
558 mutex_init(&rdev->mode_info.atom_context->mutex); 558 mutex_init(&rdev->mode_info.atom_context->mutex);
559 radeon_atom_initialize_bios_scratch_regs(rdev->ddev); 559 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
560 atom_allocate_fb_scratch(rdev->mode_info.atom_context); 560 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
561 return 0; 561 return 0;
562 } 562 }
563 563
564 void radeon_atombios_fini(struct radeon_device *rdev) 564 void radeon_atombios_fini(struct radeon_device *rdev)
565 { 565 {
566 if (rdev->mode_info.atom_context) { 566 if (rdev->mode_info.atom_context) {
567 kfree(rdev->mode_info.atom_context->scratch); 567 kfree(rdev->mode_info.atom_context->scratch);
568 kfree(rdev->mode_info.atom_context); 568 kfree(rdev->mode_info.atom_context);
569 } 569 }
570 kfree(rdev->mode_info.atom_card_info); 570 kfree(rdev->mode_info.atom_card_info);
571 } 571 }
572 572
573 int radeon_combios_init(struct radeon_device *rdev) 573 int radeon_combios_init(struct radeon_device *rdev)
574 { 574 {
575 radeon_combios_initialize_bios_scratch_regs(rdev->ddev); 575 radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
576 return 0; 576 return 0;
577 } 577 }
578 578
579 void radeon_combios_fini(struct radeon_device *rdev) 579 void radeon_combios_fini(struct radeon_device *rdev)
580 { 580 {
581 } 581 }
582 582
583 /* if we get transitioned to only one device, take VGA back */ 583 /* if we get transitioned to only one device, take VGA back */
584 static unsigned int radeon_vga_set_decode(void *cookie, bool state) 584 static unsigned int radeon_vga_set_decode(void *cookie, bool state)
585 { 585 {
586 struct radeon_device *rdev = cookie; 586 struct radeon_device *rdev = cookie;
587 radeon_vga_set_state(rdev, state); 587 radeon_vga_set_state(rdev, state);
588 if (state) 588 if (state)
589 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 589 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
590 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 590 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
591 else 591 else
592 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 592 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
593 } 593 }
594 594
595 void radeon_check_arguments(struct radeon_device *rdev) 595 void radeon_check_arguments(struct radeon_device *rdev)
596 { 596 {
597 /* vramlimit must be a power of two */ 597 /* vramlimit must be a power of two */
598 switch (radeon_vram_limit) { 598 switch (radeon_vram_limit) {
599 case 0: 599 case 0:
600 case 4: 600 case 4:
601 case 8: 601 case 8:
602 case 16: 602 case 16:
603 case 32: 603 case 32:
604 case 64: 604 case 64:
605 case 128: 605 case 128:
606 case 256: 606 case 256:
607 case 512: 607 case 512:
608 case 1024: 608 case 1024:
609 case 2048: 609 case 2048:
610 case 4096: 610 case 4096:
611 break; 611 break;
612 default: 612 default:
613 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n", 613 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
614 radeon_vram_limit); 614 radeon_vram_limit);
615 radeon_vram_limit = 0; 615 radeon_vram_limit = 0;
616 break; 616 break;
617 } 617 }
618 radeon_vram_limit = radeon_vram_limit << 20; 618 radeon_vram_limit = radeon_vram_limit << 20;
619 /* gtt size must be a power of two and greater than or equal to 32M */ 619 /* gtt size must be a power of two and greater than or equal to 32M */
620 switch (radeon_gart_size) { 620 switch (radeon_gart_size) {
621 case 4: 621 case 4:
622 case 8: 622 case 8:
623 case 16: 623 case 16:
624 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n", 624 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
625 radeon_gart_size); 625 radeon_gart_size);
626 radeon_gart_size = 512; 626 radeon_gart_size = 512;
627 break; 627 break;
628 case 32: 628 case 32:
629 case 64: 629 case 64:
630 case 128: 630 case 128:
631 case 256: 631 case 256:
632 case 512: 632 case 512:
633 case 1024: 633 case 1024:
634 case 2048: 634 case 2048:
635 case 4096: 635 case 4096:
636 break; 636 break;
637 default: 637 default:
638 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", 638 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
639 radeon_gart_size); 639 radeon_gart_size);
640 radeon_gart_size = 512; 640 radeon_gart_size = 512;
641 break; 641 break;
642 } 642 }
643 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 643 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
644 /* AGP mode can only be -1, 0, 1, 2, 4, 8 */ 644 /* AGP mode can only be -1, 0, 1, 2, 4, 8 */
645 switch (radeon_agpmode) { 645 switch (radeon_agpmode) {
646 case -1: 646 case -1:
647 case 0: 647 case 0:
648 case 1: 648 case 1:
649 case 2: 649 case 2:
650 case 4: 650 case 4:
651 case 8: 651 case 8:
652 break; 652 break;
653 default: 653 default:
654 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: " 654 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
655 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode); 655 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
656 radeon_agpmode = 0; 656 radeon_agpmode = 0;
657 break; 657 break;
658 } 658 }
659 } 659 }
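The switch statements above are open-coded power-of-two checks with bounds. For comparison only (not what the driver uses), the GART half could be expressed with is_power_of_2() from <linux/log2.h>; check_gart_size() is a hypothetical name:

#include <linux/log2.h>

/* Illustrative only: equivalent bounds + power-of-two validation. */
static void check_gart_size(struct radeon_device *rdev)
{
	if (!is_power_of_2(radeon_gart_size) ||
	    radeon_gart_size < 32 || radeon_gart_size > 4096) {
		dev_warn(rdev->dev, "gart size (%d) invalid, forcing to 512M\n",
			 radeon_gart_size);
		radeon_gart_size = 512;
	}
	/* cast avoids 32-bit overflow for the 4096M case */
	rdev->mc.gtt_size = (u64)radeon_gart_size * 1024 * 1024;
}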
660 660
661 static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) 661 static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
662 { 662 {
663 struct drm_device *dev = pci_get_drvdata(pdev); 663 struct drm_device *dev = pci_get_drvdata(pdev);
664 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 664 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
665 if (state == VGA_SWITCHEROO_ON) { 665 if (state == VGA_SWITCHEROO_ON) {
666 printk(KERN_INFO "radeon: switched on\n"); 666 printk(KERN_INFO "radeon: switched on\n");
667 /* don't suspend or resume card normally */ 667 /* don't suspend or resume card normally */
668 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 668 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
669 radeon_resume_kms(dev); 669 radeon_resume_kms(dev);
670 dev->switch_power_state = DRM_SWITCH_POWER_ON; 670 dev->switch_power_state = DRM_SWITCH_POWER_ON;
671 drm_kms_helper_poll_enable(dev); 671 drm_kms_helper_poll_enable(dev);
672 } else { 672 } else {
673 printk(KERN_INFO "radeon: switched off\n"); 673 printk(KERN_INFO "radeon: switched off\n");
674 drm_kms_helper_poll_disable(dev); 674 drm_kms_helper_poll_disable(dev);
675 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 675 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
676 radeon_suspend_kms(dev, pmm); 676 radeon_suspend_kms(dev, pmm);
677 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 677 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
678 } 678 }
679 } 679 }
680 680
681 static bool radeon_switcheroo_can_switch(struct pci_dev *pdev) 681 static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
682 { 682 {
683 struct drm_device *dev = pci_get_drvdata(pdev); 683 struct drm_device *dev = pci_get_drvdata(pdev);
684 bool can_switch; 684 bool can_switch;
685 685
686 spin_lock(&dev->count_lock); 686 spin_lock(&dev->count_lock);
687 can_switch = (dev->open_count == 0); 687 can_switch = (dev->open_count == 0);
688 spin_unlock(&dev->count_lock); 688 spin_unlock(&dev->count_lock);
689 return can_switch; 689 return can_switch;
690 } 690 }
691 691
692 692
693 int radeon_device_init(struct radeon_device *rdev, 693 int radeon_device_init(struct radeon_device *rdev,
694 struct drm_device *ddev, 694 struct drm_device *ddev,
695 struct pci_dev *pdev, 695 struct pci_dev *pdev,
696 uint32_t flags) 696 uint32_t flags)
697 { 697 {
698 int r, i; 698 int r, i;
699 int dma_bits; 699 int dma_bits;
700 700
701 rdev->shutdown = false; 701 rdev->shutdown = false;
702 rdev->dev = &pdev->dev; 702 rdev->dev = &pdev->dev;
703 rdev->ddev = ddev; 703 rdev->ddev = ddev;
704 rdev->pdev = pdev; 704 rdev->pdev = pdev;
705 rdev->flags = flags; 705 rdev->flags = flags;
706 rdev->family = flags & RADEON_FAMILY_MASK; 706 rdev->family = flags & RADEON_FAMILY_MASK;
707 rdev->is_atom_bios = false; 707 rdev->is_atom_bios = false;
708 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT; 708 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
709 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 709 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
710 rdev->gpu_lockup = false; 710 rdev->gpu_lockup = false;
711 rdev->accel_working = false; 711 rdev->accel_working = false;
712 712
713 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", 713 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
714 radeon_family_name[rdev->family], pdev->vendor, pdev->device, 714 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
715 pdev->subsystem_vendor, pdev->subsystem_device); 715 pdev->subsystem_vendor, pdev->subsystem_device);
716 716
717 /* mutex initializations are all done here so we 717 /* mutex initializations are all done here so we
718 * can call these functions again without locking issues */ 718 * can call these functions again without locking issues */
719 mutex_init(&rdev->cs_mutex); 719 radeon_mutex_init(&rdev->cs_mutex);
720 mutex_init(&rdev->ib_pool.mutex); 720 mutex_init(&rdev->ib_pool.mutex);
721 mutex_init(&rdev->cp.mutex); 721 mutex_init(&rdev->cp.mutex);
722 mutex_init(&rdev->dc_hw_i2c_mutex); 722 mutex_init(&rdev->dc_hw_i2c_mutex);
723 if (rdev->family >= CHIP_R600) 723 if (rdev->family >= CHIP_R600)
724 spin_lock_init(&rdev->ih.lock); 724 spin_lock_init(&rdev->ih.lock);
725 mutex_init(&rdev->gem.mutex); 725 mutex_init(&rdev->gem.mutex);
726 mutex_init(&rdev->pm.mutex); 726 mutex_init(&rdev->pm.mutex);
727 mutex_init(&rdev->vram_mutex); 727 mutex_init(&rdev->vram_mutex);
728 rwlock_init(&rdev->fence_drv.lock); 728 rwlock_init(&rdev->fence_drv.lock);
729 INIT_LIST_HEAD(&rdev->gem.objects); 729 INIT_LIST_HEAD(&rdev->gem.objects);
730 init_waitqueue_head(&rdev->irq.vblank_queue); 730 init_waitqueue_head(&rdev->irq.vblank_queue);
731 init_waitqueue_head(&rdev->irq.idle_queue); 731 init_waitqueue_head(&rdev->irq.idle_queue);
732 732
733 /* Set asic functions */ 733 /* Set asic functions */
734 r = radeon_asic_init(rdev); 734 r = radeon_asic_init(rdev);
735 if (r) 735 if (r)
736 return r; 736 return r;
737 radeon_check_arguments(rdev); 737 radeon_check_arguments(rdev);
738 738
739 /* all of the newer IGP chips have an internal gart 739 /* all of the newer IGP chips have an internal gart
740 * However some rs4xx report as AGP, so remove that here. 740 * However some rs4xx report as AGP, so remove that here.
741 */ 741 */
742 if ((rdev->family >= CHIP_RS400) && 742 if ((rdev->family >= CHIP_RS400) &&
743 (rdev->flags & RADEON_IS_IGP)) { 743 (rdev->flags & RADEON_IS_IGP)) {
744 rdev->flags &= ~RADEON_IS_AGP; 744 rdev->flags &= ~RADEON_IS_AGP;
745 } 745 }
746 746
747 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { 747 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
748 radeon_agp_disable(rdev); 748 radeon_agp_disable(rdev);
749 } 749 }
750 750
751 /* set DMA mask + need_dma32 flags. 751 /* set DMA mask + need_dma32 flags.
752 * PCIE - can handle 40-bits. 752 * PCIE - can handle 40-bits.
753 * IGP - can handle 40-bits 753 * IGP - can handle 40-bits
754 * AGP - generally dma32 is safest 754 * AGP - generally dma32 is safest
755 * PCI - dma32 for legacy pci gart, 40 bits on newer asics 755 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
756 */ 756 */
757 rdev->need_dma32 = false; 757 rdev->need_dma32 = false;
758 if (rdev->flags & RADEON_IS_AGP) 758 if (rdev->flags & RADEON_IS_AGP)
759 rdev->need_dma32 = true; 759 rdev->need_dma32 = true;
760 if ((rdev->flags & RADEON_IS_PCI) && 760 if ((rdev->flags & RADEON_IS_PCI) &&
761 (rdev->family < CHIP_RS400)) 761 (rdev->family < CHIP_RS400))
762 rdev->need_dma32 = true; 762 rdev->need_dma32 = true;
763 763
764 dma_bits = rdev->need_dma32 ? 32 : 40; 764 dma_bits = rdev->need_dma32 ? 32 : 40;
765 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); 765 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
766 if (r) { 766 if (r) {
767 rdev->need_dma32 = true; 767 rdev->need_dma32 = true;
768 printk(KERN_WARNING "radeon: No suitable DMA available.\n"); 768 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
769 } 769 }
770 770
771 /* Registers mapping */ 771 /* Registers mapping */
772 /* TODO: block userspace mapping of io register */ 772 /* TODO: block userspace mapping of io register */
773 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2); 773 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
774 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2); 774 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
775 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size); 775 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
776 if (rdev->rmmio == NULL) { 776 if (rdev->rmmio == NULL) {
777 return -ENOMEM; 777 return -ENOMEM;
778 } 778 }
779 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); 779 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
780 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); 780 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
781 781
782 /* io port mapping */ 782 /* io port mapping */
783 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 783 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
784 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) { 784 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
785 rdev->rio_mem_size = pci_resource_len(rdev->pdev, i); 785 rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
786 rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size); 786 rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
787 break; 787 break;
788 } 788 }
789 } 789 }
790 if (rdev->rio_mem == NULL) 790 if (rdev->rio_mem == NULL)
791 DRM_ERROR("Unable to find PCI I/O BAR\n"); 791 DRM_ERROR("Unable to find PCI I/O BAR\n");
792 792
793 /* if we have more than one VGA card, disable the radeon VGA resources */ 793 /* if we have more than one VGA card, disable the radeon VGA resources */
794 /* this will fail for cards that aren't VGA class devices; just 794 /* this will fail for cards that aren't VGA class devices; just
795 * ignore it */ 795 * ignore it */
796 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 796 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
797 vga_switcheroo_register_client(rdev->pdev, 797 vga_switcheroo_register_client(rdev->pdev,
798 radeon_switcheroo_set_state, 798 radeon_switcheroo_set_state,
799 NULL, 799 NULL,
800 radeon_switcheroo_can_switch); 800 radeon_switcheroo_can_switch);
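
The two switcheroo callbacks registered here tie into the KMS suspend/resume entry points further down in this file. A sketch of their shape, abridged from the real implementations (logging and fb poll handling omitted):

static void radeon_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
	if (state == VGA_SWITCHEROO_ON) {
		radeon_resume_kms(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* refuse to switch away while the device node is open */
	return dev->open_count == 0;
}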
801 801
802 r = radeon_init(rdev); 802 r = radeon_init(rdev);
803 if (r) 803 if (r)
804 return r; 804 return r;
805 805
806 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { 806 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
807 /* Acceleration not working on AGP card, try again 807 /* Acceleration not working on AGP card, try again
808 * with fallback to PCI or PCIE GART 808 * with fallback to PCI or PCIE GART
809 */ 809 */
810 radeon_asic_reset(rdev); 810 radeon_asic_reset(rdev);
811 radeon_fini(rdev); 811 radeon_fini(rdev);
812 radeon_agp_disable(rdev); 812 radeon_agp_disable(rdev);
813 r = radeon_init(rdev); 813 r = radeon_init(rdev);
814 if (r) 814 if (r)
815 return r; 815 return r;
816 } 816 }
817 if (radeon_testing) { 817 if (radeon_testing) {
818 radeon_test_moves(rdev); 818 radeon_test_moves(rdev);
819 } 819 }
820 if (radeon_benchmarking) { 820 if (radeon_benchmarking) {
821 radeon_benchmark(rdev, radeon_benchmarking); 821 radeon_benchmark(rdev, radeon_benchmarking);
822 } 822 }
823 return 0; 823 return 0;
824 } 824 }
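
For context, radeon_device_init() is invoked from the KMS load hook in radeon_kms.c, which allocates rdev and stashes it in dev->dev_private. An abridged sketch of that caller (modeset setup omitted):

int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r;

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL)
		return -ENOMEM;
	dev->dev_private = (void *)rdev;

	/* flags carry the chip family and the AGP/PCI/PCIE bus bits */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
	return r;
}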
825 825
826 void radeon_device_fini(struct radeon_device *rdev) 826 void radeon_device_fini(struct radeon_device *rdev)
827 { 827 {
828 DRM_INFO("radeon: finishing device.\n"); 828 DRM_INFO("radeon: finishing device.\n");
829 rdev->shutdown = true; 829 rdev->shutdown = true;
830 /* evict vram memory */ 830 /* evict vram memory */
831 radeon_bo_evict_vram(rdev); 831 radeon_bo_evict_vram(rdev);
832 radeon_fini(rdev); 832 radeon_fini(rdev);
833 vga_switcheroo_unregister_client(rdev->pdev); 833 vga_switcheroo_unregister_client(rdev->pdev);
834 vga_client_register(rdev->pdev, NULL, NULL, NULL); 834 vga_client_register(rdev->pdev, NULL, NULL, NULL);
835 if (rdev->rio_mem) 835 if (rdev->rio_mem)
836 pci_iounmap(rdev->pdev, rdev->rio_mem); 836 pci_iounmap(rdev->pdev, rdev->rio_mem);
837 rdev->rio_mem = NULL; 837 rdev->rio_mem = NULL;
838 iounmap(rdev->rmmio); 838 iounmap(rdev->rmmio);
839 rdev->rmmio = NULL; 839 rdev->rmmio = NULL;
840 } 840 }
841 841
842 842
843 /* 843 /*
844 * Suspend & resume. 844 * Suspend & resume.
845 */ 845 */
846 int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) 846 int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
847 { 847 {
848 struct radeon_device *rdev; 848 struct radeon_device *rdev;
849 struct drm_crtc *crtc; 849 struct drm_crtc *crtc;
850 struct drm_connector *connector; 850 struct drm_connector *connector;
851 int r; 851 int r;
852 852
853 if (dev == NULL || dev->dev_private == NULL) { 853 if (dev == NULL || dev->dev_private == NULL) {
854 return -ENODEV; 854 return -ENODEV;
855 } 855 }
856 if (state.event == PM_EVENT_PRETHAW) { 856 if (state.event == PM_EVENT_PRETHAW) {
857 return 0; 857 return 0;
858 } 858 }
859 rdev = dev->dev_private; 859 rdev = dev->dev_private;
860 860
861 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 861 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
862 return 0; 862 return 0;
863 863
864 /* turn off display hw */ 864 /* turn off display hw */
865 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 865 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
866 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 866 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
867 } 867 }
868 868
869 /* unpin the front buffers */ 869 /* unpin the front buffers */
870 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 870 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
871 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); 871 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
872 struct radeon_bo *robj; 872 struct radeon_bo *robj;
873 873
874 if (rfb == NULL || rfb->obj == NULL) { 874 if (rfb == NULL || rfb->obj == NULL) {
875 continue; 875 continue;
876 } 876 }
877 robj = gem_to_radeon_bo(rfb->obj); 877 robj = gem_to_radeon_bo(rfb->obj);
878 /* don't unpin kernel fb objects */ 878 /* don't unpin kernel fb objects */
879 if (!radeon_fbdev_robj_is_fb(rdev, robj)) { 879 if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
880 r = radeon_bo_reserve(robj, false); 880 r = radeon_bo_reserve(robj, false);
881 if (r == 0) { 881 if (r == 0) {
882 radeon_bo_unpin(robj); 882 radeon_bo_unpin(robj);
883 radeon_bo_unreserve(robj); 883 radeon_bo_unreserve(robj);
884 } 884 }
885 } 885 }
886 } 886 }
887 /* evict vram memory */ 887 /* evict vram memory */
888 radeon_bo_evict_vram(rdev); 888 radeon_bo_evict_vram(rdev);
889 /* wait for gpu to finish processing current batch */ 889 /* wait for gpu to finish processing current batch */
890 radeon_fence_wait_last(rdev); 890 radeon_fence_wait_last(rdev);
891 891
892 radeon_save_bios_scratch_regs(rdev); 892 radeon_save_bios_scratch_regs(rdev);
893 893
894 radeon_pm_suspend(rdev); 894 radeon_pm_suspend(rdev);
895 radeon_suspend(rdev); 895 radeon_suspend(rdev);
896 radeon_hpd_fini(rdev); 896 radeon_hpd_fini(rdev);
897 /* evict remaining vram memory */ 897 /* evict remaining vram memory */
898 radeon_bo_evict_vram(rdev); 898 radeon_bo_evict_vram(rdev);
899 899
900 radeon_agp_suspend(rdev); 900 radeon_agp_suspend(rdev);
901 901
902 pci_save_state(dev->pdev); 902 pci_save_state(dev->pdev);
903 if (state.event == PM_EVENT_SUSPEND) { 903 if (state.event == PM_EVENT_SUSPEND) {
904 /* Shut down the device */ 904 /* Shut down the device */
905 pci_disable_device(dev->pdev); 905 pci_disable_device(dev->pdev);
906 pci_set_power_state(dev->pdev, PCI_D3hot); 906 pci_set_power_state(dev->pdev, PCI_D3hot);
907 } 907 }
908 console_lock(); 908 console_lock();
909 radeon_fbdev_set_suspend(rdev, 1); 909 radeon_fbdev_set_suspend(rdev, 1);
910 console_unlock(); 910 console_unlock();
911 return 0; 911 return 0;
912 } 912 }
913 913
914 int radeon_resume_kms(struct drm_device *dev) 914 int radeon_resume_kms(struct drm_device *dev)
915 { 915 {
916 struct drm_connector *connector; 916 struct drm_connector *connector;
917 struct radeon_device *rdev = dev->dev_private; 917 struct radeon_device *rdev = dev->dev_private;
918 918
919 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 919 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
920 return 0; 920 return 0;
921 921
922 console_lock(); 922 console_lock();
923 pci_set_power_state(dev->pdev, PCI_D0); 923 pci_set_power_state(dev->pdev, PCI_D0);
924 pci_restore_state(dev->pdev); 924 pci_restore_state(dev->pdev);
925 if (pci_enable_device(dev->pdev)) { 925 if (pci_enable_device(dev->pdev)) {
926 console_unlock(); 926 console_unlock();
927 return -1; 927 return -1;
928 } 928 }
929 pci_set_master(dev->pdev); 929 pci_set_master(dev->pdev);
930 /* resume AGP if in use */ 930 /* resume AGP if in use */
931 radeon_agp_resume(rdev); 931 radeon_agp_resume(rdev);
932 radeon_resume(rdev); 932 radeon_resume(rdev);
933 radeon_pm_resume(rdev); 933 radeon_pm_resume(rdev);
934 radeon_restore_bios_scratch_regs(rdev); 934 radeon_restore_bios_scratch_regs(rdev);
935 935
936 radeon_fbdev_set_suspend(rdev, 0); 936 radeon_fbdev_set_suspend(rdev, 0);
937 console_unlock(); 937 console_unlock();
938 938
939 /* init dig PHYs */ 939 /* init dig PHYs */
940 if (rdev->is_atom_bios) 940 if (rdev->is_atom_bios)
941 radeon_atom_encoder_init(rdev); 941 radeon_atom_encoder_init(rdev);
942 /* reset hpd state */ 942 /* reset hpd state */
943 radeon_hpd_init(rdev); 943 radeon_hpd_init(rdev);
944 /* blat the mode back in */ 944 /* blat the mode back in */
945 drm_helper_resume_force_mode(dev); 945 drm_helper_resume_force_mode(dev);
946 /* turn on display hw */ 946 /* turn on display hw */
947 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 947 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
948 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 948 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
949 } 949 }
950 return 0; 950 return 0;
951 } 951 }
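
Both entry points are hooked up through thin PCI driver wrappers in radeon_drv.c (the driver predates dev_pm_ops here); a sketch of that glue:

static int radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	return radeon_suspend_kms(dev, state);
}

static int radeon_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	return radeon_resume_kms(dev);
}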
952 952
953 int radeon_gpu_reset(struct radeon_device *rdev) 953 int radeon_gpu_reset(struct radeon_device *rdev)
954 { 954 {
955 int r; 955 int r;
956 int resched; 956 int resched;
957 957
958 /* Prevent CS ioctl from interfering */
959 radeon_mutex_lock(&rdev->cs_mutex);
960
958 radeon_save_bios_scratch_regs(rdev); 961 radeon_save_bios_scratch_regs(rdev);
959 /* block TTM */ 962 /* block TTM */
960 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 963 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
961 radeon_suspend(rdev); 964 radeon_suspend(rdev);
962 965
963 r = radeon_asic_reset(rdev); 966 r = radeon_asic_reset(rdev);
964 if (!r) { 967 if (!r) {
965 dev_info(rdev->dev, "GPU reset succeeded\n"); 968 dev_info(rdev->dev, "GPU reset succeeded\n");
966 radeon_resume(rdev); 969 radeon_resume(rdev);
967 radeon_restore_bios_scratch_regs(rdev); 970 radeon_restore_bios_scratch_regs(rdev);
968 drm_helper_resume_force_mode(rdev->ddev); 971 drm_helper_resume_force_mode(rdev->ddev);
969 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 972 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
970 return 0;
971 } 973 }
972 /* bad news, how do we tell userspace? */ 974
973 dev_info(rdev->dev, "GPU reset failed\n"); 975 radeon_mutex_unlock(&rdev->cs_mutex);
976
977 if (r) {
978 /* bad news, how do we tell userspace? */
979 dev_info(rdev->dev, "GPU reset failed\n");
980 }
981
974 return r; 982 return r;
975 } 983 }
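
This is the heart of the fix: the reset path now takes cs_mutex itself, and the CS ioctl may already be holding it, so the lock has to tolerate being taken twice by the same process. The radeon_mutex helpers this commit adds to radeon.h achieve that by tracking the owning task and a nesting level on top of a plain mutex, roughly:

struct radeon_mutex {
	struct mutex		mutex;
	struct task_struct	*owner;
	int			level;
};

static inline void radeon_mutex_init(struct radeon_mutex *mutex)
{
	mutex_init(&mutex->mutex);
	mutex->owner = NULL;
	mutex->level = 0;
}

static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
{
	if (mutex_trylock(&mutex->mutex)) {
		/* The mutex was unlocked before, so it's ours now */
		mutex->owner = current;
	} else if (mutex->owner != current) {
		/* Another process locked the mutex, take it */
		mutex_lock(&mutex->mutex);
		mutex->owner = current;
	}
	/* Otherwise the mutex was already locked by this process */

	mutex->level++;
}

static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
{
	if (--mutex->level > 0)
		return;

	mutex->owner = NULL;
	mutex_unlock(&mutex->mutex);
}

With this, a reset triggered from inside the CS ioctl simply nests to level 2 instead of deadlocking, while a reset triggered from any other path blocks until in-flight CS submissions drain and keeps new ones out for the duration.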
976 984
977 985
978 /* 986 /*
979 * Debugfs 987 * Debugfs
980 */ 988 */
981 struct radeon_debugfs { 989 struct radeon_debugfs {
982 struct drm_info_list *files; 990 struct drm_info_list *files;
983 unsigned num_files; 991 unsigned num_files;
984 }; 992 };
985 static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_COMPONENTS]; 993 static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
986 static unsigned _radeon_debugfs_count = 0; 994 static unsigned _radeon_debugfs_count = 0;
987 995
988 int radeon_debugfs_add_files(struct radeon_device *rdev, 996 int radeon_debugfs_add_files(struct radeon_device *rdev,
989 struct drm_info_list *files, 997 struct drm_info_list *files,
990 unsigned nfiles) 998 unsigned nfiles)
991 { 999 {
992 unsigned i; 1000 unsigned i;
993 1001
994 for (i = 0; i < _radeon_debugfs_count; i++) { 1002 for (i = 0; i < _radeon_debugfs_count; i++) {
995 if (_radeon_debugfs[i].files == files) { 1003 if (_radeon_debugfs[i].files == files) {
996 /* Already registered */ 1004 /* Already registered */
997 return 0; 1005 return 0;
998 } 1006 }
999 } 1007 }
1000 1008
1001 i = _radeon_debugfs_count + 1; 1009 i = _radeon_debugfs_count + 1;
1002 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) { 1010 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1003 DRM_ERROR("Reached maximum number of debugfs components.\n"); 1011 DRM_ERROR("Reached maximum number of debugfs components.\n");
1004 DRM_ERROR("Report so we increase " 1012 DRM_ERROR("Report so we increase "
1005 "RADEON_DEBUGFS_MAX_COMPONENTS.\n"); 1013 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
1006 return -EINVAL; 1014 return -EINVAL;
1007 } 1015 }
1008 _radeon_debugfs[_radeon_debugfs_count].files = files; 1016 _radeon_debugfs[_radeon_debugfs_count].files = files;
1009 _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles; 1017 _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
1010 _radeon_debugfs_count = i; 1018 _radeon_debugfs_count = i;
1011 #if defined(CONFIG_DEBUG_FS) 1019 #if defined(CONFIG_DEBUG_FS)
1012 drm_debugfs_create_files(files, nfiles, 1020 drm_debugfs_create_files(files, nfiles,
1013 rdev->ddev->control->debugfs_root, 1021 rdev->ddev->control->debugfs_root,
1014 rdev->ddev->control); 1022 rdev->ddev->control);
1015 drm_debugfs_create_files(files, nfiles, 1023 drm_debugfs_create_files(files, nfiles,
1016 rdev->ddev->primary->debugfs_root, 1024 rdev->ddev->primary->debugfs_root,
1017 rdev->ddev->primary); 1025 rdev->ddev->primary);
1018 #endif 1026 #endif
1019 return 0; 1027 return 0;
1020 } 1028 }
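
Callers hand in a static drm_info_list array; thanks to the duplicate check above, registering the same array twice is a no-op. A hypothetical usage sketch (the file name and show callback are made up for illustration):

static int radeon_example_info(struct seq_file *m, void *data)
{
	seq_printf(m, "hello from radeon debugfs\n");
	return 0;
}

static struct drm_info_list radeon_example_list[] = {
	{ "radeon_example_info", radeon_example_info, 0, NULL },
};

static int radeon_example_debugfs_init(struct radeon_device *rdev)
{
	/* typically called once from a component's init function */
	return radeon_debugfs_add_files(rdev, radeon_example_list,
					ARRAY_SIZE(radeon_example_list));
}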
1021 1029
1022 #if defined(CONFIG_DEBUG_FS) 1030 #if defined(CONFIG_DEBUG_FS)
1023 int radeon_debugfs_init(struct drm_minor *minor) 1031 int radeon_debugfs_init(struct drm_minor *minor)
1024 { 1032 {
1025 return 0; 1033 return 0;
1026 } 1034 }
1027 1035
1028 void radeon_debugfs_cleanup(struct drm_minor *minor) 1036 void radeon_debugfs_cleanup(struct drm_minor *minor)
1029 { 1037 {
1030 unsigned i; 1038 unsigned i;
1031 1039
1032 for (i = 0; i < _radeon_debugfs_count; i++) { 1040 for (i = 0; i < _radeon_debugfs_count; i++) {
1033 drm_debugfs_remove_files(_radeon_debugfs[i].files, 1041 drm_debugfs_remove_files(_radeon_debugfs[i].files,
1034 _radeon_debugfs[i].num_files, minor); 1042 _radeon_debugfs[i].num_files, minor);
1035 } 1043 }
1036 } 1044 }
1037 #endif 1045 #endif