Commit 8ea180f1c7ec137310ea2e66300485dbda93baad

Authored by Peilin Ye
Committed by Greg Kroah-Hartman
1 parent f1b4bdde2b

drm/amdgpu: Prevent kernel-infoleak in amdgpu_info_ioctl()

commit 543e8669ed9bfb30545fd52bc0e047ca4df7fb31 upstream.

The compiler leaves a 4-byte hole near the end of `dev_info`, causing
amdgpu_info_ioctl() to copy uninitialized kernel stack memory to userspace
when `size` is greater than 356.
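
To see how such a hole arises, consider a minimal sketch in user space
(an illustrative `struct sketch`, not the real drm_amdgpu_info_device
layout): a struct whose widest member needs 8-byte alignment but whose
last member is only 4 bytes wide gets 4 bytes of tail padding, and a raw
copy of sizeof() bytes includes those padding bytes:

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative only: 'big' forces 8-byte alignment for the whole
	 * struct, so 4 bytes of tail padding follow 'small'. A bulk copy
	 * of sizeof(struct sketch) bytes (memcpy, copy_to_user, ...)
	 * copies that padding verbatim. */
	struct sketch {
		uint64_t big;
		uint32_t small;
		/* 4 bytes of tail padding live here */
	};

	int main(void)
	{
		printf("members: %zu bytes, sizeof: %zu bytes\n",
		       sizeof(uint64_t) + sizeof(uint32_t),	/* 12 */
		       sizeof(struct sketch));			/* 16 */
		return 0;
	}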

In 2015 we tried to fix this issue by doing `= {};` on `dev_info`, which
unfortunately is not guaranteed to initialize that 4-byte hole. Fix it by
using memset() instead.
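
A user-space sketch of the difference, reusing the hypothetical `struct
sketch` above (`= {}` is the GNU C empty initializer the 2015 fix used):
an initializer zeroes the named members, but the C standard leaves the
values of padding bytes unspecified, whereas memset() writes every byte
of the object:

	#include <string.h>
	#include <stdint.h>

	struct sketch {
		uint64_t big;
		uint32_t small;	/* followed by 4 bytes of tail padding */
	};

	int main(void)
	{
		unsigned char dst[sizeof(struct sketch)];

		/* '= {}' (GNU C extension) zero-initializes 'big' and
		 * 'small', but the padding bytes may still hold whatever
		 * was on the stack. */
		struct sketch a = {};
		memcpy(dst, &a, sizeof(a));	/* may copy stale padding */

		/* memset() clears all sizeof(b) bytes, padding included,
		 * which is why the fix below switches to it. */
		struct sketch b;
		memset(&b, 0, sizeof(b));
		memcpy(dst, &b, sizeof(b));	/* every byte is zero */

		return 0;
	}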

Cc: stable@vger.kernel.org
Fixes: c193fa91b918 ("drm/amdgpu: information leak in amdgpu_info_ioctl()")
Fixes: d38ceaf99ed0 ("drm/amdgpu: add core driver (v4)")
Suggested-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Peilin Ye <yepeilin.cs@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Showing 1 changed file with 2 additions and 1 deletion (inline diff; changed lines marked with -/+)

drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include "amdgpu.h"
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"

void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * Returns 0 on success.
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}

void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more gpu instance\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)adev;

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	if (amdgpu_device_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && amdgpu_device_is_px(dev))
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}

static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
		if (query_fw->index > 1)
			return -EINVAL;
		if (query_fw->index == 0) {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_xgmi_ucode_version;
		} else {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_ras_ucode_version;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->vcn.num_enc_rings; j++)
				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_jpeg.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @adev: amdgpu device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
		case AMDGPU_HW_IP_VCN_JPEG:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.compute_partition_size = adev->gds.gds_size;
		gds_info.gds_total_size = adev->gds.gds_size;
		gds_info.gws_per_compute_partition = adev->gds.gws_size;
		gds_info.oa_per_compute_partition = adev->gds.oa_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size);
		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
			atomic64_read(&adev->visible_pin_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size);
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
			atomic64_read(&adev->visible_pin_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		if (info->read_mmr_reg.count > 128)
			return -EINVAL;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		amdgpu_gfx_off_ctrl(adev, false);
		for (i = 0; i < info->read_mmr_reg.count; i++) {
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				amdgpu_gfx_off_ctrl(adev, true);
				return -EFAULT;
			}
		}
		amdgpu_gfx_off_ctrl(adev, true);
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
-		struct drm_amdgpu_info_device dev_info = {};
+		struct drm_amdgpu_info_device dev_info;
		uint64_t vm_size;

+		memset(&dev_info, 0, sizeof(dev_info));
		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
			   adev->virt.ops->get_pp_clk) {
			dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;

		if (amdgpu_ngg) {
			dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
			dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
			dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
			dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
			dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
			dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
			dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
			dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
		}
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		if (adev->family >= AMDGPU_FAMILY_NV)
			dev_info.pa_sc_tile_steering_override =
				adev->gfx.config.pa_sc_tile_steering_override;

		dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size)))
					    ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					    ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
						    min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
919 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0; 920 return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
920 case AMDGPU_INFO_RAS_ENABLED_FEATURES: { 921 case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
921 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 922 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
922 uint64_t ras_mask; 923 uint64_t ras_mask;
923 924
924 if (!ras) 925 if (!ras)
925 return -EINVAL; 926 return -EINVAL;
926 ras_mask = (uint64_t)ras->supported << 32 | ras->features; 927 ras_mask = (uint64_t)ras->supported << 32 | ras->features;
927 928
928 return copy_to_user(out, &ras_mask, 929 return copy_to_user(out, &ras_mask,
929 min_t(u64, size, sizeof(ras_mask))) ? 930 min_t(u64, size, sizeof(ras_mask))) ?
930 -EFAULT : 0; 931 -EFAULT : 0;
931 } 932 }
932 default: 933 default:
933 DRM_DEBUG_KMS("Invalid request %d\n", info->query); 934 DRM_DEBUG_KMS("Invalid request %d\n", info->query);
934 return -EINVAL; 935 return -EINVAL;
935 } 936 }
936 return 0; 937 return 0;
937 } 938 }
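For context, every query handled above is reached from userspace through the DRM_AMDGPU_INFO ioctl, and each copy_to_user() is bounded by min() against the caller-supplied `return_size` -- the `size` the commit message refers to. A minimal userspace sketch, assuming libdrm is installed (build with `pkg-config --cflags --libs libdrm`) and that a render node exists at /dev/dri/renderD128 (the path is an assumption), of reading the GPU temperature via AMDGPU_INFO_SENSOR:

/* Illustrative sketch only, not part of this commit: query
 * AMDGPU_INFO_SENSOR_GPU_TEMP through the DRM_AMDGPU_INFO ioctl.
 * The device path is an assumption; error handling is minimal. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

int main(void)
{
	struct drm_amdgpu_info request;
	uint32_t temp = 0;
	int fd = open("/dev/dri/renderD128", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)&temp;
	request.return_size = sizeof(temp);	/* the "size" the kernel clamps against */
	request.query = AMDGPU_INFO_SENSOR;
	request.sensor_info.type = AMDGPU_INFO_SENSOR_GPU_TEMP;

	if (drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request)))
		return 1;

	printf("GPU temperature: %u millidegrees C\n", temp);
	return 0;
}

Because AMDGPU_INFO is registered with DRM_RENDER_ALLOW in the ioctl table further down, this query also works on an unauthenticated render node.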

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->delayed_init_work);

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}
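The open path above uses the kernel's usual goto-based unwinding: each failure jumps to a label that releases only what has already been acquired, in reverse order, and both success and failure fall through to the runtime-PM release at out_suspend. A stripped-down sketch of the same shape, with malloc/free standing in for the driver's real resources (names and resources here are hypothetical, not driver code):

/* Toy model of the unwind pattern in amdgpu_driver_open_kms() above.
 * malloc/free stand in for kzalloc(fpriv) and amdgpu_vm_init(); the
 * label mirrors error_pasid/error_vm. Illustrative only. */
#include <errno.h>
#include <stdlib.h>

static int open_example(void)
{
	void *fpriv, *vm;
	int r = 0;

	fpriv = malloc(64);		/* like kzalloc(sizeof(*fpriv)) */
	if (!fpriv)
		return -ENOMEM;

	vm = malloc(64);		/* like amdgpu_vm_init() */
	if (!vm) {
		r = -ENOMEM;
		goto error_fpriv;	/* release only what we already hold */
	}

	/* success: the driver would hand these off to file_priv; the toy
	 * frees them here only to stay leak-free */
	free(vm);
	free(fpriv);
	return 0;

error_fpriv:
	free(fpriv);
	return r;
}

int main(void)
{
	return open_example() ? 1 : 0;
}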
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}
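Once vpos is defined as the distance to the start of vblank, the cooking step above reduces to a single comparison. A self-contained sketch of just that step (the helper name is hypothetical, not driver API):

/* Toy model of the count cooking above: the hardware bumps its frame
 * counter at start of vsync, but DRM wants it bumped at start of
 * vblank. vpos >= 0 means we are between vblank start and vsync start,
 * where the hardware has not incremented yet, so report count + 1.
 * Illustrative only. */
static unsigned int cook_vblank_count(unsigned int hw_count, int vpos)
{
	return vpos >= 0 ? hw_count + 1 : hw_count;
}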
/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);


	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = 0; i < 2; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;
		seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
			   i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
	}

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);


	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}
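When CONFIG_DEBUG_FS is enabled, the entry registered above appears under debugfs and can be read like any text file. A minimal sketch, assuming debugfs is mounted at /sys/kernel/debug and the card is DRM minor 0 (both are assumptions):

/* Illustrative only: dump amdgpu_firmware_info from userspace. The
 * debugfs mount point and the minor number (0) are assumptions; the
 * file is typically readable only by root. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/dri/0/amdgpu_firmware_info", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}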