Commit d8af20bcae25e8792ccd8c94404e6e57e7db75f2
Exists in master and in 13 other branches
Merge branch 'vmwgfx-fixes-3.15' of git://people.freedesktop.org/~thomash/linux into drm-next
single security fix, cc'd stable.

* 'vmwgfx-fixes-3.15' of git://people.freedesktop.org/~thomash/linux:
  drm/vmwgfx: Make sure user-space can't DMA across buffer object boundaries v2
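
The hunk implementing the fix appears to land in the DMA command verifier further down in this file, past the point where the excerpt below ends, so it is not visible here. As a rough, illustrative sketch of what such a boundary check involves (variable names and the suffix handling are assumptions, not a quote of the actual hunk), the verifier must reject a DMA whose guest offset lies beyond the backing buffer object and clamp the transfer extent to what remains of the buffer:

	/*
	 * Illustrative sketch only -- not the literal hunk from this
	 * commit. A user-supplied DMA command must not be allowed to
	 * read or write past the end of the backing buffer object.
	 */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;
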
Showing 1 changed file: drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	bool first_usage;
	bool no_buffer_needed;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}
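
/*
 * Usage sketch (illustrative, not part of the file at this point): later
 * in this file, VMW_CMD_DEF populates the command-verifier dispatch table
 * as a designated initializer keyed by SVGA3D command id, roughly:
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
 *			    false, false, false),
 *		...
 *	};
 */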

/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: List of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *new_backup =
			backoff ? NULL : val->new_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_context_binding_state_transfer
					(val->res, val->staged_bindings);
			}
			kfree(val->staged_bindings);
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}


/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	list_add_tail(&node->head, &sw_context->resource_list);
	node->res = vmw_resource_reference(res);
	node->first_usage = true;

	if (unlikely(p_node != NULL))
		*p_node = node;

	return 0;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_binding *entry;
	int ret = 0;
	struct vmw_resource *res;

	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		res = vmw_resource_reference_unless_doomed(entry->bi.res);
		if (unlikely(res == NULL))
			continue;

		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}
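
/*
 * Note on granularity (added for clarity, not in the original file): the
 * offset stored by vmw_resource_relocation_add() counts 32-bit words,
 * since callers compute it as "id_loc - sw_context->buf_start" on
 * uint32_t pointers. That is why vmw_resource_relocations_apply() can
 * index the command buffer directly with cb[rel->offset] above.
 */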
283 | 283 | ||
284 | static int vmw_cmd_invalid(struct vmw_private *dev_priv, | 284 | static int vmw_cmd_invalid(struct vmw_private *dev_priv, |
285 | struct vmw_sw_context *sw_context, | 285 | struct vmw_sw_context *sw_context, |
286 | SVGA3dCmdHeader *header) | 286 | SVGA3dCmdHeader *header) |
287 | { | 287 | { |
288 | return capable(CAP_SYS_ADMIN) ? : -EINVAL; | 288 | return capable(CAP_SYS_ADMIN) ? : -EINVAL; |
289 | } | 289 | } |
290 | 290 | ||
291 | static int vmw_cmd_ok(struct vmw_private *dev_priv, | 291 | static int vmw_cmd_ok(struct vmw_private *dev_priv, |
292 | struct vmw_sw_context *sw_context, | 292 | struct vmw_sw_context *sw_context, |
293 | SVGA3dCmdHeader *header) | 293 | SVGA3dCmdHeader *header) |
294 | { | 294 | { |
295 | return 0; | 295 | return 0; |
296 | } | 296 | } |
297 | 297 | ||
298 | /** | 298 | /** |
299 | * vmw_bo_to_validate_list - add a bo to a validate list | 299 | * vmw_bo_to_validate_list - add a bo to a validate list |
300 | * | 300 | * |
301 | * @sw_context: The software context used for this command submission batch. | 301 | * @sw_context: The software context used for this command submission batch. |
302 | * @bo: The buffer object to add. | 302 | * @bo: The buffer object to add. |
303 | * @validate_as_mob: Validate this buffer as a MOB. | 303 | * @validate_as_mob: Validate this buffer as a MOB. |
304 | * @p_val_node: If non-NULL Will be updated with the validate node number | 304 | * @p_val_node: If non-NULL Will be updated with the validate node number |
305 | * on return. | 305 | * on return. |
306 | * | 306 | * |
307 | * Returns -EINVAL if the limit of number of buffer objects per command | 307 | * Returns -EINVAL if the limit of number of buffer objects per command |
308 | * submission is reached. | 308 | * submission is reached. |
309 | */ | 309 | */ |
310 | static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | 310 | static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, |
311 | struct ttm_buffer_object *bo, | 311 | struct ttm_buffer_object *bo, |
312 | bool validate_as_mob, | 312 | bool validate_as_mob, |
313 | uint32_t *p_val_node) | 313 | uint32_t *p_val_node) |
314 | { | 314 | { |
315 | uint32_t val_node; | 315 | uint32_t val_node; |
316 | struct vmw_validate_buffer *vval_buf; | 316 | struct vmw_validate_buffer *vval_buf; |
317 | struct ttm_validate_buffer *val_buf; | 317 | struct ttm_validate_buffer *val_buf; |
318 | struct drm_hash_item *hash; | 318 | struct drm_hash_item *hash; |
319 | int ret; | 319 | int ret; |
320 | 320 | ||
321 | if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo, | 321 | if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo, |
322 | &hash) == 0)) { | 322 | &hash) == 0)) { |
323 | vval_buf = container_of(hash, struct vmw_validate_buffer, | 323 | vval_buf = container_of(hash, struct vmw_validate_buffer, |
324 | hash); | 324 | hash); |
325 | if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) { | 325 | if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) { |
326 | DRM_ERROR("Inconsistent buffer usage.\n"); | 326 | DRM_ERROR("Inconsistent buffer usage.\n"); |
327 | return -EINVAL; | 327 | return -EINVAL; |
328 | } | 328 | } |
329 | val_buf = &vval_buf->base; | 329 | val_buf = &vval_buf->base; |
330 | val_node = vval_buf - sw_context->val_bufs; | 330 | val_node = vval_buf - sw_context->val_bufs; |
331 | } else { | 331 | } else { |
332 | val_node = sw_context->cur_val_buf; | 332 | val_node = sw_context->cur_val_buf; |
333 | if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { | 333 | if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { |
334 | DRM_ERROR("Max number of DMA buffers per submission " | 334 | DRM_ERROR("Max number of DMA buffers per submission " |
335 | "exceeded.\n"); | 335 | "exceeded.\n"); |
336 | return -EINVAL; | 336 | return -EINVAL; |
337 | } | 337 | } |
338 | vval_buf = &sw_context->val_bufs[val_node]; | 338 | vval_buf = &sw_context->val_bufs[val_node]; |
339 | vval_buf->hash.key = (unsigned long) bo; | 339 | vval_buf->hash.key = (unsigned long) bo; |
340 | ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash); | 340 | ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash); |
341 | if (unlikely(ret != 0)) { | 341 | if (unlikely(ret != 0)) { |
342 | DRM_ERROR("Failed to initialize a buffer validation " | 342 | DRM_ERROR("Failed to initialize a buffer validation " |
343 | "entry.\n"); | 343 | "entry.\n"); |
344 | return ret; | 344 | return ret; |
345 | } | 345 | } |
346 | ++sw_context->cur_val_buf; | 346 | ++sw_context->cur_val_buf; |
347 | val_buf = &vval_buf->base; | 347 | val_buf = &vval_buf->base; |
348 | val_buf->bo = ttm_bo_reference(bo); | 348 | val_buf->bo = ttm_bo_reference(bo); |
349 | val_buf->reserved = false; | 349 | val_buf->reserved = false; |
350 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | 350 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); |
351 | vval_buf->validate_as_mob = validate_as_mob; | 351 | vval_buf->validate_as_mob = validate_as_mob; |
352 | } | 352 | } |
353 | 353 | ||
354 | sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; | 354 | sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; |
355 | 355 | ||
356 | if (p_val_node) | 356 | if (p_val_node) |
357 | *p_val_node = val_node; | 357 | *p_val_node = val_node; |
358 | 358 | ||
359 | return 0; | 359 | return 0; |
360 | } | 360 | } |
361 | 361 | ||
362 | /** | 362 | /** |
363 | * vmw_resources_reserve - Reserve all resources on the sw_context's | 363 | * vmw_resources_reserve - Reserve all resources on the sw_context's |
364 | * resource list. | 364 | * resource list. |
365 | * | 365 | * |
366 | * @sw_context: Pointer to the software context. | 366 | * @sw_context: Pointer to the software context. |
367 | * | 367 | * |
368 | * Note that since vmware's command submission currently is protected by | 368 | * Note that since vmware's command submission currently is protected by |
369 | * the cmdbuf mutex, no fancy deadlock avoidance is required for resources, | 369 | * the cmdbuf mutex, no fancy deadlock avoidance is required for resources, |
370 | * since only a single thread at once will attempt this. | 370 | * since only a single thread at once will attempt this. |
371 | */ | 371 | */ |
372 | static int vmw_resources_reserve(struct vmw_sw_context *sw_context) | 372 | static int vmw_resources_reserve(struct vmw_sw_context *sw_context) |
373 | { | 373 | { |
374 | struct vmw_resource_val_node *val; | 374 | struct vmw_resource_val_node *val; |
375 | int ret; | 375 | int ret; |
376 | 376 | ||
377 | list_for_each_entry(val, &sw_context->resource_list, head) { | 377 | list_for_each_entry(val, &sw_context->resource_list, head) { |
378 | struct vmw_resource *res = val->res; | 378 | struct vmw_resource *res = val->res; |
379 | 379 | ||
380 | ret = vmw_resource_reserve(res, val->no_buffer_needed); | 380 | ret = vmw_resource_reserve(res, val->no_buffer_needed); |
381 | if (unlikely(ret != 0)) | 381 | if (unlikely(ret != 0)) |
382 | return ret; | 382 | return ret; |
383 | 383 | ||
384 | if (res->backup) { | 384 | if (res->backup) { |
385 | struct ttm_buffer_object *bo = &res->backup->base; | 385 | struct ttm_buffer_object *bo = &res->backup->base; |
386 | 386 | ||
387 | ret = vmw_bo_to_validate_list | 387 | ret = vmw_bo_to_validate_list |
388 | (sw_context, bo, | 388 | (sw_context, bo, |
389 | vmw_resource_needs_backup(res), NULL); | 389 | vmw_resource_needs_backup(res), NULL); |
390 | 390 | ||
391 | if (unlikely(ret != 0)) | 391 | if (unlikely(ret != 0)) |
392 | return ret; | 392 | return ret; |
393 | } | 393 | } |
394 | } | 394 | } |
395 | return 0; | 395 | return 0; |
396 | } | 396 | } |
397 | 397 | ||
398 | /** | 398 | /** |
399 | * vmw_resources_validate - Validate all resources on the sw_context's | 399 | * vmw_resources_validate - Validate all resources on the sw_context's |
400 | * resource list. | 400 | * resource list. |
401 | * | 401 | * |
402 | * @sw_context: Pointer to the software context. | 402 | * @sw_context: Pointer to the software context. |
403 | * | 403 | * |
404 | * Before this function is called, all resource backup buffers must have | 404 | * Before this function is called, all resource backup buffers must have |
405 | * been validated. | 405 | * been validated. |
406 | */ | 406 | */ |
407 | static int vmw_resources_validate(struct vmw_sw_context *sw_context) | 407 | static int vmw_resources_validate(struct vmw_sw_context *sw_context) |
408 | { | 408 | { |
409 | struct vmw_resource_val_node *val; | 409 | struct vmw_resource_val_node *val; |
410 | int ret; | 410 | int ret; |
411 | 411 | ||
412 | list_for_each_entry(val, &sw_context->resource_list, head) { | 412 | list_for_each_entry(val, &sw_context->resource_list, head) { |
413 | struct vmw_resource *res = val->res; | 413 | struct vmw_resource *res = val->res; |
414 | 414 | ||
415 | ret = vmw_resource_validate(res); | 415 | ret = vmw_resource_validate(res); |
416 | if (unlikely(ret != 0)) { | 416 | if (unlikely(ret != 0)) { |
417 | if (ret != -ERESTARTSYS) | 417 | if (ret != -ERESTARTSYS) |
418 | DRM_ERROR("Failed to validate resource.\n"); | 418 | DRM_ERROR("Failed to validate resource.\n"); |
419 | return ret; | 419 | return ret; |
420 | } | 420 | } |
421 | } | 421 | } |
422 | return 0; | 422 | return 0; |
423 | } | 423 | } |
424 | 424 | ||
425 | /** | 425 | /** |
426 | * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it | 426 | * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it |
427 | * on the resource validate list unless it's already there. | 427 | * on the resource validate list unless it's already there. |
428 | * | 428 | * |
429 | * @dev_priv: Pointer to a device private structure. | 429 | * @dev_priv: Pointer to a device private structure. |
430 | * @sw_context: Pointer to the software context. | 430 | * @sw_context: Pointer to the software context. |
431 | * @res_type: Resource type. | 431 | * @res_type: Resource type. |
432 | * @converter: User-space visisble type specific information. | 432 | * @converter: User-space visisble type specific information. |
433 | * @id: user-space resource id handle. | 433 | * @id: user-space resource id handle. |
434 | * @id_loc: Pointer to the location in the command buffer currently being | 434 | * @id_loc: Pointer to the location in the command buffer currently being |
435 | * parsed from where the user-space resource id handle is located. | 435 | * parsed from where the user-space resource id handle is located. |
436 | * @p_val: Pointer to pointer to resource validalidation node. Populated | 436 | * @p_val: Pointer to pointer to resource validalidation node. Populated |
437 | * on exit. | 437 | * on exit. |
438 | */ | 438 | */ |
439 | static int | 439 | static int |
440 | vmw_cmd_compat_res_check(struct vmw_private *dev_priv, | 440 | vmw_cmd_compat_res_check(struct vmw_private *dev_priv, |
441 | struct vmw_sw_context *sw_context, | 441 | struct vmw_sw_context *sw_context, |
442 | enum vmw_res_type res_type, | 442 | enum vmw_res_type res_type, |
443 | const struct vmw_user_resource_conv *converter, | 443 | const struct vmw_user_resource_conv *converter, |
444 | uint32_t id, | 444 | uint32_t id, |
445 | uint32_t *id_loc, | 445 | uint32_t *id_loc, |
446 | struct vmw_resource_val_node **p_val) | 446 | struct vmw_resource_val_node **p_val) |
447 | { | 447 | { |
448 | struct vmw_res_cache_entry *rcache = | 448 | struct vmw_res_cache_entry *rcache = |
449 | &sw_context->res_cache[res_type]; | 449 | &sw_context->res_cache[res_type]; |
450 | struct vmw_resource *res; | 450 | struct vmw_resource *res; |
451 | struct vmw_resource_val_node *node; | 451 | struct vmw_resource_val_node *node; |
452 | int ret; | 452 | int ret; |
453 | 453 | ||
454 | if (id == SVGA3D_INVALID_ID) { | 454 | if (id == SVGA3D_INVALID_ID) { |
455 | if (p_val) | 455 | if (p_val) |
456 | *p_val = NULL; | 456 | *p_val = NULL; |
457 | if (res_type == vmw_res_context) { | 457 | if (res_type == vmw_res_context) { |
458 | DRM_ERROR("Illegal context invalid id.\n"); | 458 | DRM_ERROR("Illegal context invalid id.\n"); |
459 | return -EINVAL; | 459 | return -EINVAL; |
460 | } | 460 | } |
461 | return 0; | 461 | return 0; |
462 | } | 462 | } |
463 | 463 | ||
464 | /* | 464 | /* |
465 | * Fastpath in case of repeated commands referencing the same | 465 | * Fastpath in case of repeated commands referencing the same |
466 | * resource | 466 | * resource |
467 | */ | 467 | */ |
468 | 468 | ||
469 | if (likely(rcache->valid && id == rcache->handle)) { | 469 | if (likely(rcache->valid && id == rcache->handle)) { |
470 | const struct vmw_resource *res = rcache->res; | 470 | const struct vmw_resource *res = rcache->res; |
471 | 471 | ||
472 | rcache->node->first_usage = false; | 472 | rcache->node->first_usage = false; |
473 | if (p_val) | 473 | if (p_val) |
474 | *p_val = rcache->node; | 474 | *p_val = rcache->node; |
475 | 475 | ||
476 | return vmw_resource_relocation_add | 476 | return vmw_resource_relocation_add |
477 | (&sw_context->res_relocations, res, | 477 | (&sw_context->res_relocations, res, |
478 | id_loc - sw_context->buf_start); | 478 | id_loc - sw_context->buf_start); |
479 | } | 479 | } |
480 | 480 | ||
481 | ret = vmw_user_resource_lookup_handle(dev_priv, | 481 | ret = vmw_user_resource_lookup_handle(dev_priv, |
482 | sw_context->fp->tfile, | 482 | sw_context->fp->tfile, |
483 | id, | 483 | id, |
484 | converter, | 484 | converter, |
485 | &res); | 485 | &res); |
486 | if (unlikely(ret != 0)) { | 486 | if (unlikely(ret != 0)) { |
487 | DRM_ERROR("Could not find or use resource 0x%08x.\n", | 487 | DRM_ERROR("Could not find or use resource 0x%08x.\n", |
488 | (unsigned) id); | 488 | (unsigned) id); |
489 | dump_stack(); | 489 | dump_stack(); |
490 | return ret; | 490 | return ret; |
491 | } | 491 | } |
492 | 492 | ||
493 | rcache->valid = true; | 493 | rcache->valid = true; |
494 | rcache->res = res; | 494 | rcache->res = res; |
495 | rcache->handle = id; | 495 | rcache->handle = id; |
496 | 496 | ||
497 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, | 497 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, |
498 | res, | 498 | res, |
499 | id_loc - sw_context->buf_start); | 499 | id_loc - sw_context->buf_start); |
500 | if (unlikely(ret != 0)) | 500 | if (unlikely(ret != 0)) |
501 | goto out_no_reloc; | 501 | goto out_no_reloc; |
502 | 502 | ||
503 | ret = vmw_resource_val_add(sw_context, res, &node); | 503 | ret = vmw_resource_val_add(sw_context, res, &node); |
504 | if (unlikely(ret != 0)) | 504 | if (unlikely(ret != 0)) |
505 | goto out_no_reloc; | 505 | goto out_no_reloc; |
506 | 506 | ||
507 | rcache->node = node; | 507 | rcache->node = node; |
508 | if (p_val) | 508 | if (p_val) |
509 | *p_val = node; | 509 | *p_val = node; |
510 | 510 | ||
511 | if (dev_priv->has_mob && node->first_usage && | 511 | if (dev_priv->has_mob && node->first_usage && |
512 | res_type == vmw_res_context) { | 512 | res_type == vmw_res_context) { |
513 | ret = vmw_resource_context_res_add(dev_priv, sw_context, res); | 513 | ret = vmw_resource_context_res_add(dev_priv, sw_context, res); |
514 | if (unlikely(ret != 0)) | 514 | if (unlikely(ret != 0)) |
515 | goto out_no_reloc; | 515 | goto out_no_reloc; |
516 | node->staged_bindings = | 516 | node->staged_bindings = |
517 | kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); | 517 | kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); |
518 | if (node->staged_bindings == NULL) { | 518 | if (node->staged_bindings == NULL) { |
519 | DRM_ERROR("Failed to allocate context binding " | 519 | DRM_ERROR("Failed to allocate context binding " |
520 | "information.\n"); | 520 | "information.\n"); |
521 | goto out_no_reloc; | 521 | goto out_no_reloc; |
522 | } | 522 | } |
523 | INIT_LIST_HEAD(&node->staged_bindings->list); | 523 | INIT_LIST_HEAD(&node->staged_bindings->list); |
524 | } | 524 | } |
525 | 525 | ||
526 | vmw_resource_unreference(&res); | 526 | vmw_resource_unreference(&res); |
527 | return 0; | 527 | return 0; |
528 | 528 | ||
529 | out_no_reloc: | 529 | out_no_reloc: |
530 | BUG_ON(sw_context->error_resource != NULL); | 530 | BUG_ON(sw_context->error_resource != NULL); |
531 | sw_context->error_resource = res; | 531 | sw_context->error_resource = res; |
532 | 532 | ||
533 | return ret; | 533 | return ret; |
534 | } | 534 | } |
535 | 535 | ||
536 | /** | 536 | /** |
537 | * vmw_cmd_res_check - Check that a resource is present and if so, put it | 537 | * vmw_cmd_res_check - Check that a resource is present and if so, put it |
538 | * on the resource validate list unless it's already there. | 538 | * on the resource validate list unless it's already there. |
539 | * | 539 | * |
540 | * @dev_priv: Pointer to a device private structure. | 540 | * @dev_priv: Pointer to a device private structure. |
541 | * @sw_context: Pointer to the software context. | 541 | * @sw_context: Pointer to the software context. |
542 | * @res_type: Resource type. | 542 | * @res_type: Resource type. |
543 | * @converter: User-space visisble type specific information. | 543 | * @converter: User-space visisble type specific information. |
544 | * @id_loc: Pointer to the location in the command buffer currently being | 544 | * @id_loc: Pointer to the location in the command buffer currently being |
545 | * parsed from where the user-space resource id handle is located. | 545 | * parsed from where the user-space resource id handle is located. |
546 | * @p_val: Pointer to pointer to resource validalidation node. Populated | 546 | * @p_val: Pointer to pointer to resource validalidation node. Populated |
547 | * on exit. | 547 | * on exit. |
548 | */ | 548 | */ |
549 | static int | 549 | static int |
550 | vmw_cmd_res_check(struct vmw_private *dev_priv, | 550 | vmw_cmd_res_check(struct vmw_private *dev_priv, |
551 | struct vmw_sw_context *sw_context, | 551 | struct vmw_sw_context *sw_context, |
552 | enum vmw_res_type res_type, | 552 | enum vmw_res_type res_type, |
553 | const struct vmw_user_resource_conv *converter, | 553 | const struct vmw_user_resource_conv *converter, |
554 | uint32_t *id_loc, | 554 | uint32_t *id_loc, |
555 | struct vmw_resource_val_node **p_val) | 555 | struct vmw_resource_val_node **p_val) |
556 | { | 556 | { |
557 | return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type, | 557 | return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type, |
558 | converter, *id_loc, id_loc, p_val); | 558 | converter, *id_loc, id_loc, p_val); |
559 | } | 559 | } |
560 | 560 | ||
561 | /** | 561 | /** |
562 | * vmw_rebind_contexts - Rebind all resources previously bound to | 562 | * vmw_rebind_contexts - Rebind all resources previously bound to |
563 | * referenced contexts. | 563 | * referenced contexts. |
564 | * | 564 | * |
565 | * @sw_context: Pointer to the software context. | 565 | * @sw_context: Pointer to the software context. |
566 | * | 566 | * |
567 | * Rebind context binding points that have been scrubbed because of eviction. | 567 | * Rebind context binding points that have been scrubbed because of eviction. |
568 | */ | 568 | */ |
569 | static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) | 569 | static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) |
570 | { | 570 | { |
571 | struct vmw_resource_val_node *val; | 571 | struct vmw_resource_val_node *val; |
572 | int ret; | 572 | int ret; |
573 | 573 | ||
574 | list_for_each_entry(val, &sw_context->resource_list, head) { | 574 | list_for_each_entry(val, &sw_context->resource_list, head) { |
575 | if (likely(!val->staged_bindings)) | 575 | if (likely(!val->staged_bindings)) |
576 | continue; | 576 | continue; |
577 | 577 | ||
578 | ret = vmw_context_rebind_all(val->res); | 578 | ret = vmw_context_rebind_all(val->res); |
579 | if (unlikely(ret != 0)) { | 579 | if (unlikely(ret != 0)) { |
580 | if (ret != -ERESTARTSYS) | 580 | if (ret != -ERESTARTSYS) |
581 | DRM_ERROR("Failed to rebind context.\n"); | 581 | DRM_ERROR("Failed to rebind context.\n"); |
582 | return ret; | 582 | return ret; |
583 | } | 583 | } |
584 | } | 584 | } |
585 | 585 | ||
586 | return 0; | 586 | return 0; |
587 | } | 587 | } |
588 | 588 | ||
589 | /** | 589 | /** |
590 | * vmw_cmd_cid_check - Check a command header for valid context information. | 590 | * vmw_cmd_cid_check - Check a command header for valid context information. |
591 | * | 591 | * |
592 | * @dev_priv: Pointer to a device private structure. | 592 | * @dev_priv: Pointer to a device private structure. |
593 | * @sw_context: Pointer to the software context. | 593 | * @sw_context: Pointer to the software context. |
594 | * @header: A command header with an embedded user-space context handle. | 594 | * @header: A command header with an embedded user-space context handle. |
595 | * | 595 | * |
596 | * Convenience function: Call vmw_cmd_res_check with the user-space context | 596 | * Convenience function: Call vmw_cmd_res_check with the user-space context |
597 | * handle embedded in @header. | 597 | * handle embedded in @header. |
598 | */ | 598 | */ |
599 | static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | 599 | static int vmw_cmd_cid_check(struct vmw_private *dev_priv, |
600 | struct vmw_sw_context *sw_context, | 600 | struct vmw_sw_context *sw_context, |
601 | SVGA3dCmdHeader *header) | 601 | SVGA3dCmdHeader *header) |
602 | { | 602 | { |
603 | struct vmw_cid_cmd { | 603 | struct vmw_cid_cmd { |
604 | SVGA3dCmdHeader header; | 604 | SVGA3dCmdHeader header; |
605 | uint32_t cid; | 605 | uint32_t cid; |
606 | } *cmd; | 606 | } *cmd; |
607 | 607 | ||
608 | cmd = container_of(header, struct vmw_cid_cmd, header); | 608 | cmd = container_of(header, struct vmw_cid_cmd, header); |
609 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 609 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
610 | user_context_converter, &cmd->cid, NULL); | 610 | user_context_converter, &cmd->cid, NULL); |
611 | } | 611 | } |
612 | 612 | ||
613 | static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, | 613 | static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, |
614 | struct vmw_sw_context *sw_context, | 614 | struct vmw_sw_context *sw_context, |
615 | SVGA3dCmdHeader *header) | 615 | SVGA3dCmdHeader *header) |
616 | { | 616 | { |
617 | struct vmw_sid_cmd { | 617 | struct vmw_sid_cmd { |
618 | SVGA3dCmdHeader header; | 618 | SVGA3dCmdHeader header; |
619 | SVGA3dCmdSetRenderTarget body; | 619 | SVGA3dCmdSetRenderTarget body; |
620 | } *cmd; | 620 | } *cmd; |
621 | struct vmw_resource_val_node *ctx_node; | 621 | struct vmw_resource_val_node *ctx_node; |
622 | struct vmw_resource_val_node *res_node; | 622 | struct vmw_resource_val_node *res_node; |
623 | int ret; | 623 | int ret; |
624 | 624 | ||
625 | cmd = container_of(header, struct vmw_sid_cmd, header); | 625 | cmd = container_of(header, struct vmw_sid_cmd, header); |
626 | 626 | ||
627 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 627 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
628 | user_context_converter, &cmd->body.cid, | 628 | user_context_converter, &cmd->body.cid, |
629 | &ctx_node); | 629 | &ctx_node); |
630 | if (unlikely(ret != 0)) | 630 | if (unlikely(ret != 0)) |
631 | return ret; | 631 | return ret; |
632 | 632 | ||
633 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 633 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
634 | user_surface_converter, | 634 | user_surface_converter, |
635 | &cmd->body.target.sid, &res_node); | 635 | &cmd->body.target.sid, &res_node); |
636 | if (unlikely(ret != 0)) | 636 | if (unlikely(ret != 0)) |
637 | return ret; | 637 | return ret; |
638 | 638 | ||
639 | if (dev_priv->has_mob) { | 639 | if (dev_priv->has_mob) { |
640 | struct vmw_ctx_bindinfo bi; | 640 | struct vmw_ctx_bindinfo bi; |
641 | 641 | ||
642 | bi.ctx = ctx_node->res; | 642 | bi.ctx = ctx_node->res; |
643 | bi.res = res_node ? res_node->res : NULL; | 643 | bi.res = res_node ? res_node->res : NULL; |
644 | bi.bt = vmw_ctx_binding_rt; | 644 | bi.bt = vmw_ctx_binding_rt; |
645 | bi.i1.rt_type = cmd->body.type; | 645 | bi.i1.rt_type = cmd->body.type; |
646 | return vmw_context_binding_add(ctx_node->staged_bindings, &bi); | 646 | return vmw_context_binding_add(ctx_node->staged_bindings, &bi); |
647 | } | 647 | } |
648 | 648 | ||
649 | return 0; | 649 | return 0; |
650 | } | 650 | } |
651 | 651 | ||
652 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, | 652 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, |
653 | struct vmw_sw_context *sw_context, | 653 | struct vmw_sw_context *sw_context, |
654 | SVGA3dCmdHeader *header) | 654 | SVGA3dCmdHeader *header) |
655 | { | 655 | { |
656 | struct vmw_sid_cmd { | 656 | struct vmw_sid_cmd { |
657 | SVGA3dCmdHeader header; | 657 | SVGA3dCmdHeader header; |
658 | SVGA3dCmdSurfaceCopy body; | 658 | SVGA3dCmdSurfaceCopy body; |
659 | } *cmd; | 659 | } *cmd; |
660 | int ret; | 660 | int ret; |
661 | 661 | ||
662 | cmd = container_of(header, struct vmw_sid_cmd, header); | 662 | cmd = container_of(header, struct vmw_sid_cmd, header); |
663 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 663 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
664 | user_surface_converter, | 664 | user_surface_converter, |
665 | &cmd->body.src.sid, NULL); | 665 | &cmd->body.src.sid, NULL); |
666 | if (unlikely(ret != 0)) | 666 | if (unlikely(ret != 0)) |
667 | return ret; | 667 | return ret; |
668 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 668 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
669 | user_surface_converter, | 669 | user_surface_converter, |
670 | &cmd->body.dest.sid, NULL); | 670 | &cmd->body.dest.sid, NULL); |
671 | } | 671 | } |
672 | 672 | ||
673 | static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, | 673 | static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, |
674 | struct vmw_sw_context *sw_context, | 674 | struct vmw_sw_context *sw_context, |
675 | SVGA3dCmdHeader *header) | 675 | SVGA3dCmdHeader *header) |
676 | { | 676 | { |
677 | struct vmw_sid_cmd { | 677 | struct vmw_sid_cmd { |
678 | SVGA3dCmdHeader header; | 678 | SVGA3dCmdHeader header; |
679 | SVGA3dCmdSurfaceStretchBlt body; | 679 | SVGA3dCmdSurfaceStretchBlt body; |
680 | } *cmd; | 680 | } *cmd; |
681 | int ret; | 681 | int ret; |
682 | 682 | ||
683 | cmd = container_of(header, struct vmw_sid_cmd, header); | 683 | cmd = container_of(header, struct vmw_sid_cmd, header); |
684 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 684 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
685 | user_surface_converter, | 685 | user_surface_converter, |
686 | &cmd->body.src.sid, NULL); | 686 | &cmd->body.src.sid, NULL); |
687 | if (unlikely(ret != 0)) | 687 | if (unlikely(ret != 0)) |
688 | return ret; | 688 | return ret; |
689 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 689 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
690 | user_surface_converter, | 690 | user_surface_converter, |
691 | &cmd->body.dest.sid, NULL); | 691 | &cmd->body.dest.sid, NULL); |
692 | } | 692 | } |
693 | 693 | ||
694 | static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, | 694 | static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, |
695 | struct vmw_sw_context *sw_context, | 695 | struct vmw_sw_context *sw_context, |
696 | SVGA3dCmdHeader *header) | 696 | SVGA3dCmdHeader *header) |
697 | { | 697 | { |
698 | struct vmw_sid_cmd { | 698 | struct vmw_sid_cmd { |
699 | SVGA3dCmdHeader header; | 699 | SVGA3dCmdHeader header; |
700 | SVGA3dCmdBlitSurfaceToScreen body; | 700 | SVGA3dCmdBlitSurfaceToScreen body; |
701 | } *cmd; | 701 | } *cmd; |
702 | 702 | ||
703 | cmd = container_of(header, struct vmw_sid_cmd, header); | 703 | cmd = container_of(header, struct vmw_sid_cmd, header); |
704 | 704 | ||
705 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 705 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
706 | user_surface_converter, | 706 | user_surface_converter, |
707 | &cmd->body.srcImage.sid, NULL); | 707 | &cmd->body.srcImage.sid, NULL); |
708 | } | 708 | } |
709 | 709 | ||
710 | static int vmw_cmd_present_check(struct vmw_private *dev_priv, | 710 | static int vmw_cmd_present_check(struct vmw_private *dev_priv, |
711 | struct vmw_sw_context *sw_context, | 711 | struct vmw_sw_context *sw_context, |
712 | SVGA3dCmdHeader *header) | 712 | SVGA3dCmdHeader *header) |
713 | { | 713 | { |
714 | struct vmw_sid_cmd { | 714 | struct vmw_sid_cmd { |
715 | SVGA3dCmdHeader header; | 715 | SVGA3dCmdHeader header; |
716 | SVGA3dCmdPresent body; | 716 | SVGA3dCmdPresent body; |
717 | } *cmd; | 717 | } *cmd; |
718 | 718 | ||
719 | 719 | ||
720 | cmd = container_of(header, struct vmw_sid_cmd, header); | 720 | cmd = container_of(header, struct vmw_sid_cmd, header); |
721 | 721 | ||
722 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 722 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
723 | user_surface_converter, &cmd->body.sid, | 723 | user_surface_converter, &cmd->body.sid, |
724 | NULL); | 724 | NULL); |
725 | } | 725 | } |
726 | 726 | ||
727 | /** | 727 | /** |
728 | * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. | 728 | * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. |
729 | * | 729 | * |
730 | * @dev_priv: The device private structure. | 730 | * @dev_priv: The device private structure. |
731 | * @new_query_bo: The new buffer holding query results. | 731 | * @new_query_bo: The new buffer holding query results. |
732 | * @sw_context: The software context used for this command submission. | 732 | * @sw_context: The software context used for this command submission. |
733 | * | 733 | * |
734 | * This function checks whether @new_query_bo is suitable for holding | 734 | * This function checks whether @new_query_bo is suitable for holding |
735 | * query results, and if another buffer currently is pinned for query | 735 | * query results, and if another buffer currently is pinned for query |
736 | * results. If so, the function prepares the state of @sw_context for | 736 | * results. If so, the function prepares the state of @sw_context for |
737 | * switching pinned buffers after successful submission of the current | 737 | * switching pinned buffers after successful submission of the current |
738 | * command batch. | 738 | * command batch. |
739 | */ | 739 | */ |
740 | static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | 740 | static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, |
741 | struct ttm_buffer_object *new_query_bo, | 741 | struct ttm_buffer_object *new_query_bo, |
742 | struct vmw_sw_context *sw_context) | 742 | struct vmw_sw_context *sw_context) |
743 | { | 743 | { |
744 | struct vmw_res_cache_entry *ctx_entry = | 744 | struct vmw_res_cache_entry *ctx_entry = |
745 | &sw_context->res_cache[vmw_res_context]; | 745 | &sw_context->res_cache[vmw_res_context]; |
746 | int ret; | 746 | int ret; |
747 | 747 | ||
748 | BUG_ON(!ctx_entry->valid); | 748 | BUG_ON(!ctx_entry->valid); |
749 | sw_context->last_query_ctx = ctx_entry->res; | 749 | sw_context->last_query_ctx = ctx_entry->res; |
750 | 750 | ||
751 | if (unlikely(new_query_bo != sw_context->cur_query_bo)) { | 751 | if (unlikely(new_query_bo != sw_context->cur_query_bo)) { |
752 | 752 | ||
753 | if (unlikely(new_query_bo->num_pages > 4)) { | 753 | if (unlikely(new_query_bo->num_pages > 4)) { |
754 | DRM_ERROR("Query buffer too large.\n"); | 754 | DRM_ERROR("Query buffer too large.\n"); |
755 | return -EINVAL; | 755 | return -EINVAL; |
756 | } | 756 | } |
757 | 757 | ||
758 | if (unlikely(sw_context->cur_query_bo != NULL)) { | 758 | if (unlikely(sw_context->cur_query_bo != NULL)) { |
759 | sw_context->needs_post_query_barrier = true; | 759 | sw_context->needs_post_query_barrier = true; |
760 | ret = vmw_bo_to_validate_list(sw_context, | 760 | ret = vmw_bo_to_validate_list(sw_context, |
761 | sw_context->cur_query_bo, | 761 | sw_context->cur_query_bo, |
762 | dev_priv->has_mob, NULL); | 762 | dev_priv->has_mob, NULL); |
763 | if (unlikely(ret != 0)) | 763 | if (unlikely(ret != 0)) |
764 | return ret; | 764 | return ret; |
765 | } | 765 | } |
766 | sw_context->cur_query_bo = new_query_bo; | 766 | sw_context->cur_query_bo = new_query_bo; |
767 | 767 | ||
768 | ret = vmw_bo_to_validate_list(sw_context, | 768 | ret = vmw_bo_to_validate_list(sw_context, |
769 | dev_priv->dummy_query_bo, | 769 | dev_priv->dummy_query_bo, |
770 | dev_priv->has_mob, NULL); | 770 | dev_priv->has_mob, NULL); |
771 | if (unlikely(ret != 0)) | 771 | if (unlikely(ret != 0)) |
772 | return ret; | 772 | return ret; |
773 | 773 | ||
774 | } | 774 | } |
775 | 775 | ||
776 | return 0; | 776 | return 0; |
777 | } | 777 | } |
778 | 778 | ||
779 | 779 | ||
780 | /** | 780 | /** |
781 | * vmw_query_bo_switch_commit - Finalize switching pinned query buffer | 781 | * vmw_query_bo_switch_commit - Finalize switching pinned query buffer |
782 | * | 782 | * |
783 | * @dev_priv: The device private structure. | 783 | * @dev_priv: The device private structure. |
784 | * @sw_context: The software context used for this command submission batch. | 784 | * @sw_context: The software context used for this command submission batch. |
785 | * | 785 | * |
786 | * This function will check if we're switching query buffers, and will then, | 786 | * This function will check if we're switching query buffers, and will then, |
787 | * issue a dummy occlusion query wait used as a query barrier. When the fence | 787 | * issue a dummy occlusion query wait used as a query barrier. When the fence |
788 | * object following that query wait has signaled, we are sure that all | 788 | * object following that query wait has signaled, we are sure that all |
789 | * preceding queries have finished, and the old query buffer can be unpinned. | 789 | * preceding queries have finished, and the old query buffer can be unpinned. |
790 | * However, since both the new query buffer and the old one are fenced with | 790 | * However, since both the new query buffer and the old one are fenced with |
791 | * that fence, we can do an asynchronus unpin now, and be sure that the | 791 | * that fence, we can do an asynchronus unpin now, and be sure that the |
792 | * old query buffer won't be moved until the fence has signaled. | 792 | * old query buffer won't be moved until the fence has signaled. |
793 | * | 793 | * |
794 | * As mentioned above, both the new - and old query buffers need to be fenced | 794 | * As mentioned above, both the new - and old query buffers need to be fenced |
795 | * using a sequence emitted *after* calling this function. | 795 | * using a sequence emitted *after* calling this function. |
796 | */ | 796 | */ |
797 | static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, | 797 | static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, |
798 | struct vmw_sw_context *sw_context) | 798 | struct vmw_sw_context *sw_context) |
799 | { | 799 | { |
800 | /* | 800 | /* |
801 | * The validate list should still hold references to all | 801 | * The validate list should still hold references to all |
802 | * contexts here. | 802 | * contexts here. |
803 | */ | 803 | */ |
804 | 804 | ||
805 | if (sw_context->needs_post_query_barrier) { | 805 | if (sw_context->needs_post_query_barrier) { |
806 | struct vmw_res_cache_entry *ctx_entry = | 806 | struct vmw_res_cache_entry *ctx_entry = |
807 | &sw_context->res_cache[vmw_res_context]; | 807 | &sw_context->res_cache[vmw_res_context]; |
808 | struct vmw_resource *ctx; | 808 | struct vmw_resource *ctx; |
809 | int ret; | 809 | int ret; |
810 | 810 | ||
811 | BUG_ON(!ctx_entry->valid); | 811 | BUG_ON(!ctx_entry->valid); |
812 | ctx = ctx_entry->res; | 812 | ctx = ctx_entry->res; |
813 | 813 | ||
814 | ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); | 814 | ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); |
815 | 815 | ||
816 | if (unlikely(ret != 0)) | 816 | if (unlikely(ret != 0)) |
817 | DRM_ERROR("Out of fifo space for dummy query.\n"); | 817 | DRM_ERROR("Out of fifo space for dummy query.\n"); |
818 | } | 818 | } |
819 | 819 | ||
820 | if (dev_priv->pinned_bo != sw_context->cur_query_bo) { | 820 | if (dev_priv->pinned_bo != sw_context->cur_query_bo) { |
821 | if (dev_priv->pinned_bo) { | 821 | if (dev_priv->pinned_bo) { |
822 | vmw_bo_pin(dev_priv->pinned_bo, false); | 822 | vmw_bo_pin(dev_priv->pinned_bo, false); |
823 | ttm_bo_unref(&dev_priv->pinned_bo); | 823 | ttm_bo_unref(&dev_priv->pinned_bo); |
824 | } | 824 | } |
825 | 825 | ||
826 | if (!sw_context->needs_post_query_barrier) { | 826 | if (!sw_context->needs_post_query_barrier) { |
827 | vmw_bo_pin(sw_context->cur_query_bo, true); | 827 | vmw_bo_pin(sw_context->cur_query_bo, true); |
828 | 828 | ||
829 | /* | 829 | /* |
830 | * We also pin the dummy_query_bo buffer so that we | 830 | * We also pin the dummy_query_bo buffer so that we |
831 | * don't need to validate it when emitting | 831 | * don't need to validate it when emitting |
832 | * dummy queries in context destroy paths. | 832 | * dummy queries in context destroy paths. |
833 | */ | 833 | */ |
834 | 834 | ||
835 | vmw_bo_pin(dev_priv->dummy_query_bo, true); | 835 | vmw_bo_pin(dev_priv->dummy_query_bo, true); |
836 | dev_priv->dummy_query_bo_pinned = true; | 836 | dev_priv->dummy_query_bo_pinned = true; |
837 | 837 | ||
838 | BUG_ON(sw_context->last_query_ctx == NULL); | 838 | BUG_ON(sw_context->last_query_ctx == NULL); |
839 | dev_priv->query_cid = sw_context->last_query_ctx->id; | 839 | dev_priv->query_cid = sw_context->last_query_ctx->id; |
840 | dev_priv->query_cid_valid = true; | 840 | dev_priv->query_cid_valid = true; |
841 | dev_priv->pinned_bo = | 841 | dev_priv->pinned_bo = |
842 | ttm_bo_reference(sw_context->cur_query_bo); | 842 | ttm_bo_reference(sw_context->cur_query_bo); |
843 | } | 843 | } |
844 | } | 844 | } |
845 | } | 845 | } |
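/*
 * A simplified sketch of the caller-side ordering that makes the
 * asynchronous unpin above safe.  The names follow this driver, but the
 * snippet is illustrative (roughly the tail of vmw_execbuf_process()),
 * not a verbatim excerpt:
 *
 *	vmw_query_bo_switch_commit(dev_priv, sw_context);
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	ttm_eu_fence_buffer_objects(&sw_context->ticket,
 *				    &sw_context->validate_nodes,
 *				    (void *) fence);
 *
 * The fence is attached to every validated buffer, including both the
 * old and the new query buffer, and TTM will not move a buffer whose
 * fence is still pending, so the unpin above cannot cause the old
 * buffer to move while the device may still be writing query results.
 */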
846 | 846 | ||
847 | /** | 847 | /** |
848 | * vmw_translate_mob_pointer - Prepare to translate a user-space buffer | 848 | * vmw_translate_mob_pointer - Prepare to translate a user-space buffer |
849 | * handle to a MOB id. | 849 | * handle to a MOB id. |
850 | * | 850 | * |
851 | * @dev_priv: Pointer to a device private structure. | 851 | * @dev_priv: Pointer to a device private structure. |
852 | * @sw_context: The software context used for this command batch validation. | 852 | * @sw_context: The software context used for this command batch validation. |
853 | * @id: Pointer to the user-space handle to be translated. | 853 | * @id: Pointer to the user-space handle to be translated. |
854 | * @vmw_bo_p: Points to a location that, on successful return, will carry | 854 | * @vmw_bo_p: Points to a location that, on successful return, will carry |
855 | * a reference-counted pointer to the DMA buffer identified by the | 855 | * a reference-counted pointer to the DMA buffer identified by the |
856 | * user-space handle in @id. | 856 | * user-space handle in @id. |
857 | * | 857 | * |
858 | * This function saves information needed to translate a user-space buffer | 858 | * This function saves information needed to translate a user-space buffer |
859 | * handle to a MOB id. The translation does not take place immediately, but | 859 | * handle to a MOB id. The translation does not take place immediately, but |
860 | * during a call to vmw_apply_relocations(). This function builds a relocation | 860 | * during a call to vmw_apply_relocations(). This function builds a relocation |
861 | * list and a list of buffers to validate. The former needs to be freed using | 861 | * list and a list of buffers to validate. The former needs to be freed using |
862 | * either vmw_apply_relocations() or vmw_free_relocations(). The latter | 862 | * either vmw_apply_relocations() or vmw_free_relocations(). The latter |
863 | * needs to be freed using vmw_clear_validations(). | 863 | * needs to be freed using vmw_clear_validations(). |
864 | */ | 864 | */ |
865 | static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | 865 | static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, |
866 | struct vmw_sw_context *sw_context, | 866 | struct vmw_sw_context *sw_context, |
867 | SVGAMobId *id, | 867 | SVGAMobId *id, |
868 | struct vmw_dma_buffer **vmw_bo_p) | 868 | struct vmw_dma_buffer **vmw_bo_p) |
869 | { | 869 | { |
870 | struct vmw_dma_buffer *vmw_bo = NULL; | 870 | struct vmw_dma_buffer *vmw_bo = NULL; |
871 | struct ttm_buffer_object *bo; | 871 | struct ttm_buffer_object *bo; |
872 | uint32_t handle = *id; | 872 | uint32_t handle = *id; |
873 | struct vmw_relocation *reloc; | 873 | struct vmw_relocation *reloc; |
874 | int ret; | 874 | int ret; |
875 | 875 | ||
876 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | 876 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
877 | if (unlikely(ret != 0)) { | 877 | if (unlikely(ret != 0)) { |
878 | DRM_ERROR("Could not find or use MOB buffer.\n"); | 878 | DRM_ERROR("Could not find or use MOB buffer.\n"); |
879 | return -EINVAL; | 879 | return -EINVAL; |
880 | } | 880 | } |
881 | bo = &vmw_bo->base; | 881 | bo = &vmw_bo->base; |
882 | 882 | ||
883 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { | 883 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { |
884 | DRM_ERROR("Max number relocations per submission" | 884 | DRM_ERROR("Max number relocations per submission" |
885 | " exceeded\n"); | 885 | " exceeded\n"); |
886 | ret = -EINVAL; | 886 | ret = -EINVAL; |
887 | goto out_no_reloc; | 887 | goto out_no_reloc; |
888 | } | 888 | } |
889 | 889 | ||
890 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | 890 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
891 | reloc->mob_loc = id; | 891 | reloc->mob_loc = id; |
892 | reloc->location = NULL; | 892 | reloc->location = NULL; |
893 | 893 | ||
894 | ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index); | 894 | ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index); |
895 | if (unlikely(ret != 0)) | 895 | if (unlikely(ret != 0)) |
896 | goto out_no_reloc; | 896 | goto out_no_reloc; |
897 | 897 | ||
898 | *vmw_bo_p = vmw_bo; | 898 | *vmw_bo_p = vmw_bo; |
899 | return 0; | 899 | return 0; |
900 | 900 | ||
901 | out_no_reloc: | 901 | out_no_reloc: |
902 | vmw_dmabuf_unreference(&vmw_bo); | 902 | vmw_dmabuf_unreference(&vmw_bo); |
903 | *vmw_bo_p = NULL; | 903 | *vmw_bo_p = NULL; |
904 | return ret; | 904 | return ret; |
905 | } | 905 | } |
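/*
 * A sketch of the deferred fixup described above, following the shape
 * of vmw_apply_relocations() (simplified, not a verbatim excerpt).
 * Once validation has placed the buffer, the saved relocation is
 * resolved by writing the final MOB id through the recorded pointer:
 *
 *	struct vmw_relocation *reloc = &sw_context->relocs[i];
 *	struct ttm_buffer_object *bo =
 *		sw_context->val_bufs[reloc->index].base.bo;
 *
 *	if (bo->mem.mem_type == VMW_PL_MOB)
 *		*reloc->mob_loc = bo->mem.start;	(the MOB id)
 */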
906 | 906 | ||
907 | /** | 907 | /** |
908 | * vmw_translate_guest_pointer - Prepare to translate a user-space buffer | 908 | * vmw_translate_guest_pointer - Prepare to translate a user-space buffer |
909 | * handle to a valid SVGAGuestPtr | 909 | * handle to a valid SVGAGuestPtr |
910 | * | 910 | * |
911 | * @dev_priv: Pointer to a device private structure. | 911 | * @dev_priv: Pointer to a device private structure. |
912 | * @sw_context: The software context used for this command batch validation. | 912 | * @sw_context: The software context used for this command batch validation. |
913 | * @ptr: Pointer to the user-space handle to be translated. | 913 | * @ptr: Pointer to the user-space handle to be translated. |
914 | * @vmw_bo_p: Points to a location that, on successful return, will carry | 914 | * @vmw_bo_p: Points to a location that, on successful return, will carry |
915 | * a reference-counted pointer to the DMA buffer identified by the | 915 | * a reference-counted pointer to the DMA buffer identified by the |
916 | * user-space handle in @ptr. | 916 | * user-space handle in @ptr. |
917 | * | 917 | * |
918 | * This function saves information needed to translate a user-space buffer | 918 | * This function saves information needed to translate a user-space buffer |
919 | * handle to a valid SVGAGuestPtr. The translation does not take place | 919 | * handle to a valid SVGAGuestPtr. The translation does not take place |
920 | * immediately, but during a call to vmw_apply_relocations(). | 920 | * immediately, but during a call to vmw_apply_relocations(). |
921 | * This function builds a relocation list and a list of buffers to validate. | 921 | * This function builds a relocation list and a list of buffers to validate. |
922 | * The former needs to be freed using either vmw_apply_relocations() or | 922 | * The former needs to be freed using either vmw_apply_relocations() or |
923 | * vmw_free_relocations(). The latter needs to be freed using | 923 | * vmw_free_relocations(). The latter needs to be freed using |
924 | * vmw_clear_validations(). | 924 | * vmw_clear_validations(). |
925 | */ | 925 | */ |
926 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | 926 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, |
927 | struct vmw_sw_context *sw_context, | 927 | struct vmw_sw_context *sw_context, |
928 | SVGAGuestPtr *ptr, | 928 | SVGAGuestPtr *ptr, |
929 | struct vmw_dma_buffer **vmw_bo_p) | 929 | struct vmw_dma_buffer **vmw_bo_p) |
930 | { | 930 | { |
931 | struct vmw_dma_buffer *vmw_bo = NULL; | 931 | struct vmw_dma_buffer *vmw_bo = NULL; |
932 | struct ttm_buffer_object *bo; | 932 | struct ttm_buffer_object *bo; |
933 | uint32_t handle = ptr->gmrId; | 933 | uint32_t handle = ptr->gmrId; |
934 | struct vmw_relocation *reloc; | 934 | struct vmw_relocation *reloc; |
935 | int ret; | 935 | int ret; |
936 | 936 | ||
937 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | 937 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
938 | if (unlikely(ret != 0)) { | 938 | if (unlikely(ret != 0)) { |
939 | DRM_ERROR("Could not find or use GMR region.\n"); | 939 | DRM_ERROR("Could not find or use GMR region.\n"); |
940 | return -EINVAL; | 940 | return -EINVAL; |
941 | } | 941 | } |
942 | bo = &vmw_bo->base; | 942 | bo = &vmw_bo->base; |
943 | 943 | ||
944 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { | 944 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { |
945 | DRM_ERROR("Max number relocations per submission" | 945 | DRM_ERROR("Max number relocations per submission" |
946 | " exceeded\n"); | 946 | " exceeded\n"); |
947 | ret = -EINVAL; | 947 | ret = -EINVAL; |
948 | goto out_no_reloc; | 948 | goto out_no_reloc; |
949 | } | 949 | } |
950 | 950 | ||
951 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | 951 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
952 | reloc->location = ptr; | 952 | reloc->location = ptr; |
953 | 953 | ||
954 | ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index); | 954 | ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index); |
955 | if (unlikely(ret != 0)) | 955 | if (unlikely(ret != 0)) |
956 | goto out_no_reloc; | 956 | goto out_no_reloc; |
957 | 957 | ||
958 | *vmw_bo_p = vmw_bo; | 958 | *vmw_bo_p = vmw_bo; |
959 | return 0; | 959 | return 0; |
960 | 960 | ||
961 | out_no_reloc: | 961 | out_no_reloc: |
962 | vmw_dmabuf_unreference(&vmw_bo); | 962 | vmw_dmabuf_unreference(&vmw_bo); |
963 | *vmw_bo_p = NULL; | 963 | *vmw_bo_p = NULL; |
964 | return ret; | 964 | return ret; |
965 | } | 965 | } |
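/*
 * Continuing the sketch after vmw_translate_mob_ptr(): for a guest
 * pointer, the same apply step patches the SVGAGuestPtr in place,
 * depending on where the buffer was finally placed (simplified, not a
 * verbatim excerpt):
 *
 *	switch (bo->mem.mem_type) {
 *	case TTM_PL_VRAM:
 *		reloc->location->offset += bo->offset;
 *		reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
 *		break;
 *	case VMW_PL_GMR:
 *		reloc->location->gmrId = bo->mem.start;
 *		break;
 *	}
 */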
966 | 966 | ||
967 | /** | 967 | /** |
968 | * vmw_cmd_begin_gb_query - validate an SVGA_3D_CMD_BEGIN_GB_QUERY command. | 968 | * vmw_cmd_begin_gb_query - validate an SVGA_3D_CMD_BEGIN_GB_QUERY command. |
969 | * | 969 | * |
970 | * @dev_priv: Pointer to a device private struct. | 970 | * @dev_priv: Pointer to a device private struct. |
971 | * @sw_context: The software context used for this command submission. | 971 | * @sw_context: The software context used for this command submission. |
972 | * @header: Pointer to the command header in the command stream. | 972 | * @header: Pointer to the command header in the command stream. |
973 | */ | 973 | */ |
974 | static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, | 974 | static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, |
975 | struct vmw_sw_context *sw_context, | 975 | struct vmw_sw_context *sw_context, |
976 | SVGA3dCmdHeader *header) | 976 | SVGA3dCmdHeader *header) |
977 | { | 977 | { |
978 | struct vmw_begin_gb_query_cmd { | 978 | struct vmw_begin_gb_query_cmd { |
979 | SVGA3dCmdHeader header; | 979 | SVGA3dCmdHeader header; |
980 | SVGA3dCmdBeginGBQuery q; | 980 | SVGA3dCmdBeginGBQuery q; |
981 | } *cmd; | 981 | } *cmd; |
982 | 982 | ||
983 | cmd = container_of(header, struct vmw_begin_gb_query_cmd, | 983 | cmd = container_of(header, struct vmw_begin_gb_query_cmd, |
984 | header); | 984 | header); |
985 | 985 | ||
986 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 986 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
987 | user_context_converter, &cmd->q.cid, | 987 | user_context_converter, &cmd->q.cid, |
988 | NULL); | 988 | NULL); |
989 | } | 989 | } |
990 | 990 | ||
991 | /** | 991 | /** |
992 | * vmw_cmd_begin_query - validate an SVGA_3D_CMD_BEGIN_QUERY command. | 992 | * vmw_cmd_begin_query - validate an SVGA_3D_CMD_BEGIN_QUERY command. |
993 | * | 993 | * |
994 | * @dev_priv: Pointer to a device private struct. | 994 | * @dev_priv: Pointer to a device private struct. |
995 | * @sw_context: The software context used for this command submission. | 995 | * @sw_context: The software context used for this command submission. |
996 | * @header: Pointer to the command header in the command stream. | 996 | * @header: Pointer to the command header in the command stream. |
997 | */ | 997 | */ |
998 | static int vmw_cmd_begin_query(struct vmw_private *dev_priv, | 998 | static int vmw_cmd_begin_query(struct vmw_private *dev_priv, |
999 | struct vmw_sw_context *sw_context, | 999 | struct vmw_sw_context *sw_context, |
1000 | SVGA3dCmdHeader *header) | 1000 | SVGA3dCmdHeader *header) |
1001 | { | 1001 | { |
1002 | struct vmw_begin_query_cmd { | 1002 | struct vmw_begin_query_cmd { |
1003 | SVGA3dCmdHeader header; | 1003 | SVGA3dCmdHeader header; |
1004 | SVGA3dCmdBeginQuery q; | 1004 | SVGA3dCmdBeginQuery q; |
1005 | } *cmd; | 1005 | } *cmd; |
1006 | 1006 | ||
1007 | cmd = container_of(header, struct vmw_begin_query_cmd, | 1007 | cmd = container_of(header, struct vmw_begin_query_cmd, |
1008 | header); | 1008 | header); |
1009 | 1009 | ||
1010 | if (unlikely(dev_priv->has_mob)) { | 1010 | if (unlikely(dev_priv->has_mob)) { |
1011 | struct { | 1011 | struct { |
1012 | SVGA3dCmdHeader header; | 1012 | SVGA3dCmdHeader header; |
1013 | SVGA3dCmdBeginGBQuery q; | 1013 | SVGA3dCmdBeginGBQuery q; |
1014 | } gb_cmd; | 1014 | } gb_cmd; |
1015 | 1015 | ||
1016 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); | 1016 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
1017 | 1017 | ||
1018 | gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY; | 1018 | gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY; |
1019 | gb_cmd.header.size = cmd->header.size; | 1019 | gb_cmd.header.size = cmd->header.size; |
1020 | gb_cmd.q.cid = cmd->q.cid; | 1020 | gb_cmd.q.cid = cmd->q.cid; |
1021 | gb_cmd.q.type = cmd->q.type; | 1021 | gb_cmd.q.type = cmd->q.type; |
1022 | 1022 | ||
1023 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); | 1023 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
1024 | return vmw_cmd_begin_gb_query(dev_priv, sw_context, header); | 1024 | return vmw_cmd_begin_gb_query(dev_priv, sw_context, header); |
1025 | } | 1025 | } |
1026 | 1026 | ||
1027 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 1027 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
1028 | user_context_converter, &cmd->q.cid, | 1028 | user_context_converter, &cmd->q.cid, |
1029 | NULL); | 1029 | NULL); |
1030 | } | 1030 | } |
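/*
 * Note on the rewrite pattern above: it works because the legacy and
 * guest-backed payloads have identical size and layout for the fields
 * that survive (cid, type), so only the header id really changes; the
 * BUG_ON encodes that assumption at runtime.  A compile-time form of
 * the same check (a suggestion, not in the original) would be:
 *
 *	BUILD_BUG_ON(sizeof(SVGA3dCmdBeginQuery) !=
 *		     sizeof(SVGA3dCmdBeginGBQuery));
 */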
1031 | 1031 | ||
1032 | /** | 1032 | /** |
1033 | * vmw_cmd_end_gb_query - validate an SVGA_3D_CMD_END_GB_QUERY command. | 1033 | * vmw_cmd_end_gb_query - validate an SVGA_3D_CMD_END_GB_QUERY command. |
1034 | * | 1034 | * |
1035 | * @dev_priv: Pointer to a device private struct. | 1035 | * @dev_priv: Pointer to a device private struct. |
1036 | * @sw_context: The software context used for this command submission. | 1036 | * @sw_context: The software context used for this command submission. |
1037 | * @header: Pointer to the command header in the command stream. | 1037 | * @header: Pointer to the command header in the command stream. |
1038 | */ | 1038 | */ |
1039 | static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, | 1039 | static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, |
1040 | struct vmw_sw_context *sw_context, | 1040 | struct vmw_sw_context *sw_context, |
1041 | SVGA3dCmdHeader *header) | 1041 | SVGA3dCmdHeader *header) |
1042 | { | 1042 | { |
1043 | struct vmw_dma_buffer *vmw_bo; | 1043 | struct vmw_dma_buffer *vmw_bo; |
1044 | struct vmw_query_cmd { | 1044 | struct vmw_query_cmd { |
1045 | SVGA3dCmdHeader header; | 1045 | SVGA3dCmdHeader header; |
1046 | SVGA3dCmdEndGBQuery q; | 1046 | SVGA3dCmdEndGBQuery q; |
1047 | } *cmd; | 1047 | } *cmd; |
1048 | int ret; | 1048 | int ret; |
1049 | 1049 | ||
1050 | cmd = container_of(header, struct vmw_query_cmd, header); | 1050 | cmd = container_of(header, struct vmw_query_cmd, header); |
1051 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1051 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1052 | if (unlikely(ret != 0)) | 1052 | if (unlikely(ret != 0)) |
1053 | return ret; | 1053 | return ret; |
1054 | 1054 | ||
1055 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, | 1055 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, |
1056 | &cmd->q.mobid, | 1056 | &cmd->q.mobid, |
1057 | &vmw_bo); | 1057 | &vmw_bo); |
1058 | if (unlikely(ret != 0)) | 1058 | if (unlikely(ret != 0)) |
1059 | return ret; | 1059 | return ret; |
1060 | 1060 | ||
1061 | ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); | 1061 | ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); |
1062 | 1062 | ||
1063 | vmw_dmabuf_unreference(&vmw_bo); | 1063 | vmw_dmabuf_unreference(&vmw_bo); |
1064 | return ret; | 1064 | return ret; |
1065 | } | 1065 | } |
1066 | 1066 | ||
1067 | /** | 1067 | /** |
1068 | * vmw_cmd_end_query - validate an SVGA_3D_CMD_END_QUERY command. | 1068 | * vmw_cmd_end_query - validate an SVGA_3D_CMD_END_QUERY command. |
1069 | * | 1069 | * |
1070 | * @dev_priv: Pointer to a device private struct. | 1070 | * @dev_priv: Pointer to a device private struct. |
1071 | * @sw_context: The software context used for this command submission. | 1071 | * @sw_context: The software context used for this command submission. |
1072 | * @header: Pointer to the command header in the command stream. | 1072 | * @header: Pointer to the command header in the command stream. |
1073 | */ | 1073 | */ |
1074 | static int vmw_cmd_end_query(struct vmw_private *dev_priv, | 1074 | static int vmw_cmd_end_query(struct vmw_private *dev_priv, |
1075 | struct vmw_sw_context *sw_context, | 1075 | struct vmw_sw_context *sw_context, |
1076 | SVGA3dCmdHeader *header) | 1076 | SVGA3dCmdHeader *header) |
1077 | { | 1077 | { |
1078 | struct vmw_dma_buffer *vmw_bo; | 1078 | struct vmw_dma_buffer *vmw_bo; |
1079 | struct vmw_query_cmd { | 1079 | struct vmw_query_cmd { |
1080 | SVGA3dCmdHeader header; | 1080 | SVGA3dCmdHeader header; |
1081 | SVGA3dCmdEndQuery q; | 1081 | SVGA3dCmdEndQuery q; |
1082 | } *cmd; | 1082 | } *cmd; |
1083 | int ret; | 1083 | int ret; |
1084 | 1084 | ||
1085 | cmd = container_of(header, struct vmw_query_cmd, header); | 1085 | cmd = container_of(header, struct vmw_query_cmd, header); |
1086 | if (dev_priv->has_mob) { | 1086 | if (dev_priv->has_mob) { |
1087 | struct { | 1087 | struct { |
1088 | SVGA3dCmdHeader header; | 1088 | SVGA3dCmdHeader header; |
1089 | SVGA3dCmdEndGBQuery q; | 1089 | SVGA3dCmdEndGBQuery q; |
1090 | } gb_cmd; | 1090 | } gb_cmd; |
1091 | 1091 | ||
1092 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); | 1092 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
1093 | 1093 | ||
1094 | gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY; | 1094 | gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY; |
1095 | gb_cmd.header.size = cmd->header.size; | 1095 | gb_cmd.header.size = cmd->header.size; |
1096 | gb_cmd.q.cid = cmd->q.cid; | 1096 | gb_cmd.q.cid = cmd->q.cid; |
1097 | gb_cmd.q.type = cmd->q.type; | 1097 | gb_cmd.q.type = cmd->q.type; |
1098 | gb_cmd.q.mobid = cmd->q.guestResult.gmrId; | 1098 | gb_cmd.q.mobid = cmd->q.guestResult.gmrId; |
1099 | gb_cmd.q.offset = cmd->q.guestResult.offset; | 1099 | gb_cmd.q.offset = cmd->q.guestResult.offset; |
1100 | 1100 | ||
1101 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); | 1101 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
1102 | return vmw_cmd_end_gb_query(dev_priv, sw_context, header); | 1102 | return vmw_cmd_end_gb_query(dev_priv, sw_context, header); |
1103 | } | 1103 | } |
1104 | 1104 | ||
1105 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1105 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1106 | if (unlikely(ret != 0)) | 1106 | if (unlikely(ret != 0)) |
1107 | return ret; | 1107 | return ret; |
1108 | 1108 | ||
1109 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | 1109 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
1110 | &cmd->q.guestResult, | 1110 | &cmd->q.guestResult, |
1111 | &vmw_bo); | 1111 | &vmw_bo); |
1112 | if (unlikely(ret != 0)) | 1112 | if (unlikely(ret != 0)) |
1113 | return ret; | 1113 | return ret; |
1114 | 1114 | ||
1115 | ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); | 1115 | ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); |
1116 | 1116 | ||
1117 | vmw_dmabuf_unreference(&vmw_bo); | 1117 | vmw_dmabuf_unreference(&vmw_bo); |
1118 | return ret; | 1118 | return ret; |
1119 | } | 1119 | } |
1120 | 1120 | ||
1121 | /** | 1121 | /** |
1122 | * vmw_cmd_wait_gb_query - validate an SVGA_3D_CMD_WAIT_GB_QUERY command. | 1122 | * vmw_cmd_wait_gb_query - validate an SVGA_3D_CMD_WAIT_GB_QUERY command. |
1123 | * | 1123 | * |
1124 | * @dev_priv: Pointer to a device private struct. | 1124 | * @dev_priv: Pointer to a device private struct. |
1125 | * @sw_context: The software context used for this command submission. | 1125 | * @sw_context: The software context used for this command submission. |
1126 | * @header: Pointer to the command header in the command stream. | 1126 | * @header: Pointer to the command header in the command stream. |
1127 | */ | 1127 | */ |
1128 | static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, | 1128 | static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, |
1129 | struct vmw_sw_context *sw_context, | 1129 | struct vmw_sw_context *sw_context, |
1130 | SVGA3dCmdHeader *header) | 1130 | SVGA3dCmdHeader *header) |
1131 | { | 1131 | { |
1132 | struct vmw_dma_buffer *vmw_bo; | 1132 | struct vmw_dma_buffer *vmw_bo; |
1133 | struct vmw_query_cmd { | 1133 | struct vmw_query_cmd { |
1134 | SVGA3dCmdHeader header; | 1134 | SVGA3dCmdHeader header; |
1135 | SVGA3dCmdWaitForGBQuery q; | 1135 | SVGA3dCmdWaitForGBQuery q; |
1136 | } *cmd; | 1136 | } *cmd; |
1137 | int ret; | 1137 | int ret; |
1138 | 1138 | ||
1139 | cmd = container_of(header, struct vmw_query_cmd, header); | 1139 | cmd = container_of(header, struct vmw_query_cmd, header); |
1140 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1140 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1141 | if (unlikely(ret != 0)) | 1141 | if (unlikely(ret != 0)) |
1142 | return ret; | 1142 | return ret; |
1143 | 1143 | ||
1144 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, | 1144 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, |
1145 | &cmd->q.mobid, | 1145 | &cmd->q.mobid, |
1146 | &vmw_bo); | 1146 | &vmw_bo); |
1147 | if (unlikely(ret != 0)) | 1147 | if (unlikely(ret != 0)) |
1148 | return ret; | 1148 | return ret; |
1149 | 1149 | ||
1150 | vmw_dmabuf_unreference(&vmw_bo); | 1150 | vmw_dmabuf_unreference(&vmw_bo); |
1151 | return 0; | 1151 | return 0; |
1152 | } | 1152 | } |
1153 | 1153 | ||
1154 | /** | 1154 | /** |
1155 | * vmw_cmd_wait_query - validate an SVGA_3D_CMD_WAIT_QUERY command. | 1155 | * vmw_cmd_wait_query - validate an SVGA_3D_CMD_WAIT_QUERY command. |
1156 | * | 1156 | * |
1157 | * @dev_priv: Pointer to a device private struct. | 1157 | * @dev_priv: Pointer to a device private struct. |
1158 | * @sw_context: The software context used for this command submission. | 1158 | * @sw_context: The software context used for this command submission. |
1159 | * @header: Pointer to the command header in the command stream. | 1159 | * @header: Pointer to the command header in the command stream. |
1160 | */ | 1160 | */ |
1161 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | 1161 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, |
1162 | struct vmw_sw_context *sw_context, | 1162 | struct vmw_sw_context *sw_context, |
1163 | SVGA3dCmdHeader *header) | 1163 | SVGA3dCmdHeader *header) |
1164 | { | 1164 | { |
1165 | struct vmw_dma_buffer *vmw_bo; | 1165 | struct vmw_dma_buffer *vmw_bo; |
1166 | struct vmw_query_cmd { | 1166 | struct vmw_query_cmd { |
1167 | SVGA3dCmdHeader header; | 1167 | SVGA3dCmdHeader header; |
1168 | SVGA3dCmdWaitForQuery q; | 1168 | SVGA3dCmdWaitForQuery q; |
1169 | } *cmd; | 1169 | } *cmd; |
1170 | int ret; | 1170 | int ret; |
1171 | 1171 | ||
1172 | cmd = container_of(header, struct vmw_query_cmd, header); | 1172 | cmd = container_of(header, struct vmw_query_cmd, header); |
1173 | if (dev_priv->has_mob) { | 1173 | if (dev_priv->has_mob) { |
1174 | struct { | 1174 | struct { |
1175 | SVGA3dCmdHeader header; | 1175 | SVGA3dCmdHeader header; |
1176 | SVGA3dCmdWaitForGBQuery q; | 1176 | SVGA3dCmdWaitForGBQuery q; |
1177 | } gb_cmd; | 1177 | } gb_cmd; |
1178 | 1178 | ||
1179 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); | 1179 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
1180 | 1180 | ||
1181 | gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; | 1181 | gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; |
1182 | gb_cmd.header.size = cmd->header.size; | 1182 | gb_cmd.header.size = cmd->header.size; |
1183 | gb_cmd.q.cid = cmd->q.cid; | 1183 | gb_cmd.q.cid = cmd->q.cid; |
1184 | gb_cmd.q.type = cmd->q.type; | 1184 | gb_cmd.q.type = cmd->q.type; |
1185 | gb_cmd.q.mobid = cmd->q.guestResult.gmrId; | 1185 | gb_cmd.q.mobid = cmd->q.guestResult.gmrId; |
1186 | gb_cmd.q.offset = cmd->q.guestResult.offset; | 1186 | gb_cmd.q.offset = cmd->q.guestResult.offset; |
1187 | 1187 | ||
1188 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); | 1188 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
1189 | return vmw_cmd_wait_gb_query(dev_priv, sw_context, header); | 1189 | return vmw_cmd_wait_gb_query(dev_priv, sw_context, header); |
1190 | } | 1190 | } |
1191 | 1191 | ||
1192 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1192 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1193 | if (unlikely(ret != 0)) | 1193 | if (unlikely(ret != 0)) |
1194 | return ret; | 1194 | return ret; |
1195 | 1195 | ||
1196 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | 1196 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
1197 | &cmd->q.guestResult, | 1197 | &cmd->q.guestResult, |
1198 | &vmw_bo); | 1198 | &vmw_bo); |
1199 | if (unlikely(ret != 0)) | 1199 | if (unlikely(ret != 0)) |
1200 | return ret; | 1200 | return ret; |
1201 | 1201 | ||
1202 | vmw_dmabuf_unreference(&vmw_bo); | 1202 | vmw_dmabuf_unreference(&vmw_bo); |
1203 | return 0; | 1203 | return 0; |
1204 | } | 1204 | } |
1205 | 1205 | ||
1206 | static int vmw_cmd_dma(struct vmw_private *dev_priv, | 1206 | static int vmw_cmd_dma(struct vmw_private *dev_priv, |
1207 | struct vmw_sw_context *sw_context, | 1207 | struct vmw_sw_context *sw_context, |
1208 | SVGA3dCmdHeader *header) | 1208 | SVGA3dCmdHeader *header) |
1209 | { | 1209 | { |
1210 | struct vmw_dma_buffer *vmw_bo = NULL; | 1210 | struct vmw_dma_buffer *vmw_bo = NULL; |
1211 | struct vmw_surface *srf = NULL; | 1211 | struct vmw_surface *srf = NULL; |
1212 | struct vmw_dma_cmd { | 1212 | struct vmw_dma_cmd { |
1213 | SVGA3dCmdHeader header; | 1213 | SVGA3dCmdHeader header; |
1214 | SVGA3dCmdSurfaceDMA dma; | 1214 | SVGA3dCmdSurfaceDMA dma; |
1215 | } *cmd; | 1215 | } *cmd; |
1216 | int ret; | 1216 | int ret; |
1217 | SVGA3dCmdSurfaceDMASuffix *suffix; | ||
1218 | uint32_t bo_size; | ||
1217 | 1219 | ||
1218 | cmd = container_of(header, struct vmw_dma_cmd, header); | 1220 | cmd = container_of(header, struct vmw_dma_cmd, header); |
1221 | suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma + | ||
1222 | header->size - sizeof(*suffix)); | ||
1223 | |||
1224 | /* Make sure the device and the verifier stay in sync. */ ||
1225 | if (unlikely(suffix->suffixSize != sizeof(*suffix))) { | ||
1226 | DRM_ERROR("Invalid DMA suffix size.\n"); | ||
1227 | return -EINVAL; | ||
1228 | } | ||
1229 | |||
1219 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | 1230 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
1220 | &cmd->dma.guest.ptr, | 1231 | &cmd->dma.guest.ptr, |
1221 | &vmw_bo); | 1232 | &vmw_bo); |
1222 | if (unlikely(ret != 0)) | 1233 | if (unlikely(ret != 0)) |
1223 | return ret; | 1234 | return ret; |
1235 | |||
1236 | /* Make sure DMA doesn't cross BO boundaries. */ | ||
1237 | bo_size = vmw_bo->base.num_pages * PAGE_SIZE; | ||
1238 | if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) { | ||
1239 | DRM_ERROR("Invalid DMA offset.\n"); | ||
1240 | ret = -EINVAL; ||
1240 | goto out_no_surface; ||
1241 | } | ||
1242 | |||
1243 | bo_size -= cmd->dma.guest.ptr.offset; | ||
1244 | if (unlikely(suffix->maximumOffset > bo_size)) | ||
1245 | suffix->maximumOffset = bo_size; | ||
1224 | 1246 | ||
1225 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1247 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1226 | user_surface_converter, &cmd->dma.host.sid, | 1248 | user_surface_converter, &cmd->dma.host.sid, |
1227 | NULL); | 1249 | NULL); |
1228 | if (unlikely(ret != 0)) { | 1250 | if (unlikely(ret != 0)) { |
1229 | if (unlikely(ret != -ERESTARTSYS)) | 1251 | if (unlikely(ret != -ERESTARTSYS)) |
1230 | DRM_ERROR("could not find surface for DMA.\n"); | 1252 | DRM_ERROR("could not find surface for DMA.\n"); |
1231 | goto out_no_surface; | 1253 | goto out_no_surface; |
1232 | } | 1254 | } |
1233 | 1255 | ||
1234 | srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); | 1256 | srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); |
1235 | 1257 | ||
1236 | vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, | 1258 | vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, |
1237 | header); | 1259 | header); |
1238 | 1260 | ||
1239 | out_no_surface: | 1261 | out_no_surface: |
1240 | vmw_dmabuf_unreference(&vmw_bo); | 1262 | vmw_dmabuf_unreference(&vmw_bo); |
1241 | return ret; | 1263 | return ret; |
1242 | } | 1264 | } |
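/*
 * A worked example of the boundary checks above, with illustrative
 * numbers and assuming 4 KiB pages: for a four-page buffer object,
 * bo_size = 4 * 4096 = 16384.  A guest.ptr.offset of 4096 passes the
 * offset check and leaves bo_size = 12288, so a user-supplied
 * suffix->maximumOffset of, say, 0xffffffff is clamped to 12288 and
 * the device can never DMA past the end of the buffer object.  A
 * guest.ptr.offset of 20000 (> 16384) is rejected with -EINVAL
 * instead.  Together with the suffixSize check, this is the whole of
 * the security fix in this commit.
 */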
1243 | 1265 | ||
1244 | static int vmw_cmd_draw(struct vmw_private *dev_priv, | 1266 | static int vmw_cmd_draw(struct vmw_private *dev_priv, |
1245 | struct vmw_sw_context *sw_context, | 1267 | struct vmw_sw_context *sw_context, |
1246 | SVGA3dCmdHeader *header) | 1268 | SVGA3dCmdHeader *header) |
1247 | { | 1269 | { |
1248 | struct vmw_draw_cmd { | 1270 | struct vmw_draw_cmd { |
1249 | SVGA3dCmdHeader header; | 1271 | SVGA3dCmdHeader header; |
1250 | SVGA3dCmdDrawPrimitives body; | 1272 | SVGA3dCmdDrawPrimitives body; |
1251 | } *cmd; | 1273 | } *cmd; |
1252 | SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)( | 1274 | SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)( |
1253 | (unsigned long)header + sizeof(*cmd)); | 1275 | (unsigned long)header + sizeof(*cmd)); |
1254 | SVGA3dPrimitiveRange *range; | 1276 | SVGA3dPrimitiveRange *range; |
1255 | uint32_t i; | 1277 | uint32_t i; |
1256 | uint32_t maxnum; | 1278 | uint32_t maxnum; |
1257 | int ret; | 1279 | int ret; |
1258 | 1280 | ||
1259 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1281 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1260 | if (unlikely(ret != 0)) | 1282 | if (unlikely(ret != 0)) |
1261 | return ret; | 1283 | return ret; |
1262 | 1284 | ||
1263 | cmd = container_of(header, struct vmw_draw_cmd, header); | 1285 | cmd = container_of(header, struct vmw_draw_cmd, header); |
1264 | maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl); | 1286 | maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl); |
1265 | 1287 | ||
1266 | if (unlikely(cmd->body.numVertexDecls > maxnum)) { | 1288 | if (unlikely(cmd->body.numVertexDecls > maxnum)) { |
1267 | DRM_ERROR("Illegal number of vertex declarations.\n"); | 1289 | DRM_ERROR("Illegal number of vertex declarations.\n"); |
1268 | return -EINVAL; | 1290 | return -EINVAL; |
1269 | } | 1291 | } |
1270 | 1292 | ||
1271 | for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { | 1293 | for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { |
1272 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1294 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1273 | user_surface_converter, | 1295 | user_surface_converter, |
1274 | &decl->array.surfaceId, NULL); | 1296 | &decl->array.surfaceId, NULL); |
1275 | if (unlikely(ret != 0)) | 1297 | if (unlikely(ret != 0)) |
1276 | return ret; | 1298 | return ret; |
1277 | } | 1299 | } |
1278 | 1300 | ||
1279 | maxnum = (header->size - sizeof(cmd->body) - | 1301 | maxnum = (header->size - sizeof(cmd->body) - |
1280 | cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range); | 1302 | cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range); |
1281 | if (unlikely(cmd->body.numRanges > maxnum)) { | 1303 | if (unlikely(cmd->body.numRanges > maxnum)) { |
1282 | DRM_ERROR("Illegal number of index ranges.\n"); | 1304 | DRM_ERROR("Illegal number of index ranges.\n"); |
1283 | return -EINVAL; | 1305 | return -EINVAL; |
1284 | } | 1306 | } |
1285 | 1307 | ||
1286 | range = (SVGA3dPrimitiveRange *) decl; | 1308 | range = (SVGA3dPrimitiveRange *) decl; |
1287 | for (i = 0; i < cmd->body.numRanges; ++i, ++range) { | 1309 | for (i = 0; i < cmd->body.numRanges; ++i, ++range) { |
1288 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1310 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1289 | user_surface_converter, | 1311 | user_surface_converter, |
1290 | &range->indexArray.surfaceId, NULL); | 1312 | &range->indexArray.surfaceId, NULL); |
1291 | if (unlikely(ret != 0)) | 1313 | if (unlikely(ret != 0)) |
1292 | return ret; | 1314 | return ret; |
1293 | } | 1315 | } |
1294 | return 0; | 1316 | return 0; |
1295 | } | 1317 | } |
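/*
 * A worked example of the two bounds checks above: the decl and range
 * arrays follow the fixed body back to back, so a well-formed command
 * satisfies
 *
 *	header->size == sizeof(cmd->body)
 *		      + numVertexDecls * sizeof(SVGA3dVertexDecl)
 *		      + numRanges * sizeof(SVGA3dPrimitiveRange)
 *
 * The first maxnum bounds numVertexDecls by everything after the body;
 * the second subtracts the decls actually claimed and bounds numRanges
 * by what is left.  A command claiming more decls or ranges than its
 * header->size can hold is rejected before either loop can walk past
 * the submitted stream.
 */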
1296 | 1318 | ||
1297 | 1319 | ||
1298 | static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | 1320 | static int vmw_cmd_tex_state(struct vmw_private *dev_priv, |
1299 | struct vmw_sw_context *sw_context, | 1321 | struct vmw_sw_context *sw_context, |
1300 | SVGA3dCmdHeader *header) | 1322 | SVGA3dCmdHeader *header) |
1301 | { | 1323 | { |
1302 | struct vmw_tex_state_cmd { | 1324 | struct vmw_tex_state_cmd { |
1303 | SVGA3dCmdHeader header; | 1325 | SVGA3dCmdHeader header; |
1304 | SVGA3dCmdSetTextureState state; | 1326 | SVGA3dCmdSetTextureState state; |
1305 | } *cmd; | 1327 | } *cmd; |
1306 | 1328 | ||
1307 | SVGA3dTextureState *last_state = (SVGA3dTextureState *) | 1329 | SVGA3dTextureState *last_state = (SVGA3dTextureState *) |
1308 | ((unsigned long) header + header->size + sizeof(header)); | 1330 | ((unsigned long) header + header->size + sizeof(header)); |
1309 | SVGA3dTextureState *cur_state = (SVGA3dTextureState *) | 1331 | SVGA3dTextureState *cur_state = (SVGA3dTextureState *) |
1310 | ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); | 1332 | ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); |
1311 | struct vmw_resource_val_node *ctx_node; | 1333 | struct vmw_resource_val_node *ctx_node; |
1312 | struct vmw_resource_val_node *res_node; | 1334 | struct vmw_resource_val_node *res_node; |
1313 | int ret; | 1335 | int ret; |
1314 | 1336 | ||
1315 | cmd = container_of(header, struct vmw_tex_state_cmd, | 1337 | cmd = container_of(header, struct vmw_tex_state_cmd, |
1316 | header); | 1338 | header); |
1317 | 1339 | ||
1318 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 1340 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
1319 | user_context_converter, &cmd->state.cid, | 1341 | user_context_converter, &cmd->state.cid, |
1320 | &ctx_node); | 1342 | &ctx_node); |
1321 | if (unlikely(ret != 0)) | 1343 | if (unlikely(ret != 0)) |
1322 | return ret; | 1344 | return ret; |
1323 | 1345 | ||
1324 | for (; cur_state < last_state; ++cur_state) { | 1346 | for (; cur_state < last_state; ++cur_state) { |
1325 | if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) | 1347 | if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) |
1326 | continue; | 1348 | continue; |
1327 | 1349 | ||
1328 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1350 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1329 | user_surface_converter, | 1351 | user_surface_converter, |
1330 | &cur_state->value, &res_node); | 1352 | &cur_state->value, &res_node); |
1331 | if (unlikely(ret != 0)) | 1353 | if (unlikely(ret != 0)) |
1332 | return ret; | 1354 | return ret; |
1333 | 1355 | ||
1334 | if (dev_priv->has_mob) { | 1356 | if (dev_priv->has_mob) { |
1335 | struct vmw_ctx_bindinfo bi; | 1357 | struct vmw_ctx_bindinfo bi; |
1336 | 1358 | ||
1337 | bi.ctx = ctx_node->res; | 1359 | bi.ctx = ctx_node->res; |
1338 | bi.res = res_node ? res_node->res : NULL; | 1360 | bi.res = res_node ? res_node->res : NULL; |
1339 | bi.bt = vmw_ctx_binding_tex; | 1361 | bi.bt = vmw_ctx_binding_tex; |
1340 | bi.i1.texture_stage = cur_state->stage; | 1362 | bi.i1.texture_stage = cur_state->stage; |
1341 | vmw_context_binding_add(ctx_node->staged_bindings, | 1363 | vmw_context_binding_add(ctx_node->staged_bindings, |
1342 | &bi); | 1364 | &bi); |
1343 | } | 1365 | } |
1344 | } | 1366 | } |
1345 | 1367 | ||
1346 | return 0; | 1368 | return 0; |
1347 | } | 1369 | } |
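/*
 * Note on the loop above: last_state is derived from header->size, so
 * the scan can never run past the submitted command.  Only
 * SVGA3D_TS_BIND_TEXTURE entries name a surface and need a resource
 * check; on guest-backed hardware each successful check is also
 * recorded as a context binding, so the driver can later scrub the
 * binding when the texture or the context goes away.
 */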
1348 | 1370 | ||
1349 | static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, | 1371 | static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, |
1350 | struct vmw_sw_context *sw_context, | 1372 | struct vmw_sw_context *sw_context, |
1351 | void *buf) | 1373 | void *buf) |
1352 | { | 1374 | { |
1353 | struct vmw_dma_buffer *vmw_bo; | 1375 | struct vmw_dma_buffer *vmw_bo; |
1354 | int ret; | 1376 | int ret; |
1355 | 1377 | ||
1356 | struct { | 1378 | struct { |
1357 | uint32_t header; | 1379 | uint32_t header; |
1358 | SVGAFifoCmdDefineGMRFB body; | 1380 | SVGAFifoCmdDefineGMRFB body; |
1359 | } *cmd = buf; | 1381 | } *cmd = buf; |
1360 | 1382 | ||
1361 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | 1383 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
1362 | &cmd->body.ptr, | 1384 | &cmd->body.ptr, |
1363 | &vmw_bo); | 1385 | &vmw_bo); |
1364 | if (unlikely(ret != 0)) | 1386 | if (unlikely(ret != 0)) |
1365 | return ret; | 1387 | return ret; |
1366 | 1388 | ||
1367 | vmw_dmabuf_unreference(&vmw_bo); | 1389 | vmw_dmabuf_unreference(&vmw_bo); |
1368 | 1390 | ||
1369 | return ret; | 1391 | return ret; |
1370 | } | 1392 | } |
1371 | 1393 | ||
1372 | /** | 1394 | /** |
1373 | * vmw_cmd_switch_backup - Utility function to handle backup buffer switching | 1395 | * vmw_cmd_switch_backup - Utility function to handle backup buffer switching |
1374 | * | 1396 | * |
1375 | * @dev_priv: Pointer to a device private struct. | 1397 | * @dev_priv: Pointer to a device private struct. |
1376 | * @sw_context: The software context being used for this batch. | 1398 | * @sw_context: The software context being used for this batch. |
1377 | * @res_type: The resource type. | 1399 | * @res_type: The resource type. |
1378 | * @converter: Information about user-space binding for this resource type. | 1400 | * @converter: Information about user-space binding for this resource type. |
1379 | * @res_id: Pointer to the user-space resource handle in the command stream. | 1401 | * @res_id: Pointer to the user-space resource handle in the command stream. |
1380 | * @buf_id: Pointer to the user-space backup buffer handle in the command | 1402 | * @buf_id: Pointer to the user-space backup buffer handle in the command |
1381 | * stream. | 1403 | * stream. |
1382 | * @backup_offset: Offset of backup into MOB. | 1404 | * @backup_offset: Offset of backup into MOB. |
1383 | * | 1405 | * |
1384 | * This function prepares for registering a switch of backup buffers | 1406 | * This function prepares for registering a switch of backup buffers |
1385 | * in the resource metadata just prior to unreserving. | 1407 | * in the resource metadata just prior to unreserving. |
1386 | */ | 1408 | */ |
1387 | static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, | 1409 | static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, |
1388 | struct vmw_sw_context *sw_context, | 1410 | struct vmw_sw_context *sw_context, |
1389 | enum vmw_res_type res_type, | 1411 | enum vmw_res_type res_type, |
1390 | const struct vmw_user_resource_conv | 1412 | const struct vmw_user_resource_conv |
1391 | *converter, | 1413 | *converter, |
1392 | uint32_t *res_id, | 1414 | uint32_t *res_id, |
1393 | uint32_t *buf_id, | 1415 | uint32_t *buf_id, |
1394 | unsigned long backup_offset) | 1416 | unsigned long backup_offset) |
1395 | { | 1417 | { |
1396 | int ret; | 1418 | int ret; |
1397 | struct vmw_dma_buffer *dma_buf; | 1419 | struct vmw_dma_buffer *dma_buf; |
1398 | struct vmw_resource_val_node *val_node; | 1420 | struct vmw_resource_val_node *val_node; |
1399 | 1421 | ||
1400 | ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, | 1422 | ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, |
1401 | converter, res_id, &val_node); | 1423 | converter, res_id, &val_node); |
1402 | if (unlikely(ret != 0)) | 1424 | if (unlikely(ret != 0)) |
1403 | return ret; | 1425 | return ret; |
1404 | 1426 | ||
1405 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); | 1427 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); |
1406 | if (unlikely(ret != 0)) | 1428 | if (unlikely(ret != 0)) |
1407 | return ret; | 1429 | return ret; |
1408 | 1430 | ||
1409 | if (val_node->first_usage) | 1431 | if (val_node->first_usage) |
1410 | val_node->no_buffer_needed = true; | 1432 | val_node->no_buffer_needed = true; |
1411 | 1433 | ||
1412 | vmw_dmabuf_unreference(&val_node->new_backup); | 1434 | vmw_dmabuf_unreference(&val_node->new_backup); |
1413 | val_node->new_backup = dma_buf; | 1435 | val_node->new_backup = dma_buf; |
1414 | val_node->new_backup_offset = backup_offset; | 1436 | val_node->new_backup_offset = backup_offset; |
1415 | 1437 | ||
1416 | return 0; | 1438 | return 0; |
1417 | } | 1439 | } |
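/*
 * Usage note: vmw_cmd_bind_gb_surface() below is the simplest caller,
 * passing a backup_offset of 0 since a surface's backup starts at the
 * beginning of its MOB; validators for commands that carry an explicit
 * offset (e.g. a GB shader bind) pass that offset through instead.
 */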
1418 | 1440 | ||
1419 | /** | 1441 | /** |
1420 | * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE | 1442 | * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE |
1421 | * command | 1443 | * command |
1422 | * | 1444 | * |
1423 | * @dev_priv: Pointer to a device private struct. | 1445 | * @dev_priv: Pointer to a device private struct. |
1424 | * @sw_context: The software context being used for this batch. | 1446 | * @sw_context: The software context being used for this batch. |
1425 | * @header: Pointer to the command header in the command stream. | 1447 | * @header: Pointer to the command header in the command stream. |
1426 | */ | 1448 | */ |
1427 | static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, | 1449 | static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, |
1428 | struct vmw_sw_context *sw_context, | 1450 | struct vmw_sw_context *sw_context, |
1429 | SVGA3dCmdHeader *header) | 1451 | SVGA3dCmdHeader *header) |
1430 | { | 1452 | { |
1431 | struct vmw_bind_gb_surface_cmd { | 1453 | struct vmw_bind_gb_surface_cmd { |
1432 | SVGA3dCmdHeader header; | 1454 | SVGA3dCmdHeader header; |
1433 | SVGA3dCmdBindGBSurface body; | 1455 | SVGA3dCmdBindGBSurface body; |
1434 | } *cmd; | 1456 | } *cmd; |
1435 | 1457 | ||
1436 | cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header); | 1458 | cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header); |
1437 | 1459 | ||
1438 | return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface, | 1460 | return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface, |
1439 | user_surface_converter, | 1461 | user_surface_converter, |
1440 | &cmd->body.sid, &cmd->body.mobid, | 1462 | &cmd->body.sid, &cmd->body.mobid, |
1441 | 0); | 1463 | 0); |
1442 | } | 1464 | } |
1443 | 1465 | ||
1444 | /** | 1466 | /** |
1445 | * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE | 1467 | * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE |
1446 | * command | 1468 | * command |
1447 | * | 1469 | * |
1448 | * @dev_priv: Pointer to a device private struct. | 1470 | * @dev_priv: Pointer to a device private struct. |
1449 | * @sw_context: The software context being used for this batch. | 1471 | * @sw_context: The software context being used for this batch. |
1450 | * @header: Pointer to the command header in the command stream. | 1472 | * @header: Pointer to the command header in the command stream. |
1451 | */ | 1473 | */ |
1452 | static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv, | 1474 | static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv, |
1453 | struct vmw_sw_context *sw_context, | 1475 | struct vmw_sw_context *sw_context, |
1454 | SVGA3dCmdHeader *header) | 1476 | SVGA3dCmdHeader *header) |
1455 | { | 1477 | { |
1456 | struct vmw_gb_surface_cmd { | 1478 | struct vmw_gb_surface_cmd { |
1457 | SVGA3dCmdHeader header; | 1479 | SVGA3dCmdHeader header; |
1458 | SVGA3dCmdUpdateGBImage body; | 1480 | SVGA3dCmdUpdateGBImage body; |
1459 | } *cmd; | 1481 | } *cmd; |
1460 | 1482 | ||
1461 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | 1483 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); |
1462 | 1484 | ||
1463 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1485 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1464 | user_surface_converter, | 1486 | user_surface_converter, |
1465 | &cmd->body.image.sid, NULL); | 1487 | &cmd->body.image.sid, NULL); |
1466 | } | 1488 | } |
1467 | 1489 | ||
1468 | /** | 1490 | /** |
1469 | * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE | 1491 | * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE |
1470 | * command | 1492 | * command |
1471 | * | 1493 | * |
1472 | * @dev_priv: Pointer to a device private struct. | 1494 | * @dev_priv: Pointer to a device private struct. |
1473 | * @sw_context: The software context being used for this batch. | 1495 | * @sw_context: The software context being used for this batch. |
1474 | * @header: Pointer to the command header in the command stream. | 1496 | * @header: Pointer to the command header in the command stream. |
1475 | */ | 1497 | */ |
1476 | static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, | 1498 | static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, |
1477 | struct vmw_sw_context *sw_context, | 1499 | struct vmw_sw_context *sw_context, |
1478 | SVGA3dCmdHeader *header) | 1500 | SVGA3dCmdHeader *header) |
1479 | { | 1501 | { |
1480 | struct vmw_gb_surface_cmd { | 1502 | struct vmw_gb_surface_cmd { |
1481 | SVGA3dCmdHeader header; | 1503 | SVGA3dCmdHeader header; |
1482 | SVGA3dCmdUpdateGBSurface body; | 1504 | SVGA3dCmdUpdateGBSurface body; |
1483 | } *cmd; | 1505 | } *cmd; |
1484 | 1506 | ||
1485 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | 1507 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); |
1486 | 1508 | ||
1487 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1509 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1488 | user_surface_converter, | 1510 | user_surface_converter, |
1489 | &cmd->body.sid, NULL); | 1511 | &cmd->body.sid, NULL); |
1490 | } | 1512 | } |
1491 | 1513 | ||
1492 | /** | 1514 | /** |
1493 | * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE | 1515 | * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE |
1494 | * command | 1516 | * command |
1495 | * | 1517 | * |
1496 | * @dev_priv: Pointer to a device private struct. | 1518 | * @dev_priv: Pointer to a device private struct. |
1497 | * @sw_context: The software context being used for this batch. | 1519 | * @sw_context: The software context being used for this batch. |
1498 | * @header: Pointer to the command header in the command stream. | 1520 | * @header: Pointer to the command header in the command stream. |
1499 | */ | 1521 | */ |
1500 | static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, | 1522 | static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, |
1501 | struct vmw_sw_context *sw_context, | 1523 | struct vmw_sw_context *sw_context, |
1502 | SVGA3dCmdHeader *header) | 1524 | SVGA3dCmdHeader *header) |
1503 | { | 1525 | { |
1504 | struct vmw_gb_surface_cmd { | 1526 | struct vmw_gb_surface_cmd { |
1505 | SVGA3dCmdHeader header; | 1527 | SVGA3dCmdHeader header; |
1506 | SVGA3dCmdReadbackGBImage body; | 1528 | SVGA3dCmdReadbackGBImage body; |
1507 | } *cmd; | 1529 | } *cmd; |
1508 | 1530 | ||
1509 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | 1531 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); |
1510 | 1532 | ||
1511 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1533 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1512 | user_surface_converter, | 1534 | user_surface_converter, |
1513 | &cmd->body.image.sid, NULL); | 1535 | &cmd->body.image.sid, NULL); |
1514 | } | 1536 | } |
1515 | 1537 | ||
1516 | /** | 1538 | /** |
1517 | * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE | 1539 | * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE |
1518 | * command | 1540 | * command |
1519 | * | 1541 | * |
1520 | * @dev_priv: Pointer to a device private struct. | 1542 | * @dev_priv: Pointer to a device private struct. |
1521 | * @sw_context: The software context being used for this batch. | 1543 | * @sw_context: The software context being used for this batch. |
1522 | * @header: Pointer to the command header in the command stream. | 1544 | * @header: Pointer to the command header in the command stream. |
1523 | */ | 1545 | */ |
1524 | static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, | 1546 | static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, |
1525 | struct vmw_sw_context *sw_context, | 1547 | struct vmw_sw_context *sw_context, |
1526 | SVGA3dCmdHeader *header) | 1548 | SVGA3dCmdHeader *header) |
1527 | { | 1549 | { |
1528 | struct vmw_gb_surface_cmd { | 1550 | struct vmw_gb_surface_cmd { |
1529 | SVGA3dCmdHeader header; | 1551 | SVGA3dCmdHeader header; |
1530 | SVGA3dCmdReadbackGBSurface body; | 1552 | SVGA3dCmdReadbackGBSurface body; |
1531 | } *cmd; | 1553 | } *cmd; |
1532 | 1554 | ||
1533 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | 1555 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); |
1534 | 1556 | ||
1535 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1557 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1536 | user_surface_converter, | 1558 | user_surface_converter, |
1537 | &cmd->body.sid, NULL); | 1559 | &cmd->body.sid, NULL); |
1538 | } | 1560 | } |
1539 | 1561 | ||
1540 | /** | 1562 | /** |
1541 | * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE | 1563 | * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE |
1542 | * command | 1564 | * command |
1543 | * | 1565 | * |
1544 | * @dev_priv: Pointer to a device private struct. | 1566 | * @dev_priv: Pointer to a device private struct. |
1545 | * @sw_context: The software context being used for this batch. | 1567 | * @sw_context: The software context being used for this batch. |
1546 | * @header: Pointer to the command header in the command stream. | 1568 | * @header: Pointer to the command header in the command stream. |
1547 | */ | 1569 | */ |
1548 | static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, | 1570 | static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, |
1549 | struct vmw_sw_context *sw_context, | 1571 | struct vmw_sw_context *sw_context, |
1550 | SVGA3dCmdHeader *header) | 1572 | SVGA3dCmdHeader *header) |
1551 | { | 1573 | { |
1552 | struct vmw_gb_surface_cmd { | 1574 | struct vmw_gb_surface_cmd { |
1553 | SVGA3dCmdHeader header; | 1575 | SVGA3dCmdHeader header; |
1554 | SVGA3dCmdInvalidateGBImage body; | 1576 | SVGA3dCmdInvalidateGBImage body; |
1555 | } *cmd; | 1577 | } *cmd; |
1556 | 1578 | ||
1557 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | 1579 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); |
1558 | 1580 | ||
1559 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1581 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1560 | user_surface_converter, | 1582 | user_surface_converter, |
1561 | &cmd->body.image.sid, NULL); | 1583 | &cmd->body.image.sid, NULL); |
1562 | } | 1584 | } |
1563 | 1585 | ||
1564 | /** | 1586 | /** |
1565 | * vmw_cmd_invalidate_gb_surface - Validate an | 1587 | * vmw_cmd_invalidate_gb_surface - Validate an |
1566 | * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command | 1588 | * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command |
1567 | * | 1589 | * |
1568 | * @dev_priv: Pointer to a device private struct. | 1590 | * @dev_priv: Pointer to a device private struct. |
1569 | * @sw_context: The software context being used for this batch. | 1591 | * @sw_context: The software context being used for this batch. |
1570 | * @header: Pointer to the command header in the command stream. | 1592 | * @header: Pointer to the command header in the command stream. |
1571 | */ | 1593 | */ |
1572 | static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, | 1594 | static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, |
1573 | struct vmw_sw_context *sw_context, | 1595 | struct vmw_sw_context *sw_context, |
1574 | SVGA3dCmdHeader *header) | 1596 | SVGA3dCmdHeader *header) |
1575 | { | 1597 | { |
1576 | struct vmw_gb_surface_cmd { | 1598 | struct vmw_gb_surface_cmd { |
1577 | SVGA3dCmdHeader header; | 1599 | SVGA3dCmdHeader header; |
1578 | SVGA3dCmdInvalidateGBSurface body; | 1600 | SVGA3dCmdInvalidateGBSurface body; |
1579 | } *cmd; | 1601 | } *cmd; |
1580 | 1602 | ||
1581 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | 1603 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); |
1582 | 1604 | ||
1583 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1605 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1584 | user_surface_converter, | 1606 | user_surface_converter, |
1585 | &cmd->body.sid, NULL); | 1607 | &cmd->body.sid, NULL); |
1586 | } | 1608 | } |
1587 | 1609 | ||
1588 | 1610 | ||
1589 | /** | 1611 | /** |
1590 | * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE | 1612 | * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE |
1591 | * command | 1613 | * command |
1592 | * | 1614 | * |
1593 | * @dev_priv: Pointer to a device private struct. | 1615 | * @dev_priv: Pointer to a device private struct. |
1594 | * @sw_context: The software context being used for this batch. | 1616 | * @sw_context: The software context being used for this batch. |
1595 | * @header: Pointer to the command header in the command stream. | 1617 | * @header: Pointer to the command header in the command stream. |
1596 | */ | 1618 | */ |
1597 | static int vmw_cmd_shader_define(struct vmw_private *dev_priv, | 1619 | static int vmw_cmd_shader_define(struct vmw_private *dev_priv, |
1598 | struct vmw_sw_context *sw_context, | 1620 | struct vmw_sw_context *sw_context, |
1599 | SVGA3dCmdHeader *header) | 1621 | SVGA3dCmdHeader *header) |
1600 | { | 1622 | { |
1601 | struct vmw_shader_define_cmd { | 1623 | struct vmw_shader_define_cmd { |
1602 | SVGA3dCmdHeader header; | 1624 | SVGA3dCmdHeader header; |
1603 | SVGA3dCmdDefineShader body; | 1625 | SVGA3dCmdDefineShader body; |
1604 | } *cmd; | 1626 | } *cmd; |
1605 | int ret; | 1627 | int ret; |
1606 | size_t size; | 1628 | size_t size; |
1607 | 1629 | ||
1608 | cmd = container_of(header, struct vmw_shader_define_cmd, | 1630 | cmd = container_of(header, struct vmw_shader_define_cmd, |
1609 | header); | 1631 | header); |
1610 | 1632 | ||
1611 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 1633 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
1612 | user_context_converter, &cmd->body.cid, | 1634 | user_context_converter, &cmd->body.cid, |
1613 | NULL); | 1635 | NULL); |
1614 | if (unlikely(ret != 0)) | 1636 | if (unlikely(ret != 0)) |
1615 | return ret; | 1637 | return ret; |
1616 | 1638 | ||
1617 | if (unlikely(!dev_priv->has_mob)) | 1639 | if (unlikely(!dev_priv->has_mob)) |
1618 | return 0; | 1640 | return 0; |
1619 | 1641 | ||
1620 | size = cmd->header.size - sizeof(cmd->body); | 1642 | size = cmd->header.size - sizeof(cmd->body); |
1621 | ret = vmw_compat_shader_add(sw_context->fp->shman, | 1643 | ret = vmw_compat_shader_add(sw_context->fp->shman, |
1622 | cmd->body.shid, cmd + 1, | 1644 | cmd->body.shid, cmd + 1, |
1623 | cmd->body.type, size, | 1645 | cmd->body.type, size, |
1624 | sw_context->fp->tfile, | 1646 | sw_context->fp->tfile, |
1625 | &sw_context->staged_shaders); | 1647 | &sw_context->staged_shaders); |
1626 | if (unlikely(ret != 0)) | 1648 | if (unlikely(ret != 0)) |
1627 | return ret; | 1649 | return ret; |
1628 | 1650 | ||
1629 | return vmw_resource_relocation_add(&sw_context->res_relocations, | 1651 | return vmw_resource_relocation_add(&sw_context->res_relocations, |
1630 | NULL, &cmd->header.id - | 1652 | NULL, &cmd->header.id - |
1631 | sw_context->buf_start); | 1653 | sw_context->buf_start); |
1634 | } | 1656 | } |
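
On guest-backed (has_mob) hardware the legacy SHADER_DEFINE payload is captured into the per-file compat shader manager, so the command itself must never reach the device. The relocation added above with a NULL resource marks the command header so that, when resource relocations are applied just before submission, the header id is overwritten with SVGA_3D_CMD_NOP. A minimal stand-alone model of that patching step (the toy_* names and the NOP value are illustrative, not the driver's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_CMD_NOP 0xdeadu                /* stand-in for SVGA_3D_CMD_NOP */

struct toy_resource { uint32_t id; };

/* offset is in 32-bit words from the start of the command buffer;
 * res == NULL means "NOP out the command here" rather than patch an id. */
struct toy_reloc {
        struct toy_resource *res;
        size_t offset;
};

static void toy_apply_res_relocations(uint32_t *cb,
                                      const struct toy_reloc *rel, size_t n)
{
        size_t i;

        for (i = 0; i < n; ++i) {
                if (rel[i].res)            /* patch in the device id */
                        cb[rel[i].offset] = rel[i].res->id;
                else                       /* turn the command into a NOP */
                        cb[rel[i].offset] = TOY_CMD_NOP;
        }
}

int main(void)
{
        uint32_t cb[3] = { 0x1234, 4, 42 };    /* header.id, header.size, body */
        struct toy_reloc rel = { NULL, 0 };    /* offset 0 == header.id */

        toy_apply_res_relocations(cb, &rel, 1);
        printf("patched id: 0x%x\n", cb[0]);   /* prints 0xdead */
        return 0;
}
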
1635 | 1657 | ||
1636 | /** | 1658 | /** |
1637 | * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY | 1659 | * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY |
1638 | * command | 1660 | * command |
1639 | * | 1661 | * |
1640 | * @dev_priv: Pointer to a device private struct. | 1662 | * @dev_priv: Pointer to a device private struct. |
1641 | * @sw_context: The software context being used for this batch. | 1663 | * @sw_context: The software context being used for this batch. |
1642 | * @header: Pointer to the command header in the command stream. | 1664 | * @header: Pointer to the command header in the command stream. |
1643 | */ | 1665 | */ |
1644 | static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, | 1666 | static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, |
1645 | struct vmw_sw_context *sw_context, | 1667 | struct vmw_sw_context *sw_context, |
1646 | SVGA3dCmdHeader *header) | 1668 | SVGA3dCmdHeader *header) |
1647 | { | 1669 | { |
1648 | struct vmw_shader_destroy_cmd { | 1670 | struct vmw_shader_destroy_cmd { |
1649 | SVGA3dCmdHeader header; | 1671 | SVGA3dCmdHeader header; |
1650 | SVGA3dCmdDestroyShader body; | 1672 | SVGA3dCmdDestroyShader body; |
1651 | } *cmd; | 1673 | } *cmd; |
1652 | int ret; | 1674 | int ret; |
1653 | 1675 | ||
1654 | cmd = container_of(header, struct vmw_shader_destroy_cmd, | 1676 | cmd = container_of(header, struct vmw_shader_destroy_cmd, |
1655 | header); | 1677 | header); |
1656 | 1678 | ||
1657 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 1679 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
1658 | user_context_converter, &cmd->body.cid, | 1680 | user_context_converter, &cmd->body.cid, |
1659 | NULL); | 1681 | NULL); |
1660 | if (unlikely(ret != 0)) | 1682 | if (unlikely(ret != 0)) |
1661 | return ret; | 1683 | return ret; |
1662 | 1684 | ||
1663 | if (unlikely(!dev_priv->has_mob)) | 1685 | if (unlikely(!dev_priv->has_mob)) |
1664 | return 0; | 1686 | return 0; |
1665 | 1687 | ||
1666 | ret = vmw_compat_shader_remove(sw_context->fp->shman, | 1688 | ret = vmw_compat_shader_remove(sw_context->fp->shman, |
1667 | cmd->body.shid, | 1689 | cmd->body.shid, |
1668 | cmd->body.type, | 1690 | cmd->body.type, |
1669 | &sw_context->staged_shaders); | 1691 | &sw_context->staged_shaders); |
1670 | if (unlikely(ret != 0)) | 1692 | if (unlikely(ret != 0)) |
1671 | return ret; | 1693 | return ret; |
1672 | 1694 | ||
1673 | return vmw_resource_relocation_add(&sw_context->res_relocations, | 1695 | return vmw_resource_relocation_add(&sw_context->res_relocations, |
1674 | NULL, &cmd->header.id - | 1696 | NULL, &cmd->header.id - |
1675 | sw_context->buf_start); | 1697 | sw_context->buf_start); |
1678 | } | 1700 | } |
1679 | 1701 | ||
1680 | /** | 1702 | /** |
1681 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER | 1703 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER |
1682 | * command | 1704 | * command |
1683 | * | 1705 | * |
1684 | * @dev_priv: Pointer to a device private struct. | 1706 | * @dev_priv: Pointer to a device private struct. |
1685 | * @sw_context: The software context being used for this batch. | 1707 | * @sw_context: The software context being used for this batch. |
1686 | * @header: Pointer to the command header in the command stream. | 1708 | * @header: Pointer to the command header in the command stream. |
1687 | */ | 1709 | */ |
1688 | static int vmw_cmd_set_shader(struct vmw_private *dev_priv, | 1710 | static int vmw_cmd_set_shader(struct vmw_private *dev_priv, |
1689 | struct vmw_sw_context *sw_context, | 1711 | struct vmw_sw_context *sw_context, |
1690 | SVGA3dCmdHeader *header) | 1712 | SVGA3dCmdHeader *header) |
1691 | { | 1713 | { |
1692 | struct vmw_set_shader_cmd { | 1714 | struct vmw_set_shader_cmd { |
1693 | SVGA3dCmdHeader header; | 1715 | SVGA3dCmdHeader header; |
1694 | SVGA3dCmdSetShader body; | 1716 | SVGA3dCmdSetShader body; |
1695 | } *cmd; | 1717 | } *cmd; |
1696 | struct vmw_resource_val_node *ctx_node; | 1718 | struct vmw_resource_val_node *ctx_node; |
1697 | int ret; | 1719 | int ret; |
1698 | 1720 | ||
1699 | cmd = container_of(header, struct vmw_set_shader_cmd, | 1721 | cmd = container_of(header, struct vmw_set_shader_cmd, |
1700 | header); | 1722 | header); |
1701 | 1723 | ||
1702 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 1724 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
1703 | user_context_converter, &cmd->body.cid, | 1725 | user_context_converter, &cmd->body.cid, |
1704 | &ctx_node); | 1726 | &ctx_node); |
1705 | if (unlikely(ret != 0)) | 1727 | if (unlikely(ret != 0)) |
1706 | return ret; | 1728 | return ret; |
1707 | 1729 | ||
1708 | if (dev_priv->has_mob) { | 1730 | if (dev_priv->has_mob) { |
1709 | struct vmw_ctx_bindinfo bi; | 1731 | struct vmw_ctx_bindinfo bi; |
1710 | struct vmw_resource_val_node *res_node; | 1732 | struct vmw_resource_val_node *res_node; |
1711 | u32 shid = cmd->body.shid; | 1733 | u32 shid = cmd->body.shid; |
1712 | 1734 | ||
1713 | if (shid != SVGA3D_INVALID_ID) | 1735 | if (shid != SVGA3D_INVALID_ID) |
1714 | (void) vmw_compat_shader_lookup(sw_context->fp->shman, | 1736 | (void) vmw_compat_shader_lookup(sw_context->fp->shman, |
1715 | cmd->body.type, | 1737 | cmd->body.type, |
1716 | &shid); | 1738 | &shid); |
1717 | 1739 | ||
1718 | ret = vmw_cmd_compat_res_check(dev_priv, sw_context, | 1740 | ret = vmw_cmd_compat_res_check(dev_priv, sw_context, |
1719 | vmw_res_shader, | 1741 | vmw_res_shader, |
1720 | user_shader_converter, | 1742 | user_shader_converter, |
1721 | shid, | 1743 | shid, |
1722 | &cmd->body.shid, &res_node); | 1744 | &cmd->body.shid, &res_node); |
1723 | if (unlikely(ret != 0)) | 1745 | if (unlikely(ret != 0)) |
1724 | return ret; | 1746 | return ret; |
1725 | 1747 | ||
1726 | bi.ctx = ctx_node->res; | 1748 | bi.ctx = ctx_node->res; |
1727 | bi.res = res_node ? res_node->res : NULL; | 1749 | bi.res = res_node ? res_node->res : NULL; |
1728 | bi.bt = vmw_ctx_binding_shader; | 1750 | bi.bt = vmw_ctx_binding_shader; |
1729 | bi.i1.shader_type = cmd->body.type; | 1751 | bi.i1.shader_type = cmd->body.type; |
1730 | return vmw_context_binding_add(ctx_node->staged_bindings, &bi); | 1752 | return vmw_context_binding_add(ctx_node->staged_bindings, &bi); |
1731 | } | 1753 | } |
1732 | 1754 | ||
1733 | return 0; | 1755 | return 0; |
1734 | } | 1756 | } |
1735 | 1757 | ||
1736 | /** | 1758 | /** |
1737 | * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST | 1759 | * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST |
1738 | * command | 1760 | * command |
1739 | * | 1761 | * |
1740 | * @dev_priv: Pointer to a device private struct. | 1762 | * @dev_priv: Pointer to a device private struct. |
1741 | * @sw_context: The software context being used for this batch. | 1763 | * @sw_context: The software context being used for this batch. |
1742 | * @header: Pointer to the command header in the command stream. | 1764 | * @header: Pointer to the command header in the command stream. |
1743 | */ | 1765 | */ |
1744 | static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv, | 1766 | static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv, |
1745 | struct vmw_sw_context *sw_context, | 1767 | struct vmw_sw_context *sw_context, |
1746 | SVGA3dCmdHeader *header) | 1768 | SVGA3dCmdHeader *header) |
1747 | { | 1769 | { |
1748 | struct vmw_set_shader_const_cmd { | 1770 | struct vmw_set_shader_const_cmd { |
1749 | SVGA3dCmdHeader header; | 1771 | SVGA3dCmdHeader header; |
1750 | SVGA3dCmdSetShaderConst body; | 1772 | SVGA3dCmdSetShaderConst body; |
1751 | } *cmd; | 1773 | } *cmd; |
1752 | int ret; | 1774 | int ret; |
1753 | 1775 | ||
1754 | cmd = container_of(header, struct vmw_set_shader_const_cmd, | 1776 | cmd = container_of(header, struct vmw_set_shader_const_cmd, |
1755 | header); | 1777 | header); |
1756 | 1778 | ||
1757 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 1779 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
1758 | user_context_converter, &cmd->body.cid, | 1780 | user_context_converter, &cmd->body.cid, |
1759 | NULL); | 1781 | NULL); |
1760 | if (unlikely(ret != 0)) | 1782 | if (unlikely(ret != 0)) |
1761 | return ret; | 1783 | return ret; |
1762 | 1784 | ||
1763 | if (dev_priv->has_mob) | 1785 | if (dev_priv->has_mob) |
1764 | header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE; | 1786 | header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE; |
1765 | 1787 | ||
1766 | return 0; | 1788 | return 0; |
1767 | } | 1789 | } |
1768 | 1790 | ||
1769 | /** | 1791 | /** |
1770 | * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER | 1792 | * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER |
1771 | * command | 1793 | * command |
1772 | * | 1794 | * |
1773 | * @dev_priv: Pointer to a device private struct. | 1795 | * @dev_priv: Pointer to a device private struct. |
1774 | * @sw_context: The software context being used for this batch. | 1796 | * @sw_context: The software context being used for this batch. |
1775 | * @header: Pointer to the command header in the command stream. | 1797 | * @header: Pointer to the command header in the command stream. |
1776 | */ | 1798 | */ |
1777 | static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, | 1799 | static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, |
1778 | struct vmw_sw_context *sw_context, | 1800 | struct vmw_sw_context *sw_context, |
1779 | SVGA3dCmdHeader *header) | 1801 | SVGA3dCmdHeader *header) |
1780 | { | 1802 | { |
1781 | struct vmw_bind_gb_shader_cmd { | 1803 | struct vmw_bind_gb_shader_cmd { |
1782 | SVGA3dCmdHeader header; | 1804 | SVGA3dCmdHeader header; |
1783 | SVGA3dCmdBindGBShader body; | 1805 | SVGA3dCmdBindGBShader body; |
1784 | } *cmd; | 1806 | } *cmd; |
1785 | 1807 | ||
1786 | cmd = container_of(header, struct vmw_bind_gb_shader_cmd, | 1808 | cmd = container_of(header, struct vmw_bind_gb_shader_cmd, |
1787 | header); | 1809 | header); |
1788 | 1810 | ||
1789 | return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader, | 1811 | return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader, |
1790 | user_shader_converter, | 1812 | user_shader_converter, |
1791 | &cmd->body.shid, &cmd->body.mobid, | 1813 | &cmd->body.shid, &cmd->body.mobid, |
1792 | cmd->body.offsetInBytes); | 1814 | cmd->body.offsetInBytes); |
1793 | } | 1815 | } |
1794 | 1816 | ||
1795 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | 1817 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, |
1796 | struct vmw_sw_context *sw_context, | 1818 | struct vmw_sw_context *sw_context, |
1797 | void *buf, uint32_t *size) | 1819 | void *buf, uint32_t *size) |
1798 | { | 1820 | { |
1799 | uint32_t size_remaining = *size; | 1821 | uint32_t size_remaining = *size; |
1800 | uint32_t cmd_id; | 1822 | uint32_t cmd_id; |
1801 | 1823 | ||
1802 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); | 1824 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); |
1803 | switch (cmd_id) { | 1825 | switch (cmd_id) { |
1804 | case SVGA_CMD_UPDATE: | 1826 | case SVGA_CMD_UPDATE: |
1805 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate); | 1827 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate); |
1806 | break; | 1828 | break; |
1807 | case SVGA_CMD_DEFINE_GMRFB: | 1829 | case SVGA_CMD_DEFINE_GMRFB: |
1808 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB); | 1830 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB); |
1809 | break; | 1831 | break; |
1810 | case SVGA_CMD_BLIT_GMRFB_TO_SCREEN: | 1832 | case SVGA_CMD_BLIT_GMRFB_TO_SCREEN: |
1811 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); | 1833 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); |
1812 | break; | 1834 | break; |
1813 | case SVGA_CMD_BLIT_SCREEN_TO_GMRFB: | 1835 | case SVGA_CMD_BLIT_SCREEN_TO_GMRFB: |
1814 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB); | 1836 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB); |
1815 | break; | 1837 | break; |
1816 | default: | 1838 | default: |
1817 | DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id); | 1839 | DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id); |
1818 | return -EINVAL; | 1840 | return -EINVAL; |
1819 | } | 1841 | } |
1820 | 1842 | ||
1821 | if (*size > size_remaining) { | 1843 | if (*size > size_remaining) { |
1822 | DRM_ERROR("Invalid SVGA command (size mismatch):" | 1844 | DRM_ERROR("Invalid SVGA command (size mismatch):" |
1823 | " %u.\n", cmd_id); | 1845 | " %u.\n", cmd_id); |
1824 | return -EINVAL; | 1846 | return -EINVAL; |
1825 | } | 1847 | } |
1826 | 1848 | ||
1827 | if (unlikely(!sw_context->kernel)) { | 1849 | if (unlikely(!sw_context->kernel)) { |
1828 | DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id); | 1850 | DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id); |
1829 | return -EPERM; | 1851 | return -EPERM; |
1830 | } | 1852 | } |
1831 | 1853 | ||
1832 | if (cmd_id == SVGA_CMD_DEFINE_GMRFB) | 1854 | if (cmd_id == SVGA_CMD_DEFINE_GMRFB) |
1833 | return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf); | 1855 | return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf); |
1834 | 1856 | ||
1835 | return 0; | 1857 | return 0; |
1836 | } | 1858 | } |
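
The 2D (SVGA_CMD_*) fifo commands handled above carry no length field, so the checker derives *size from the command id alone and rejects the batch if that fixed size overruns the bytes remaining. Note that *size is an in/out parameter: it comes in holding the bytes left in the batch and goes out holding the bytes this command consumed. A stand-alone model of that contract (toy_* names are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum { TOY_CMD_UPDATE = 1, TOY_CMD_DEFINE_GMRFB = 2 };

struct toy_update { uint32_t x, y, w, h; };
struct toy_gmrfb  { uint32_t ptr, pitch, format; };

static int toy_check_not_3d(const void *buf, uint32_t *size)
{
        uint32_t remaining = *size;        /* in: bytes left in the batch */
        uint32_t id = ((const uint32_t *)buf)[0];

        switch (id) {
        case TOY_CMD_UPDATE:
                *size = sizeof(uint32_t) + sizeof(struct toy_update);
                break;
        case TOY_CMD_DEFINE_GMRFB:
                *size = sizeof(uint32_t) + sizeof(struct toy_gmrfb);
                break;
        default:
                return -EINVAL;            /* unknown 2D command */
        }

        if (*size > remaining)             /* fixed size overruns the batch */
                return -EINVAL;

        return 0;                          /* out: *size = bytes consumed */
}

int main(void)
{
        uint32_t batch[5] = { TOY_CMD_UPDATE, 0, 0, 640, 480 };
        uint32_t size = sizeof(batch);

        printf("ret %d, consumed %u bytes\n",
               toy_check_not_3d(batch, &size), size);
        return 0;
}
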
1837 | 1859 | ||
1838 | static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { | 1860 | static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { |
1839 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, | 1861 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, |
1840 | false, false, false), | 1862 | false, false, false), |
1841 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, | 1863 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, |
1842 | false, false, false), | 1864 | false, false, false), |
1843 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check, | 1865 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check, |
1844 | true, false, false), | 1866 | true, false, false), |
1845 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check, | 1867 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check, |
1846 | true, false, false), | 1868 | true, false, false), |
1847 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma, | 1869 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma, |
1848 | true, false, false), | 1870 | true, false, false), |
1849 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid, | 1871 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid, |
1850 | false, false, false), | 1872 | false, false, false), |
1851 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid, | 1873 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid, |
1852 | false, false, false), | 1874 | false, false, false), |
1853 | VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check, | 1875 | VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check, |
1854 | true, false, false), | 1876 | true, false, false), |
1855 | VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check, | 1877 | VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check, |
1856 | true, false, false), | 1878 | true, false, false), |
1857 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check, | 1879 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check, |
1858 | true, false, false), | 1880 | true, false, false), |
1859 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, | 1881 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, |
1860 | &vmw_cmd_set_render_target_check, true, false, false), | 1882 | &vmw_cmd_set_render_target_check, true, false, false), |
1861 | VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state, | 1883 | VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state, |
1862 | true, false, false), | 1884 | true, false, false), |
1863 | VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check, | 1885 | VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check, |
1864 | true, false, false), | 1886 | true, false, false), |
1865 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check, | 1887 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check, |
1866 | true, false, false), | 1888 | true, false, false), |
1867 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check, | 1889 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check, |
1868 | true, false, false), | 1890 | true, false, false), |
1869 | VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check, | 1891 | VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check, |
1870 | true, false, false), | 1892 | true, false, false), |
1871 | VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check, | 1893 | VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check, |
1872 | true, false, false), | 1894 | true, false, false), |
1873 | VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check, | 1895 | VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check, |
1874 | true, false, false), | 1896 | true, false, false), |
1875 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, | 1897 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, |
1876 | false, false, false), | 1898 | false, false, false), |
1877 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define, | 1899 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define, |
1878 | true, false, false), | 1900 | true, false, false), |
1879 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy, | 1901 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy, |
1880 | true, false, false), | 1902 | true, false, false), |
1881 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, | 1903 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, |
1882 | true, false, false), | 1904 | true, false, false), |
1883 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const, | 1905 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const, |
1884 | true, false, false), | 1906 | true, false, false), |
1885 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, | 1907 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, |
1886 | true, false, false), | 1908 | true, false, false), |
1887 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, | 1909 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, |
1888 | true, false, false), | 1910 | true, false, false), |
1889 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query, | 1911 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query, |
1890 | true, false, false), | 1912 | true, false, false), |
1891 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query, | 1913 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query, |
1892 | true, false, false), | 1914 | true, false, false), |
1893 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query, | 1915 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query, |
1894 | true, false, false), | 1916 | true, false, false), |
1895 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok, | 1917 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok, |
1896 | true, false, false), | 1918 | true, false, false), |
1897 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, | 1919 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, |
1898 | &vmw_cmd_blt_surf_screen_check, false, false, false), | 1920 | &vmw_cmd_blt_surf_screen_check, false, false, false), |
1899 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid, | 1921 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid, |
1900 | false, false, false), | 1922 | false, false, false), |
1901 | VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid, | 1923 | VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid, |
1902 | false, false, false), | 1924 | false, false, false), |
1903 | VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid, | 1925 | VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid, |
1904 | false, false, false), | 1926 | false, false, false), |
1905 | VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid, | 1927 | VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid, |
1906 | false, false, false), | 1928 | false, false, false), |
1907 | VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid, | 1929 | VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid, |
1908 | false, false, false), | 1930 | false, false, false), |
1909 | VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid, | 1931 | VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid, |
1910 | false, false, false), | 1932 | false, false, false), |
1911 | VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid, | 1933 | VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid, |
1912 | false, false, false), | 1934 | false, false, false), |
1913 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid, | 1935 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid, |
1914 | false, false, false), | 1936 | false, false, false), |
1915 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid, | 1937 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid, |
1916 | false, false, false), | 1938 | false, false, false), |
1917 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid, | 1939 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid, |
1918 | false, false, false), | 1940 | false, false, false), |
1919 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid, | 1941 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid, |
1920 | false, false, false), | 1942 | false, false, false), |
1921 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid, | 1943 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid, |
1922 | false, false, false), | 1944 | false, false, false), |
1923 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid, | 1945 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid, |
1924 | false, false, false), | 1946 | false, false, false), |
1925 | VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid, | 1947 | VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid, |
1926 | false, false, true), | 1948 | false, false, true), |
1927 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid, | 1949 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid, |
1928 | false, false, true), | 1950 | false, false, true), |
1929 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid, | 1951 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid, |
1930 | false, false, true), | 1952 | false, false, true), |
1931 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid, | 1953 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid, |
1932 | false, false, true), | 1954 | false, false, true), |
1933 | VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid, | 1955 | VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid, |
1934 | false, false, true), | 1956 | false, false, true), |
1935 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid, | 1957 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid, |
1936 | false, false, true), | 1958 | false, false, true), |
1937 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid, | 1959 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid, |
1938 | false, false, true), | 1960 | false, false, true), |
1939 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid, | 1961 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid, |
1940 | false, false, true), | 1962 | false, false, true), |
1941 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface, | 1963 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface, |
1942 | true, false, true), | 1964 | true, false, true), |
1943 | VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid, | 1965 | VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid, |
1944 | false, false, true), | 1966 | false, false, true), |
1945 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image, | 1967 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image, |
1946 | true, false, true), | 1968 | true, false, true), |
1947 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE, | 1969 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE, |
1948 | &vmw_cmd_update_gb_surface, true, false, true), | 1970 | &vmw_cmd_update_gb_surface, true, false, true), |
1949 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE, | 1971 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE, |
1950 | &vmw_cmd_readback_gb_image, true, false, true), | 1972 | &vmw_cmd_readback_gb_image, true, false, true), |
1951 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE, | 1973 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE, |
1952 | &vmw_cmd_readback_gb_surface, true, false, true), | 1974 | &vmw_cmd_readback_gb_surface, true, false, true), |
1953 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE, | 1975 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE, |
1954 | &vmw_cmd_invalidate_gb_image, true, false, true), | 1976 | &vmw_cmd_invalidate_gb_image, true, false, true), |
1955 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE, | 1977 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE, |
1956 | &vmw_cmd_invalidate_gb_surface, true, false, true), | 1978 | &vmw_cmd_invalidate_gb_surface, true, false, true), |
1957 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid, | 1979 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid, |
1958 | false, false, true), | 1980 | false, false, true), |
1959 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid, | 1981 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid, |
1960 | false, false, true), | 1982 | false, false, true), |
1961 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid, | 1983 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid, |
1962 | false, false, true), | 1984 | false, false, true), |
1963 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid, | 1985 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid, |
1964 | false, false, true), | 1986 | false, false, true), |
1965 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid, | 1987 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid, |
1966 | false, false, true), | 1988 | false, false, true), |
1967 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid, | 1989 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid, |
1968 | false, false, true), | 1990 | false, false, true), |
1969 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader, | 1991 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader, |
1970 | true, false, true), | 1992 | true, false, true), |
1971 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid, | 1993 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid, |
1972 | false, false, true), | 1994 | false, false, true), |
1973 | VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid, | 1995 | VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid, |
1974 | false, false, false), | 1996 | false, false, false), |
1975 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query, | 1997 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query, |
1976 | true, false, true), | 1998 | true, false, true), |
1977 | VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query, | 1999 | VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query, |
1978 | true, false, true), | 2000 | true, false, true), |
1979 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query, | 2001 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query, |
1980 | true, false, true), | 2002 | true, false, true), |
1981 | VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok, | 2003 | VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok, |
1982 | true, false, true), | 2004 | true, false, true), |
1983 | VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid, | 2005 | VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid, |
1984 | false, false, true), | 2006 | false, false, true), |
1985 | VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid, | 2007 | VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid, |
1986 | false, false, true), | 2008 | false, false, true), |
1987 | VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid, | 2009 | VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid, |
1988 | false, false, true), | 2010 | false, false, true), |
1989 | VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid, | 2011 | VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid, |
1990 | false, false, true), | 2012 | false, false, true), |
1991 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid, | 2013 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid, |
1992 | false, false, true), | 2014 | false, false, true), |
1993 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid, | 2015 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid, |
1994 | false, false, true), | 2016 | false, false, true), |
1995 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid, | 2017 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid, |
1996 | false, false, true), | 2018 | false, false, true), |
1997 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid, | 2019 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid, |
1998 | false, false, true), | 2020 | false, false, true), |
1999 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, | 2021 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, |
2000 | false, false, true), | 2022 | false, false, true), |
2001 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, | 2023 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, |
2002 | false, false, true), | 2024 | false, false, true), |
2003 | VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, | 2025 | VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, |
2004 | true, false, true) | 2026 | true, false, true) |
2005 | }; | 2027 | }; |
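
Each VMW_CMD_DEF() entry above pairs a validator with three booleans which, judging from how vmw_cmd_check() consumes them below, are user_allow (command accepted from unprivileged clients), gb_disable (rejected when the device has guest-backed objects) and gb_enable (requires guest-backed objects). The table is indexed by cmd_id - SVGA_3D_CMD_BASE, and designated initializers leave every unlisted slot zeroed, so a NULL ->func is what makes an in-range but unhandled id invalid. A sketch of that table shape (the struct layout is an assumption matching the checks below, not copied from the driver headers):

#include <stdbool.h>
#include <stddef.h>

typedef int (*toy_validator)(void *dev, void *ctx, void *header);

struct toy_cmd_entry {
        toy_validator func;        /* NULL => command id is invalid        */
        bool user_allow;           /* callable from unprivileged clients   */
        bool gb_disable;           /* disallowed on guest-backed hardware  */
        bool gb_enable;            /* requires guest-backed hardware       */
};

enum { TOY_CMD_BASE = 1040, TOY_CMD_FOO = 1041, TOY_CMD_MAX = 1050 };

static int toy_ok(void *d, void *c, void *h)
{
        (void)d; (void)c; (void)h;
        return 0;
}

#define TOY_CMD_DEF(cmd, fn, ua, gd, ge) \
        [(cmd) - TOY_CMD_BASE] = { (fn), (ua), (gd), (ge) }

static const struct toy_cmd_entry toy_entries[TOY_CMD_MAX - TOY_CMD_BASE] = {
        TOY_CMD_DEF(TOY_CMD_FOO, &toy_ok, true, false, false),
        /* every slot not listed here stays zero-initialized */
};

int main(void)
{
        const struct toy_cmd_entry *e = &toy_entries[TOY_CMD_FOO - TOY_CMD_BASE];

        return e->func ? e->func(NULL, NULL, NULL) : -1;
}
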
2006 | 2028 | ||
2007 | static int vmw_cmd_check(struct vmw_private *dev_priv, | 2029 | static int vmw_cmd_check(struct vmw_private *dev_priv, |
2008 | struct vmw_sw_context *sw_context, | 2030 | struct vmw_sw_context *sw_context, |
2009 | void *buf, uint32_t *size) | 2031 | void *buf, uint32_t *size) |
2010 | { | 2032 | { |
2011 | uint32_t cmd_id; | 2033 | uint32_t cmd_id; |
2012 | uint32_t size_remaining = *size; | 2034 | uint32_t size_remaining = *size; |
2013 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; | 2035 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; |
2014 | int ret; | 2036 | int ret; |
2015 | const struct vmw_cmd_entry *entry; | 2037 | const struct vmw_cmd_entry *entry; |
2016 | bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS; | 2038 | bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS; |
2017 | 2039 | ||
2018 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); | 2040 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); |
2019 | /* Handle any non-3D commands */ | 2041 | /* Handle any non-3D commands */ |
2020 | if (unlikely(cmd_id < SVGA_CMD_MAX)) | 2042 | if (unlikely(cmd_id < SVGA_CMD_MAX)) |
2021 | return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size); | 2043 | return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size); |
2022 | 2044 | ||
2023 | 2045 | ||
2024 | cmd_id = le32_to_cpu(header->id); | 2046 | cmd_id = le32_to_cpu(header->id); |
2025 | *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader); | 2047 | *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader); |
2026 | 2048 | ||
2027 | cmd_id -= SVGA_3D_CMD_BASE; | 2049 | cmd_id -= SVGA_3D_CMD_BASE; |
2028 | if (unlikely(*size > size_remaining)) | 2050 | if (unlikely(*size > size_remaining)) |
2029 | goto out_invalid; | 2051 | goto out_invalid; |
2030 | 2052 | ||
2031 | if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) | 2053 | if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) |
2032 | goto out_invalid; | 2054 | goto out_invalid; |
2033 | 2055 | ||
2034 | entry = &vmw_cmd_entries[cmd_id]; | 2056 | entry = &vmw_cmd_entries[cmd_id]; |
2035 | if (unlikely(!entry->func)) | 2057 | if (unlikely(!entry->func)) |
2036 | goto out_invalid; | 2058 | goto out_invalid; |
2037 | 2059 | ||
2038 | if (unlikely(!entry->user_allow && !sw_context->kernel)) | 2060 | if (unlikely(!entry->user_allow && !sw_context->kernel)) |
2039 | goto out_privileged; | 2061 | goto out_privileged; |
2040 | 2062 | ||
2041 | if (unlikely(entry->gb_disable && gb)) | 2063 | if (unlikely(entry->gb_disable && gb)) |
2042 | goto out_old; | 2064 | goto out_old; |
2043 | 2065 | ||
2044 | if (unlikely(entry->gb_enable && !gb)) | 2066 | if (unlikely(entry->gb_enable && !gb)) |
2045 | goto out_new; | 2067 | goto out_new; |
2046 | 2068 | ||
2047 | ret = entry->func(dev_priv, sw_context, header); | 2069 | ret = entry->func(dev_priv, sw_context, header); |
2048 | if (unlikely(ret != 0)) | 2070 | if (unlikely(ret != 0)) |
2049 | goto out_invalid; | 2071 | goto out_invalid; |
2050 | 2072 | ||
2051 | return 0; | 2073 | return 0; |
2052 | out_invalid: | 2074 | out_invalid: |
2053 | DRM_ERROR("Invalid SVGA3D command: %d\n", | 2075 | DRM_ERROR("Invalid SVGA3D command: %d\n", |
2054 | cmd_id + SVGA_3D_CMD_BASE); | 2076 | cmd_id + SVGA_3D_CMD_BASE); |
2055 | return -EINVAL; | 2077 | return -EINVAL; |
2056 | out_privileged: | 2078 | out_privileged: |
2057 | DRM_ERROR("Privileged SVGA3D command: %d\n", | 2079 | DRM_ERROR("Privileged SVGA3D command: %d\n", |
2058 | cmd_id + SVGA_3D_CMD_BASE); | 2080 | cmd_id + SVGA_3D_CMD_BASE); |
2059 | return -EPERM; | 2081 | return -EPERM; |
2060 | out_old: | 2082 | out_old: |
2061 | DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n", | 2083 | DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n", |
2062 | cmd_id + SVGA_3D_CMD_BASE); | 2084 | cmd_id + SVGA_3D_CMD_BASE); |
2063 | return -EINVAL; | 2085 | return -EINVAL; |
2064 | out_new: | 2086 | out_new: |
2065 | DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n", | 2087 | DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n", |
2066 | cmd_id + SVGA_3D_CMD_BASE); | 2088 | cmd_id + SVGA_3D_CMD_BASE); |
2067 | return -EINVAL; | 2089 | return -EINVAL; |
2068 | } | 2090 | } |
2069 | 2091 | ||
2070 | static int vmw_cmd_check_all(struct vmw_private *dev_priv, | 2092 | static int vmw_cmd_check_all(struct vmw_private *dev_priv, |
2071 | struct vmw_sw_context *sw_context, | 2093 | struct vmw_sw_context *sw_context, |
2072 | void *buf, | 2094 | void *buf, |
2073 | uint32_t size) | 2095 | uint32_t size) |
2074 | { | 2096 | { |
2075 | int32_t cur_size = size; | 2097 | int32_t cur_size = size; |
2076 | int ret; | 2098 | int ret; |
2077 | 2099 | ||
2078 | sw_context->buf_start = buf; | 2100 | sw_context->buf_start = buf; |
2079 | 2101 | ||
2080 | while (cur_size > 0) { | 2102 | while (cur_size > 0) { |
2081 | size = cur_size; | 2103 | size = cur_size; |
2082 | ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); | 2104 | ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); |
2083 | if (unlikely(ret != 0)) | 2105 | if (unlikely(ret != 0)) |
2084 | return ret; | 2106 | return ret; |
2085 | buf = (void *)((unsigned long) buf + size); | 2107 | buf = (void *)((unsigned long) buf + size); |
2086 | cur_size -= size; | 2108 | cur_size -= size; |
2087 | } | 2109 | } |
2088 | 2110 | ||
2089 | if (unlikely(cur_size != 0)) { | 2111 | if (unlikely(cur_size != 0)) { |
2090 | DRM_ERROR("Command verifier out of sync.\n"); | 2112 | DRM_ERROR("Command verifier out of sync.\n"); |
2091 | return -EINVAL; | 2113 | return -EINVAL; |
2092 | } | 2114 | } |
2093 | 2115 | ||
2094 | return 0; | 2116 | return 0; |
2095 | } | 2117 | } |
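
The loop above is the whole batch parser: it hands vmw_cmd_check() the bytes remaining, gets back the size of the command just validated, and advances by that amount. Since the per-command check already rejects anything that would overrun the batch, the final cur_size test is purely defensive. A compact stand-alone model of the walk (toy_* names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct toy_header { uint32_t id; uint32_t size; };   /* like SVGA3dCmdHeader */

static int toy_cmd_check(const void *buf, uint32_t *size)
{
        const struct toy_header *h = buf;
        uint32_t remaining = *size;                  /* in: bytes left */

        *size = h->size + (uint32_t)sizeof(*h);      /* out: this command's bytes */
        return (*size > remaining) ? -1 : 0;         /* header overruns batch */
}

static int toy_check_all(const void *buf, uint32_t size)
{
        int32_t cur_size = (int32_t)size;

        while (cur_size > 0) {
                uint32_t step = (uint32_t)cur_size;

                if (toy_cmd_check(buf, &step))
                        return -1;
                buf = (const char *)buf + step;      /* advance past command */
                cur_size -= (int32_t)step;
        }
        return (cur_size == 0) ? 0 : -1;             /* defensive, as above */
}

int main(void)
{
        uint32_t batch[7] = { 7, 8, 0xaa, 0xbb,      /* cmd 7, 8-byte body */
                              9, 4, 0xcc };          /* cmd 9, 4-byte body */

        printf("batch ok: %d\n", toy_check_all(batch, sizeof(batch)));
        return 0;
}
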
2096 | 2118 | ||
2097 | static void vmw_free_relocations(struct vmw_sw_context *sw_context) | 2119 | static void vmw_free_relocations(struct vmw_sw_context *sw_context) |
2098 | { | 2120 | { |
2099 | sw_context->cur_reloc = 0; | 2121 | sw_context->cur_reloc = 0; |
2100 | } | 2122 | } |
2101 | 2123 | ||
2102 | static void vmw_apply_relocations(struct vmw_sw_context *sw_context) | 2124 | static void vmw_apply_relocations(struct vmw_sw_context *sw_context) |
2103 | { | 2125 | { |
2104 | uint32_t i; | 2126 | uint32_t i; |
2105 | struct vmw_relocation *reloc; | 2127 | struct vmw_relocation *reloc; |
2106 | struct ttm_validate_buffer *validate; | 2128 | struct ttm_validate_buffer *validate; |
2107 | struct ttm_buffer_object *bo; | 2129 | struct ttm_buffer_object *bo; |
2108 | 2130 | ||
2109 | for (i = 0; i < sw_context->cur_reloc; ++i) { | 2131 | for (i = 0; i < sw_context->cur_reloc; ++i) { |
2110 | reloc = &sw_context->relocs[i]; | 2132 | reloc = &sw_context->relocs[i]; |
2111 | validate = &sw_context->val_bufs[reloc->index].base; | 2133 | validate = &sw_context->val_bufs[reloc->index].base; |
2112 | bo = validate->bo; | 2134 | bo = validate->bo; |
2113 | switch (bo->mem.mem_type) { | 2135 | switch (bo->mem.mem_type) { |
2114 | case TTM_PL_VRAM: | 2136 | case TTM_PL_VRAM: |
2115 | reloc->location->offset += bo->offset; | 2137 | reloc->location->offset += bo->offset; |
2116 | reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; | 2138 | reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; |
2117 | break; | 2139 | break; |
2118 | case VMW_PL_GMR: | 2140 | case VMW_PL_GMR: |
2119 | reloc->location->gmrId = bo->mem.start; | 2141 | reloc->location->gmrId = bo->mem.start; |
2120 | break; | 2142 | break; |
2121 | case VMW_PL_MOB: | 2143 | case VMW_PL_MOB: |
2122 | *reloc->mob_loc = bo->mem.start; | 2144 | *reloc->mob_loc = bo->mem.start; |
2123 | break; | 2145 | break; |
2124 | default: | 2146 | default: |
2125 | BUG(); | 2147 | BUG(); |
2126 | } | 2148 | } |
2127 | } | 2149 | } |
2128 | vmw_free_relocations(sw_context); | 2150 | vmw_free_relocations(sw_context); |
2129 | } | 2151 | } |
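
Buffer relocations are the counterpart to the resource relocations seen earlier: each one remembers where in the command stream a guest address was parsed, and once TTM has placed the buffer the final memory type decides what gets written back — a VRAM offset behind the special framebuffer GMR id, a real GMR id, or a bare MOB id. A reduced model of the switch above (toy_* names and the framebuffer id value are illustrative):

#include <stdint.h>

enum toy_mem_type { TOY_PL_VRAM, TOY_PL_GMR, TOY_PL_MOB };

#define TOY_GMR_FRAMEBUFFER ((uint32_t)-2)   /* stand-in for SVGA_GMR_FRAMEBUFFER */

struct toy_guest_ptr { uint32_t gmrId; uint32_t offset; };
struct toy_bo { enum toy_mem_type mem_type; uint32_t start; uint32_t offset; };

/* Patch one parsed guest address according to the buffer's final placement. */
static void toy_apply_reloc(struct toy_guest_ptr *loc, uint32_t *mob_loc,
                            const struct toy_bo *bo)
{
        switch (bo->mem_type) {
        case TOY_PL_VRAM:                  /* VRAM appears as one pseudo-GMR */
                loc->offset += bo->offset;
                loc->gmrId = TOY_GMR_FRAMEBUFFER;
                break;
        case TOY_PL_GMR:                   /* GMR id is the allocation start */
                loc->gmrId = bo->start;
                break;
        case TOY_PL_MOB:                   /* MOB commands take a bare mob id */
                *mob_loc = bo->start;
                break;
        }
}

int main(void)
{
        struct toy_guest_ptr loc = { 0, 128 };
        struct toy_bo bo = { TOY_PL_VRAM, 0, 4096 };
        uint32_t mob = 0;

        toy_apply_reloc(&loc, &mob, &bo);
        return (loc.offset == 4224) ? 0 : 1;   /* 128 + 4096 */
}
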
2130 | 2152 | ||
2131 | /** | 2153 | /** |
2132 | * vmw_resource_list_unreference - Free up a resource list and unreference | 2154 | * vmw_resource_list_unreference - Free up a resource list and unreference |
2133 | * all resources referenced by it. | 2155 | * all resources referenced by it. |
2134 | * | 2156 | * |
2135 | * @list: The resource list. | 2157 | * @list: The resource list. |
2136 | */ | 2158 | */ |
2137 | static void vmw_resource_list_unreference(struct list_head *list) | 2159 | static void vmw_resource_list_unreference(struct list_head *list) |
2138 | { | 2160 | { |
2139 | struct vmw_resource_val_node *val, *val_next; | 2161 | struct vmw_resource_val_node *val, *val_next; |
2140 | 2162 | ||
2141 | /* | 2163 | /* |
2142 | * Drop references to resources held during command submission. | 2164 | * Drop references to resources held during command submission. |
2143 | */ | 2165 | */ |
2144 | 2166 | ||
2145 | list_for_each_entry_safe(val, val_next, list, head) { | 2167 | list_for_each_entry_safe(val, val_next, list, head) { |
2146 | list_del_init(&val->head); | 2168 | list_del_init(&val->head); |
2147 | vmw_resource_unreference(&val->res); | 2169 | vmw_resource_unreference(&val->res); |
2148 | if (unlikely(val->staged_bindings)) | 2170 | if (unlikely(val->staged_bindings)) |
2149 | kfree(val->staged_bindings); | 2171 | kfree(val->staged_bindings); |
2150 | kfree(val); | 2172 | kfree(val); |
2151 | } | 2173 | } |
2152 | } | 2174 | } |
2153 | 2175 | ||
2154 | static void vmw_clear_validations(struct vmw_sw_context *sw_context) | 2176 | static void vmw_clear_validations(struct vmw_sw_context *sw_context) |
2155 | { | 2177 | { |
2156 | struct vmw_validate_buffer *entry, *next; | 2178 | struct vmw_validate_buffer *entry, *next; |
2157 | struct vmw_resource_val_node *val; | 2179 | struct vmw_resource_val_node *val; |
2158 | 2180 | ||
2159 | /* | 2181 | /* |
2160 | * Drop references to DMA buffers held during command submission. | 2182 | * Drop references to DMA buffers held during command submission. |
2161 | */ | 2183 | */ |
2162 | list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, | 2184 | list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, |
2163 | base.head) { | 2185 | base.head) { |
2164 | list_del(&entry->base.head); | 2186 | list_del(&entry->base.head); |
2165 | ttm_bo_unref(&entry->base.bo); | 2187 | ttm_bo_unref(&entry->base.bo); |
2166 | (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash); | 2188 | (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash); |
2167 | sw_context->cur_val_buf--; | 2189 | sw_context->cur_val_buf--; |
2168 | } | 2190 | } |
2169 | BUG_ON(sw_context->cur_val_buf != 0); | 2191 | BUG_ON(sw_context->cur_val_buf != 0); |
2170 | 2192 | ||
2171 | list_for_each_entry(val, &sw_context->resource_list, head) | 2193 | list_for_each_entry(val, &sw_context->resource_list, head) |
2172 | (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash); | 2194 | (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash); |
2173 | } | 2195 | } |
2174 | 2196 | ||
2175 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | 2197 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, |
2176 | struct ttm_buffer_object *bo, | 2198 | struct ttm_buffer_object *bo, |
2177 | bool validate_as_mob) | 2199 | bool validate_as_mob) |
2178 | { | 2200 | { |
2179 | int ret; | 2201 | int ret; |
2180 | 2202 | ||
2181 | 2203 | ||
2182 | /* | 2204 | /* |
2183 | * Don't validate pinned buffers. | 2205 | * Don't validate pinned buffers. |
2184 | */ | 2206 | */ |
2185 | 2207 | ||
2186 | if (bo == dev_priv->pinned_bo || | 2208 | if (bo == dev_priv->pinned_bo || |
2187 | (bo == dev_priv->dummy_query_bo && | 2209 | (bo == dev_priv->dummy_query_bo && |
2188 | dev_priv->dummy_query_bo_pinned)) | 2210 | dev_priv->dummy_query_bo_pinned)) |
2189 | return 0; | 2211 | return 0; |
2190 | 2212 | ||
2191 | if (validate_as_mob) | 2213 | if (validate_as_mob) |
2192 | return ttm_bo_validate(bo, &vmw_mob_placement, true, false); | 2214 | return ttm_bo_validate(bo, &vmw_mob_placement, true, false); |
2193 | 2215 | ||
2194 | /* | 2216 | /* |
2195 | * Put BO in VRAM if there is space, otherwise as a GMR. | 2217 | * Put BO in VRAM if there is space, otherwise as a GMR. |
2196 | * If there is no space in VRAM and GMR ids are all used up, | 2218 | * If there is no space in VRAM and GMR ids are all used up, |
2197 | * start evicting GMRs to make room. If the DMA buffer can't be | 2219 | * start evicting GMRs to make room. If the DMA buffer can't be |
2198 | * used as a GMR, this will return -ENOMEM. | 2220 | * used as a GMR, this will return -ENOMEM. |
2199 | */ | 2221 | */ |
2200 | 2222 | ||
2201 | ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false); | 2223 | ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false); |
2202 | if (likely(ret == 0 || ret == -ERESTARTSYS)) | 2224 | if (likely(ret == 0 || ret == -ERESTARTSYS)) |
2203 | return ret; | 2225 | return ret; |
2204 | 2226 | ||
2205 | /* | 2227 | /* |
2206 | * If that failed, try VRAM again, this time evicting | 2228 | * If that failed, try VRAM again, this time evicting |
2207 | * previous contents. | 2229 | * previous contents. |
2208 | */ | 2230 | */ |
2209 | 2231 | ||
2210 | DRM_INFO("Falling through to VRAM.\n"); | 2232 | DRM_INFO("Falling through to VRAM.\n"); |
2211 | ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); | 2233 | ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); |
2212 | return ret; | 2234 | return ret; |
2213 | } | 2235 | } |
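
The ordering above encodes the placement policy: prefer VRAM or a GMR, treat -ERESTARTSYS as "stop and let the syscall restart", and only on a genuine failure retry VRAM with eviction of previous contents. The same shape, reduced to its control flow (the toy_* placement functions stand in for ttm_bo_validate() against the two placement lists):

#include <errno.h>

static int toy_place_vram_or_gmr(void *bo)    { (void)bo; return -ENOMEM; }
static int toy_place_vram_evicting(void *bo)  { (void)bo; return 0; }

static int toy_validate(void *bo)
{
        int ret = toy_place_vram_or_gmr(bo);

        /* Success ends here; so does a pending signal, which must bubble
         * up untouched so the ioctl can be restarted. */
        if (ret == 0 || ret == -ERESTARTSYS)
                return ret;

        /* Out of space: retry VRAM, this time allowing eviction. */
        return toy_place_vram_evicting(bo);
}

int main(void)
{
        return toy_validate((void *)0);
}
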
2214 | 2236 | ||
2215 | static int vmw_validate_buffers(struct vmw_private *dev_priv, | 2237 | static int vmw_validate_buffers(struct vmw_private *dev_priv, |
2216 | struct vmw_sw_context *sw_context) | 2238 | struct vmw_sw_context *sw_context) |
2217 | { | 2239 | { |
2218 | struct vmw_validate_buffer *entry; | 2240 | struct vmw_validate_buffer *entry; |
2219 | int ret; | 2241 | int ret; |
2220 | 2242 | ||
2221 | list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { | 2243 | list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { |
2222 | ret = vmw_validate_single_buffer(dev_priv, entry->base.bo, | 2244 | ret = vmw_validate_single_buffer(dev_priv, entry->base.bo, |
2223 | entry->validate_as_mob); | 2245 | entry->validate_as_mob); |
2224 | if (unlikely(ret != 0)) | 2246 | if (unlikely(ret != 0)) |
2225 | return ret; | 2247 | return ret; |
2226 | } | 2248 | } |
2227 | return 0; | 2249 | return 0; |
2228 | } | 2250 | } |
2229 | 2251 | ||
2230 | static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, | 2252 | static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, |
2231 | uint32_t size) | 2253 | uint32_t size) |
2232 | { | 2254 | { |
2233 | if (likely(sw_context->cmd_bounce_size >= size)) | 2255 | if (likely(sw_context->cmd_bounce_size >= size)) |
2234 | return 0; | 2256 | return 0; |
2235 | 2257 | ||
2236 | if (sw_context->cmd_bounce_size == 0) | 2258 | if (sw_context->cmd_bounce_size == 0) |
2237 | sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE; | 2259 | sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE; |
2238 | 2260 | ||
2239 | while (sw_context->cmd_bounce_size < size) { | 2261 | while (sw_context->cmd_bounce_size < size) { |
2240 | sw_context->cmd_bounce_size = | 2262 | sw_context->cmd_bounce_size = |
2241 | PAGE_ALIGN(sw_context->cmd_bounce_size + | 2263 | PAGE_ALIGN(sw_context->cmd_bounce_size + |
2242 | (sw_context->cmd_bounce_size >> 1)); | 2264 | (sw_context->cmd_bounce_size >> 1)); |
2243 | } | 2265 | } |
2244 | 2266 | ||
2245 | if (sw_context->cmd_bounce != NULL) | 2267 | if (sw_context->cmd_bounce != NULL) |
2246 | vfree(sw_context->cmd_bounce); | 2268 | vfree(sw_context->cmd_bounce); |
2247 | 2269 | ||
2248 | sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size); | 2270 | sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size); |
2249 | 2271 | ||
2250 | if (sw_context->cmd_bounce == NULL) { | 2272 | if (sw_context->cmd_bounce == NULL) { |
2251 | DRM_ERROR("Failed to allocate command bounce buffer.\n"); | 2273 | DRM_ERROR("Failed to allocate command bounce buffer.\n"); |
2252 | sw_context->cmd_bounce_size = 0; | 2274 | sw_context->cmd_bounce_size = 0; |
2253 | return -ENOMEM; | 2275 | return -ENOMEM; |
2254 | } | 2276 | } |
2255 | 2277 | ||
2256 | return 0; | 2278 | return 0; |
2257 | } | 2279 | } |
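
The growth schedule above is roughly 1.5x per step, page-aligned, and the old buffer is freed before the new one is allocated because its contents never need preserving (the caller copies the whole batch in afresh). Assuming the usual 32 KiB initial size and 4 KiB pages, a 200 KiB batch grows the bounce buffer 32K -> 48K -> 72K -> 108K -> 164K -> 248K. The schedule, reproduced stand-alone (the initial size is an assumption matching VMWGFX_CMD_BOUNCE_INIT_SIZE in this era of the driver):

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096u
#define TOY_PAGE_ALIGN(x) (((x) + TOY_PAGE_SIZE - 1) & ~(TOY_PAGE_SIZE - 1))
#define TOY_BOUNCE_INIT 32768u   /* assumed VMWGFX_CMD_BOUNCE_INIT_SIZE */

static uint32_t toy_bounce_size(uint32_t have, uint32_t need)
{
        if (have >= need)
                return have;             /* current buffer already fits */
        if (have == 0)
                have = TOY_BOUNCE_INIT;
        while (have < need)              /* ~1.5x per step, page aligned */
                have = TOY_PAGE_ALIGN(have + (have >> 1));
        return have;
}

int main(void)
{
        printf("%u\n", toy_bounce_size(0, 200 * 1024));   /* prints 253952 */
        return 0;
}
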
2258 | 2280 | ||
2259 | /** | 2281 | /** |
2260 | * vmw_execbuf_fence_commands - create and submit a command stream fence | 2282 | * vmw_execbuf_fence_commands - create and submit a command stream fence |
2261 | * | 2283 | * |
2262 | * Creates a fence object and submits a command stream marker. | 2284 | * Creates a fence object and submits a command stream marker. |
2263 | * If this fails for some reason, we sync the fifo and return a NULL fence. | 2285 | * If this fails for some reason, we sync the fifo and return a NULL fence. |
2264 | * It is then safe to fence buffers with a NULL pointer. | 2286 | * It is then safe to fence buffers with a NULL pointer. |
2265 | * | 2287 | * |
2266 | * If @p_handle is not NULL, @file_priv must also not be NULL, and a | 2288 | * If @p_handle is not NULL, @file_priv must also not be NULL, and a |
2267 | * user-space handle is created; otherwise no handle is created. | 2289 | * user-space handle is created; otherwise no handle is created. |
2268 | */ | 2290 | */ |
2269 | 2291 | ||
2270 | int vmw_execbuf_fence_commands(struct drm_file *file_priv, | 2292 | int vmw_execbuf_fence_commands(struct drm_file *file_priv, |
2271 | struct vmw_private *dev_priv, | 2293 | struct vmw_private *dev_priv, |
2272 | struct vmw_fence_obj **p_fence, | 2294 | struct vmw_fence_obj **p_fence, |
2273 | uint32_t *p_handle) | 2295 | uint32_t *p_handle) |
2274 | { | 2296 | { |
2275 | uint32_t sequence; | 2297 | uint32_t sequence; |
2276 | int ret; | 2298 | int ret; |
2277 | bool synced = false; | 2299 | bool synced = false; |
2278 | 2300 | ||
2279 | /* p_handle implies file_priv. */ | 2301 | /* p_handle implies file_priv. */ |
2280 | BUG_ON(p_handle != NULL && file_priv == NULL); | 2302 | BUG_ON(p_handle != NULL && file_priv == NULL); |
2281 | 2303 | ||
2282 | ret = vmw_fifo_send_fence(dev_priv, &sequence); | 2304 | ret = vmw_fifo_send_fence(dev_priv, &sequence); |
2283 | if (unlikely(ret != 0)) { | 2305 | if (unlikely(ret != 0)) { |
2284 | DRM_ERROR("Fence submission error. Syncing.\n"); | 2306 | DRM_ERROR("Fence submission error. Syncing.\n"); |
2285 | synced = true; | 2307 | synced = true; |
2286 | } | 2308 | } |
2287 | 2309 | ||
2288 | if (p_handle != NULL) | 2310 | if (p_handle != NULL) |
2289 | ret = vmw_user_fence_create(file_priv, dev_priv->fman, | 2311 | ret = vmw_user_fence_create(file_priv, dev_priv->fman, |
2290 | sequence, | 2312 | sequence, |
2291 | DRM_VMW_FENCE_FLAG_EXEC, | 2313 | DRM_VMW_FENCE_FLAG_EXEC, |
2292 | p_fence, p_handle); | 2314 | p_fence, p_handle); |
2293 | else | 2315 | else |
2294 | ret = vmw_fence_create(dev_priv->fman, sequence, | 2316 | ret = vmw_fence_create(dev_priv->fman, sequence, |
2295 | DRM_VMW_FENCE_FLAG_EXEC, | 2317 | DRM_VMW_FENCE_FLAG_EXEC, |
2296 | p_fence); | 2318 | p_fence); |
2297 | 2319 | ||
2298 | if (unlikely(ret != 0 && !synced)) { | 2320 | if (unlikely(ret != 0 && !synced)) { |
2299 | (void) vmw_fallback_wait(dev_priv, false, false, | 2321 | (void) vmw_fallback_wait(dev_priv, false, false, |
2300 | sequence, false, | 2322 | sequence, false, |
2301 | VMW_FENCE_WAIT_TIMEOUT); | 2323 | VMW_FENCE_WAIT_TIMEOUT); |
2302 | *p_fence = NULL; | 2324 | *p_fence = NULL; |
2303 | } | 2325 | } |
2304 | 2326 | ||
2305 | return 0; | 2327 | return 0; |
2306 | } | 2328 | } |
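
Note that the function returns 0 even when fence creation failed: the failure path has already drained the device with vmw_fallback_wait(), so handing back *p_fence == NULL is still a usable result and later fencing code only needs a NULL check. Roughly, from a caller's point of view (hypothetical toy_ names):

#include <stddef.h>

struct toy_fence;

/* A NULL fence from the fallback path means the device was already
 * synced, so per-buffer fencing can simply be skipped. */
static void toy_fence_buffers(struct toy_fence *fence)
{
        if (fence == NULL)
                return;
        /* ... attach the fence to each validated buffer ... */
}

int main(void)
{
        toy_fence_buffers(NULL);
        return 0;
}
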
2307 | 2329 | ||
2308 | /** | 2330 | /** |
2309 | * vmw_execbuf_copy_fence_user - copy fence object information to | 2331 | * vmw_execbuf_copy_fence_user - copy fence object information to |
2310 | * user-space. | 2332 | * user-space. |
2311 | * | 2333 | * |
2312 | * @dev_priv: Pointer to a vmw_private struct. | 2334 | * @dev_priv: Pointer to a vmw_private struct. |
2313 | * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file. | 2335 | * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file. |
2314 | * @ret: Return value from fence object creation. | 2336 | * @ret: Return value from fence object creation. |
2315 | * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to | 2337 | * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to |
2316 | * which the information should be copied. | 2338 | * which the information should be copied. |
2317 | * @fence: Pointer to the fence object. | 2339 | * @fence: Pointer to the fence object. |
2318 | * @fence_handle: User-space fence handle. | 2340 | * @fence_handle: User-space fence handle. |
2319 | * | 2341 | * |
2320 | * This function copies fence information to user-space. If copying fails, | 2342 | * This function copies fence information to user-space. If copying fails, |
2321 | * the user-space struct drm_vmw_fence_rep::error member is hopefully | 2343 | * the user-space struct drm_vmw_fence_rep::error member is hopefully |
2322 | * left untouched, and if user-space has preloaded it with -EFAULT, | 2344 | * left untouched, and if user-space has preloaded it with -EFAULT, |
2323 | * the error will hopefully be detected. | 2345 | * the error will hopefully be detected. |
2324 | * Also if copying fails, user-space will be unable to signal the fence | 2346 | * Also if copying fails, user-space will be unable to signal the fence |
2325 | * object, so we wait for it immediately and then unreference the | 2347 | * object, so we wait for it immediately and then unreference the |
2326 | * user-space reference. | 2348 | * user-space reference. |
2327 | */ | 2349 | */ |
2328 | void | 2350 | void |
2329 | vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, | 2351 | vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, |
2330 | struct vmw_fpriv *vmw_fp, | 2352 | struct vmw_fpriv *vmw_fp, |
2331 | int ret, | 2353 | int ret, |
2332 | struct drm_vmw_fence_rep __user *user_fence_rep, | 2354 | struct drm_vmw_fence_rep __user *user_fence_rep, |
2333 | struct vmw_fence_obj *fence, | 2355 | struct vmw_fence_obj *fence, |
2334 | uint32_t fence_handle) | 2356 | uint32_t fence_handle) |
2335 | { | 2357 | { |
2336 | struct drm_vmw_fence_rep fence_rep; | 2358 | struct drm_vmw_fence_rep fence_rep; |
2337 | 2359 | ||
2338 | if (user_fence_rep == NULL) | 2360 | if (user_fence_rep == NULL) |
2339 | return; | 2361 | return; |
2340 | 2362 | ||
2341 | memset(&fence_rep, 0, sizeof(fence_rep)); | 2363 | memset(&fence_rep, 0, sizeof(fence_rep)); |
2342 | 2364 | ||
2343 | fence_rep.error = ret; | 2365 | fence_rep.error = ret; |
2344 | if (ret == 0) { | 2366 | if (ret == 0) { |
2345 | BUG_ON(fence == NULL); | 2367 | BUG_ON(fence == NULL); |
2346 | 2368 | ||
2347 | fence_rep.handle = fence_handle; | 2369 | fence_rep.handle = fence_handle; |
2348 | fence_rep.seqno = fence->seqno; | 2370 | fence_rep.seqno = fence->seqno; |
2349 | vmw_update_seqno(dev_priv, &dev_priv->fifo); | 2371 | vmw_update_seqno(dev_priv, &dev_priv->fifo); |
2350 | fence_rep.passed_seqno = dev_priv->last_read_seqno; | 2372 | fence_rep.passed_seqno = dev_priv->last_read_seqno; |
2351 | } | 2373 | } |
2352 | 2374 | ||
2353 | /* | 2375 | /* |
2354 | * copy_to_user errors will be detected by user space not | 2376 | * copy_to_user errors will be detected by user space not |
2355 | * seeing fence_rep::error filled in. Typically | 2377 | * seeing fence_rep::error filled in. Typically |
2356 | * user-space would have pre-set that member to -EFAULT. | 2378 | * user-space would have pre-set that member to -EFAULT. |
2357 | */ | 2379 | */ |
2358 | ret = copy_to_user(user_fence_rep, &fence_rep, | 2380 | ret = copy_to_user(user_fence_rep, &fence_rep, |
2359 | sizeof(fence_rep)); | 2381 | sizeof(fence_rep)); |
2360 | 2382 | ||
2361 | /* | 2383 | /* |
2362 | * User-space lost the fence object. We need to sync | 2384 | * User-space lost the fence object. We need to sync |
2363 | * and unreference the handle. | 2385 | * and unreference the handle. |
2364 | */ | 2386 | */ |
2365 | if (unlikely(ret != 0) && (fence_rep.error == 0)) { | 2387 | if (unlikely(ret != 0) && (fence_rep.error == 0)) { |
2366 | ttm_ref_object_base_unref(vmw_fp->tfile, | 2388 | ttm_ref_object_base_unref(vmw_fp->tfile, |
2367 | fence_handle, TTM_REF_USAGE); | 2389 | fence_handle, TTM_REF_USAGE); |
2368 | DRM_ERROR("Fence copy error. Syncing.\n"); | 2390 | DRM_ERROR("Fence copy error. Syncing.\n"); |
2369 | (void) vmw_fence_obj_wait(fence, fence->signal_mask, | 2391 | (void) vmw_fence_obj_wait(fence, fence->signal_mask, |
2370 | false, false, | 2392 | false, false, |
2371 | VMW_FENCE_WAIT_TIMEOUT); | 2393 | VMW_FENCE_WAIT_TIMEOUT); |
2372 | } | 2394 | } |
2373 | } | 2395 | } |
2374 | 2396 | ||
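
The -EFAULT preload convention documented above implies a matching pattern on the calling side. A minimal user-space sketch, assuming the vmwgfx UAPI header (struct drm_vmw_fence_rep, DRM_VMW_EXECBUF) and libdrm's drmCommandWrite; the helper name submit_and_check is hypothetical:

#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

/* Hypothetical helper: fd is an open vmwgfx DRM device, arg is a fully
 * populated execbuf argument except for the fence_rep pointer. */
static int submit_and_check(int fd, struct drm_vmw_execbuf_arg *arg)
{
	struct drm_vmw_fence_rep fence_rep;
	int ret;

	memset(&fence_rep, 0, sizeof(fence_rep));
	fence_rep.error = -EFAULT;	/* sentinel, per the comment above */
	arg->fence_rep = (unsigned long) &fence_rep;

	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, arg, sizeof(*arg));
	if (ret)
		return ret;

	/*
	 * If the sentinel survived, the kernel's copy_to_user failed;
	 * vmw_execbuf_copy_fence_user() has then already waited on the
	 * fence and dropped the user-space handle, so there is nothing
	 * left to clean up here.
	 */
	if (fence_rep.error == -EFAULT)
		return -EFAULT;

	return fence_rep.error;		/* 0 on success */
}
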
2375 | int vmw_execbuf_process(struct drm_file *file_priv, | 2397 | int vmw_execbuf_process(struct drm_file *file_priv, |
2376 | struct vmw_private *dev_priv, | 2398 | struct vmw_private *dev_priv, |
2377 | void __user *user_commands, | 2399 | void __user *user_commands, |
2378 | void *kernel_commands, | 2400 | void *kernel_commands, |
2379 | uint32_t command_size, | 2401 | uint32_t command_size, |
2380 | uint64_t throttle_us, | 2402 | uint64_t throttle_us, |
2381 | struct drm_vmw_fence_rep __user *user_fence_rep, | 2403 | struct drm_vmw_fence_rep __user *user_fence_rep, |
2382 | struct vmw_fence_obj **out_fence) | 2404 | struct vmw_fence_obj **out_fence) |
2383 | { | 2405 | { |
2384 | struct vmw_sw_context *sw_context = &dev_priv->ctx; | 2406 | struct vmw_sw_context *sw_context = &dev_priv->ctx; |
2385 | struct vmw_fence_obj *fence = NULL; | 2407 | struct vmw_fence_obj *fence = NULL; |
2386 | struct vmw_resource *error_resource; | 2408 | struct vmw_resource *error_resource; |
2387 | struct list_head resource_list; | 2409 | struct list_head resource_list; |
2388 | struct ww_acquire_ctx ticket; | 2410 | struct ww_acquire_ctx ticket; |
2389 | uint32_t handle; | 2411 | uint32_t handle; |
2390 | void *cmd; | 2412 | void *cmd; |
2391 | int ret; | 2413 | int ret; |
2392 | 2414 | ||
2393 | ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); | 2415 | ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); |
2394 | if (unlikely(ret != 0)) | 2416 | if (unlikely(ret != 0)) |
2395 | return -ERESTARTSYS; | 2417 | return -ERESTARTSYS; |
2396 | 2418 | ||
2397 | if (kernel_commands == NULL) { | 2419 | if (kernel_commands == NULL) { |
2398 | sw_context->kernel = false; | 2420 | sw_context->kernel = false; |
2399 | 2421 | ||
2400 | ret = vmw_resize_cmd_bounce(sw_context, command_size); | 2422 | ret = vmw_resize_cmd_bounce(sw_context, command_size); |
2401 | if (unlikely(ret != 0)) | 2423 | if (unlikely(ret != 0)) |
2402 | goto out_unlock; | 2424 | goto out_unlock; |
2403 | 2425 | ||
2404 | 2426 | ||
2405 | ret = copy_from_user(sw_context->cmd_bounce, | 2427 | ret = copy_from_user(sw_context->cmd_bounce, |
2406 | user_commands, command_size); | 2428 | user_commands, command_size); |
2407 | 2429 | ||
2408 | if (unlikely(ret != 0)) { | 2430 | if (unlikely(ret != 0)) { |
2409 | ret = -EFAULT; | 2431 | ret = -EFAULT; |
2410 | DRM_ERROR("Failed copying commands.\n"); | 2432 | DRM_ERROR("Failed copying commands.\n"); |
2411 | goto out_unlock; | 2433 | goto out_unlock; |
2412 | } | 2434 | } |
2413 | kernel_commands = sw_context->cmd_bounce; | 2435 | kernel_commands = sw_context->cmd_bounce; |
2414 | } else | 2436 | } else |
2415 | sw_context->kernel = true; | 2437 | sw_context->kernel = true; |
2416 | 2438 | ||
2417 | sw_context->fp = vmw_fpriv(file_priv); | 2439 | sw_context->fp = vmw_fpriv(file_priv); |
2418 | sw_context->cur_reloc = 0; | 2440 | sw_context->cur_reloc = 0; |
2419 | sw_context->cur_val_buf = 0; | 2441 | sw_context->cur_val_buf = 0; |
2420 | sw_context->fence_flags = 0; | 2442 | sw_context->fence_flags = 0; |
2421 | INIT_LIST_HEAD(&sw_context->resource_list); | 2443 | INIT_LIST_HEAD(&sw_context->resource_list); |
2422 | sw_context->cur_query_bo = dev_priv->pinned_bo; | 2444 | sw_context->cur_query_bo = dev_priv->pinned_bo; |
2423 | sw_context->last_query_ctx = NULL; | 2445 | sw_context->last_query_ctx = NULL; |
2424 | sw_context->needs_post_query_barrier = false; | 2446 | sw_context->needs_post_query_barrier = false; |
2425 | memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); | 2447 | memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); |
2426 | INIT_LIST_HEAD(&sw_context->validate_nodes); | 2448 | INIT_LIST_HEAD(&sw_context->validate_nodes); |
2427 | INIT_LIST_HEAD(&sw_context->res_relocations); | 2449 | INIT_LIST_HEAD(&sw_context->res_relocations); |
2428 | if (!sw_context->res_ht_initialized) { | 2450 | if (!sw_context->res_ht_initialized) { |
2429 | ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); | 2451 | ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); |
2430 | if (unlikely(ret != 0)) | 2452 | if (unlikely(ret != 0)) |
2431 | goto out_unlock; | 2453 | goto out_unlock; |
2432 | sw_context->res_ht_initialized = true; | 2454 | sw_context->res_ht_initialized = true; |
2433 | } | 2455 | } |
2434 | INIT_LIST_HEAD(&sw_context->staged_shaders); | 2456 | INIT_LIST_HEAD(&sw_context->staged_shaders); |
2435 | 2457 | ||
2436 | INIT_LIST_HEAD(&resource_list); | 2458 | INIT_LIST_HEAD(&resource_list); |
2437 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, | 2459 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, |
2438 | command_size); | 2460 | command_size); |
2439 | if (unlikely(ret != 0)) | 2461 | if (unlikely(ret != 0)) |
2440 | goto out_err_nores; | 2462 | goto out_err_nores; |
2441 | 2463 | ||
2442 | ret = vmw_resources_reserve(sw_context); | 2464 | ret = vmw_resources_reserve(sw_context); |
2443 | if (unlikely(ret != 0)) | 2465 | if (unlikely(ret != 0)) |
2444 | goto out_err_nores; | 2466 | goto out_err_nores; |
2445 | 2467 | ||
2446 | ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); | 2468 | ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); |
2447 | if (unlikely(ret != 0)) | 2469 | if (unlikely(ret != 0)) |
2448 | goto out_err; | 2470 | goto out_err; |
2449 | 2471 | ||
2450 | ret = vmw_validate_buffers(dev_priv, sw_context); | 2472 | ret = vmw_validate_buffers(dev_priv, sw_context); |
2451 | if (unlikely(ret != 0)) | 2473 | if (unlikely(ret != 0)) |
2452 | goto out_err; | 2474 | goto out_err; |
2453 | 2475 | ||
2454 | ret = vmw_resources_validate(sw_context); | 2476 | ret = vmw_resources_validate(sw_context); |
2455 | if (unlikely(ret != 0)) | 2477 | if (unlikely(ret != 0)) |
2456 | goto out_err; | 2478 | goto out_err; |
2457 | 2479 | ||
2458 | if (throttle_us) { | 2480 | if (throttle_us) { |
2459 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, | 2481 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, |
2460 | throttle_us); | 2482 | throttle_us); |
2461 | 2483 | ||
2462 | if (unlikely(ret != 0)) | 2484 | if (unlikely(ret != 0)) |
2463 | goto out_err; | 2485 | goto out_err; |
2464 | } | 2486 | } |
2465 | 2487 | ||
2466 | ret = mutex_lock_interruptible(&dev_priv->binding_mutex); | 2488 | ret = mutex_lock_interruptible(&dev_priv->binding_mutex); |
2467 | if (unlikely(ret != 0)) { | 2489 | if (unlikely(ret != 0)) { |
2468 | ret = -ERESTARTSYS; | 2490 | ret = -ERESTARTSYS; |
2469 | goto out_err; | 2491 | goto out_err; |
2470 | } | 2492 | } |
2471 | 2493 | ||
2472 | if (dev_priv->has_mob) { | 2494 | if (dev_priv->has_mob) { |
2473 | ret = vmw_rebind_contexts(sw_context); | 2495 | ret = vmw_rebind_contexts(sw_context); |
2474 | if (unlikely(ret != 0)) | 2496 | if (unlikely(ret != 0)) |
2475 | goto out_unlock_binding; | 2497 | goto out_unlock_binding; |
2476 | } | 2498 | } |
2477 | 2499 | ||
2478 | cmd = vmw_fifo_reserve(dev_priv, command_size); | 2500 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
2479 | if (unlikely(cmd == NULL)) { | 2501 | if (unlikely(cmd == NULL)) { |
2480 | DRM_ERROR("Failed reserving fifo space for commands.\n"); | 2502 | DRM_ERROR("Failed reserving fifo space for commands.\n"); |
2481 | ret = -ENOMEM; | 2503 | ret = -ENOMEM; |
2482 | goto out_unlock_binding; | 2504 | goto out_unlock_binding; |
2483 | } | 2505 | } |
2484 | 2506 | ||
2485 | vmw_apply_relocations(sw_context); | 2507 | vmw_apply_relocations(sw_context); |
2486 | memcpy(cmd, kernel_commands, command_size); | 2508 | memcpy(cmd, kernel_commands, command_size); |
2487 | 2509 | ||
2488 | vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); | 2510 | vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); |
2489 | vmw_resource_relocations_free(&sw_context->res_relocations); | 2511 | vmw_resource_relocations_free(&sw_context->res_relocations); |
2490 | 2512 | ||
2491 | vmw_fifo_commit(dev_priv, command_size); | 2513 | vmw_fifo_commit(dev_priv, command_size); |
2492 | 2514 | ||
2493 | vmw_query_bo_switch_commit(dev_priv, sw_context); | 2515 | vmw_query_bo_switch_commit(dev_priv, sw_context); |
2494 | ret = vmw_execbuf_fence_commands(file_priv, dev_priv, | 2516 | ret = vmw_execbuf_fence_commands(file_priv, dev_priv, |
2495 | &fence, | 2517 | &fence, |
2496 | (user_fence_rep) ? &handle : NULL); | 2518 | (user_fence_rep) ? &handle : NULL); |
2497 | /* | 2519 | /* |
2498 | * This error is harmless, because if fence submission fails, | 2520 | * This error is harmless, because if fence submission fails, |
2499 | * vmw_fifo_send_fence will sync. The error will be propagated to | 2521 | * vmw_fifo_send_fence will sync. The error will be propagated to |
2500 | * user-space in @user_fence_rep. | 2522 | * user-space in @user_fence_rep. |
2501 | */ | 2523 | */ |
2502 | 2524 | ||
2503 | if (ret != 0) | 2525 | if (ret != 0) |
2504 | DRM_ERROR("Fence submission error. Syncing.\n"); | 2526 | DRM_ERROR("Fence submission error. Syncing.\n"); |
2505 | 2527 | ||
2506 | vmw_resource_list_unreserve(&sw_context->resource_list, false); | 2528 | vmw_resource_list_unreserve(&sw_context->resource_list, false); |
2507 | mutex_unlock(&dev_priv->binding_mutex); | 2529 | mutex_unlock(&dev_priv->binding_mutex); |
2508 | 2530 | ||
2509 | ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, | 2531 | ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, |
2510 | (void *) fence); | 2532 | (void *) fence); |
2511 | 2533 | ||
2512 | if (unlikely(dev_priv->pinned_bo != NULL && | 2534 | if (unlikely(dev_priv->pinned_bo != NULL && |
2513 | !dev_priv->query_cid_valid)) | 2535 | !dev_priv->query_cid_valid)) |
2514 | __vmw_execbuf_release_pinned_bo(dev_priv, fence); | 2536 | __vmw_execbuf_release_pinned_bo(dev_priv, fence); |
2515 | 2537 | ||
2516 | vmw_clear_validations(sw_context); | 2538 | vmw_clear_validations(sw_context); |
2517 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, | 2539 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, |
2518 | user_fence_rep, fence, handle); | 2540 | user_fence_rep, fence, handle); |
2519 | 2541 | ||
2520 | /* Don't unreference when handing fence out */ | 2542 | /* Don't unreference when handing fence out */ |
2521 | if (unlikely(out_fence != NULL)) { | 2543 | if (unlikely(out_fence != NULL)) { |
2522 | *out_fence = fence; | 2544 | *out_fence = fence; |
2523 | fence = NULL; | 2545 | fence = NULL; |
2524 | } else if (likely(fence != NULL)) { | 2546 | } else if (likely(fence != NULL)) { |
2525 | vmw_fence_obj_unreference(&fence); | 2547 | vmw_fence_obj_unreference(&fence); |
2526 | } | 2548 | } |
2527 | 2549 | ||
2528 | list_splice_init(&sw_context->resource_list, &resource_list); | 2550 | list_splice_init(&sw_context->resource_list, &resource_list); |
2529 | vmw_compat_shaders_commit(sw_context->fp->shman, | 2551 | vmw_compat_shaders_commit(sw_context->fp->shman, |
2530 | &sw_context->staged_shaders); | 2552 | &sw_context->staged_shaders); |
2531 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 2553 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
2532 | 2554 | ||
2533 | /* | 2555 | /* |
2534 | * Unreference resources outside of the cmdbuf_mutex to | 2556 | * Unreference resources outside of the cmdbuf_mutex to |
2535 | * avoid deadlocks in resource destruction paths. | 2557 | * avoid deadlocks in resource destruction paths. |
2536 | */ | 2558 | */ |
2537 | vmw_resource_list_unreference(&resource_list); | 2559 | vmw_resource_list_unreference(&resource_list); |
2538 | 2560 | ||
2539 | return 0; | 2561 | return 0; |
2540 | 2562 | ||
2541 | out_unlock_binding: | 2563 | out_unlock_binding: |
2542 | mutex_unlock(&dev_priv->binding_mutex); | 2564 | mutex_unlock(&dev_priv->binding_mutex); |
2543 | out_err: | 2565 | out_err: |
2544 | ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); | 2566 | ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); |
2545 | out_err_nores: | 2567 | out_err_nores: |
2546 | vmw_resource_list_unreserve(&sw_context->resource_list, true); | 2568 | vmw_resource_list_unreserve(&sw_context->resource_list, true); |
2547 | vmw_resource_relocations_free(&sw_context->res_relocations); | 2569 | vmw_resource_relocations_free(&sw_context->res_relocations); |
2548 | vmw_free_relocations(sw_context); | 2570 | vmw_free_relocations(sw_context); |
2549 | vmw_clear_validations(sw_context); | 2571 | vmw_clear_validations(sw_context); |
2550 | if (unlikely(dev_priv->pinned_bo != NULL && | 2572 | if (unlikely(dev_priv->pinned_bo != NULL && |
2551 | !dev_priv->query_cid_valid)) | 2573 | !dev_priv->query_cid_valid)) |
2552 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); | 2574 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); |
2553 | out_unlock: | 2575 | out_unlock: |
2554 | list_splice_init(&sw_context->resource_list, &resource_list); | 2576 | list_splice_init(&sw_context->resource_list, &resource_list); |
2555 | error_resource = sw_context->error_resource; | 2577 | error_resource = sw_context->error_resource; |
2556 | sw_context->error_resource = NULL; | 2578 | sw_context->error_resource = NULL; |
2557 | vmw_compat_shaders_revert(sw_context->fp->shman, | 2579 | vmw_compat_shaders_revert(sw_context->fp->shman, |
2558 | &sw_context->staged_shaders); | 2580 | &sw_context->staged_shaders); |
2559 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 2581 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
2560 | 2582 | ||
2561 | /* | 2583 | /* |
2562 | * Unreference resources outside of the cmdbuf_mutex to | 2584 | * Unreference resources outside of the cmdbuf_mutex to |
2563 | * avoid deadlocks in resource destruction paths. | 2585 | * avoid deadlocks in resource destruction paths. |
2564 | */ | 2586 | */ |
2565 | vmw_resource_list_unreference(&resource_list); | 2587 | vmw_resource_list_unreference(&resource_list); |
2566 | if (unlikely(error_resource != NULL)) | 2588 | if (unlikely(error_resource != NULL)) |
2567 | vmw_resource_unreference(&error_resource); | 2589 | vmw_resource_unreference(&error_resource); |
2568 | 2590 | ||
2569 | return ret; | 2591 | return ret; |
2570 | } | 2592 | } |
2571 | 2593 | ||
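Both exit paths above end with the same two steps: splice the resource list out of the software context while cmdbuf_mutex is held, then unreference it after the unlock. A condensed sketch of just that pattern, for illustration only, using the same identifiers:

/* Detach the resources while the lock still protects the shared list. */
struct list_head resource_list;

INIT_LIST_HEAD(&resource_list);
mutex_lock(&dev_priv->cmdbuf_mutex);
list_splice_init(&sw_context->resource_list, &resource_list);
mutex_unlock(&dev_priv->cmdbuf_mutex);

/*
 * Drop the last references unlocked: a resource destructor that needs
 * cmdbuf_mutex (or a lock ordered after it) can then run without
 * deadlocking against this thread.
 */
vmw_resource_list_unreference(&resource_list);
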
2572 | /** | 2594 | /** |
2573 | * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer. | 2595 | * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer. |
2574 | * | 2596 | * |
2575 | * @dev_priv: The device private structure. | 2597 | * @dev_priv: The device private structure. |
2576 | * | 2598 | * |
2577 | * This function is called to idle the fifo and unpin the query buffer | 2599 | * This function is called to idle the fifo and unpin the query buffer |
2578 | * if the normal way to do this hits an error, which should typically be | 2600 | * if the normal way to do this hits an error, which should typically be |
2579 | * extremely rare. | 2601 | * extremely rare. |
2580 | */ | 2602 | */ |
2581 | static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) | 2603 | static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) |
2582 | { | 2604 | { |
2583 | DRM_ERROR("Can't unpin query buffer. Trying to recover.\n"); | 2605 | DRM_ERROR("Can't unpin query buffer. Trying to recover.\n"); |
2584 | 2606 | ||
2585 | (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ); | 2607 | (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ); |
2586 | vmw_bo_pin(dev_priv->pinned_bo, false); | 2608 | vmw_bo_pin(dev_priv->pinned_bo, false); |
2587 | vmw_bo_pin(dev_priv->dummy_query_bo, false); | 2609 | vmw_bo_pin(dev_priv->dummy_query_bo, false); |
2588 | dev_priv->dummy_query_bo_pinned = false; | 2610 | dev_priv->dummy_query_bo_pinned = false; |
2589 | } | 2611 | } |
2590 | 2612 | ||
2591 | 2613 | ||
2592 | /** | 2614 | /** |
2593 | * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned | 2615 | * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned |
2594 | * query bo. | 2616 | * query bo. |
2595 | * | 2617 | * |
2596 | * @dev_priv: The device private structure. | 2618 | * @dev_priv: The device private structure. |
2597 | * @fence: If non-NULL, should point to a struct vmw_fence_obj issued | 2619 | * @fence: If non-NULL, should point to a struct vmw_fence_obj issued |
2598 | * _after_ a query barrier that flushes all queries touching the current | 2620 | * _after_ a query barrier that flushes all queries touching the current |
2599 | * buffer pointed to by @dev_priv->pinned_bo | 2621 | * buffer pointed to by @dev_priv->pinned_bo |
2600 | * | 2622 | * |
2601 | * This function should be used to unpin the pinned query bo, or | 2623 | * This function should be used to unpin the pinned query bo, or |
2602 | * as a query barrier when we need to make sure that all queries have | 2624 | * as a query barrier when we need to make sure that all queries have |
2603 | * finished before the next fifo command. (For example on hardware | 2625 | * finished before the next fifo command. (For example on hardware |
2604 | * context destructions where the hardware may otherwise leak unfinished | 2626 | * context destructions where the hardware may otherwise leak unfinished |
2605 | * queries). | 2627 | * queries). |
2606 | * | 2628 | * |
2607 | * This function does not return any failure codes, but makes attempts | 2629 | * This function does not return any failure codes, but makes attempts |
2608 | * to unpin safely in case of errors. | 2630 | * to unpin safely in case of errors. |
2609 | * | 2631 | * |
2610 | * The function will synchronize on the previous query barrier, and will | 2632 | * The function will synchronize on the previous query barrier, and will |
2611 | * thus not finish until that barrier has executed. | 2633 | * thus not finish until that barrier has executed. |
2612 | * | 2634 | * |
2613 | * The @dev_priv->cmdbuf_mutex needs to be held by the current thread | 2635 | * The @dev_priv->cmdbuf_mutex needs to be held by the current thread |
2614 | * before calling this function. | 2636 | * before calling this function. |
2615 | */ | 2637 | */ |
2616 | void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | 2638 | void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, |
2617 | struct vmw_fence_obj *fence) | 2639 | struct vmw_fence_obj *fence) |
2618 | { | 2640 | { |
2619 | int ret = 0; | 2641 | int ret = 0; |
2620 | struct list_head validate_list; | 2642 | struct list_head validate_list; |
2621 | struct ttm_validate_buffer pinned_val, query_val; | 2643 | struct ttm_validate_buffer pinned_val, query_val; |
2622 | struct vmw_fence_obj *lfence = NULL; | 2644 | struct vmw_fence_obj *lfence = NULL; |
2623 | struct ww_acquire_ctx ticket; | 2645 | struct ww_acquire_ctx ticket; |
2624 | 2646 | ||
2625 | if (dev_priv->pinned_bo == NULL) | 2647 | if (dev_priv->pinned_bo == NULL) |
2626 | goto out_unlock; | 2648 | goto out_unlock; |
2627 | 2649 | ||
2628 | INIT_LIST_HEAD(&validate_list); | 2650 | INIT_LIST_HEAD(&validate_list); |
2629 | 2651 | ||
2630 | pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); | 2652 | pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); |
2631 | list_add_tail(&pinned_val.head, &validate_list); | 2653 | list_add_tail(&pinned_val.head, &validate_list); |
2632 | 2654 | ||
2633 | query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); | 2655 | query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); |
2634 | list_add_tail(&query_val.head, &validate_list); | 2656 | list_add_tail(&query_val.head, &validate_list); |
2635 | 2657 | ||
2636 | do { | 2658 | do { |
2637 | ret = ttm_eu_reserve_buffers(&ticket, &validate_list); | 2659 | ret = ttm_eu_reserve_buffers(&ticket, &validate_list); |
2638 | } while (ret == -ERESTARTSYS); | 2660 | } while (ret == -ERESTARTSYS); |
2639 | 2661 | ||
2640 | if (unlikely(ret != 0)) { | 2662 | if (unlikely(ret != 0)) { |
2641 | vmw_execbuf_unpin_panic(dev_priv); | 2663 | vmw_execbuf_unpin_panic(dev_priv); |
2642 | goto out_no_reserve; | 2664 | goto out_no_reserve; |
2643 | } | 2665 | } |
2644 | 2666 | ||
2645 | if (dev_priv->query_cid_valid) { | 2667 | if (dev_priv->query_cid_valid) { |
2646 | BUG_ON(fence != NULL); | 2668 | BUG_ON(fence != NULL); |
2647 | ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); | 2669 | ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); |
2648 | if (unlikely(ret != 0)) { | 2670 | if (unlikely(ret != 0)) { |
2649 | vmw_execbuf_unpin_panic(dev_priv); | 2671 | vmw_execbuf_unpin_panic(dev_priv); |
2650 | goto out_no_emit; | 2672 | goto out_no_emit; |
2651 | } | 2673 | } |
2652 | dev_priv->query_cid_valid = false; | 2674 | dev_priv->query_cid_valid = false; |
2653 | } | 2675 | } |
2654 | 2676 | ||
2655 | vmw_bo_pin(dev_priv->pinned_bo, false); | 2677 | vmw_bo_pin(dev_priv->pinned_bo, false); |
2656 | vmw_bo_pin(dev_priv->dummy_query_bo, false); | 2678 | vmw_bo_pin(dev_priv->dummy_query_bo, false); |
2657 | dev_priv->dummy_query_bo_pinned = false; | 2679 | dev_priv->dummy_query_bo_pinned = false; |
2658 | 2680 | ||
2659 | if (fence == NULL) { | 2681 | if (fence == NULL) { |
2660 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence, | 2682 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence, |
2661 | NULL); | 2683 | NULL); |
2662 | fence = lfence; | 2684 | fence = lfence; |
2663 | } | 2685 | } |
2664 | ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence); | 2686 | ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence); |
2665 | if (lfence != NULL) | 2687 | if (lfence != NULL) |
2666 | vmw_fence_obj_unreference(&lfence); | 2688 | vmw_fence_obj_unreference(&lfence); |
2667 | 2689 | ||
2668 | ttm_bo_unref(&query_val.bo); | 2690 | ttm_bo_unref(&query_val.bo); |
2669 | ttm_bo_unref(&pinned_val.bo); | 2691 | ttm_bo_unref(&pinned_val.bo); |
2670 | ttm_bo_unref(&dev_priv->pinned_bo); | 2692 | ttm_bo_unref(&dev_priv->pinned_bo); |
2671 | 2693 | ||
2672 | out_unlock: | 2694 | out_unlock: |
2673 | return; | 2695 | return; |
2674 | 2696 | ||
2675 | out_no_emit: | 2697 | out_no_emit: |
2676 | ttm_eu_backoff_reservation(&ticket, &validate_list); | 2698 | ttm_eu_backoff_reservation(&ticket, &validate_list); |
2677 | out_no_reserve: | 2699 | out_no_reserve: |
2678 | ttm_bo_unref(&query_val.bo); | 2700 | ttm_bo_unref(&query_val.bo); |
2679 | ttm_bo_unref(&pinned_val.bo); | 2701 | ttm_bo_unref(&pinned_val.bo); |
2680 | ttm_bo_unref(&dev_priv->pinned_bo); | 2702 | ttm_bo_unref(&dev_priv->pinned_bo); |
2681 | } | 2703 | } |
2682 | 2704 | ||
2683 | /** | 2705 | /** |
2684 | * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned | 2706 | * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned |
2685 | * query bo. | 2707 | * query bo. |
2686 | * | 2708 | * |
2687 | * @dev_priv: The device private structure. | 2709 | * @dev_priv: The device private structure. |
2688 | * | 2710 | * |
2689 | * This function should be used to unpin the pinned query bo, or | 2711 | * This function should be used to unpin the pinned query bo, or |
2690 | * as a query barrier when we need to make sure that all queries have | 2712 | * as a query barrier when we need to make sure that all queries have |
2691 | * finished before the next fifo command. (For example on hardware | 2713 | * finished before the next fifo command. (For example on hardware |
2692 | * context destructions where the hardware may otherwise leak unfinished | 2714 | * context destructions where the hardware may otherwise leak unfinished |
2693 | * queries). | 2715 | * queries). |
2694 | * | 2716 | * |
2695 | * This function does not return any failure codes, but makes attempts | 2717 | * This function does not return any failure codes, but makes attempts |
2696 | * to unpin safely in case of errors. | 2718 | * to unpin safely in case of errors. |
2697 | * | 2719 | * |
2698 | * The function will synchronize on the previous query barrier, and will | 2720 | * The function will synchronize on the previous query barrier, and will |
2699 | * thus not finish until that barrier has executed. | 2721 | * thus not finish until that barrier has executed. |
2700 | */ | 2722 | */ |
2701 | void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv) | 2723 | void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv) |
2702 | { | 2724 | { |
2703 | mutex_lock(&dev_priv->cmdbuf_mutex); | 2725 | mutex_lock(&dev_priv->cmdbuf_mutex); |
2704 | if (dev_priv->query_cid_valid) | 2726 | if (dev_priv->query_cid_valid) |
2705 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); | 2727 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); |
2706 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 2728 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
2707 | } | 2729 | } |
2708 | 2730 | ||
2709 | 2731 | ||
2710 | int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | 2732 | int vmw_execbuf_ioctl(struct drm_device *dev, void *data, |
2711 | struct drm_file *file_priv) | 2733 | struct drm_file *file_priv) |
2712 | { | 2734 | { |
2713 | struct vmw_private *dev_priv = vmw_priv(dev); | 2735 | struct vmw_private *dev_priv = vmw_priv(dev); |
2714 | struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; | 2736 | struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; |
2715 | int ret; | 2737 | int ret; |
2716 | 2738 | ||
2717 | /* | 2739 | /* |
2718 | * This will allow us to extend the ioctl argument while | 2740 | * This will allow us to extend the ioctl argument while |
2719 | * maintaining backwards compatibility: | 2741 | * maintaining backwards compatibility: |
2720 | * we take different code paths depending on the value of | 2742 | * we take different code paths depending on the value of |
2721 | * arg->version. | 2743 | * arg->version. |
2722 | */ | 2744 | */ |
2723 | 2745 | ||
2724 | if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) { | 2746 | if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) { |
2725 | DRM_ERROR("Incorrect execbuf version.\n"); | 2747 | DRM_ERROR("Incorrect execbuf version.\n"); |
2726 | DRM_ERROR("You're running outdated experimental " | 2748 | DRM_ERROR("You're running outdated experimental " |
2727 | "vmwgfx user-space drivers."); | 2749 | "vmwgfx user-space drivers."); |
2728 | return -EINVAL; | 2750 | return -EINVAL; |
2729 | } | 2751 | } |
2730 | 2752 | ||
2731 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); | 2753 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
2732 | if (unlikely(ret != 0)) | 2754 | if (unlikely(ret != 0)) |
2733 | return ret; | 2755 | return ret; |
2734 | 2756 | ||
2735 | ret = vmw_execbuf_process(file_priv, dev_priv, | 2757 | ret = vmw_execbuf_process(file_priv, dev_priv, |
2736 | (void __user *)(unsigned long)arg->commands, | 2758 | (void __user *)(unsigned long)arg->commands, |
2737 | NULL, arg->command_size, arg->throttle_us, | 2759 | NULL, arg->command_size, arg->throttle_us, |
2738 | (void __user *)(unsigned long)arg->fence_rep, | 2760 | (void __user *)(unsigned long)arg->fence_rep, |
2739 | NULL); | 2761 | NULL); |
2740 | 2762 | ||
2741 | if (unlikely(ret != 0)) | 2763 | if (unlikely(ret != 0)) |
2742 | goto out_unlock; | 2764 | goto out_unlock; |
2743 | 2765 | ||
2744 | vmw_kms_cursor_post_execbuf(dev_priv); | 2766 | vmw_kms_cursor_post_execbuf(dev_priv); |
2745 | 2767 | ||
2746 | out_unlock: | 2768 | out_unlock: |
2747 | ttm_read_unlock(&dev_priv->reservation_sem); | 2769 | ttm_read_unlock(&dev_priv->reservation_sem); |
2748 | return ret; | 2770 | return ret; |
2749 | } | 2771 | } |
2750 | 2772 |
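The version check above expects every caller to fill in the version it was built against. A user-space sketch, again assuming the vmwgfx UAPI header and libdrm; cmd_buf, cmd_bytes and fence_rep are placeholders:

struct drm_vmw_execbuf_arg arg;

memset(&arg, 0, sizeof(arg));
arg.commands = (unsigned long) cmd_buf;      /* SVGA command stream  */
arg.command_size = cmd_bytes;                /* size in bytes        */
arg.throttle_us = 0;                         /* no lag throttling    */
arg.fence_rep = (unsigned long) &fence_rep;  /* optional; may be 0   */
arg.version = DRM_VMW_EXECBUF_VERSION;       /* gates the code path  */

ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));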