Commit e797ebd1cbc75f6bdc2fe4f7ab39dd040d171994
Committed by
Jacob Stiffler
1 parent
f0feb1b9a2
Exists in
smarc-ti-linux-3.14.y
and in
1 other branch
drm/prime: Add DRM_RDWR for prime export
mmap requires O_RDWR to be set if MAP_SHARED is requested with PROT_WRITE. Add DRM_RDWR (defined as O_RDWR) as a valid flag that can be passed to drm_prime_handle_to_fd_ioctl for buffers that need to be mapped writable. Change-Id: If130b484efc917aa56562c4edb715b2a07687ce4 Signed-off-by: Hemant Hariyani <hemanthariyani@ti.com>
Showing 2 changed files with 5 additions and 3 deletions Inline Diff
drivers/gpu/drm/drm_prime.c
1 | /* | 1 | /* |
2 | * Copyright © 2012 Red Hat | 2 | * Copyright © 2012 Red Hat |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation | 6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the | 8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: | 9 | * Software is furnished to do so, subject to the following conditions: |
10 | * | 10 | * |
11 | * The above copyright notice and this permission notice (including the next | 11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the | 12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. | 13 | * Software. |
14 | * | 14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
21 | * IN THE SOFTWARE. | 21 | * IN THE SOFTWARE. |
22 | * | 22 | * |
23 | * Authors: | 23 | * Authors: |
24 | * Dave Airlie <airlied@redhat.com> | 24 | * Dave Airlie <airlied@redhat.com> |
25 | * Rob Clark <rob.clark@linaro.org> | 25 | * Rob Clark <rob.clark@linaro.org> |
26 | * | 26 | * |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #include <linux/export.h> | 29 | #include <linux/export.h> |
30 | #include <linux/dma-buf.h> | 30 | #include <linux/dma-buf.h> |
31 | #include <drm/drmP.h> | 31 | #include <drm/drmP.h> |
32 | 32 | ||
33 | /* | 33 | /* |
34 | * DMA-BUF/GEM Object references and lifetime overview: | 34 | * DMA-BUF/GEM Object references and lifetime overview: |
35 | * | 35 | * |
36 | * On the export the dma_buf holds a reference to the exporting GEM | 36 | * On the export the dma_buf holds a reference to the exporting GEM |
37 | * object. It takes this reference in handle_to_fd_ioctl, when it | 37 | * object. It takes this reference in handle_to_fd_ioctl, when it |
38 | * first calls .prime_export and stores the exporting GEM object in | 38 | * first calls .prime_export and stores the exporting GEM object in |
39 | * the dma_buf priv. This reference is released when the dma_buf | 39 | * the dma_buf priv. This reference is released when the dma_buf |
40 | * object goes away in the driver .release function. | 40 | * object goes away in the driver .release function. |
41 | * | 41 | * |
42 | * On the import the importing GEM object holds a reference to the | 42 | * On the import the importing GEM object holds a reference to the |
43 | * dma_buf (which in turn holds a ref to the exporting GEM object). | 43 | * dma_buf (which in turn holds a ref to the exporting GEM object). |
44 | * It takes that reference in the fd_to_handle ioctl. | 44 | * It takes that reference in the fd_to_handle ioctl. |
45 | * It calls dma_buf_get, creates an attachment to it and stores the | 45 | * It calls dma_buf_get, creates an attachment to it and stores the |
46 | * attachment in the GEM object. When this attachment is destroyed | 46 | * attachment in the GEM object. When this attachment is destroyed |
47 | * when the imported object is destroyed, we remove the attachment | 47 | * when the imported object is destroyed, we remove the attachment |
48 | * and drop the reference to the dma_buf. | 48 | * and drop the reference to the dma_buf. |
49 | * | 49 | * |
50 | * Thus the chain of references always flows in one direction | 50 | * Thus the chain of references always flows in one direction |
51 | * (avoiding loops): importing_gem -> dmabuf -> exporting_gem | 51 | * (avoiding loops): importing_gem -> dmabuf -> exporting_gem |
52 | * | 52 | * |
53 | * Self-importing: if userspace is using PRIME as a replacement for flink | 53 | * Self-importing: if userspace is using PRIME as a replacement for flink |
54 | * then it will get a fd->handle request for a GEM object that it created. | 54 | * then it will get a fd->handle request for a GEM object that it created. |
55 | * Drivers should detect this situation and return back the gem object | 55 | * Drivers should detect this situation and return back the gem object |
56 | * from the dma-buf private. Prime will do this automatically for drivers that | 56 | * from the dma-buf private. Prime will do this automatically for drivers that |
57 | * use the drm_gem_prime_{import,export} helpers. | 57 | * use the drm_gem_prime_{import,export} helpers. |
58 | */ | 58 | */ |
59 | 59 | ||
/*
 * Per-file cache entry pairing a dma_buf with the GEM handle it maps to
 * in that file's handle space. Entries hold a reference on the dma_buf
 * (taken in drm_prime_add_buf_handle, dropped on removal).
 */
struct drm_prime_member {
	struct list_head entry;		/* link in drm_prime_file_private.head */
	struct dma_buf *dma_buf;	/* referenced dma_buf */
	uint32_t handle;		/* GEM handle in the owning file */
};
65 | 65 | ||
/*
 * Per-attachment state (stored in dma_buf_attachment.priv) used by the
 * drm_gem_map_* helpers to cache a single sg_table mapping.
 */
struct drm_prime_attachment {
	struct sg_table *sgt;		/* cached mapping, NULL until first map */
	enum dma_data_direction dir;	/* direction of cached map, DMA_NONE if none */
};
70 | 70 | ||
71 | static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) | 71 | static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) |
72 | { | 72 | { |
73 | struct drm_prime_member *member; | 73 | struct drm_prime_member *member; |
74 | 74 | ||
75 | member = kmalloc(sizeof(*member), GFP_KERNEL); | 75 | member = kmalloc(sizeof(*member), GFP_KERNEL); |
76 | if (!member) | 76 | if (!member) |
77 | return -ENOMEM; | 77 | return -ENOMEM; |
78 | 78 | ||
79 | get_dma_buf(dma_buf); | 79 | get_dma_buf(dma_buf); |
80 | member->dma_buf = dma_buf; | 80 | member->dma_buf = dma_buf; |
81 | member->handle = handle; | 81 | member->handle = handle; |
82 | list_add(&member->entry, &prime_fpriv->head); | 82 | list_add(&member->entry, &prime_fpriv->head); |
83 | return 0; | 83 | return 0; |
84 | } | 84 | } |
85 | 85 | ||
86 | static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv, | 86 | static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv, |
87 | uint32_t handle) | 87 | uint32_t handle) |
88 | { | 88 | { |
89 | struct drm_prime_member *member; | 89 | struct drm_prime_member *member; |
90 | 90 | ||
91 | list_for_each_entry(member, &prime_fpriv->head, entry) { | 91 | list_for_each_entry(member, &prime_fpriv->head, entry) { |
92 | if (member->handle == handle) | 92 | if (member->handle == handle) |
93 | return member->dma_buf; | 93 | return member->dma_buf; |
94 | } | 94 | } |
95 | 95 | ||
96 | return NULL; | 96 | return NULL; |
97 | } | 97 | } |
98 | 98 | ||
99 | static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, | 99 | static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, |
100 | struct dma_buf *dma_buf, | 100 | struct dma_buf *dma_buf, |
101 | uint32_t *handle) | 101 | uint32_t *handle) |
102 | { | 102 | { |
103 | struct drm_prime_member *member; | 103 | struct drm_prime_member *member; |
104 | 104 | ||
105 | list_for_each_entry(member, &prime_fpriv->head, entry) { | 105 | list_for_each_entry(member, &prime_fpriv->head, entry) { |
106 | if (member->dma_buf == dma_buf) { | 106 | if (member->dma_buf == dma_buf) { |
107 | *handle = member->handle; | 107 | *handle = member->handle; |
108 | return 0; | 108 | return 0; |
109 | } | 109 | } |
110 | } | 110 | } |
111 | return -ENOENT; | 111 | return -ENOENT; |
112 | } | 112 | } |
113 | 113 | ||
114 | static int drm_gem_map_attach(struct dma_buf *dma_buf, | 114 | static int drm_gem_map_attach(struct dma_buf *dma_buf, |
115 | struct device *target_dev, | 115 | struct device *target_dev, |
116 | struct dma_buf_attachment *attach) | 116 | struct dma_buf_attachment *attach) |
117 | { | 117 | { |
118 | struct drm_prime_attachment *prime_attach; | 118 | struct drm_prime_attachment *prime_attach; |
119 | struct drm_gem_object *obj = dma_buf->priv; | 119 | struct drm_gem_object *obj = dma_buf->priv; |
120 | struct drm_device *dev = obj->dev; | 120 | struct drm_device *dev = obj->dev; |
121 | 121 | ||
122 | prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL); | 122 | prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL); |
123 | if (!prime_attach) | 123 | if (!prime_attach) |
124 | return -ENOMEM; | 124 | return -ENOMEM; |
125 | 125 | ||
126 | prime_attach->dir = DMA_NONE; | 126 | prime_attach->dir = DMA_NONE; |
127 | attach->priv = prime_attach; | 127 | attach->priv = prime_attach; |
128 | 128 | ||
129 | if (!dev->driver->gem_prime_pin) | 129 | if (!dev->driver->gem_prime_pin) |
130 | return 0; | 130 | return 0; |
131 | 131 | ||
132 | return dev->driver->gem_prime_pin(obj); | 132 | return dev->driver->gem_prime_pin(obj); |
133 | } | 133 | } |
134 | 134 | ||
/*
 * dma_buf .detach callback: undo drm_gem_map_attach. Unpins the backing
 * GEM object (when the driver provides gem_prime_unpin) and releases any
 * sg_table mapping cached in the attachment's drm_prime_attachment state.
 */
static void drm_gem_map_detach(struct dma_buf *dma_buf,
		struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	/* defensive: attach->priv may not have been set up */
	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		/* only unmap if drm_gem_map_dma_buf actually mapped it */
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
					prime_attach->dir);
		sg_free_table(sgt);
	}

	/* kfree(NULL) is a no-op, so this is safe when no sgt was cached */
	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}
161 | 161 | ||
162 | void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, | 162 | void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, |
163 | struct dma_buf *dma_buf) | 163 | struct dma_buf *dma_buf) |
164 | { | 164 | { |
165 | struct drm_prime_member *member, *safe; | 165 | struct drm_prime_member *member, *safe; |
166 | 166 | ||
167 | list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) { | 167 | list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) { |
168 | if (member->dma_buf == dma_buf) { | 168 | if (member->dma_buf == dma_buf) { |
169 | dma_buf_put(dma_buf); | 169 | dma_buf_put(dma_buf); |
170 | list_del(&member->entry); | 170 | list_del(&member->entry); |
171 | kfree(member); | 171 | kfree(member); |
172 | } | 172 | } |
173 | } | 173 | } |
174 | } | 174 | } |
175 | 175 | ||
/*
 * dma_buf .map_dma_buf callback: return an sg_table for the exported GEM
 * object, mapped for @dir on the attached device. The first successful
 * mapping is cached in the attachment and handed back on later calls
 * with the same direction; asking for a different direction on an
 * already-mapped attachment returns -EBUSY.
 */
static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
		enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			/* dma_map_sg failed: free the table, report -ENOMEM */
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			/* cache the mapping for reuse; freed in map_detach */
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}
212 | 212 | ||
/*
 * dma_buf .unmap_dma_buf callback: intentionally empty. Mappings are
 * cached per-attachment and only released in drm_gem_map_detach.
 */
static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	/* nothing to be done here */
}
218 | 218 | ||
/**
 * drm_gem_dmabuf_release - dma_buf .release implementation for GEM exports
 * @dma_buf: dma-buf being destroyed
 *
 * Drops the GEM object reference that the export took (see
 * export_and_register_object), breaking the dmabuf -> gem link.
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/* drop the reference on the export fd holds */
	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
227 | 227 | ||
228 | static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) | 228 | static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) |
229 | { | 229 | { |
230 | struct drm_gem_object *obj = dma_buf->priv; | 230 | struct drm_gem_object *obj = dma_buf->priv; |
231 | struct drm_device *dev = obj->dev; | 231 | struct drm_device *dev = obj->dev; |
232 | 232 | ||
233 | return dev->driver->gem_prime_vmap(obj); | 233 | return dev->driver->gem_prime_vmap(obj); |
234 | } | 234 | } |
235 | 235 | ||
236 | static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) | 236 | static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) |
237 | { | 237 | { |
238 | struct drm_gem_object *obj = dma_buf->priv; | 238 | struct drm_gem_object *obj = dma_buf->priv; |
239 | struct drm_device *dev = obj->dev; | 239 | struct drm_device *dev = obj->dev; |
240 | 240 | ||
241 | dev->driver->gem_prime_vunmap(obj, vaddr); | 241 | dev->driver->gem_prime_vunmap(obj, vaddr); |
242 | } | 242 | } |
243 | 243 | ||
/* dma_buf .kmap_atomic stub: per-page kernel mapping is not supported. */
static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}
249 | 249 | ||
/* dma_buf .kunmap_atomic stub: nothing to undo, kmap_atomic returns NULL. */
static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}
/* dma_buf .kmap stub: per-page kernel mapping is not supported. */
static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}
260 | 260 | ||
/* dma_buf .kunmap stub: nothing to undo, kmap returns NULL. */
static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}
266 | 266 | ||
267 | static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, | 267 | static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, |
268 | struct vm_area_struct *vma) | 268 | struct vm_area_struct *vma) |
269 | { | 269 | { |
270 | struct drm_gem_object *obj = dma_buf->priv; | 270 | struct drm_gem_object *obj = dma_buf->priv; |
271 | struct drm_device *dev = obj->dev; | 271 | struct drm_device *dev = obj->dev; |
272 | 272 | ||
273 | if (!dev->driver->gem_prime_mmap) | 273 | if (!dev->driver->gem_prime_mmap) |
274 | return -ENOSYS; | 274 | return -ENOSYS; |
275 | 275 | ||
276 | return dev->driver->gem_prime_mmap(obj, vma); | 276 | return dev->driver->gem_prime_mmap(obj, vma); |
277 | } | 277 | } |
278 | 278 | ||
/*
 * dma_buf operations installed on every buffer exported through the GEM
 * PRIME helpers. Most callbacks forward to the corresponding driver hook;
 * the kmap/kmap_atomic callbacks are stubs returning NULL.
 */
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
293 | 293 | ||
294 | /** | 294 | /** |
295 | * DOC: PRIME Helpers | 295 | * DOC: PRIME Helpers |
296 | * | 296 | * |
297 | * Drivers can implement @gem_prime_export and @gem_prime_import in terms of | 297 | * Drivers can implement @gem_prime_export and @gem_prime_import in terms of |
298 | * simpler APIs by using the helper functions @drm_gem_prime_export and | 298 | * simpler APIs by using the helper functions @drm_gem_prime_export and |
299 | * @drm_gem_prime_import. These functions implement dma-buf support in terms of | 299 | * @drm_gem_prime_import. These functions implement dma-buf support in terms of |
300 | * five lower-level driver callbacks: | 300 | * five lower-level driver callbacks: |
301 | * | 301 | * |
302 | * Export callbacks: | 302 | * Export callbacks: |
303 | * | 303 | * |
304 | * - @gem_prime_pin (optional): prepare a GEM object for exporting | 304 | * - @gem_prime_pin (optional): prepare a GEM object for exporting |
305 | * | 305 | * |
306 | * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages | 306 | * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages |
307 | * | 307 | * |
308 | * - @gem_prime_vmap: vmap a buffer exported by your driver | 308 | * - @gem_prime_vmap: vmap a buffer exported by your driver |
309 | * | 309 | * |
310 | * - @gem_prime_vunmap: vunmap a buffer exported by your driver | 310 | * - @gem_prime_vunmap: vunmap a buffer exported by your driver |
311 | * | 311 | * |
312 | * Import callback: | 312 | * Import callback: |
313 | * | 313 | * |
314 | * - @gem_prime_import_sg_table (import): produce a GEM object from another | 314 | * - @gem_prime_import_sg_table (import): produce a GEM object from another |
315 | * driver's scatter/gather table | 315 | * driver's scatter/gather table |
316 | */ | 316 | */ |
317 | 317 | ||
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device exporting the object
 * @obj: GEM object to export
 * @flags: fd flags forwarded to dma_buf_export() (e.g. O_CLOEXEC;
 *         this branch also allows DRM_RDWR/O_RDWR per the commit message
 *         -- confirm against the ioctl-level flag validation)
 *
 * Wraps @obj in a dma_buf using the common drm_gem_prime_dmabuf_ops.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
}
EXPORT_SYMBOL(drm_gem_prime_export);
324 | 324 | ||
/*
 * Create a dma_buf for @obj via the driver's gem_prime_export hook and
 * publish it in obj->dma_buf (taking an extra dma_buf reference for the
 * cache and a GEM reference for the dma_buf). Called with
 * dev->object_name_lock held by drm_gem_prime_handle_to_fd so the
 * handle_count check cannot race with gem_close.
 */
static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);
	/* Grab a new ref since the callers is now used by the dma-buf */
	drm_gem_object_reference(obj);

	return dmabuf;
}
357 | 357 | ||
/**
 * drm_gem_prime_handle_to_fd - PRIME export: turn a GEM handle into a dma-buf fd
 * @dev: DRM device
 * @file_priv: drm file owning @handle
 * @handle: GEM handle to export
 * @flags: fd flags passed through to dma_buf_fd() (e.g. O_CLOEXEC)
 * @prime_fd: on success, filled with the new file descriptor
 *
 * Exports the GEM object behind @handle as a dma-buf fd. An existing
 * dma-buf is re-used when the object was previously exported through
 * this file (handle cache), was itself imported (import_attach), or was
 * exported via another file (obj->dma_buf) -- so repeated exports hand
 * back the same underlying buffer.
 *
 * Returns 0 on success or a negative error code.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
		int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)  {
		ret = -ENOENT;
		goto out_unlock;
	}

	/* fast path: this handle was already exported through this file */
	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	/* object already exported elsewhere: share the cached dma_buf */
	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	/* first export: create the dma_buf (under object_name_lock) */
	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't miss to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	/* install the fd; dma_buf_fd returns the fd number or a negative errno */
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
443 | 443 | ||
/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import the buffer into
 * @dma_buf: dma-buf to import
 *
 * Self-imports (a dma-buf this same device exported through the PRIME
 * helpers) short-circuit to the original GEM object with an extra GEM
 * reference. Otherwise the buffer is attached, mapped bidirectionally
 * and handed to the driver's gem_prime_import_sg_table hook.
 *
 * Returns the (new or re-referenced) GEM object, or an ERR_PTR.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* keep the dma_buf alive for the lifetime of the import */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		/*
		 * NOTE(review): if sgt is NULL (not an ERR_PTR),
		 * PTR_ERR(sgt) is 0 and this function returns
		 * ERR_PTR(0) == NULL, which callers checking only
		 * IS_ERR() would miss. Upstream later switched this to a
		 * plain IS_ERR() check -- confirm the contract callers in
		 * this tree expect.
		 */
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	/* remembered so the object's teardown can unmap/detach/put */
	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
498 | 498 | ||
499 | int drm_gem_prime_fd_to_handle(struct drm_device *dev, | 499 | int drm_gem_prime_fd_to_handle(struct drm_device *dev, |
500 | struct drm_file *file_priv, int prime_fd, uint32_t *handle) | 500 | struct drm_file *file_priv, int prime_fd, uint32_t *handle) |
501 | { | 501 | { |
502 | struct dma_buf *dma_buf; | 502 | struct dma_buf *dma_buf; |
503 | struct drm_gem_object *obj; | 503 | struct drm_gem_object *obj; |
504 | int ret; | 504 | int ret; |
505 | 505 | ||
506 | dma_buf = dma_buf_get(prime_fd); | 506 | dma_buf = dma_buf_get(prime_fd); |
507 | if (IS_ERR(dma_buf)) | 507 | if (IS_ERR(dma_buf)) |
508 | return PTR_ERR(dma_buf); | 508 | return PTR_ERR(dma_buf); |
509 | 509 | ||
510 | mutex_lock(&file_priv->prime.lock); | 510 | mutex_lock(&file_priv->prime.lock); |
511 | 511 | ||
512 | ret = drm_prime_lookup_buf_handle(&file_priv->prime, | 512 | ret = drm_prime_lookup_buf_handle(&file_priv->prime, |
513 | dma_buf, handle); | 513 | dma_buf, handle); |
514 | if (ret == 0) | 514 | if (ret == 0) |
515 | goto out_put; | 515 | goto out_put; |
516 | 516 | ||
517 | /* never seen this one, need to import */ | 517 | /* never seen this one, need to import */ |
518 | mutex_lock(&dev->object_name_lock); | 518 | mutex_lock(&dev->object_name_lock); |
519 | obj = dev->driver->gem_prime_import(dev, dma_buf); | 519 | obj = dev->driver->gem_prime_import(dev, dma_buf); |
520 | if (IS_ERR(obj)) { | 520 | if (IS_ERR(obj)) { |
521 | ret = PTR_ERR(obj); | 521 | ret = PTR_ERR(obj); |
522 | goto out_unlock; | 522 | goto out_unlock; |
523 | } | 523 | } |
524 | 524 | ||
525 | if (obj->dma_buf) { | 525 | if (obj->dma_buf) { |
526 | WARN_ON(obj->dma_buf != dma_buf); | 526 | WARN_ON(obj->dma_buf != dma_buf); |
527 | } else { | 527 | } else { |
528 | obj->dma_buf = dma_buf; | 528 | obj->dma_buf = dma_buf; |
529 | get_dma_buf(dma_buf); | 529 | get_dma_buf(dma_buf); |
530 | } | 530 | } |
531 | 531 | ||
532 | /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ | 532 | /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ |
533 | ret = drm_gem_handle_create_tail(file_priv, obj, handle); | 533 | ret = drm_gem_handle_create_tail(file_priv, obj, handle); |
534 | drm_gem_object_unreference_unlocked(obj); | 534 | drm_gem_object_unreference_unlocked(obj); |
535 | if (ret) | 535 | if (ret) |
536 | goto out_put; | 536 | goto out_put; |
537 | 537 | ||
538 | ret = drm_prime_add_buf_handle(&file_priv->prime, | 538 | ret = drm_prime_add_buf_handle(&file_priv->prime, |
539 | dma_buf, *handle); | 539 | dma_buf, *handle); |
540 | if (ret) | 540 | if (ret) |
541 | goto fail; | 541 | goto fail; |
542 | 542 | ||
543 | mutex_unlock(&file_priv->prime.lock); | 543 | mutex_unlock(&file_priv->prime.lock); |
544 | 544 | ||
545 | dma_buf_put(dma_buf); | 545 | dma_buf_put(dma_buf); |
546 | 546 | ||
547 | return 0; | 547 | return 0; |
548 | 548 | ||
549 | fail: | 549 | fail: |
550 | /* hmm, if driver attached, we are relying on the free-object path | 550 | /* hmm, if driver attached, we are relying on the free-object path |
551 | * to detach.. which seems ok.. | 551 | * to detach.. which seems ok.. |
552 | */ | 552 | */ |
553 | drm_gem_handle_delete(file_priv, *handle); | 553 | drm_gem_handle_delete(file_priv, *handle); |
554 | out_unlock: | 554 | out_unlock: |
555 | mutex_unlock(&dev->object_name_lock); | 555 | mutex_unlock(&dev->object_name_lock); |
556 | out_put: | 556 | out_put: |
557 | dma_buf_put(dma_buf); | 557 | dma_buf_put(dma_buf); |
558 | mutex_unlock(&file_priv->prime.lock); | 558 | mutex_unlock(&file_priv->prime.lock); |
559 | return ret; | 559 | return ret; |
560 | } | 560 | } |
561 | EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); | 561 | EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); |
562 | 562 | ||
563 | int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, | 563 | int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, |
564 | struct drm_file *file_priv) | 564 | struct drm_file *file_priv) |
565 | { | 565 | { |
566 | struct drm_prime_handle *args = data; | 566 | struct drm_prime_handle *args = data; |
567 | uint32_t flags; | 567 | uint32_t flags; |
568 | 568 | ||
569 | if (!drm_core_check_feature(dev, DRIVER_PRIME)) | 569 | if (!drm_core_check_feature(dev, DRIVER_PRIME)) |
570 | return -EINVAL; | 570 | return -EINVAL; |
571 | 571 | ||
572 | if (!dev->driver->prime_handle_to_fd) | 572 | if (!dev->driver->prime_handle_to_fd) |
573 | return -ENOSYS; | 573 | return -ENOSYS; |
574 | 574 | ||
575 | /* check flags are valid */ | 575 | /* check flags are valid */ |
576 | if (args->flags & ~DRM_CLOEXEC) | 576 | if (args->flags & ~(DRM_CLOEXEC|DRM_RDWR)) |
577 | return -EINVAL; | 577 | return -EINVAL; |
578 | 578 | ||
579 | /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */ | 579 | /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC |
580 | flags = args->flags & DRM_CLOEXEC; | 580 | and DRM_RDWR which is O_RDWR */ |
581 | flags = args->flags & (DRM_CLOEXEC|DRM_RDWR); | ||
581 | 582 | ||
582 | return dev->driver->prime_handle_to_fd(dev, file_priv, | 583 | return dev->driver->prime_handle_to_fd(dev, file_priv, |
583 | args->handle, flags, &args->fd); | 584 | args->handle, flags, &args->fd); |
584 | } | 585 | } |
585 | 586 | ||
586 | int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, | 587 | int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, |
587 | struct drm_file *file_priv) | 588 | struct drm_file *file_priv) |
588 | { | 589 | { |
589 | struct drm_prime_handle *args = data; | 590 | struct drm_prime_handle *args = data; |
590 | 591 | ||
591 | if (!drm_core_check_feature(dev, DRIVER_PRIME)) | 592 | if (!drm_core_check_feature(dev, DRIVER_PRIME)) |
592 | return -EINVAL; | 593 | return -EINVAL; |
593 | 594 | ||
594 | if (!dev->driver->prime_fd_to_handle) | 595 | if (!dev->driver->prime_fd_to_handle) |
595 | return -ENOSYS; | 596 | return -ENOSYS; |
596 | 597 | ||
597 | return dev->driver->prime_fd_to_handle(dev, file_priv, | 598 | return dev->driver->prime_fd_to_handle(dev, file_priv, |
598 | args->fd, &args->handle); | 599 | args->fd, &args->handle); |
599 | } | 600 | } |
600 | 601 | ||
601 | /* | 602 | /* |
602 | * drm_prime_pages_to_sg | 603 | * drm_prime_pages_to_sg |
603 | * | 604 | * |
604 | * this helper creates an sg table object from a set of pages | 605 | * this helper creates an sg table object from a set of pages |
605 | * the driver is responsible for mapping the pages into the | 606 | * the driver is responsible for mapping the pages into the |
606 | * importers address space | 607 | * importers address space |
607 | */ | 608 | */ |
608 | struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages) | 609 | struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages) |
609 | { | 610 | { |
610 | struct sg_table *sg = NULL; | 611 | struct sg_table *sg = NULL; |
611 | int ret; | 612 | int ret; |
612 | 613 | ||
613 | sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); | 614 | sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); |
614 | if (!sg) { | 615 | if (!sg) { |
615 | ret = -ENOMEM; | 616 | ret = -ENOMEM; |
616 | goto out; | 617 | goto out; |
617 | } | 618 | } |
618 | 619 | ||
619 | ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0, | 620 | ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0, |
620 | nr_pages << PAGE_SHIFT, GFP_KERNEL); | 621 | nr_pages << PAGE_SHIFT, GFP_KERNEL); |
621 | if (ret) | 622 | if (ret) |
622 | goto out; | 623 | goto out; |
623 | 624 | ||
624 | return sg; | 625 | return sg; |
625 | out: | 626 | out: |
626 | kfree(sg); | 627 | kfree(sg); |
627 | return ERR_PTR(ret); | 628 | return ERR_PTR(ret); |
628 | } | 629 | } |
629 | EXPORT_SYMBOL(drm_prime_pages_to_sg); | 630 | EXPORT_SYMBOL(drm_prime_pages_to_sg); |
630 | 631 | ||
631 | /* export an sg table into an array of pages and addresses | 632 | /* export an sg table into an array of pages and addresses |
632 | this is currently required by the TTM driver in order to do correct fault | 633 | this is currently required by the TTM driver in order to do correct fault |
633 | handling */ | 634 | handling */ |
634 | int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, | 635 | int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, |
635 | dma_addr_t *addrs, int max_pages) | 636 | dma_addr_t *addrs, int max_pages) |
636 | { | 637 | { |
637 | unsigned count; | 638 | unsigned count; |
638 | struct scatterlist *sg; | 639 | struct scatterlist *sg; |
639 | struct page *page; | 640 | struct page *page; |
640 | u32 len; | 641 | u32 len; |
641 | int pg_index; | 642 | int pg_index; |
642 | dma_addr_t addr; | 643 | dma_addr_t addr; |
643 | 644 | ||
644 | pg_index = 0; | 645 | pg_index = 0; |
645 | for_each_sg(sgt->sgl, sg, sgt->nents, count) { | 646 | for_each_sg(sgt->sgl, sg, sgt->nents, count) { |
646 | len = sg->length; | 647 | len = sg->length; |
647 | page = sg_page(sg); | 648 | page = sg_page(sg); |
648 | addr = sg_dma_address(sg); | 649 | addr = sg_dma_address(sg); |
649 | 650 | ||
650 | while (len > 0) { | 651 | while (len > 0) { |
651 | if (WARN_ON(pg_index >= max_pages)) | 652 | if (WARN_ON(pg_index >= max_pages)) |
652 | return -1; | 653 | return -1; |
653 | pages[pg_index] = page; | 654 | pages[pg_index] = page; |
654 | if (addrs) | 655 | if (addrs) |
655 | addrs[pg_index] = addr; | 656 | addrs[pg_index] = addr; |
656 | 657 | ||
657 | page++; | 658 | page++; |
658 | addr += PAGE_SIZE; | 659 | addr += PAGE_SIZE; |
659 | len -= PAGE_SIZE; | 660 | len -= PAGE_SIZE; |
660 | pg_index++; | 661 | pg_index++; |
661 | } | 662 | } |
662 | } | 663 | } |
663 | return 0; | 664 | return 0; |
664 | } | 665 | } |
665 | EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays); | 666 | EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays); |
666 | /* helper function to cleanup a GEM/prime object */ | 667 | /* helper function to cleanup a GEM/prime object */ |
667 | void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) | 668 | void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) |
668 | { | 669 | { |
669 | struct dma_buf_attachment *attach; | 670 | struct dma_buf_attachment *attach; |
670 | struct dma_buf *dma_buf; | 671 | struct dma_buf *dma_buf; |
671 | attach = obj->import_attach; | 672 | attach = obj->import_attach; |
672 | if (sg) | 673 | if (sg) |
673 | dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); | 674 | dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); |
674 | dma_buf = attach->dmabuf; | 675 | dma_buf = attach->dmabuf; |
675 | dma_buf_detach(attach->dmabuf, attach); | 676 | dma_buf_detach(attach->dmabuf, attach); |
676 | /* remove the reference */ | 677 | /* remove the reference */ |
677 | dma_buf_put(dma_buf); | 678 | dma_buf_put(dma_buf); |
678 | } | 679 | } |
679 | EXPORT_SYMBOL(drm_prime_gem_destroy); | 680 | EXPORT_SYMBOL(drm_prime_gem_destroy); |
680 | 681 | ||
681 | void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) | 682 | void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) |
682 | { | 683 | { |
683 | INIT_LIST_HEAD(&prime_fpriv->head); | 684 | INIT_LIST_HEAD(&prime_fpriv->head); |
684 | mutex_init(&prime_fpriv->lock); | 685 | mutex_init(&prime_fpriv->lock); |
685 | } | 686 | } |
686 | EXPORT_SYMBOL(drm_prime_init_file_private); | 687 | EXPORT_SYMBOL(drm_prime_init_file_private); |
687 | 688 | ||
688 | void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) | 689 | void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) |
689 | { | 690 | { |
690 | /* by now drm_gem_release should've made sure the list is empty */ | 691 | /* by now drm_gem_release should've made sure the list is empty */ |
691 | WARN_ON(!list_empty(&prime_fpriv->head)); | 692 | WARN_ON(!list_empty(&prime_fpriv->head)); |
692 | } | 693 | } |
693 | EXPORT_SYMBOL(drm_prime_destroy_file_private); | 694 | EXPORT_SYMBOL(drm_prime_destroy_file_private); |
694 | 695 |
include/uapi/drm/drm.h
1 | /** | 1 | /** |
2 | * \file drm.h | 2 | * \file drm.h |
3 | * Header for the Direct Rendering Manager | 3 | * Header for the Direct Rendering Manager |
4 | * | 4 | * |
5 | * \author Rickard E. (Rik) Faith <faith@valinux.com> | 5 | * \author Rickard E. (Rik) Faith <faith@valinux.com> |
6 | * | 6 | * |
7 | * \par Acknowledgments: | 7 | * \par Acknowledgments: |
8 | * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg. | 8 | * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | /* | 11 | /* |
12 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | 12 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
13 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | 13 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
14 | * All rights reserved. | 14 | * All rights reserved. |
15 | * | 15 | * |
16 | * Permission is hereby granted, free of charge, to any person obtaining a | 16 | * Permission is hereby granted, free of charge, to any person obtaining a |
17 | * copy of this software and associated documentation files (the "Software"), | 17 | * copy of this software and associated documentation files (the "Software"), |
18 | * to deal in the Software without restriction, including without limitation | 18 | * to deal in the Software without restriction, including without limitation |
19 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 19 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
20 | * and/or sell copies of the Software, and to permit persons to whom the | 20 | * and/or sell copies of the Software, and to permit persons to whom the |
21 | * Software is furnished to do so, subject to the following conditions: | 21 | * Software is furnished to do so, subject to the following conditions: |
22 | * | 22 | * |
23 | * The above copyright notice and this permission notice (including the next | 23 | * The above copyright notice and this permission notice (including the next |
24 | * paragraph) shall be included in all copies or substantial portions of the | 24 | * paragraph) shall be included in all copies or substantial portions of the |
25 | * Software. | 25 | * Software. |
26 | * | 26 | * |
27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
28 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 28 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
29 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 29 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
30 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | 30 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
31 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | 31 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
32 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | 32 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
33 | * OTHER DEALINGS IN THE SOFTWARE. | 33 | * OTHER DEALINGS IN THE SOFTWARE. |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #ifndef _DRM_H_ | 36 | #ifndef _DRM_H_ |
37 | #define _DRM_H_ | 37 | #define _DRM_H_ |
38 | 38 | ||
39 | #if defined(__KERNEL__) || defined(__linux__) | 39 | #if defined(__KERNEL__) || defined(__linux__) |
40 | 40 | ||
41 | #include <linux/types.h> | 41 | #include <linux/types.h> |
42 | #include <asm/ioctl.h> | 42 | #include <asm/ioctl.h> |
43 | typedef unsigned int drm_handle_t; | 43 | typedef unsigned int drm_handle_t; |
44 | 44 | ||
45 | #else /* One of the BSDs */ | 45 | #else /* One of the BSDs */ |
46 | 46 | ||
47 | #include <sys/ioccom.h> | 47 | #include <sys/ioccom.h> |
48 | #include <sys/types.h> | 48 | #include <sys/types.h> |
49 | typedef int8_t __s8; | 49 | typedef int8_t __s8; |
50 | typedef uint8_t __u8; | 50 | typedef uint8_t __u8; |
51 | typedef int16_t __s16; | 51 | typedef int16_t __s16; |
52 | typedef uint16_t __u16; | 52 | typedef uint16_t __u16; |
53 | typedef int32_t __s32; | 53 | typedef int32_t __s32; |
54 | typedef uint32_t __u32; | 54 | typedef uint32_t __u32; |
55 | typedef int64_t __s64; | 55 | typedef int64_t __s64; |
56 | typedef uint64_t __u64; | 56 | typedef uint64_t __u64; |
57 | typedef unsigned long drm_handle_t; | 57 | typedef unsigned long drm_handle_t; |
58 | 58 | ||
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | #define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ | 61 | #define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ |
62 | #define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ | 62 | #define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ |
63 | #define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ | 63 | #define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ |
64 | #define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */ | 64 | #define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */ |
65 | 65 | ||
66 | #define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */ | 66 | #define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */ |
67 | #define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */ | 67 | #define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */ |
68 | #define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD) | 68 | #define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD) |
69 | #define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) | 69 | #define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) |
70 | #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) | 70 | #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) |
71 | 71 | ||
72 | typedef unsigned int drm_context_t; | 72 | typedef unsigned int drm_context_t; |
73 | typedef unsigned int drm_drawable_t; | 73 | typedef unsigned int drm_drawable_t; |
74 | typedef unsigned int drm_magic_t; | 74 | typedef unsigned int drm_magic_t; |
75 | 75 | ||
76 | /** | 76 | /** |
77 | * Cliprect. | 77 | * Cliprect. |
78 | * | 78 | * |
79 | * \warning: If you change this structure, make sure you change | 79 | * \warning: If you change this structure, make sure you change |
80 | * XF86DRIClipRectRec in the server as well | 80 | * XF86DRIClipRectRec in the server as well |
81 | * | 81 | * |
82 | * \note KW: Actually it's illegal to change either for | 82 | * \note KW: Actually it's illegal to change either for |
83 | * backwards-compatibility reasons. | 83 | * backwards-compatibility reasons. |
84 | */ | 84 | */ |
85 | struct drm_clip_rect { | 85 | struct drm_clip_rect { |
86 | unsigned short x1; | 86 | unsigned short x1; |
87 | unsigned short y1; | 87 | unsigned short y1; |
88 | unsigned short x2; | 88 | unsigned short x2; |
89 | unsigned short y2; | 89 | unsigned short y2; |
90 | }; | 90 | }; |
91 | 91 | ||
92 | /** | 92 | /** |
93 | * Drawable information. | 93 | * Drawable information. |
94 | */ | 94 | */ |
95 | struct drm_drawable_info { | 95 | struct drm_drawable_info { |
96 | unsigned int num_rects; | 96 | unsigned int num_rects; |
97 | struct drm_clip_rect *rects; | 97 | struct drm_clip_rect *rects; |
98 | }; | 98 | }; |
99 | 99 | ||
100 | /** | 100 | /** |
101 | * Texture region, | 101 | * Texture region, |
102 | */ | 102 | */ |
103 | struct drm_tex_region { | 103 | struct drm_tex_region { |
104 | unsigned char next; | 104 | unsigned char next; |
105 | unsigned char prev; | 105 | unsigned char prev; |
106 | unsigned char in_use; | 106 | unsigned char in_use; |
107 | unsigned char padding; | 107 | unsigned char padding; |
108 | unsigned int age; | 108 | unsigned int age; |
109 | }; | 109 | }; |
110 | 110 | ||
111 | /** | 111 | /** |
112 | * Hardware lock. | 112 | * Hardware lock. |
113 | * | 113 | * |
114 | * The lock structure is a simple cache-line aligned integer. To avoid | 114 | * The lock structure is a simple cache-line aligned integer. To avoid |
115 | * processor bus contention on a multiprocessor system, there should not be any | 115 | * processor bus contention on a multiprocessor system, there should not be any |
116 | * other data stored in the same cache line. | 116 | * other data stored in the same cache line. |
117 | */ | 117 | */ |
118 | struct drm_hw_lock { | 118 | struct drm_hw_lock { |
119 | __volatile__ unsigned int lock; /**< lock variable */ | 119 | __volatile__ unsigned int lock; /**< lock variable */ |
120 | char padding[60]; /**< Pad to cache line */ | 120 | char padding[60]; /**< Pad to cache line */ |
121 | }; | 121 | }; |
122 | 122 | ||
123 | /** | 123 | /** |
124 | * DRM_IOCTL_VERSION ioctl argument type. | 124 | * DRM_IOCTL_VERSION ioctl argument type. |
125 | * | 125 | * |
126 | * \sa drmGetVersion(). | 126 | * \sa drmGetVersion(). |
127 | */ | 127 | */ |
128 | struct drm_version { | 128 | struct drm_version { |
129 | int version_major; /**< Major version */ | 129 | int version_major; /**< Major version */ |
130 | int version_minor; /**< Minor version */ | 130 | int version_minor; /**< Minor version */ |
131 | int version_patchlevel; /**< Patch level */ | 131 | int version_patchlevel; /**< Patch level */ |
132 | size_t name_len; /**< Length of name buffer */ | 132 | size_t name_len; /**< Length of name buffer */ |
133 | char __user *name; /**< Name of driver */ | 133 | char __user *name; /**< Name of driver */ |
134 | size_t date_len; /**< Length of date buffer */ | 134 | size_t date_len; /**< Length of date buffer */ |
135 | char __user *date; /**< User-space buffer to hold date */ | 135 | char __user *date; /**< User-space buffer to hold date */ |
136 | size_t desc_len; /**< Length of desc buffer */ | 136 | size_t desc_len; /**< Length of desc buffer */ |
137 | char __user *desc; /**< User-space buffer to hold desc */ | 137 | char __user *desc; /**< User-space buffer to hold desc */ |
138 | }; | 138 | }; |
139 | 139 | ||
140 | /** | 140 | /** |
141 | * DRM_IOCTL_GET_UNIQUE ioctl argument type. | 141 | * DRM_IOCTL_GET_UNIQUE ioctl argument type. |
142 | * | 142 | * |
143 | * \sa drmGetBusid() and drmSetBusId(). | 143 | * \sa drmGetBusid() and drmSetBusId(). |
144 | */ | 144 | */ |
145 | struct drm_unique { | 145 | struct drm_unique { |
146 | size_t unique_len; /**< Length of unique */ | 146 | size_t unique_len; /**< Length of unique */ |
147 | char __user *unique; /**< Unique name for driver instantiation */ | 147 | char __user *unique; /**< Unique name for driver instantiation */ |
148 | }; | 148 | }; |
149 | 149 | ||
150 | struct drm_list { | 150 | struct drm_list { |
151 | int count; /**< Length of user-space structures */ | 151 | int count; /**< Length of user-space structures */ |
152 | struct drm_version __user *version; | 152 | struct drm_version __user *version; |
153 | }; | 153 | }; |
154 | 154 | ||
155 | struct drm_block { | 155 | struct drm_block { |
156 | int unused; | 156 | int unused; |
157 | }; | 157 | }; |
158 | 158 | ||
159 | /** | 159 | /** |
160 | * DRM_IOCTL_CONTROL ioctl argument type. | 160 | * DRM_IOCTL_CONTROL ioctl argument type. |
161 | * | 161 | * |
162 | * \sa drmCtlInstHandler() and drmCtlUninstHandler(). | 162 | * \sa drmCtlInstHandler() and drmCtlUninstHandler(). |
163 | */ | 163 | */ |
164 | struct drm_control { | 164 | struct drm_control { |
165 | enum { | 165 | enum { |
166 | DRM_ADD_COMMAND, | 166 | DRM_ADD_COMMAND, |
167 | DRM_RM_COMMAND, | 167 | DRM_RM_COMMAND, |
168 | DRM_INST_HANDLER, | 168 | DRM_INST_HANDLER, |
169 | DRM_UNINST_HANDLER | 169 | DRM_UNINST_HANDLER |
170 | } func; | 170 | } func; |
171 | int irq; | 171 | int irq; |
172 | }; | 172 | }; |
173 | 173 | ||
174 | /** | 174 | /** |
175 | * Type of memory to map. | 175 | * Type of memory to map. |
176 | */ | 176 | */ |
177 | enum drm_map_type { | 177 | enum drm_map_type { |
178 | _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */ | 178 | _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */ |
179 | _DRM_REGISTERS = 1, /**< no caching, no core dump */ | 179 | _DRM_REGISTERS = 1, /**< no caching, no core dump */ |
180 | _DRM_SHM = 2, /**< shared, cached */ | 180 | _DRM_SHM = 2, /**< shared, cached */ |
181 | _DRM_AGP = 3, /**< AGP/GART */ | 181 | _DRM_AGP = 3, /**< AGP/GART */ |
182 | _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ | 182 | _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ |
183 | _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ | 183 | _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ |
184 | }; | 184 | }; |
185 | 185 | ||
186 | /** | 186 | /** |
187 | * Memory mapping flags. | 187 | * Memory mapping flags. |
188 | */ | 188 | */ |
189 | enum drm_map_flags { | 189 | enum drm_map_flags { |
190 | _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */ | 190 | _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */ |
191 | _DRM_READ_ONLY = 0x02, | 191 | _DRM_READ_ONLY = 0x02, |
192 | _DRM_LOCKED = 0x04, /**< shared, cached, locked */ | 192 | _DRM_LOCKED = 0x04, /**< shared, cached, locked */ |
193 | _DRM_KERNEL = 0x08, /**< kernel requires access */ | 193 | _DRM_KERNEL = 0x08, /**< kernel requires access */ |
194 | _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */ | 194 | _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */ |
195 | _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */ | 195 | _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */ |
196 | _DRM_REMOVABLE = 0x40, /**< Removable mapping */ | 196 | _DRM_REMOVABLE = 0x40, /**< Removable mapping */ |
197 | _DRM_DRIVER = 0x80 /**< Managed by driver */ | 197 | _DRM_DRIVER = 0x80 /**< Managed by driver */ |
198 | }; | 198 | }; |
199 | 199 | ||
200 | struct drm_ctx_priv_map { | 200 | struct drm_ctx_priv_map { |
201 | unsigned int ctx_id; /**< Context requesting private mapping */ | 201 | unsigned int ctx_id; /**< Context requesting private mapping */ |
202 | void *handle; /**< Handle of map */ | 202 | void *handle; /**< Handle of map */ |
203 | }; | 203 | }; |
204 | 204 | ||
205 | /** | 205 | /** |
206 | * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls | 206 | * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls |
207 | * argument type. | 207 | * argument type. |
208 | * | 208 | * |
209 | * \sa drmAddMap(). | 209 | * \sa drmAddMap(). |
210 | */ | 210 | */ |
211 | struct drm_map { | 211 | struct drm_map { |
212 | unsigned long offset; /**< Requested physical address (0 for SAREA)*/ | 212 | unsigned long offset; /**< Requested physical address (0 for SAREA)*/ |
213 | unsigned long size; /**< Requested physical size (bytes) */ | 213 | unsigned long size; /**< Requested physical size (bytes) */ |
214 | enum drm_map_type type; /**< Type of memory to map */ | 214 | enum drm_map_type type; /**< Type of memory to map */ |
215 | enum drm_map_flags flags; /**< Flags */ | 215 | enum drm_map_flags flags; /**< Flags */ |
216 | void *handle; /**< User-space: "Handle" to pass to mmap() */ | 216 | void *handle; /**< User-space: "Handle" to pass to mmap() */ |
217 | /**< Kernel-space: kernel-virtual address */ | 217 | /**< Kernel-space: kernel-virtual address */ |
218 | int mtrr; /**< MTRR slot used */ | 218 | int mtrr; /**< MTRR slot used */ |
219 | /* Private data */ | 219 | /* Private data */ |
220 | }; | 220 | }; |
221 | 221 | ||
222 | /** | 222 | /** |
223 | * DRM_IOCTL_GET_CLIENT ioctl argument type. | 223 | * DRM_IOCTL_GET_CLIENT ioctl argument type. |
224 | */ | 224 | */ |
225 | struct drm_client { | 225 | struct drm_client { |
226 | int idx; /**< Which client desired? */ | 226 | int idx; /**< Which client desired? */ |
227 | int auth; /**< Is client authenticated? */ | 227 | int auth; /**< Is client authenticated? */ |
228 | unsigned long pid; /**< Process ID */ | 228 | unsigned long pid; /**< Process ID */ |
229 | unsigned long uid; /**< User ID */ | 229 | unsigned long uid; /**< User ID */ |
230 | unsigned long magic; /**< Magic */ | 230 | unsigned long magic; /**< Magic */ |
231 | unsigned long iocs; /**< Ioctl count */ | 231 | unsigned long iocs; /**< Ioctl count */ |
232 | }; | 232 | }; |
233 | 233 | ||
234 | enum drm_stat_type { | 234 | enum drm_stat_type { |
235 | _DRM_STAT_LOCK, | 235 | _DRM_STAT_LOCK, |
236 | _DRM_STAT_OPENS, | 236 | _DRM_STAT_OPENS, |
237 | _DRM_STAT_CLOSES, | 237 | _DRM_STAT_CLOSES, |
238 | _DRM_STAT_IOCTLS, | 238 | _DRM_STAT_IOCTLS, |
239 | _DRM_STAT_LOCKS, | 239 | _DRM_STAT_LOCKS, |
240 | _DRM_STAT_UNLOCKS, | 240 | _DRM_STAT_UNLOCKS, |
241 | _DRM_STAT_VALUE, /**< Generic value */ | 241 | _DRM_STAT_VALUE, /**< Generic value */ |
242 | _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */ | 242 | _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */ |
243 | _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */ | 243 | _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */ |
244 | 244 | ||
245 | _DRM_STAT_IRQ, /**< IRQ */ | 245 | _DRM_STAT_IRQ, /**< IRQ */ |
246 | _DRM_STAT_PRIMARY, /**< Primary DMA bytes */ | 246 | _DRM_STAT_PRIMARY, /**< Primary DMA bytes */ |
247 | _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */ | 247 | _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */ |
248 | _DRM_STAT_DMA, /**< DMA */ | 248 | _DRM_STAT_DMA, /**< DMA */ |
249 | _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */ | 249 | _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */ |
250 | _DRM_STAT_MISSED /**< Missed DMA opportunity */ | 250 | _DRM_STAT_MISSED /**< Missed DMA opportunity */ |
251 | /* Add to the *END* of the list */ | 251 | /* Add to the *END* of the list */ |
252 | }; | 252 | }; |
253 | 253 | ||
254 | /** | 254 | /** |
255 | * DRM_IOCTL_GET_STATS ioctl argument type. | 255 | * DRM_IOCTL_GET_STATS ioctl argument type. |
256 | */ | 256 | */ |
257 | struct drm_stats { | 257 | struct drm_stats { |
258 | unsigned long count; | 258 | unsigned long count; |
259 | struct { | 259 | struct { |
260 | unsigned long value; | 260 | unsigned long value; |
261 | enum drm_stat_type type; | 261 | enum drm_stat_type type; |
262 | } data[15]; | 262 | } data[15]; |
263 | }; | 263 | }; |
264 | 264 | ||
265 | /** | 265 | /** |
266 | * Hardware locking flags. | 266 | * Hardware locking flags. |
267 | */ | 267 | */ |
268 | enum drm_lock_flags { | 268 | enum drm_lock_flags { |
269 | _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */ | 269 | _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */ |
270 | _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */ | 270 | _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */ |
271 | _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */ | 271 | _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */ |
272 | _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */ | 272 | _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */ |
273 | /* These *HALT* flags aren't supported yet | 273 | /* These *HALT* flags aren't supported yet |
274 | -- they will be used to support the | 274 | -- they will be used to support the |
275 | full-screen DGA-like mode. */ | 275 | full-screen DGA-like mode. */ |
276 | _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */ | 276 | _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */ |
277 | _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */ | 277 | _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */ |
278 | }; | 278 | }; |
279 | 279 | ||
280 | /** | 280 | /** |
281 | * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type. | 281 | * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type. |
282 | * | 282 | * |
283 | * \sa drmGetLock() and drmUnlock(). | 283 | * \sa drmGetLock() and drmUnlock(). |
284 | */ | 284 | */ |
285 | struct drm_lock { | 285 | struct drm_lock { |
286 | int context; | 286 | int context; |
287 | enum drm_lock_flags flags; | 287 | enum drm_lock_flags flags; |
288 | }; | 288 | }; |
289 | 289 | ||
290 | /** | 290 | /** |
291 | * DMA flags | 291 | * DMA flags |
292 | * | 292 | * |
293 | * \warning | 293 | * \warning |
294 | * These values \e must match xf86drm.h. | 294 | * These values \e must match xf86drm.h. |
295 | * | 295 | * |
296 | * \sa drm_dma. | 296 | * \sa drm_dma. |
297 | */ | 297 | */ |
298 | enum drm_dma_flags { | 298 | enum drm_dma_flags { |
299 | /* Flags for DMA buffer dispatch */ | 299 | /* Flags for DMA buffer dispatch */ |
300 | _DRM_DMA_BLOCK = 0x01, /**< | 300 | _DRM_DMA_BLOCK = 0x01, /**< |
301 | * Block until buffer dispatched. | 301 | * Block until buffer dispatched. |
302 | * | 302 | * |
303 | * \note The buffer may not yet have | 303 | * \note The buffer may not yet have |
304 | * been processed by the hardware -- | 304 | * been processed by the hardware -- |
305 | * getting a hardware lock with the | 305 | * getting a hardware lock with the |
306 | * hardware quiescent will ensure | 306 | * hardware quiescent will ensure |
307 | * that the buffer has been | 307 | * that the buffer has been |
308 | * processed. | 308 | * processed. |
309 | */ | 309 | */ |
310 | _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */ | 310 | _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */ |
311 | _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */ | 311 | _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */ |
312 | 312 | ||
313 | /* Flags for DMA buffer request */ | 313 | /* Flags for DMA buffer request */ |
314 | _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */ | 314 | _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */ |
315 | _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */ | 315 | _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */ |
316 | _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */ | 316 | _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */ |
317 | }; | 317 | }; |
318 | 318 | ||
319 | /** | 319 | /** |
320 | * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type. | 320 | * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type. |
321 | * | 321 | * |
322 | * \sa drmAddBufs(). | 322 | * \sa drmAddBufs(). |
323 | */ | 323 | */ |
324 | struct drm_buf_desc { | 324 | struct drm_buf_desc { |
325 | int count; /**< Number of buffers of this size */ | 325 | int count; /**< Number of buffers of this size */ |
326 | int size; /**< Size in bytes */ | 326 | int size; /**< Size in bytes */ |
327 | int low_mark; /**< Low water mark */ | 327 | int low_mark; /**< Low water mark */ |
328 | int high_mark; /**< High water mark */ | 328 | int high_mark; /**< High water mark */ |
329 | enum { | 329 | enum { |
330 | _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */ | 330 | _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */ |
331 | _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */ | 331 | _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */ |
332 | _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */ | 332 | _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */ |
333 | _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */ | 333 | _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */ |
334 | _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */ | 334 | _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */ |
335 | } flags; | 335 | } flags; |
336 | unsigned long agp_start; /**< | 336 | unsigned long agp_start; /**< |
337 | * Start address of where the AGP buffers are | 337 | * Start address of where the AGP buffers are |
338 | * in the AGP aperture | 338 | * in the AGP aperture |
339 | */ | 339 | */ |
340 | }; | 340 | }; |
341 | 341 | ||
342 | /** | 342 | /** |
343 | * DRM_IOCTL_INFO_BUFS ioctl argument type. | 343 | * DRM_IOCTL_INFO_BUFS ioctl argument type. |
344 | */ | 344 | */ |
345 | struct drm_buf_info { | 345 | struct drm_buf_info { |
346 | int count; /**< Entries in list */ | 346 | int count; /**< Entries in list */ |
347 | struct drm_buf_desc __user *list; | 347 | struct drm_buf_desc __user *list; |
348 | }; | 348 | }; |
349 | 349 | ||
350 | /** | 350 | /** |
351 | * DRM_IOCTL_FREE_BUFS ioctl argument type. | 351 | * DRM_IOCTL_FREE_BUFS ioctl argument type. |
352 | */ | 352 | */ |
353 | struct drm_buf_free { | 353 | struct drm_buf_free { |
354 | int count; | 354 | int count; |
355 | int __user *list; | 355 | int __user *list; |
356 | }; | 356 | }; |
357 | 357 | ||
358 | /** | 358 | /** |
359 | * Buffer information | 359 | * Buffer information |
360 | * | 360 | * |
361 | * \sa drm_buf_map. | 361 | * \sa drm_buf_map. |
362 | */ | 362 | */ |
363 | struct drm_buf_pub { | 363 | struct drm_buf_pub { |
364 | int idx; /**< Index into the master buffer list */ | 364 | int idx; /**< Index into the master buffer list */ |
365 | int total; /**< Buffer size */ | 365 | int total; /**< Buffer size */ |
366 | int used; /**< Amount of buffer in use (for DMA) */ | 366 | int used; /**< Amount of buffer in use (for DMA) */ |
367 | void __user *address; /**< Address of buffer */ | 367 | void __user *address; /**< Address of buffer */ |
368 | }; | 368 | }; |
369 | 369 | ||
370 | /** | 370 | /** |
371 | * DRM_IOCTL_MAP_BUFS ioctl argument type. | 371 | * DRM_IOCTL_MAP_BUFS ioctl argument type. |
372 | */ | 372 | */ |
373 | struct drm_buf_map { | 373 | struct drm_buf_map { |
374 | int count; /**< Length of the buffer list */ | 374 | int count; /**< Length of the buffer list */ |
375 | void __user *virtual; /**< Mmap'd area in user-virtual */ | 375 | void __user *virtual; /**< Mmap'd area in user-virtual */ |
376 | struct drm_buf_pub __user *list; /**< Buffer information */ | 376 | struct drm_buf_pub __user *list; /**< Buffer information */ |
377 | }; | 377 | }; |
378 | 378 | ||
379 | /** | 379 | /** |
380 | * DRM_IOCTL_DMA ioctl argument type. | 380 | * DRM_IOCTL_DMA ioctl argument type. |
381 | * | 381 | * |
382 | * Indices here refer to the offset into the buffer list in drm_buf_get. | 382 | * Indices here refer to the offset into the buffer list in drm_buf_get. |
383 | * | 383 | * |
384 | * \sa drmDMA(). | 384 | * \sa drmDMA(). |
385 | */ | 385 | */ |
386 | struct drm_dma { | 386 | struct drm_dma { |
387 | int context; /**< Context handle */ | 387 | int context; /**< Context handle */ |
388 | int send_count; /**< Number of buffers to send */ | 388 | int send_count; /**< Number of buffers to send */ |
389 | int __user *send_indices; /**< List of handles to buffers */ | 389 | int __user *send_indices; /**< List of handles to buffers */ |
390 | int __user *send_sizes; /**< Lengths of data to send */ | 390 | int __user *send_sizes; /**< Lengths of data to send */ |
391 | enum drm_dma_flags flags; /**< Flags */ | 391 | enum drm_dma_flags flags; /**< Flags */ |
392 | int request_count; /**< Number of buffers requested */ | 392 | int request_count; /**< Number of buffers requested */ |
393 | int request_size; /**< Desired size for buffers */ | 393 | int request_size; /**< Desired size for buffers */ |
394 | int __user *request_indices; /**< Buffer information */ | 394 | int __user *request_indices; /**< Buffer information */ |
395 | int __user *request_sizes; | 395 | int __user *request_sizes; |
396 | int granted_count; /**< Number of buffers granted */ | 396 | int granted_count; /**< Number of buffers granted */ |
397 | }; | 397 | }; |
398 | 398 | ||
399 | enum drm_ctx_flags { | 399 | enum drm_ctx_flags { |
400 | _DRM_CONTEXT_PRESERVED = 0x01, | 400 | _DRM_CONTEXT_PRESERVED = 0x01, |
401 | _DRM_CONTEXT_2DONLY = 0x02 | 401 | _DRM_CONTEXT_2DONLY = 0x02 |
402 | }; | 402 | }; |
403 | 403 | ||
404 | /** | 404 | /** |
405 | * DRM_IOCTL_ADD_CTX ioctl argument type. | 405 | * DRM_IOCTL_ADD_CTX ioctl argument type. |
406 | * | 406 | * |
407 | * \sa drmCreateContext() and drmDestroyContext(). | 407 | * \sa drmCreateContext() and drmDestroyContext(). |
408 | */ | 408 | */ |
409 | struct drm_ctx { | 409 | struct drm_ctx { |
410 | drm_context_t handle; | 410 | drm_context_t handle; |
411 | enum drm_ctx_flags flags; | 411 | enum drm_ctx_flags flags; |
412 | }; | 412 | }; |
413 | 413 | ||
414 | /** | 414 | /** |
415 | * DRM_IOCTL_RES_CTX ioctl argument type. | 415 | * DRM_IOCTL_RES_CTX ioctl argument type. |
416 | */ | 416 | */ |
417 | struct drm_ctx_res { | 417 | struct drm_ctx_res { |
418 | int count; | 418 | int count; |
419 | struct drm_ctx __user *contexts; | 419 | struct drm_ctx __user *contexts; |
420 | }; | 420 | }; |
421 | 421 | ||
422 | /** | 422 | /** |
423 | * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type. | 423 | * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type. |
424 | */ | 424 | */ |
425 | struct drm_draw { | 425 | struct drm_draw { |
426 | drm_drawable_t handle; | 426 | drm_drawable_t handle; |
427 | }; | 427 | }; |
428 | 428 | ||
429 | /** | 429 | /** |
430 | * DRM_IOCTL_UPDATE_DRAW ioctl argument type. | 430 | * DRM_IOCTL_UPDATE_DRAW ioctl argument type. |
431 | */ | 431 | */ |
432 | typedef enum { | 432 | typedef enum { |
433 | DRM_DRAWABLE_CLIPRECTS, | 433 | DRM_DRAWABLE_CLIPRECTS, |
434 | } drm_drawable_info_type_t; | 434 | } drm_drawable_info_type_t; |
435 | 435 | ||
436 | struct drm_update_draw { | 436 | struct drm_update_draw { |
437 | drm_drawable_t handle; | 437 | drm_drawable_t handle; |
438 | unsigned int type; | 438 | unsigned int type; |
439 | unsigned int num; | 439 | unsigned int num; |
440 | unsigned long long data; | 440 | unsigned long long data; |
441 | }; | 441 | }; |
442 | 442 | ||
443 | /** | 443 | /** |
444 | * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type. | 444 | * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type. |
445 | */ | 445 | */ |
446 | struct drm_auth { | 446 | struct drm_auth { |
447 | drm_magic_t magic; | 447 | drm_magic_t magic; |
448 | }; | 448 | }; |
449 | 449 | ||
450 | /** | 450 | /** |
451 | * DRM_IOCTL_IRQ_BUSID ioctl argument type. | 451 | * DRM_IOCTL_IRQ_BUSID ioctl argument type. |
452 | * | 452 | * |
453 | * \sa drmGetInterruptFromBusID(). | 453 | * \sa drmGetInterruptFromBusID(). |
454 | */ | 454 | */ |
455 | struct drm_irq_busid { | 455 | struct drm_irq_busid { |
456 | int irq; /**< IRQ number */ | 456 | int irq; /**< IRQ number */ |
457 | int busnum; /**< bus number */ | 457 | int busnum; /**< bus number */ |
458 | int devnum; /**< device number */ | 458 | int devnum; /**< device number */ |
459 | int funcnum; /**< function number */ | 459 | int funcnum; /**< function number */ |
460 | }; | 460 | }; |
461 | 461 | ||
462 | enum drm_vblank_seq_type { | 462 | enum drm_vblank_seq_type { |
463 | _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ | 463 | _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ |
464 | _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ | 464 | _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ |
465 | /* bits 1-6 are reserved for high crtcs */ | 465 | /* bits 1-6 are reserved for high crtcs */ |
466 | _DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e, | 466 | _DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e, |
467 | _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ | 467 | _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ |
468 | _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ | 468 | _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ |
469 | _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ | 469 | _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ |
470 | _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ | 470 | _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ |
471 | _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */ | 471 | _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */ |
472 | }; | 472 | }; |
473 | #define _DRM_VBLANK_HIGH_CRTC_SHIFT 1 | 473 | #define _DRM_VBLANK_HIGH_CRTC_SHIFT 1 |
474 | 474 | ||
475 | #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) | 475 | #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) |
476 | #define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ | 476 | #define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ |
477 | _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS) | 477 | _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS) |
478 | 478 | ||
479 | struct drm_wait_vblank_request { | 479 | struct drm_wait_vblank_request { |
480 | enum drm_vblank_seq_type type; | 480 | enum drm_vblank_seq_type type; |
481 | unsigned int sequence; | 481 | unsigned int sequence; |
482 | unsigned long signal; | 482 | unsigned long signal; |
483 | }; | 483 | }; |
484 | 484 | ||
485 | struct drm_wait_vblank_reply { | 485 | struct drm_wait_vblank_reply { |
486 | enum drm_vblank_seq_type type; | 486 | enum drm_vblank_seq_type type; |
487 | unsigned int sequence; | 487 | unsigned int sequence; |
488 | long tval_sec; | 488 | long tval_sec; |
489 | long tval_usec; | 489 | long tval_usec; |
490 | }; | 490 | }; |
491 | 491 | ||
492 | /** | 492 | /** |
493 | * DRM_IOCTL_WAIT_VBLANK ioctl argument type. | 493 | * DRM_IOCTL_WAIT_VBLANK ioctl argument type. |
494 | * | 494 | * |
495 | * \sa drmWaitVBlank(). | 495 | * \sa drmWaitVBlank(). |
496 | */ | 496 | */ |
497 | union drm_wait_vblank { | 497 | union drm_wait_vblank { |
498 | struct drm_wait_vblank_request request; | 498 | struct drm_wait_vblank_request request; |
499 | struct drm_wait_vblank_reply reply; | 499 | struct drm_wait_vblank_reply reply; |
500 | }; | 500 | }; |
501 | 501 | ||
502 | #define _DRM_PRE_MODESET 1 | 502 | #define _DRM_PRE_MODESET 1 |
503 | #define _DRM_POST_MODESET 2 | 503 | #define _DRM_POST_MODESET 2 |
504 | 504 | ||
505 | /** | 505 | /** |
506 | * DRM_IOCTL_MODESET_CTL ioctl argument type | 506 | * DRM_IOCTL_MODESET_CTL ioctl argument type |
507 | * | 507 | * |
508 | * \sa drmModesetCtl(). | 508 | * \sa drmModesetCtl(). |
509 | */ | 509 | */ |
510 | struct drm_modeset_ctl { | 510 | struct drm_modeset_ctl { |
511 | __u32 crtc; | 511 | __u32 crtc; |
512 | __u32 cmd; | 512 | __u32 cmd; |
513 | }; | 513 | }; |
514 | 514 | ||
515 | /** | 515 | /** |
516 | * DRM_IOCTL_AGP_ENABLE ioctl argument type. | 516 | * DRM_IOCTL_AGP_ENABLE ioctl argument type. |
517 | * | 517 | * |
518 | * \sa drmAgpEnable(). | 518 | * \sa drmAgpEnable(). |
519 | */ | 519 | */ |
520 | struct drm_agp_mode { | 520 | struct drm_agp_mode { |
521 | unsigned long mode; /**< AGP mode */ | 521 | unsigned long mode; /**< AGP mode */ |
522 | }; | 522 | }; |
523 | 523 | ||
524 | /** | 524 | /** |
525 | * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type. | 525 | * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type. |
526 | * | 526 | * |
527 | * \sa drmAgpAlloc() and drmAgpFree(). | 527 | * \sa drmAgpAlloc() and drmAgpFree(). |
528 | */ | 528 | */ |
529 | struct drm_agp_buffer { | 529 | struct drm_agp_buffer { |
530 | unsigned long size; /**< In bytes -- will round to page boundary */ | 530 | unsigned long size; /**< In bytes -- will round to page boundary */ |
531 | unsigned long handle; /**< Used for binding / unbinding */ | 531 | unsigned long handle; /**< Used for binding / unbinding */ |
532 | unsigned long type; /**< Type of memory to allocate */ | 532 | unsigned long type; /**< Type of memory to allocate */ |
533 | unsigned long physical; /**< Physical used by i810 */ | 533 | unsigned long physical; /**< Physical used by i810 */ |
534 | }; | 534 | }; |
535 | 535 | ||
536 | /** | 536 | /** |
537 | * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type. | 537 | * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type. |
538 | * | 538 | * |
539 | * \sa drmAgpBind() and drmAgpUnbind(). | 539 | * \sa drmAgpBind() and drmAgpUnbind(). |
540 | */ | 540 | */ |
541 | struct drm_agp_binding { | 541 | struct drm_agp_binding { |
542 | unsigned long handle; /**< From drm_agp_buffer */ | 542 | unsigned long handle; /**< From drm_agp_buffer */ |
543 | unsigned long offset; /**< In bytes -- will round to page boundary */ | 543 | unsigned long offset; /**< In bytes -- will round to page boundary */ |
544 | }; | 544 | }; |
545 | 545 | ||
546 | /** | 546 | /** |
547 | * DRM_IOCTL_AGP_INFO ioctl argument type. | 547 | * DRM_IOCTL_AGP_INFO ioctl argument type. |
548 | * | 548 | * |
549 | * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(), | 549 | * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(), |
550 | * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(), | 550 | * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(), |
551 | * drmAgpVendorId() and drmAgpDeviceId(). | 551 | * drmAgpVendorId() and drmAgpDeviceId(). |
552 | */ | 552 | */ |
553 | struct drm_agp_info { | 553 | struct drm_agp_info { |
554 | int agp_version_major; | 554 | int agp_version_major; |
555 | int agp_version_minor; | 555 | int agp_version_minor; |
556 | unsigned long mode; | 556 | unsigned long mode; |
557 | unsigned long aperture_base; /* physical address */ | 557 | unsigned long aperture_base; /* physical address */ |
558 | unsigned long aperture_size; /* bytes */ | 558 | unsigned long aperture_size; /* bytes */ |
559 | unsigned long memory_allowed; /* bytes */ | 559 | unsigned long memory_allowed; /* bytes */ |
560 | unsigned long memory_used; | 560 | unsigned long memory_used; |
561 | 561 | ||
562 | /* PCI information */ | 562 | /* PCI information */ |
563 | unsigned short id_vendor; | 563 | unsigned short id_vendor; |
564 | unsigned short id_device; | 564 | unsigned short id_device; |
565 | }; | 565 | }; |
566 | 566 | ||
567 | /** | 567 | /** |
568 | * DRM_IOCTL_SG_ALLOC ioctl argument type. | 568 | * DRM_IOCTL_SG_ALLOC ioctl argument type. |
569 | */ | 569 | */ |
570 | struct drm_scatter_gather { | 570 | struct drm_scatter_gather { |
571 | unsigned long size; /**< In bytes -- will round to page boundary */ | 571 | unsigned long size; /**< In bytes -- will round to page boundary */ |
572 | unsigned long handle; /**< Used for mapping / unmapping */ | 572 | unsigned long handle; /**< Used for mapping / unmapping */ |
573 | }; | 573 | }; |
574 | 574 | ||
575 | /** | 575 | /** |
576 | * DRM_IOCTL_SET_VERSION ioctl argument type. | 576 | * DRM_IOCTL_SET_VERSION ioctl argument type. |
577 | */ | 577 | */ |
578 | struct drm_set_version { | 578 | struct drm_set_version { |
579 | int drm_di_major; | 579 | int drm_di_major; |
580 | int drm_di_minor; | 580 | int drm_di_minor; |
581 | int drm_dd_major; | 581 | int drm_dd_major; |
582 | int drm_dd_minor; | 582 | int drm_dd_minor; |
583 | }; | 583 | }; |
584 | 584 | ||
585 | /** DRM_IOCTL_GEM_CLOSE ioctl argument type */ | 585 | /** DRM_IOCTL_GEM_CLOSE ioctl argument type */ |
586 | struct drm_gem_close { | 586 | struct drm_gem_close { |
587 | /** Handle of the object to be closed. */ | 587 | /** Handle of the object to be closed. */ |
588 | __u32 handle; | 588 | __u32 handle; |
589 | __u32 pad; | 589 | __u32 pad; |
590 | }; | 590 | }; |
591 | 591 | ||
592 | /** DRM_IOCTL_GEM_FLINK ioctl argument type */ | 592 | /** DRM_IOCTL_GEM_FLINK ioctl argument type */ |
593 | struct drm_gem_flink { | 593 | struct drm_gem_flink { |
594 | /** Handle for the object being named */ | 594 | /** Handle for the object being named */ |
595 | __u32 handle; | 595 | __u32 handle; |
596 | 596 | ||
597 | /** Returned global name */ | 597 | /** Returned global name */ |
598 | __u32 name; | 598 | __u32 name; |
599 | }; | 599 | }; |
600 | 600 | ||
601 | /** DRM_IOCTL_GEM_OPEN ioctl argument type */ | 601 | /** DRM_IOCTL_GEM_OPEN ioctl argument type */ |
602 | struct drm_gem_open { | 602 | struct drm_gem_open { |
603 | /** Name of object being opened */ | 603 | /** Name of object being opened */ |
604 | __u32 name; | 604 | __u32 name; |
605 | 605 | ||
606 | /** Returned handle for the object */ | 606 | /** Returned handle for the object */ |
607 | __u32 handle; | 607 | __u32 handle; |
608 | 608 | ||
609 | /** Returned size of the object */ | 609 | /** Returned size of the object */ |
610 | __u64 size; | 610 | __u64 size; |
611 | }; | 611 | }; |
612 | 612 | ||
613 | #define DRM_CAP_DUMB_BUFFER 0x1 | 613 | #define DRM_CAP_DUMB_BUFFER 0x1 |
614 | #define DRM_CAP_VBLANK_HIGH_CRTC 0x2 | 614 | #define DRM_CAP_VBLANK_HIGH_CRTC 0x2 |
615 | #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3 | 615 | #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3 |
616 | #define DRM_CAP_DUMB_PREFER_SHADOW 0x4 | 616 | #define DRM_CAP_DUMB_PREFER_SHADOW 0x4 |
617 | #define DRM_CAP_PRIME 0x5 | 617 | #define DRM_CAP_PRIME 0x5 |
618 | #define DRM_PRIME_CAP_IMPORT 0x1 | 618 | #define DRM_PRIME_CAP_IMPORT 0x1 |
619 | #define DRM_PRIME_CAP_EXPORT 0x2 | 619 | #define DRM_PRIME_CAP_EXPORT 0x2 |
620 | #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 | 620 | #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 |
621 | #define DRM_CAP_ASYNC_PAGE_FLIP 0x7 | 621 | #define DRM_CAP_ASYNC_PAGE_FLIP 0x7 |
622 | #define DRM_CAP_CURSOR_WIDTH 0x8 | 622 | #define DRM_CAP_CURSOR_WIDTH 0x8 |
623 | #define DRM_CAP_CURSOR_HEIGHT 0x9 | 623 | #define DRM_CAP_CURSOR_HEIGHT 0x9 |
624 | 624 | ||
625 | /** DRM_IOCTL_GET_CAP ioctl argument type */ | 625 | /** DRM_IOCTL_GET_CAP ioctl argument type */ |
626 | struct drm_get_cap { | 626 | struct drm_get_cap { |
627 | __u64 capability; | 627 | __u64 capability; |
628 | __u64 value; | 628 | __u64 value; |
629 | }; | 629 | }; |
630 | 630 | ||
631 | /** | 631 | /** |
632 | * DRM_CLIENT_CAP_STEREO_3D | 632 | * DRM_CLIENT_CAP_STEREO_3D |
633 | * | 633 | * |
634 | * if set to 1, the DRM core will expose the stereo 3D capabilities of the | 634 | * if set to 1, the DRM core will expose the stereo 3D capabilities of the |
635 | * monitor by advertising the supported 3D layouts in the flags of struct | 635 | * monitor by advertising the supported 3D layouts in the flags of struct |
636 | * drm_mode_modeinfo. | 636 | * drm_mode_modeinfo. |
637 | */ | 637 | */ |
638 | #define DRM_CLIENT_CAP_STEREO_3D 1 | 638 | #define DRM_CLIENT_CAP_STEREO_3D 1 |
639 | 639 | ||
640 | /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ | 640 | /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ |
641 | struct drm_set_client_cap { | 641 | struct drm_set_client_cap { |
642 | __u64 capability; | 642 | __u64 capability; |
643 | __u64 value; | 643 | __u64 value; |
644 | }; | 644 | }; |
645 | 645 | ||
646 | #define DRM_CLOEXEC O_CLOEXEC | 646 | #define DRM_CLOEXEC O_CLOEXEC |
647 | #define DRM_RDWR O_RDWR | ||
647 | struct drm_prime_handle { | 648 | struct drm_prime_handle { |
648 | __u32 handle; | 649 | __u32 handle; |
649 | 650 | ||
650 | /** Flags.. only applicable for handle->fd */ | 651 | /** Flags.. only applicable for handle->fd */ |
651 | __u32 flags; | 652 | __u32 flags; |
652 | 653 | ||
653 | /** Returned dmabuf file descriptor */ | 654 | /** Returned dmabuf file descriptor */ |
654 | __s32 fd; | 655 | __s32 fd; |
655 | }; | 656 | }; |
656 | 657 | ||
657 | #include <drm/drm_mode.h> | 658 | #include <drm/drm_mode.h> |
658 | 659 | ||
659 | #define DRM_IOCTL_BASE 'd' | 660 | #define DRM_IOCTL_BASE 'd' |
660 | #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) | 661 | #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) |
661 | #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) | 662 | #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) |
662 | #define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) | 663 | #define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) |
663 | #define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) | 664 | #define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) |
664 | 665 | ||
665 | #define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version) | 666 | #define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version) |
666 | #define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique) | 667 | #define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique) |
667 | #define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth) | 668 | #define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth) |
668 | #define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid) | 669 | #define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid) |
669 | #define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map) | 670 | #define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map) |
670 | #define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) | 671 | #define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) |
671 | #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) | 672 | #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) |
672 | #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) | 673 | #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) |
673 | #define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) | 674 | #define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) |
674 | #define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close) | 675 | #define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close) |
675 | #define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) | 676 | #define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) |
676 | #define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) | 677 | #define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) |
677 | #define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap) | 678 | #define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap) |
678 | #define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap) | 679 | #define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap) |
679 | 680 | ||
680 | #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) | 681 | #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) |
681 | #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) | 682 | #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) |
682 | #define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block) | 683 | #define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block) |
683 | #define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block) | 684 | #define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block) |
684 | #define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control) | 685 | #define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control) |
685 | #define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map) | 686 | #define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map) |
686 | #define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc) | 687 | #define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc) |
687 | #define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc) | 688 | #define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc) |
688 | #define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info) | 689 | #define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info) |
689 | #define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map) | 690 | #define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map) |
690 | #define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free) | 691 | #define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free) |
691 | 692 | ||
692 | #define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map) | 693 | #define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map) |
693 | 694 | ||
694 | #define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) | 695 | #define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) |
695 | #define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) | 696 | #define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) |
696 | 697 | ||
697 | #define DRM_IOCTL_SET_MASTER DRM_IO(0x1e) | 698 | #define DRM_IOCTL_SET_MASTER DRM_IO(0x1e) |
698 | #define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f) | 699 | #define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f) |
699 | 700 | ||
700 | #define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) | 701 | #define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) |
701 | #define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) | 702 | #define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) |
/* Legacy context-management ioctls (pre-KMS drivers). */
#define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA			DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)

/*
 * PRIME buffer sharing: convert between a driver-local GEM handle and a
 * dma-buf file descriptor.  Flags accepted by HANDLE_TO_FD are carried in
 * struct drm_prime_handle (presumably including DRM_RDWR per the commit
 * adding writable-mmap export -- the flag's definition is outside this
 * chunk; verify against the DRM_RDWR/DRM_CLOEXEC defines).
 */
#define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
#define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)

/* AGP aperture management (legacy). */
#define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
#define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
#define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, struct drm_agp_binding)

/* Scatter/gather memory management (legacy). */
#define DRM_IOCTL_SG_ALLOC		DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, struct drm_scatter_gather)

/* Block until a vertical-blank event (or queue an async event). */
#define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)

#define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)
732 | 733 | ||
/*
 * Kernel mode-setting (KMS) ioctls, numbered from 0xA0 upward so they do
 * not collide with the driver-specific range (0x40-0x99, see
 * DRM_COMMAND_BASE/DRM_COMMAND_END below).
 */
#define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC		DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC		DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR		DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA		DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA		DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_GETENCODER	DRM_IOWR(0xA6, struct drm_mode_get_encoder)
#define DRM_IOCTL_MODE_GETCONNECTOR	DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE	DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
#define DRM_IOCTL_MODE_DETACHMODE	DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */

/* KMS object properties and framebuffer management. */
#define DRM_IOCTL_MODE_GETPROPERTY	DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY	DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB	DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB		DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB		DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_RMFB		DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP	DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB		DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)

/* Dumb (unaccelerated, CPU-mappable) buffer objects and plane control. */
#define DRM_IOCTL_MODE_CREATE_DUMB	DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB		DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB	DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
#define DRM_IOCTL_MODE_GETPLANE		DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE		DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY	DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2		DRM_IOWR(0xBB, struct drm_mode_cursor2)
763 | 764 | ||
/**
 * Device specific ioctls should only be in their respective headers
 * The device specific ioctl range is from 0x40 to 0x99.
 * Generic IOCTLS restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE                0x40	/* first driver-private ioctl number */
#define DRM_COMMAND_END			0xA0	/* one past the last driver-private number */
774 | 775 | ||
/**
 * Header for events written back to userspace on the drm fd. The
 * type defines the type of event, the length specifies the total
 * length of the event (including the header), and user_data is
 * typically a 64 bit value passed with the ioctl that triggered the
 * event. A read on the drm fd will always only return complete
 * events, that is, if for example the read buffer is 100 bytes, and
 * there are two 64 byte events pending, only one will be returned.
 *
 * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
 * up are chipset specific.
 */
struct drm_event {
	__u32 type;	/* one of the DRM_EVENT_* codes below (or chipset-specific) */
	__u32 length;	/* total size of this event in bytes, header included */
};

/* Generic event type codes carried in drm_event.type. */
#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02
794 | 795 | ||
/*
 * Payload for DRM_EVENT_VBLANK and DRM_EVENT_FLIP_COMPLETE events read
 * from the drm fd.  Layout is kernel/userspace ABI -- do not reorder.
 */
struct drm_event_vblank {
	struct drm_event base;	/* common header: type + total length */
	__u64 user_data;	/* value supplied with the triggering ioctl */
	__u32 tv_sec;		/* presumably the event timestamp (seconds) -- verify clock source */
	__u32 tv_usec;		/* presumably the event timestamp (microseconds) */
	__u32 sequence;		/* vblank sequence counter */
	__u32 reserved;		/* padding; keeps 64-bit alignment -- TODO confirm */
};
803 | 804 | ||
/*
 * typedef area: legacy drm_*_t typedef aliases kept for old userspace
 * source compatibility.  Hidden from kernel builds (the kernel uses the
 * struct/enum tags directly).
 */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;

typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif
848 | 849 | ||
849 | #endif | 850 | #endif |
850 | 851 |