Commit 89177644a7b6306e6084a89eab7e290f4bfef397
Committed by
Dave Airlie
1 parent
6504d0d990
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
drm: add prime helpers
Instead of reimplementing all of the dma_buf functionality in every driver, create helpers drm_prime_import and drm_prime_export that implement them in terms of new, lower-level hook functions: gem_prime_pin: callback when a buffer is created, used to pin buffers into GTT gem_prime_get_sg_table: convert a drm_gem_object to an sg_table for export gem_prime_import_sg_table: convert an sg_table into a drm_gem_object gem_prime_vmap, gem_prime_vunmap: map and unmap an object These hooks are optional; drivers can opt in by using drm_gem_prime_import and drm_gem_prime_export as the .gem_prime_import and .gem_prime_export fields of struct drm_driver. v2: - Drop .begin_cpu_access. None of the drivers this code replaces implemented it. Having it here was a leftover from when I was trying to include i915 in this rework. - Use mutex_lock instead of mutex_lock_interruptible, as these three drivers did. This patch series shouldn't change that behavior. - Rename helpers to gem_prime_get_sg_table and gem_prime_import_sg_table. Rename struct sg_table* variables to 'sgt' for clarity. - Update drm.tmpl for these new hooks. v3: - Pass the vaddr down to the driver. This lets drivers that just call vunmap on the pointer avoid having to store the pointer in their GEM private structures. - Move documentation into a /** DOC */ comment in drm_prime.c and include it in drm.tmpl with a !P line. I tried to use !F lines to include documentation of the individual functions from drmP.h, but the docproc / kernel-doc scripts barf on that file, so hopefully this is good enough for now. - apply refcount fix from commit be8a42ae60addd8b6092535c11b42d099d6470ec ("drm/prime: drop reference on imported dma-buf come from gem") Signed-off-by: Aaron Plattner <aplattner@nvidia.com> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Cc: David Airlie <airlied@linux.ie> Signed-off-by: Dave Airlie <airlied@redhat.com>
Showing 3 changed files with 201 additions and 1 deletion Side-by-side Diff
Documentation/DocBook/drm.tmpl
... | ... | @@ -743,6 +743,10 @@ |
743 | 743 | These two operations are mandatory for GEM drivers that support DRM |
744 | 744 | PRIME. |
745 | 745 | </para> |
746 | + <sect4> | |
747 | + <title>DRM PRIME Helper Functions Reference</title> | |
748 | +!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers | |
749 | + </sect4> | |
746 | 750 | </sect3> |
747 | 751 | <sect3 id="drm-gem-objects-mapping"> |
748 | 752 | <title>GEM Objects Mapping</title> |
drivers/gpu/drm/drm_prime.c
... | ... | @@ -53,7 +53,8 @@ |
53 | 53 | * Self-importing: if userspace is using PRIME as a replacement for flink |
54 | 54 | * then it will get a fd->handle request for a GEM object that it created. |
55 | 55 | * Drivers should detect this situation and return back the gem object |
56 | - * from the dma-buf private. | |
56 | + * from the dma-buf private. Prime will do this automatically for drivers that | |
57 | + * use the drm_gem_prime_{import,export} helpers. | |
57 | 58 | */ |
58 | 59 | |
59 | 60 | struct drm_prime_member { |
... | ... | @@ -62,6 +63,137 @@ |
62 | 63 | uint32_t handle; |
63 | 64 | }; |
64 | 65 | |
66 | +static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, | |
67 | + enum dma_data_direction dir) | |
68 | +{ | |
69 | + struct drm_gem_object *obj = attach->dmabuf->priv; | |
70 | + struct sg_table *sgt; | |
71 | + | |
72 | + mutex_lock(&obj->dev->struct_mutex); | |
73 | + | |
74 | + sgt = obj->dev->driver->gem_prime_get_sg_table(obj); | |
75 | + | |
76 | + if (!IS_ERR_OR_NULL(sgt)) | |
77 | + dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir); | |
78 | + | |
79 | + mutex_unlock(&obj->dev->struct_mutex); | |
80 | + return sgt; | |
81 | +} | |
82 | + | |
83 | +static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, | |
84 | + struct sg_table *sgt, enum dma_data_direction dir) | |
85 | +{ | |
86 | + dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); | |
87 | + sg_free_table(sgt); | |
88 | + kfree(sgt); | |
89 | +} | |
90 | + | |
91 | +static void drm_gem_dmabuf_release(struct dma_buf *dma_buf) | |
92 | +{ | |
93 | + struct drm_gem_object *obj = dma_buf->priv; | |
94 | + | |
95 | + if (obj->export_dma_buf == dma_buf) { | |
96 | + /* drop the reference that the export fd holds */ | 
97 | + obj->export_dma_buf = NULL; | |
98 | + drm_gem_object_unreference_unlocked(obj); | |
99 | + } | |
100 | +} | |
101 | + | |
102 | +static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) | |
103 | +{ | |
104 | + struct drm_gem_object *obj = dma_buf->priv; | |
105 | + struct drm_device *dev = obj->dev; | |
106 | + | |
107 | + return dev->driver->gem_prime_vmap(obj); | |
108 | +} | |
109 | + | |
110 | +static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) | |
111 | +{ | |
112 | + struct drm_gem_object *obj = dma_buf->priv; | |
113 | + struct drm_device *dev = obj->dev; | |
114 | + | |
115 | + dev->driver->gem_prime_vunmap(obj, vaddr); | |
116 | +} | |
117 | + | |
118 | +static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, | |
119 | + unsigned long page_num) | |
120 | +{ | |
121 | + return NULL; | |
122 | +} | |
123 | + | |
124 | +static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, | |
125 | + unsigned long page_num, void *addr) | |
126 | +{ | |
127 | + | |
128 | +} | |
129 | +static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, | |
130 | + unsigned long page_num) | |
131 | +{ | |
132 | + return NULL; | |
133 | +} | |
134 | + | |
135 | +static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, | |
136 | + unsigned long page_num, void *addr) | |
137 | +{ | |
138 | + | |
139 | +} | |
140 | + | |
141 | +static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, | |
142 | + struct vm_area_struct *vma) | |
143 | +{ | |
144 | + return -EINVAL; | |
145 | +} | |
146 | + | |
147 | +static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { | |
148 | + .map_dma_buf = drm_gem_map_dma_buf, | |
149 | + .unmap_dma_buf = drm_gem_unmap_dma_buf, | |
150 | + .release = drm_gem_dmabuf_release, | |
151 | + .kmap = drm_gem_dmabuf_kmap, | |
152 | + .kmap_atomic = drm_gem_dmabuf_kmap_atomic, | |
153 | + .kunmap = drm_gem_dmabuf_kunmap, | |
154 | + .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic, | |
155 | + .mmap = drm_gem_dmabuf_mmap, | |
156 | + .vmap = drm_gem_dmabuf_vmap, | |
157 | + .vunmap = drm_gem_dmabuf_vunmap, | |
158 | +}; | |
159 | + | |
160 | +/** | |
161 | + * DOC: PRIME Helpers | |
162 | + * | |
163 | + * Drivers can implement @gem_prime_export and @gem_prime_import in terms of | |
164 | + * simpler APIs by using the helper functions @drm_gem_prime_export and | |
165 | + * @drm_gem_prime_import. These functions implement dma-buf support in terms of | |
166 | + * five lower-level driver callbacks: | |
167 | + * | |
168 | + * Export callbacks: | |
169 | + * | |
170 | + * - @gem_prime_pin (optional): prepare a GEM object for exporting | |
171 | + * | |
172 | + * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages | |
173 | + * | |
174 | + * - @gem_prime_vmap: vmap a buffer exported by your driver | |
175 | + * | |
176 | + * - @gem_prime_vunmap: vunmap a buffer exported by your driver | |
177 | + * | |
178 | + * Import callback: | |
179 | + * | |
180 | + * - @gem_prime_import_sg_table (import): produce a GEM object from another | |
181 | + * driver's scatter/gather table | |
182 | + */ | |
183 | + | |
184 | +struct dma_buf *drm_gem_prime_export(struct drm_device *dev, | |
185 | + struct drm_gem_object *obj, int flags) | |
186 | +{ | |
187 | + if (dev->driver->gem_prime_pin) { | |
188 | + int ret = dev->driver->gem_prime_pin(obj); | |
189 | + if (ret) | |
190 | + return ERR_PTR(ret); | |
191 | + } | |
192 | + return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, | |
193 | + 0600); | |
194 | +} | |
195 | +EXPORT_SYMBOL(drm_gem_prime_export); | |
196 | + | |
65 | 197 | int drm_gem_prime_handle_to_fd(struct drm_device *dev, |
66 | 198 | struct drm_file *file_priv, uint32_t handle, uint32_t flags, |
67 | 199 | int *prime_fd) |
... | ... | @@ -116,6 +248,58 @@ |
116 | 248 | return 0; |
117 | 249 | } |
118 | 250 | EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); |
251 | + | |
252 | +struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, | |
253 | + struct dma_buf *dma_buf) | |
254 | +{ | |
255 | + struct dma_buf_attachment *attach; | |
256 | + struct sg_table *sgt; | |
257 | + struct drm_gem_object *obj; | |
258 | + int ret; | |
259 | + | |
260 | + if (!dev->driver->gem_prime_import_sg_table) | |
261 | + return ERR_PTR(-EINVAL); | |
262 | + | |
263 | + if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) { | |
264 | + obj = dma_buf->priv; | |
265 | + if (obj->dev == dev) { | |
266 | + /* | |
267 | + * Importing dmabuf exported from our own gem increases | 
268 | + * refcount on gem itself instead of f_count of dmabuf. | |
269 | + */ | |
270 | + drm_gem_object_reference(obj); | |
271 | + dma_buf_put(dma_buf); | |
272 | + return obj; | |
273 | + } | |
274 | + } | |
275 | + | |
276 | + attach = dma_buf_attach(dma_buf, dev->dev); | |
277 | + if (IS_ERR(attach)) | |
278 | + return ERR_PTR(PTR_ERR(attach)); | |
279 | + | |
280 | + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); | |
281 | + if (IS_ERR_OR_NULL(sgt)) { | |
282 | + ret = PTR_ERR(sgt); | |
283 | + goto fail_detach; | |
284 | + } | |
285 | + | |
286 | + obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt); | |
287 | + if (IS_ERR(obj)) { | |
288 | + ret = PTR_ERR(obj); | |
289 | + goto fail_unmap; | |
290 | + } | |
291 | + | |
292 | + obj->import_attach = attach; | |
293 | + | |
294 | + return obj; | |
295 | + | |
296 | +fail_unmap: | |
297 | + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); | |
298 | +fail_detach: | |
299 | + dma_buf_detach(dma_buf, attach); | |
300 | + return ERR_PTR(ret); | |
301 | +} | |
302 | +EXPORT_SYMBOL(drm_gem_prime_import); | |
119 | 303 | |
120 | 304 | int drm_gem_prime_fd_to_handle(struct drm_device *dev, |
121 | 305 | struct drm_file *file_priv, int prime_fd, uint32_t *handle) |
include/drm/drmP.h
... | ... | @@ -930,6 +930,14 @@ |
930 | 930 | /* import dmabuf -> GEM */ |
931 | 931 | struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, |
932 | 932 | struct dma_buf *dma_buf); |
933 | + /* low-level interface used by drm_gem_prime_{import,export} */ | |
934 | + int (*gem_prime_pin)(struct drm_gem_object *obj); | |
935 | + struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj); | |
936 | + struct drm_gem_object *(*gem_prime_import_sg_table)( | |
937 | + struct drm_device *dev, size_t size, | |
938 | + struct sg_table *sgt); | |
939 | + void *(*gem_prime_vmap)(struct drm_gem_object *obj); | |
940 | + void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr); | |
933 | 941 | |
934 | 942 | /* vga arb irq handler */ |
935 | 943 | void (*vgaarb_irq)(struct drm_device *dev, bool state); |
936 | 944 | |
... | ... | @@ -1562,9 +1570,13 @@ |
1562 | 1570 | extern int drm_gem_name_info(struct seq_file *m, void *data); |
1563 | 1571 | |
1564 | 1572 | |
1573 | +extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev, | |
1574 | + struct drm_gem_object *obj, int flags); | |
1565 | 1575 | extern int drm_gem_prime_handle_to_fd(struct drm_device *dev, |
1566 | 1576 | struct drm_file *file_priv, uint32_t handle, uint32_t flags, |
1567 | 1577 | int *prime_fd); |
1578 | +extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, | |
1579 | + struct dma_buf *dma_buf); | |
1568 | 1580 | extern int drm_gem_prime_fd_to_handle(struct drm_device *dev, |
1569 | 1581 | struct drm_file *file_priv, int prime_fd, uint32_t *handle); |
1570 | 1582 |