Commit ed1da130a6ebe7e7c8485668758ca3f96c413672
Committed by
Jacob Stiffler
1 parent
13bc683e42
Exists in
smarc-ti-linux-3.14.y
and in
1 other branch
media: vb2: verify data_offset only if nonzero bytesused
__verify_length would fail if user space filled in the data_offset field while leaving bytesused as zero. Correct this: when comparing data_offset against bytesused, bypass the check if the bytesused field is set to zero. Change-Id: I4c63bc03f6d455ce00a56d63df08c624579bc831 Signed-off-by: Nikhil Devshatwar <nikhil.nd@ti.com>
Showing 1 changed file with 3 additions and 6 deletions Inline Diff
drivers/media/v4l2-core/videobuf2-core.c
1 | /* | 1 | /* |
2 | * videobuf2-core.c - V4L2 driver helper framework | 2 | * videobuf2-core.c - V4L2 driver helper framework |
3 | * | 3 | * |
4 | * Copyright (C) 2010 Samsung Electronics | 4 | * Copyright (C) 2010 Samsung Electronics |
5 | * | 5 | * |
6 | * Author: Pawel Osciak <pawel@osciak.com> | 6 | * Author: Pawel Osciak <pawel@osciak.com> |
7 | * Marek Szyprowski <m.szyprowski@samsung.com> | 7 | * Marek Szyprowski <m.szyprowski@samsung.com> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License as published by | 10 | * it under the terms of the GNU General Public License as published by |
11 | * the Free Software Foundation. | 11 | * the Free Software Foundation. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/poll.h> | 18 | #include <linux/poll.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | 21 | ||
22 | #include <media/v4l2-dev.h> | 22 | #include <media/v4l2-dev.h> |
23 | #include <media/v4l2-fh.h> | 23 | #include <media/v4l2-fh.h> |
24 | #include <media/v4l2-event.h> | 24 | #include <media/v4l2-event.h> |
25 | #include <media/videobuf2-core.h> | 25 | #include <media/videobuf2-core.h> |
26 | 26 | ||
27 | static int debug; | 27 | static int debug; |
28 | module_param(debug, int, 0644); | 28 | module_param(debug, int, 0644); |
29 | 29 | ||
30 | #define dprintk(level, fmt, arg...) \ | 30 | #define dprintk(level, fmt, arg...) \ |
31 | do { \ | 31 | do { \ |
32 | if (debug >= level) \ | 32 | if (debug >= level) \ |
33 | printk(KERN_DEBUG "vb2: " fmt, ## arg); \ | 33 | printk(KERN_DEBUG "vb2: " fmt, ## arg); \ |
34 | } while (0) | 34 | } while (0) |
35 | 35 | ||
36 | #define call_memop(q, op, args...) \ | 36 | #define call_memop(q, op, args...) \ |
37 | (((q)->mem_ops->op) ? \ | 37 | (((q)->mem_ops->op) ? \ |
38 | ((q)->mem_ops->op(args)) : 0) | 38 | ((q)->mem_ops->op(args)) : 0) |
39 | 39 | ||
40 | #define call_qop(q, op, args...) \ | 40 | #define call_qop(q, op, args...) \ |
41 | (((q)->ops->op) ? ((q)->ops->op(args)) : 0) | 41 | (((q)->ops->op) ? ((q)->ops->op(args)) : 0) |
42 | 42 | ||
43 | /* Flags that are set by the vb2 core */ | 43 | /* Flags that are set by the vb2 core */ |
44 | #define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \ | 44 | #define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \ |
45 | V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \ | 45 | V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \ |
46 | V4L2_BUF_FLAG_PREPARED | \ | 46 | V4L2_BUF_FLAG_PREPARED | \ |
47 | V4L2_BUF_FLAG_TIMESTAMP_MASK) | 47 | V4L2_BUF_FLAG_TIMESTAMP_MASK) |
48 | /* Output buffer flags that should be passed on to the driver */ | 48 | /* Output buffer flags that should be passed on to the driver */ |
49 | #define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \ | 49 | #define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \ |
50 | V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE) | 50 | V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE) |
51 | 51 | ||
52 | /** | 52 | /** |
53 | * __vb2_buf_mem_alloc() - allocate video memory for the given buffer | 53 | * __vb2_buf_mem_alloc() - allocate video memory for the given buffer |
54 | */ | 54 | */ |
55 | static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) | 55 | static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) |
56 | { | 56 | { |
57 | struct vb2_queue *q = vb->vb2_queue; | 57 | struct vb2_queue *q = vb->vb2_queue; |
58 | void *mem_priv; | 58 | void *mem_priv; |
59 | int plane; | 59 | int plane; |
60 | 60 | ||
61 | /* | 61 | /* |
62 | * Allocate memory for all planes in this buffer | 62 | * Allocate memory for all planes in this buffer |
63 | * NOTE: mmapped areas should be page aligned | 63 | * NOTE: mmapped areas should be page aligned |
64 | */ | 64 | */ |
65 | for (plane = 0; plane < vb->num_planes; ++plane) { | 65 | for (plane = 0; plane < vb->num_planes; ++plane) { |
66 | unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]); | 66 | unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]); |
67 | 67 | ||
68 | mem_priv = call_memop(q, alloc, q->alloc_ctx[plane], | 68 | mem_priv = call_memop(q, alloc, q->alloc_ctx[plane], |
69 | size, q->gfp_flags); | 69 | size, q->gfp_flags); |
70 | if (IS_ERR_OR_NULL(mem_priv)) | 70 | if (IS_ERR_OR_NULL(mem_priv)) |
71 | goto free; | 71 | goto free; |
72 | 72 | ||
73 | /* Associate allocator private data with this plane */ | 73 | /* Associate allocator private data with this plane */ |
74 | vb->planes[plane].mem_priv = mem_priv; | 74 | vb->planes[plane].mem_priv = mem_priv; |
75 | vb->v4l2_planes[plane].length = q->plane_sizes[plane]; | 75 | vb->v4l2_planes[plane].length = q->plane_sizes[plane]; |
76 | } | 76 | } |
77 | 77 | ||
78 | return 0; | 78 | return 0; |
79 | free: | 79 | free: |
80 | /* Free already allocated memory if one of the allocations failed */ | 80 | /* Free already allocated memory if one of the allocations failed */ |
81 | for (; plane > 0; --plane) { | 81 | for (; plane > 0; --plane) { |
82 | call_memop(q, put, vb->planes[plane - 1].mem_priv); | 82 | call_memop(q, put, vb->planes[plane - 1].mem_priv); |
83 | vb->planes[plane - 1].mem_priv = NULL; | 83 | vb->planes[plane - 1].mem_priv = NULL; |
84 | } | 84 | } |
85 | 85 | ||
86 | return -ENOMEM; | 86 | return -ENOMEM; |
87 | } | 87 | } |
88 | 88 | ||
89 | /** | 89 | /** |
90 | * __vb2_buf_mem_free() - free memory of the given buffer | 90 | * __vb2_buf_mem_free() - free memory of the given buffer |
91 | */ | 91 | */ |
92 | static void __vb2_buf_mem_free(struct vb2_buffer *vb) | 92 | static void __vb2_buf_mem_free(struct vb2_buffer *vb) |
93 | { | 93 | { |
94 | struct vb2_queue *q = vb->vb2_queue; | 94 | struct vb2_queue *q = vb->vb2_queue; |
95 | unsigned int plane; | 95 | unsigned int plane; |
96 | 96 | ||
97 | for (plane = 0; plane < vb->num_planes; ++plane) { | 97 | for (plane = 0; plane < vb->num_planes; ++plane) { |
98 | call_memop(q, put, vb->planes[plane].mem_priv); | 98 | call_memop(q, put, vb->planes[plane].mem_priv); |
99 | vb->planes[plane].mem_priv = NULL; | 99 | vb->planes[plane].mem_priv = NULL; |
100 | dprintk(3, "Freed plane %d of buffer %d\n", plane, | 100 | dprintk(3, "Freed plane %d of buffer %d\n", plane, |
101 | vb->v4l2_buf.index); | 101 | vb->v4l2_buf.index); |
102 | } | 102 | } |
103 | } | 103 | } |
104 | 104 | ||
105 | /** | 105 | /** |
106 | * __vb2_buf_userptr_put() - release userspace memory associated with | 106 | * __vb2_buf_userptr_put() - release userspace memory associated with |
107 | * a USERPTR buffer | 107 | * a USERPTR buffer |
108 | */ | 108 | */ |
109 | static void __vb2_buf_userptr_put(struct vb2_buffer *vb) | 109 | static void __vb2_buf_userptr_put(struct vb2_buffer *vb) |
110 | { | 110 | { |
111 | struct vb2_queue *q = vb->vb2_queue; | 111 | struct vb2_queue *q = vb->vb2_queue; |
112 | unsigned int plane; | 112 | unsigned int plane; |
113 | 113 | ||
114 | for (plane = 0; plane < vb->num_planes; ++plane) { | 114 | for (plane = 0; plane < vb->num_planes; ++plane) { |
115 | if (vb->planes[plane].mem_priv) | 115 | if (vb->planes[plane].mem_priv) |
116 | call_memop(q, put_userptr, vb->planes[plane].mem_priv); | 116 | call_memop(q, put_userptr, vb->planes[plane].mem_priv); |
117 | vb->planes[plane].mem_priv = NULL; | 117 | vb->planes[plane].mem_priv = NULL; |
118 | } | 118 | } |
119 | } | 119 | } |
120 | 120 | ||
121 | /** | 121 | /** |
122 | * __vb2_plane_dmabuf_put() - release memory associated with | 122 | * __vb2_plane_dmabuf_put() - release memory associated with |
123 | * a DMABUF shared plane | 123 | * a DMABUF shared plane |
124 | */ | 124 | */ |
125 | static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p) | 125 | static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p) |
126 | { | 126 | { |
127 | if (!p->mem_priv) | 127 | if (!p->mem_priv) |
128 | return; | 128 | return; |
129 | 129 | ||
130 | if (p->dbuf_mapped) | 130 | if (p->dbuf_mapped) |
131 | call_memop(q, unmap_dmabuf, p->mem_priv); | 131 | call_memop(q, unmap_dmabuf, p->mem_priv); |
132 | 132 | ||
133 | call_memop(q, detach_dmabuf, p->mem_priv); | 133 | call_memop(q, detach_dmabuf, p->mem_priv); |
134 | dma_buf_put(p->dbuf); | 134 | dma_buf_put(p->dbuf); |
135 | memset(p, 0, sizeof(*p)); | 135 | memset(p, 0, sizeof(*p)); |
136 | } | 136 | } |
137 | 137 | ||
138 | /** | 138 | /** |
139 | * __vb2_buf_dmabuf_put() - release memory associated with | 139 | * __vb2_buf_dmabuf_put() - release memory associated with |
140 | * a DMABUF shared buffer | 140 | * a DMABUF shared buffer |
141 | */ | 141 | */ |
142 | static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) | 142 | static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) |
143 | { | 143 | { |
144 | struct vb2_queue *q = vb->vb2_queue; | 144 | struct vb2_queue *q = vb->vb2_queue; |
145 | unsigned int plane; | 145 | unsigned int plane; |
146 | 146 | ||
147 | for (plane = 0; plane < vb->num_planes; ++plane) | 147 | for (plane = 0; plane < vb->num_planes; ++plane) |
148 | __vb2_plane_dmabuf_put(q, &vb->planes[plane]); | 148 | __vb2_plane_dmabuf_put(q, &vb->planes[plane]); |
149 | } | 149 | } |
150 | 150 | ||
151 | /** | 151 | /** |
152 | * __setup_lengths() - setup initial lengths for every plane in | 152 | * __setup_lengths() - setup initial lengths for every plane in |
153 | * every buffer on the queue | 153 | * every buffer on the queue |
154 | */ | 154 | */ |
155 | static void __setup_lengths(struct vb2_queue *q, unsigned int n) | 155 | static void __setup_lengths(struct vb2_queue *q, unsigned int n) |
156 | { | 156 | { |
157 | unsigned int buffer, plane; | 157 | unsigned int buffer, plane; |
158 | struct vb2_buffer *vb; | 158 | struct vb2_buffer *vb; |
159 | 159 | ||
160 | for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) { | 160 | for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) { |
161 | vb = q->bufs[buffer]; | 161 | vb = q->bufs[buffer]; |
162 | if (!vb) | 162 | if (!vb) |
163 | continue; | 163 | continue; |
164 | 164 | ||
165 | for (plane = 0; plane < vb->num_planes; ++plane) | 165 | for (plane = 0; plane < vb->num_planes; ++plane) |
166 | vb->v4l2_planes[plane].length = q->plane_sizes[plane]; | 166 | vb->v4l2_planes[plane].length = q->plane_sizes[plane]; |
167 | } | 167 | } |
168 | } | 168 | } |
169 | 169 | ||
170 | /** | 170 | /** |
171 | * __setup_offsets() - setup unique offsets ("cookies") for every plane in | 171 | * __setup_offsets() - setup unique offsets ("cookies") for every plane in |
172 | * every buffer on the queue | 172 | * every buffer on the queue |
173 | */ | 173 | */ |
174 | static void __setup_offsets(struct vb2_queue *q, unsigned int n) | 174 | static void __setup_offsets(struct vb2_queue *q, unsigned int n) |
175 | { | 175 | { |
176 | unsigned int buffer, plane; | 176 | unsigned int buffer, plane; |
177 | struct vb2_buffer *vb; | 177 | struct vb2_buffer *vb; |
178 | unsigned long off; | 178 | unsigned long off; |
179 | 179 | ||
180 | if (q->num_buffers) { | 180 | if (q->num_buffers) { |
181 | struct v4l2_plane *p; | 181 | struct v4l2_plane *p; |
182 | vb = q->bufs[q->num_buffers - 1]; | 182 | vb = q->bufs[q->num_buffers - 1]; |
183 | p = &vb->v4l2_planes[vb->num_planes - 1]; | 183 | p = &vb->v4l2_planes[vb->num_planes - 1]; |
184 | off = PAGE_ALIGN(p->m.mem_offset + p->length); | 184 | off = PAGE_ALIGN(p->m.mem_offset + p->length); |
185 | } else { | 185 | } else { |
186 | off = 0; | 186 | off = 0; |
187 | } | 187 | } |
188 | 188 | ||
189 | for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) { | 189 | for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) { |
190 | vb = q->bufs[buffer]; | 190 | vb = q->bufs[buffer]; |
191 | if (!vb) | 191 | if (!vb) |
192 | continue; | 192 | continue; |
193 | 193 | ||
194 | for (plane = 0; plane < vb->num_planes; ++plane) { | 194 | for (plane = 0; plane < vb->num_planes; ++plane) { |
195 | vb->v4l2_planes[plane].m.mem_offset = off; | 195 | vb->v4l2_planes[plane].m.mem_offset = off; |
196 | 196 | ||
197 | dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", | 197 | dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", |
198 | buffer, plane, off); | 198 | buffer, plane, off); |
199 | 199 | ||
200 | off += vb->v4l2_planes[plane].length; | 200 | off += vb->v4l2_planes[plane].length; |
201 | off = PAGE_ALIGN(off); | 201 | off = PAGE_ALIGN(off); |
202 | } | 202 | } |
203 | } | 203 | } |
204 | } | 204 | } |
205 | 205 | ||
206 | /** | 206 | /** |
207 | * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type) | 207 | * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type) |
208 | * video buffer memory for all buffers/planes on the queue and initializes the | 208 | * video buffer memory for all buffers/planes on the queue and initializes the |
209 | * queue | 209 | * queue |
210 | * | 210 | * |
211 | * Returns the number of buffers successfully allocated. | 211 | * Returns the number of buffers successfully allocated. |
212 | */ | 212 | */ |
213 | static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory, | 213 | static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory, |
214 | unsigned int num_buffers, unsigned int num_planes) | 214 | unsigned int num_buffers, unsigned int num_planes) |
215 | { | 215 | { |
216 | unsigned int buffer; | 216 | unsigned int buffer; |
217 | struct vb2_buffer *vb; | 217 | struct vb2_buffer *vb; |
218 | int ret; | 218 | int ret; |
219 | 219 | ||
220 | for (buffer = 0; buffer < num_buffers; ++buffer) { | 220 | for (buffer = 0; buffer < num_buffers; ++buffer) { |
221 | /* Allocate videobuf buffer structures */ | 221 | /* Allocate videobuf buffer structures */ |
222 | vb = kzalloc(q->buf_struct_size, GFP_KERNEL); | 222 | vb = kzalloc(q->buf_struct_size, GFP_KERNEL); |
223 | if (!vb) { | 223 | if (!vb) { |
224 | dprintk(1, "Memory alloc for buffer struct failed\n"); | 224 | dprintk(1, "Memory alloc for buffer struct failed\n"); |
225 | break; | 225 | break; |
226 | } | 226 | } |
227 | 227 | ||
228 | /* Length stores number of planes for multiplanar buffers */ | 228 | /* Length stores number of planes for multiplanar buffers */ |
229 | if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) | 229 | if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) |
230 | vb->v4l2_buf.length = num_planes; | 230 | vb->v4l2_buf.length = num_planes; |
231 | 231 | ||
232 | vb->state = VB2_BUF_STATE_DEQUEUED; | 232 | vb->state = VB2_BUF_STATE_DEQUEUED; |
233 | vb->vb2_queue = q; | 233 | vb->vb2_queue = q; |
234 | vb->num_planes = num_planes; | 234 | vb->num_planes = num_planes; |
235 | vb->v4l2_buf.index = q->num_buffers + buffer; | 235 | vb->v4l2_buf.index = q->num_buffers + buffer; |
236 | vb->v4l2_buf.type = q->type; | 236 | vb->v4l2_buf.type = q->type; |
237 | vb->v4l2_buf.memory = memory; | 237 | vb->v4l2_buf.memory = memory; |
238 | 238 | ||
239 | /* Allocate video buffer memory for the MMAP type */ | 239 | /* Allocate video buffer memory for the MMAP type */ |
240 | if (memory == V4L2_MEMORY_MMAP) { | 240 | if (memory == V4L2_MEMORY_MMAP) { |
241 | ret = __vb2_buf_mem_alloc(vb); | 241 | ret = __vb2_buf_mem_alloc(vb); |
242 | if (ret) { | 242 | if (ret) { |
243 | dprintk(1, "Failed allocating memory for " | 243 | dprintk(1, "Failed allocating memory for " |
244 | "buffer %d\n", buffer); | 244 | "buffer %d\n", buffer); |
245 | kfree(vb); | 245 | kfree(vb); |
246 | break; | 246 | break; |
247 | } | 247 | } |
248 | /* | 248 | /* |
249 | * Call the driver-provided buffer initialization | 249 | * Call the driver-provided buffer initialization |
250 | * callback, if given. An error in initialization | 250 | * callback, if given. An error in initialization |
251 | * results in queue setup failure. | 251 | * results in queue setup failure. |
252 | */ | 252 | */ |
253 | ret = call_qop(q, buf_init, vb); | 253 | ret = call_qop(q, buf_init, vb); |
254 | if (ret) { | 254 | if (ret) { |
255 | dprintk(1, "Buffer %d %p initialization" | 255 | dprintk(1, "Buffer %d %p initialization" |
256 | " failed\n", buffer, vb); | 256 | " failed\n", buffer, vb); |
257 | __vb2_buf_mem_free(vb); | 257 | __vb2_buf_mem_free(vb); |
258 | kfree(vb); | 258 | kfree(vb); |
259 | break; | 259 | break; |
260 | } | 260 | } |
261 | } | 261 | } |
262 | 262 | ||
263 | q->bufs[q->num_buffers + buffer] = vb; | 263 | q->bufs[q->num_buffers + buffer] = vb; |
264 | } | 264 | } |
265 | 265 | ||
266 | __setup_lengths(q, buffer); | 266 | __setup_lengths(q, buffer); |
267 | if (memory == V4L2_MEMORY_MMAP) | 267 | if (memory == V4L2_MEMORY_MMAP) |
268 | __setup_offsets(q, buffer); | 268 | __setup_offsets(q, buffer); |
269 | 269 | ||
270 | dprintk(1, "Allocated %d buffers, %d plane(s) each\n", | 270 | dprintk(1, "Allocated %d buffers, %d plane(s) each\n", |
271 | buffer, num_planes); | 271 | buffer, num_planes); |
272 | 272 | ||
273 | return buffer; | 273 | return buffer; |
274 | } | 274 | } |
275 | 275 | ||
276 | /** | 276 | /** |
277 | * __vb2_free_mem() - release all video buffer memory for a given queue | 277 | * __vb2_free_mem() - release all video buffer memory for a given queue |
278 | */ | 278 | */ |
279 | static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers) | 279 | static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers) |
280 | { | 280 | { |
281 | unsigned int buffer; | 281 | unsigned int buffer; |
282 | struct vb2_buffer *vb; | 282 | struct vb2_buffer *vb; |
283 | 283 | ||
284 | for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; | 284 | for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; |
285 | ++buffer) { | 285 | ++buffer) { |
286 | vb = q->bufs[buffer]; | 286 | vb = q->bufs[buffer]; |
287 | if (!vb) | 287 | if (!vb) |
288 | continue; | 288 | continue; |
289 | 289 | ||
290 | /* Free MMAP buffers or release USERPTR buffers */ | 290 | /* Free MMAP buffers or release USERPTR buffers */ |
291 | if (q->memory == V4L2_MEMORY_MMAP) | 291 | if (q->memory == V4L2_MEMORY_MMAP) |
292 | __vb2_buf_mem_free(vb); | 292 | __vb2_buf_mem_free(vb); |
293 | else if (q->memory == V4L2_MEMORY_DMABUF) | 293 | else if (q->memory == V4L2_MEMORY_DMABUF) |
294 | __vb2_buf_dmabuf_put(vb); | 294 | __vb2_buf_dmabuf_put(vb); |
295 | else | 295 | else |
296 | __vb2_buf_userptr_put(vb); | 296 | __vb2_buf_userptr_put(vb); |
297 | } | 297 | } |
298 | } | 298 | } |
299 | 299 | ||
300 | /** | 300 | /** |
301 | * __vb2_queue_free() - free buffers at the end of the queue - video memory and | 301 | * __vb2_queue_free() - free buffers at the end of the queue - video memory and |
302 | * related information, if no buffers are left return the queue to an | 302 | * related information, if no buffers are left return the queue to an |
303 | * uninitialized state. Might be called even if the queue has already been freed. | 303 | * uninitialized state. Might be called even if the queue has already been freed. |
304 | */ | 304 | */ |
305 | static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) | 305 | static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) |
306 | { | 306 | { |
307 | unsigned int buffer; | 307 | unsigned int buffer; |
308 | 308 | ||
309 | /* | 309 | /* |
310 | * Sanity check: when preparing a buffer the queue lock is released for | 310 | * Sanity check: when preparing a buffer the queue lock is released for |
311 | * a short while (see __buf_prepare for the details), which would allow | 311 | * a short while (see __buf_prepare for the details), which would allow |
312 | * a race with a reqbufs which can call this function. Removing the | 312 | * a race with a reqbufs which can call this function. Removing the |
313 | * buffers from underneath __buf_prepare is obviously a bad idea, so we | 313 | * buffers from underneath __buf_prepare is obviously a bad idea, so we |
314 | * check if any of the buffers is in the state PREPARING, and if so we | 314 | * check if any of the buffers is in the state PREPARING, and if so we |
315 | * just return -EAGAIN. | 315 | * just return -EAGAIN. |
316 | */ | 316 | */ |
317 | for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; | 317 | for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; |
318 | ++buffer) { | 318 | ++buffer) { |
319 | if (q->bufs[buffer] == NULL) | 319 | if (q->bufs[buffer] == NULL) |
320 | continue; | 320 | continue; |
321 | if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) { | 321 | if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) { |
322 | dprintk(1, "reqbufs: preparing buffers, cannot free\n"); | 322 | dprintk(1, "reqbufs: preparing buffers, cannot free\n"); |
323 | return -EAGAIN; | 323 | return -EAGAIN; |
324 | } | 324 | } |
325 | } | 325 | } |
326 | 326 | ||
327 | /* Call driver-provided cleanup function for each buffer, if provided */ | 327 | /* Call driver-provided cleanup function for each buffer, if provided */ |
328 | if (q->ops->buf_cleanup) { | 328 | if (q->ops->buf_cleanup) { |
329 | for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; | 329 | for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; |
330 | ++buffer) { | 330 | ++buffer) { |
331 | if (NULL == q->bufs[buffer]) | 331 | if (NULL == q->bufs[buffer]) |
332 | continue; | 332 | continue; |
333 | q->ops->buf_cleanup(q->bufs[buffer]); | 333 | q->ops->buf_cleanup(q->bufs[buffer]); |
334 | } | 334 | } |
335 | } | 335 | } |
336 | 336 | ||
337 | /* Release video buffer memory */ | 337 | /* Release video buffer memory */ |
338 | __vb2_free_mem(q, buffers); | 338 | __vb2_free_mem(q, buffers); |
339 | 339 | ||
340 | /* Free videobuf buffers */ | 340 | /* Free videobuf buffers */ |
341 | for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; | 341 | for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; |
342 | ++buffer) { | 342 | ++buffer) { |
343 | kfree(q->bufs[buffer]); | 343 | kfree(q->bufs[buffer]); |
344 | q->bufs[buffer] = NULL; | 344 | q->bufs[buffer] = NULL; |
345 | } | 345 | } |
346 | 346 | ||
347 | q->num_buffers -= buffers; | 347 | q->num_buffers -= buffers; |
348 | if (!q->num_buffers) | 348 | if (!q->num_buffers) |
349 | q->memory = 0; | 349 | q->memory = 0; |
350 | INIT_LIST_HEAD(&q->queued_list); | 350 | INIT_LIST_HEAD(&q->queued_list); |
351 | return 0; | 351 | return 0; |
352 | } | 352 | } |
353 | 353 | ||
354 | /** | 354 | /** |
355 | * __verify_planes_array() - verify that the planes array passed in struct | 355 | * __verify_planes_array() - verify that the planes array passed in struct |
356 | * v4l2_buffer from userspace can be safely used | 356 | * v4l2_buffer from userspace can be safely used |
357 | */ | 357 | */ |
358 | static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b) | 358 | static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b) |
359 | { | 359 | { |
360 | if (!V4L2_TYPE_IS_MULTIPLANAR(b->type)) | 360 | if (!V4L2_TYPE_IS_MULTIPLANAR(b->type)) |
361 | return 0; | 361 | return 0; |
362 | 362 | ||
363 | /* Is memory for copying plane information present? */ | 363 | /* Is memory for copying plane information present? */ |
364 | if (NULL == b->m.planes) { | 364 | if (NULL == b->m.planes) { |
365 | dprintk(1, "Multi-planar buffer passed but " | 365 | dprintk(1, "Multi-planar buffer passed but " |
366 | "planes array not provided\n"); | 366 | "planes array not provided\n"); |
367 | return -EINVAL; | 367 | return -EINVAL; |
368 | } | 368 | } |
369 | 369 | ||
370 | if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) { | 370 | if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) { |
371 | dprintk(1, "Incorrect planes array length, " | 371 | dprintk(1, "Incorrect planes array length, " |
372 | "expected %d, got %d\n", vb->num_planes, b->length); | 372 | "expected %d, got %d\n", vb->num_planes, b->length); |
373 | return -EINVAL; | 373 | return -EINVAL; |
374 | } | 374 | } |
375 | 375 | ||
376 | return 0; | 376 | return 0; |
377 | } | 377 | } |
378 | 378 | ||
379 | /** | 379 | /** |
380 | * __verify_length() - Verify that the bytesused value for each plane fits in | 380 | * __verify_length() - Verify that the bytesused value for each plane fits in |
381 | * the plane length and that the data offset doesn't exceed the bytesused value. | 381 | * the plane length and that the data offset doesn't exceed the bytesused value. |
382 | */ | 382 | */ |
383 | static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b) | 383 | static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b) |
384 | { | 384 | { |
385 | unsigned int length; | 385 | unsigned int length; |
386 | unsigned int plane; | 386 | unsigned int plane; |
387 | 387 | ||
388 | if (!V4L2_TYPE_IS_OUTPUT(b->type)) | 388 | if (!V4L2_TYPE_IS_OUTPUT(b->type)) |
389 | return 0; | 389 | return 0; |
390 | 390 | ||
391 | if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { | 391 | if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { |
392 | for (plane = 0; plane < vb->num_planes; ++plane) { | 392 | for (plane = 0; plane < vb->num_planes; ++plane) { |
393 | length = (b->memory == V4L2_MEMORY_USERPTR) | 393 | length = (b->memory == V4L2_MEMORY_USERPTR) |
394 | ? b->m.planes[plane].length | 394 | ? b->m.planes[plane].length |
395 | : vb->v4l2_planes[plane].length; | 395 | : vb->v4l2_planes[plane].length; |
396 | 396 | ||
397 | if (b->m.planes[plane].bytesused > length) | 397 | if (b->m.planes[plane].bytesused > 0 && |
398 | return -EINVAL; | 398 | b->m.planes[plane].data_offset + |
399 | 399 | b->m.planes[plane].bytesused > length) | |
400 | if (b->m.planes[plane].data_offset > 0 && | ||
401 | b->m.planes[plane].data_offset >= | ||
402 | b->m.planes[plane].bytesused) | ||
403 | return -EINVAL; | 400 | return -EINVAL; |
404 | } | 401 | } |
405 | } else { | 402 | } else { |
406 | length = (b->memory == V4L2_MEMORY_USERPTR) | 403 | length = (b->memory == V4L2_MEMORY_USERPTR) |
407 | ? b->length : vb->v4l2_planes[0].length; | 404 | ? b->length : vb->v4l2_planes[0].length; |
408 | 405 | ||
409 | if (b->bytesused > length) | 406 | if (b->bytesused > length) |
410 | return -EINVAL; | 407 | return -EINVAL; |
411 | } | 408 | } |
412 | 409 | ||
413 | return 0; | 410 | return 0; |
414 | } | 411 | } |
415 | 412 | ||
416 | /** | 413 | /** |
417 | * __buffer_in_use() - return true if the buffer is in use and | 414 | * __buffer_in_use() - return true if the buffer is in use and |
418 | * the queue cannot be freed (by the means of REQBUFS(0)) call | 415 | * the queue cannot be freed (by the means of REQBUFS(0)) call |
419 | */ | 416 | */ |
420 | static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb) | 417 | static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb) |
421 | { | 418 | { |
422 | unsigned int plane; | 419 | unsigned int plane; |
423 | for (plane = 0; plane < vb->num_planes; ++plane) { | 420 | for (plane = 0; plane < vb->num_planes; ++plane) { |
424 | void *mem_priv = vb->planes[plane].mem_priv; | 421 | void *mem_priv = vb->planes[plane].mem_priv; |
425 | /* | 422 | /* |
426 | * If num_users() has not been provided, call_memop | 423 | * If num_users() has not been provided, call_memop |
427 | * will return 0, apparently nobody cares about this | 424 | * will return 0, apparently nobody cares about this |
428 | * case anyway. If num_users() returns more than 1, | 425 | * case anyway. If num_users() returns more than 1, |
429 | * we are not the only user of the plane's memory. | 426 | * we are not the only user of the plane's memory. |
430 | */ | 427 | */ |
431 | if (mem_priv && call_memop(q, num_users, mem_priv) > 1) | 428 | if (mem_priv && call_memop(q, num_users, mem_priv) > 1) |
432 | return true; | 429 | return true; |
433 | } | 430 | } |
434 | return false; | 431 | return false; |
435 | } | 432 | } |
436 | 433 | ||
437 | /** | 434 | /** |
438 | * __buffers_in_use() - return true if any buffers on the queue are in use and | 435 | * __buffers_in_use() - return true if any buffers on the queue are in use and |
439 | * the queue cannot be freed (by the means of REQBUFS(0)) call | 436 | * the queue cannot be freed (by the means of REQBUFS(0)) call |
440 | */ | 437 | */ |
441 | static bool __buffers_in_use(struct vb2_queue *q) | 438 | static bool __buffers_in_use(struct vb2_queue *q) |
442 | { | 439 | { |
443 | unsigned int buffer; | 440 | unsigned int buffer; |
444 | for (buffer = 0; buffer < q->num_buffers; ++buffer) { | 441 | for (buffer = 0; buffer < q->num_buffers; ++buffer) { |
445 | if (__buffer_in_use(q, q->bufs[buffer])) | 442 | if (__buffer_in_use(q, q->bufs[buffer])) |
446 | return true; | 443 | return true; |
447 | } | 444 | } |
448 | return false; | 445 | return false; |
449 | } | 446 | } |
450 | 447 | ||
451 | /** | 448 | /** |
452 | * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be | 449 | * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be |
453 | * returned to userspace | 450 | * returned to userspace |
454 | */ | 451 | */ |
455 | static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) | 452 | static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) |
456 | { | 453 | { |
457 | struct vb2_queue *q = vb->vb2_queue; | 454 | struct vb2_queue *q = vb->vb2_queue; |
458 | 455 | ||
459 | /* Copy back data such as timestamp, flags, etc. */ | 456 | /* Copy back data such as timestamp, flags, etc. */ |
460 | memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m)); | 457 | memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m)); |
461 | b->reserved2 = vb->v4l2_buf.reserved2; | 458 | b->reserved2 = vb->v4l2_buf.reserved2; |
462 | b->reserved = vb->v4l2_buf.reserved; | 459 | b->reserved = vb->v4l2_buf.reserved; |
463 | 460 | ||
464 | if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) { | 461 | if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) { |
465 | /* | 462 | /* |
466 | * Fill in plane-related data if userspace provided an array | 463 | * Fill in plane-related data if userspace provided an array |
467 | * for it. The caller has already verified memory and size. | 464 | * for it. The caller has already verified memory and size. |
468 | */ | 465 | */ |
469 | b->length = vb->num_planes; | 466 | b->length = vb->num_planes; |
470 | memcpy(b->m.planes, vb->v4l2_planes, | 467 | memcpy(b->m.planes, vb->v4l2_planes, |
471 | b->length * sizeof(struct v4l2_plane)); | 468 | b->length * sizeof(struct v4l2_plane)); |
472 | } else { | 469 | } else { |
473 | /* | 470 | /* |
474 | * We use length and offset in v4l2_planes array even for | 471 | * We use length and offset in v4l2_planes array even for |
475 | * single-planar buffers, but userspace does not. | 472 | * single-planar buffers, but userspace does not. |
476 | */ | 473 | */ |
477 | b->length = vb->v4l2_planes[0].length; | 474 | b->length = vb->v4l2_planes[0].length; |
478 | b->bytesused = vb->v4l2_planes[0].bytesused; | 475 | b->bytesused = vb->v4l2_planes[0].bytesused; |
479 | if (q->memory == V4L2_MEMORY_MMAP) | 476 | if (q->memory == V4L2_MEMORY_MMAP) |
480 | b->m.offset = vb->v4l2_planes[0].m.mem_offset; | 477 | b->m.offset = vb->v4l2_planes[0].m.mem_offset; |
481 | else if (q->memory == V4L2_MEMORY_USERPTR) | 478 | else if (q->memory == V4L2_MEMORY_USERPTR) |
482 | b->m.userptr = vb->v4l2_planes[0].m.userptr; | 479 | b->m.userptr = vb->v4l2_planes[0].m.userptr; |
483 | else if (q->memory == V4L2_MEMORY_DMABUF) | 480 | else if (q->memory == V4L2_MEMORY_DMABUF) |
484 | b->m.fd = vb->v4l2_planes[0].m.fd; | 481 | b->m.fd = vb->v4l2_planes[0].m.fd; |
485 | } | 482 | } |
486 | 483 | ||
487 | /* | 484 | /* |
488 | * Clear any buffer state related flags. | 485 | * Clear any buffer state related flags. |
489 | */ | 486 | */ |
490 | b->flags &= ~V4L2_BUFFER_MASK_FLAGS; | 487 | b->flags &= ~V4L2_BUFFER_MASK_FLAGS; |
491 | b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK; | 488 | b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK; |
492 | if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) != | 489 | if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) != |
493 | V4L2_BUF_FLAG_TIMESTAMP_COPY) { | 490 | V4L2_BUF_FLAG_TIMESTAMP_COPY) { |
494 | /* | 491 | /* |
495 | * For non-COPY timestamps, drop timestamp source bits | 492 | * For non-COPY timestamps, drop timestamp source bits |
496 | * and obtain the timestamp source from the queue. | 493 | * and obtain the timestamp source from the queue. |
497 | */ | 494 | */ |
498 | b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; | 495 | b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; |
499 | b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; | 496 | b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; |
500 | } | 497 | } |
501 | 498 | ||
502 | switch (vb->state) { | 499 | switch (vb->state) { |
503 | case VB2_BUF_STATE_QUEUED: | 500 | case VB2_BUF_STATE_QUEUED: |
504 | case VB2_BUF_STATE_ACTIVE: | 501 | case VB2_BUF_STATE_ACTIVE: |
505 | b->flags |= V4L2_BUF_FLAG_QUEUED; | 502 | b->flags |= V4L2_BUF_FLAG_QUEUED; |
506 | break; | 503 | break; |
507 | case VB2_BUF_STATE_ERROR: | 504 | case VB2_BUF_STATE_ERROR: |
508 | b->flags |= V4L2_BUF_FLAG_ERROR; | 505 | b->flags |= V4L2_BUF_FLAG_ERROR; |
509 | /* fall through */ | 506 | /* fall through */ |
510 | case VB2_BUF_STATE_DONE: | 507 | case VB2_BUF_STATE_DONE: |
511 | b->flags |= V4L2_BUF_FLAG_DONE; | 508 | b->flags |= V4L2_BUF_FLAG_DONE; |
512 | break; | 509 | break; |
513 | case VB2_BUF_STATE_PREPARED: | 510 | case VB2_BUF_STATE_PREPARED: |
514 | b->flags |= V4L2_BUF_FLAG_PREPARED; | 511 | b->flags |= V4L2_BUF_FLAG_PREPARED; |
515 | break; | 512 | break; |
516 | case VB2_BUF_STATE_PREPARING: | 513 | case VB2_BUF_STATE_PREPARING: |
517 | case VB2_BUF_STATE_DEQUEUED: | 514 | case VB2_BUF_STATE_DEQUEUED: |
518 | /* nothing */ | 515 | /* nothing */ |
519 | break; | 516 | break; |
520 | } | 517 | } |
521 | 518 | ||
522 | if (__buffer_in_use(q, vb)) | 519 | if (__buffer_in_use(q, vb)) |
523 | b->flags |= V4L2_BUF_FLAG_MAPPED; | 520 | b->flags |= V4L2_BUF_FLAG_MAPPED; |
524 | } | 521 | } |
525 | 522 | ||
526 | /** | 523 | /** |
527 | * vb2_querybuf() - query video buffer information | 524 | * vb2_querybuf() - query video buffer information |
528 | * @q: videobuf queue | 525 | * @q: videobuf queue |
529 | * @b: buffer struct passed from userspace to vidioc_querybuf handler | 526 | * @b: buffer struct passed from userspace to vidioc_querybuf handler |
530 | * in driver | 527 | * in driver |
531 | * | 528 | * |
532 | * Should be called from vidioc_querybuf ioctl handler in driver. | 529 | * Should be called from vidioc_querybuf ioctl handler in driver. |
533 | * This function will verify the passed v4l2_buffer structure and fill the | 530 | * This function will verify the passed v4l2_buffer structure and fill the |
534 | * relevant information for the userspace. | 531 | * relevant information for the userspace. |
535 | * | 532 | * |
536 | * The return values from this function are intended to be directly returned | 533 | * The return values from this function are intended to be directly returned |
537 | * from vidioc_querybuf handler in driver. | 534 | * from vidioc_querybuf handler in driver. |
538 | */ | 535 | */ |
539 | int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b) | 536 | int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b) |
540 | { | 537 | { |
541 | struct vb2_buffer *vb; | 538 | struct vb2_buffer *vb; |
542 | int ret; | 539 | int ret; |
543 | 540 | ||
544 | if (b->type != q->type) { | 541 | if (b->type != q->type) { |
545 | dprintk(1, "querybuf: wrong buffer type\n"); | 542 | dprintk(1, "querybuf: wrong buffer type\n"); |
546 | return -EINVAL; | 543 | return -EINVAL; |
547 | } | 544 | } |
548 | 545 | ||
549 | if (b->index >= q->num_buffers) { | 546 | if (b->index >= q->num_buffers) { |
550 | dprintk(1, "querybuf: buffer index out of range\n"); | 547 | dprintk(1, "querybuf: buffer index out of range\n"); |
551 | return -EINVAL; | 548 | return -EINVAL; |
552 | } | 549 | } |
553 | vb = q->bufs[b->index]; | 550 | vb = q->bufs[b->index]; |
554 | ret = __verify_planes_array(vb, b); | 551 | ret = __verify_planes_array(vb, b); |
555 | if (!ret) | 552 | if (!ret) |
556 | __fill_v4l2_buffer(vb, b); | 553 | __fill_v4l2_buffer(vb, b); |
557 | return ret; | 554 | return ret; |
558 | } | 555 | } |
559 | EXPORT_SYMBOL(vb2_querybuf); | 556 | EXPORT_SYMBOL(vb2_querybuf); |
560 | 557 | ||
561 | /** | 558 | /** |
562 | * __verify_userptr_ops() - verify that all memory operations required for | 559 | * __verify_userptr_ops() - verify that all memory operations required for |
563 | * USERPTR queue type have been provided | 560 | * USERPTR queue type have been provided |
564 | */ | 561 | */ |
565 | static int __verify_userptr_ops(struct vb2_queue *q) | 562 | static int __verify_userptr_ops(struct vb2_queue *q) |
566 | { | 563 | { |
567 | if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr || | 564 | if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr || |
568 | !q->mem_ops->put_userptr) | 565 | !q->mem_ops->put_userptr) |
569 | return -EINVAL; | 566 | return -EINVAL; |
570 | 567 | ||
571 | return 0; | 568 | return 0; |
572 | } | 569 | } |
573 | 570 | ||
574 | /** | 571 | /** |
575 | * __verify_mmap_ops() - verify that all memory operations required for | 572 | * __verify_mmap_ops() - verify that all memory operations required for |
576 | * MMAP queue type have been provided | 573 | * MMAP queue type have been provided |
577 | */ | 574 | */ |
578 | static int __verify_mmap_ops(struct vb2_queue *q) | 575 | static int __verify_mmap_ops(struct vb2_queue *q) |
579 | { | 576 | { |
580 | if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc || | 577 | if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc || |
581 | !q->mem_ops->put || !q->mem_ops->mmap) | 578 | !q->mem_ops->put || !q->mem_ops->mmap) |
582 | return -EINVAL; | 579 | return -EINVAL; |
583 | 580 | ||
584 | return 0; | 581 | return 0; |
585 | } | 582 | } |
586 | 583 | ||
587 | /** | 584 | /** |
588 | * __verify_dmabuf_ops() - verify that all memory operations required for | 585 | * __verify_dmabuf_ops() - verify that all memory operations required for |
589 | * DMABUF queue type have been provided | 586 | * DMABUF queue type have been provided |
590 | */ | 587 | */ |
591 | static int __verify_dmabuf_ops(struct vb2_queue *q) | 588 | static int __verify_dmabuf_ops(struct vb2_queue *q) |
592 | { | 589 | { |
593 | if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf || | 590 | if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf || |
594 | !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf || | 591 | !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf || |
595 | !q->mem_ops->unmap_dmabuf) | 592 | !q->mem_ops->unmap_dmabuf) |
596 | return -EINVAL; | 593 | return -EINVAL; |
597 | 594 | ||
598 | return 0; | 595 | return 0; |
599 | } | 596 | } |
600 | 597 | ||
601 | /** | 598 | /** |
602 | * __verify_memory_type() - Check whether the memory type and buffer type | 599 | * __verify_memory_type() - Check whether the memory type and buffer type |
603 | * passed to a buffer operation are compatible with the queue. | 600 | * passed to a buffer operation are compatible with the queue. |
604 | */ | 601 | */ |
605 | static int __verify_memory_type(struct vb2_queue *q, | 602 | static int __verify_memory_type(struct vb2_queue *q, |
606 | enum v4l2_memory memory, enum v4l2_buf_type type) | 603 | enum v4l2_memory memory, enum v4l2_buf_type type) |
607 | { | 604 | { |
608 | if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR && | 605 | if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR && |
609 | memory != V4L2_MEMORY_DMABUF) { | 606 | memory != V4L2_MEMORY_DMABUF) { |
610 | dprintk(1, "reqbufs: unsupported memory type\n"); | 607 | dprintk(1, "reqbufs: unsupported memory type\n"); |
611 | return -EINVAL; | 608 | return -EINVAL; |
612 | } | 609 | } |
613 | 610 | ||
614 | if (type != q->type) { | 611 | if (type != q->type) { |
615 | dprintk(1, "reqbufs: requested type is incorrect\n"); | 612 | dprintk(1, "reqbufs: requested type is incorrect\n"); |
616 | return -EINVAL; | 613 | return -EINVAL; |
617 | } | 614 | } |
618 | 615 | ||
619 | /* | 616 | /* |
620 | * Make sure all the required memory ops for given memory type | 617 | * Make sure all the required memory ops for given memory type |
621 | * are available. | 618 | * are available. |
622 | */ | 619 | */ |
623 | if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) { | 620 | if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) { |
624 | dprintk(1, "reqbufs: MMAP for current setup unsupported\n"); | 621 | dprintk(1, "reqbufs: MMAP for current setup unsupported\n"); |
625 | return -EINVAL; | 622 | return -EINVAL; |
626 | } | 623 | } |
627 | 624 | ||
628 | if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) { | 625 | if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) { |
629 | dprintk(1, "reqbufs: USERPTR for current setup unsupported\n"); | 626 | dprintk(1, "reqbufs: USERPTR for current setup unsupported\n"); |
630 | return -EINVAL; | 627 | return -EINVAL; |
631 | } | 628 | } |
632 | 629 | ||
633 | if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) { | 630 | if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) { |
634 | dprintk(1, "reqbufs: DMABUF for current setup unsupported\n"); | 631 | dprintk(1, "reqbufs: DMABUF for current setup unsupported\n"); |
635 | return -EINVAL; | 632 | return -EINVAL; |
636 | } | 633 | } |
637 | 634 | ||
638 | /* | 635 | /* |
639 | * Place the busy tests at the end: -EBUSY can be ignored when | 636 | * Place the busy tests at the end: -EBUSY can be ignored when |
640 | * create_bufs is called with count == 0, but count == 0 should still | 637 | * create_bufs is called with count == 0, but count == 0 should still |
641 | * do the memory and type validation. | 638 | * do the memory and type validation. |
642 | */ | 639 | */ |
643 | if (q->fileio) { | 640 | if (q->fileio) { |
644 | dprintk(1, "reqbufs: file io in progress\n"); | 641 | dprintk(1, "reqbufs: file io in progress\n"); |
645 | return -EBUSY; | 642 | return -EBUSY; |
646 | } | 643 | } |
647 | return 0; | 644 | return 0; |
648 | } | 645 | } |
649 | 646 | ||
650 | /** | 647 | /** |
651 | * __reqbufs() - Initiate streaming | 648 | * __reqbufs() - Initiate streaming |
652 | * @q: videobuf2 queue | 649 | * @q: videobuf2 queue |
653 | * @req: struct passed from userspace to vidioc_reqbufs handler in driver | 650 | * @req: struct passed from userspace to vidioc_reqbufs handler in driver |
654 | * | 651 | * |
655 | * Should be called from vidioc_reqbufs ioctl handler of a driver. | 652 | * Should be called from vidioc_reqbufs ioctl handler of a driver. |
656 | * This function: | 653 | * This function: |
657 | * 1) verifies streaming parameters passed from the userspace, | 654 | * 1) verifies streaming parameters passed from the userspace, |
658 | * 2) sets up the queue, | 655 | * 2) sets up the queue, |
659 | * 3) negotiates number of buffers and planes per buffer with the driver | 656 | * 3) negotiates number of buffers and planes per buffer with the driver |
660 | * to be used during streaming, | 657 | * to be used during streaming, |
661 | * 4) allocates internal buffer structures (struct vb2_buffer), according to | 658 | * 4) allocates internal buffer structures (struct vb2_buffer), according to |
662 | * the agreed parameters, | 659 | * the agreed parameters, |
663 | * 5) for MMAP memory type, allocates actual video memory, using the | 660 | * 5) for MMAP memory type, allocates actual video memory, using the |
664 | * memory handling/allocation routines provided during queue initialization | 661 | * memory handling/allocation routines provided during queue initialization |
665 | * | 662 | * |
666 | * If req->count is 0, all the memory will be freed instead. | 663 | * If req->count is 0, all the memory will be freed instead. |
667 | * If the queue has been allocated previously (by a previous vb2_reqbufs) call | 664 | * If the queue has been allocated previously (by a previous vb2_reqbufs) call |
668 | * and the queue is not busy, memory will be reallocated. | 665 | * and the queue is not busy, memory will be reallocated. |
669 | * | 666 | * |
670 | * The return values from this function are intended to be directly returned | 667 | * The return values from this function are intended to be directly returned |
671 | * from vidioc_reqbufs handler in driver. | 668 | * from vidioc_reqbufs handler in driver. |
672 | */ | 669 | */ |
673 | static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) | 670 | static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) |
674 | { | 671 | { |
675 | unsigned int num_buffers, allocated_buffers, num_planes = 0; | 672 | unsigned int num_buffers, allocated_buffers, num_planes = 0; |
676 | int ret; | 673 | int ret; |
677 | 674 | ||
678 | if (q->streaming) { | 675 | if (q->streaming) { |
679 | dprintk(1, "reqbufs: streaming active\n"); | 676 | dprintk(1, "reqbufs: streaming active\n"); |
680 | return -EBUSY; | 677 | return -EBUSY; |
681 | } | 678 | } |
682 | 679 | ||
683 | if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) { | 680 | if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) { |
684 | /* | 681 | /* |
685 | * We already have buffers allocated, so first check if they | 682 | * We already have buffers allocated, so first check if they |
686 | * are not in use and can be freed. | 683 | * are not in use and can be freed. |
687 | */ | 684 | */ |
688 | if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) { | 685 | if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) { |
689 | dprintk(1, "reqbufs: memory in use, cannot free\n"); | 686 | dprintk(1, "reqbufs: memory in use, cannot free\n"); |
690 | return -EBUSY; | 687 | return -EBUSY; |
691 | } | 688 | } |
692 | 689 | ||
693 | ret = __vb2_queue_free(q, q->num_buffers); | 690 | ret = __vb2_queue_free(q, q->num_buffers); |
694 | if (ret) | 691 | if (ret) |
695 | return ret; | 692 | return ret; |
696 | 693 | ||
697 | /* | 694 | /* |
698 | * In case of REQBUFS(0) return immediately without calling | 695 | * In case of REQBUFS(0) return immediately without calling |
699 | * driver's queue_setup() callback and allocating resources. | 696 | * driver's queue_setup() callback and allocating resources. |
700 | */ | 697 | */ |
701 | if (req->count == 0) | 698 | if (req->count == 0) |
702 | return 0; | 699 | return 0; |
703 | } | 700 | } |
704 | 701 | ||
705 | /* | 702 | /* |
706 | * Make sure the requested values and current defaults are sane. | 703 | * Make sure the requested values and current defaults are sane. |
707 | */ | 704 | */ |
708 | num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME); | 705 | num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME); |
709 | memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); | 706 | memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); |
710 | memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); | 707 | memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); |
711 | q->memory = req->memory; | 708 | q->memory = req->memory; |
712 | 709 | ||
713 | /* | 710 | /* |
714 | * Ask the driver how many buffers and planes per buffer it requires. | 711 | * Ask the driver how many buffers and planes per buffer it requires. |
715 | * Driver also sets the size and allocator context for each plane. | 712 | * Driver also sets the size and allocator context for each plane. |
716 | */ | 713 | */ |
717 | ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes, | 714 | ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes, |
718 | q->plane_sizes, q->alloc_ctx); | 715 | q->plane_sizes, q->alloc_ctx); |
719 | if (ret) | 716 | if (ret) |
720 | return ret; | 717 | return ret; |
721 | 718 | ||
722 | /* Finally, allocate buffers and video memory */ | 719 | /* Finally, allocate buffers and video memory */ |
723 | ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes); | 720 | ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes); |
724 | if (ret == 0) { | 721 | if (ret == 0) { |
725 | dprintk(1, "Memory allocation failed\n"); | 722 | dprintk(1, "Memory allocation failed\n"); |
726 | return -ENOMEM; | 723 | return -ENOMEM; |
727 | } | 724 | } |
728 | 725 | ||
729 | allocated_buffers = ret; | 726 | allocated_buffers = ret; |
730 | 727 | ||
731 | /* | 728 | /* |
732 | * Check if driver can handle the allocated number of buffers. | 729 | * Check if driver can handle the allocated number of buffers. |
733 | */ | 730 | */ |
734 | if (allocated_buffers < num_buffers) { | 731 | if (allocated_buffers < num_buffers) { |
735 | num_buffers = allocated_buffers; | 732 | num_buffers = allocated_buffers; |
736 | 733 | ||
737 | ret = call_qop(q, queue_setup, q, NULL, &num_buffers, | 734 | ret = call_qop(q, queue_setup, q, NULL, &num_buffers, |
738 | &num_planes, q->plane_sizes, q->alloc_ctx); | 735 | &num_planes, q->plane_sizes, q->alloc_ctx); |
739 | 736 | ||
740 | if (!ret && allocated_buffers < num_buffers) | 737 | if (!ret && allocated_buffers < num_buffers) |
741 | ret = -ENOMEM; | 738 | ret = -ENOMEM; |
742 | 739 | ||
743 | /* | 740 | /* |
744 | * Either the driver has accepted a smaller number of buffers, | 741 | * Either the driver has accepted a smaller number of buffers, |
745 | * or .queue_setup() returned an error | 742 | * or .queue_setup() returned an error |
746 | */ | 743 | */ |
747 | } | 744 | } |
748 | 745 | ||
749 | q->num_buffers = allocated_buffers; | 746 | q->num_buffers = allocated_buffers; |
750 | 747 | ||
751 | if (ret < 0) { | 748 | if (ret < 0) { |
752 | __vb2_queue_free(q, allocated_buffers); | 749 | __vb2_queue_free(q, allocated_buffers); |
753 | return ret; | 750 | return ret; |
754 | } | 751 | } |
755 | 752 | ||
756 | /* | 753 | /* |
757 | * Return the number of successfully allocated buffers | 754 | * Return the number of successfully allocated buffers |
758 | * to the userspace. | 755 | * to the userspace. |
759 | */ | 756 | */ |
760 | req->count = allocated_buffers; | 757 | req->count = allocated_buffers; |
761 | q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); | 758 | q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); |
762 | 759 | ||
763 | return 0; | 760 | return 0; |
764 | } | 761 | } |
765 | 762 | ||
766 | /** | 763 | /** |
767 | * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and | 764 | * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and |
768 | * type values. | 765 | * type values. |
769 | * @q: videobuf2 queue | 766 | * @q: videobuf2 queue |
770 | * @req: struct passed from userspace to vidioc_reqbufs handler in driver | 767 | * @req: struct passed from userspace to vidioc_reqbufs handler in driver |
771 | */ | 768 | */ |
772 | int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) | 769 | int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) |
773 | { | 770 | { |
774 | int ret = __verify_memory_type(q, req->memory, req->type); | 771 | int ret = __verify_memory_type(q, req->memory, req->type); |
775 | 772 | ||
776 | return ret ? ret : __reqbufs(q, req); | 773 | return ret ? ret : __reqbufs(q, req); |
777 | } | 774 | } |
778 | EXPORT_SYMBOL_GPL(vb2_reqbufs); | 775 | EXPORT_SYMBOL_GPL(vb2_reqbufs); |
779 | 776 | ||
780 | /** | 777 | /** |
781 | * __create_bufs() - Allocate buffers and any required auxiliary structs | 778 | * __create_bufs() - Allocate buffers and any required auxiliary structs |
782 | * @q: videobuf2 queue | 779 | * @q: videobuf2 queue |
783 | * @create: creation parameters, passed from userspace to vidioc_create_bufs | 780 | * @create: creation parameters, passed from userspace to vidioc_create_bufs |
784 | * handler in driver | 781 | * handler in driver |
785 | * | 782 | * |
786 | * Should be called from vidioc_create_bufs ioctl handler of a driver. | 783 | * Should be called from vidioc_create_bufs ioctl handler of a driver. |
787 | * This function: | 784 | * This function: |
788 | * 1) verifies parameter sanity | 785 | * 1) verifies parameter sanity |
789 | * 2) calls the .queue_setup() queue operation | 786 | * 2) calls the .queue_setup() queue operation |
790 | * 3) performs any necessary memory allocations | 787 | * 3) performs any necessary memory allocations |
791 | * | 788 | * |
792 | * The return values from this function are intended to be directly returned | 789 | * The return values from this function are intended to be directly returned |
793 | * from vidioc_create_bufs handler in driver. | 790 | * from vidioc_create_bufs handler in driver. |
794 | */ | 791 | */ |
795 | static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) | 792 | static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) |
796 | { | 793 | { |
797 | unsigned int num_planes = 0, num_buffers, allocated_buffers; | 794 | unsigned int num_planes = 0, num_buffers, allocated_buffers; |
798 | int ret; | 795 | int ret; |
799 | 796 | ||
800 | if (q->num_buffers == VIDEO_MAX_FRAME) { | 797 | if (q->num_buffers == VIDEO_MAX_FRAME) { |
801 | dprintk(1, "%s(): maximum number of buffers already allocated\n", | 798 | dprintk(1, "%s(): maximum number of buffers already allocated\n", |
802 | __func__); | 799 | __func__); |
803 | return -ENOBUFS; | 800 | return -ENOBUFS; |
804 | } | 801 | } |
805 | 802 | ||
806 | if (!q->num_buffers) { | 803 | if (!q->num_buffers) { |
807 | memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); | 804 | memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); |
808 | memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); | 805 | memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); |
809 | q->memory = create->memory; | 806 | q->memory = create->memory; |
810 | q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); | 807 | q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); |
811 | } | 808 | } |
812 | 809 | ||
813 | num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers); | 810 | num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers); |
814 | 811 | ||
815 | /* | 812 | /* |
816 | * Ask the driver, whether the requested number of buffers, planes per | 813 | * Ask the driver, whether the requested number of buffers, planes per |
817 | * buffer and their sizes are acceptable | 814 | * buffer and their sizes are acceptable |
818 | */ | 815 | */ |
819 | ret = call_qop(q, queue_setup, q, &create->format, &num_buffers, | 816 | ret = call_qop(q, queue_setup, q, &create->format, &num_buffers, |
820 | &num_planes, q->plane_sizes, q->alloc_ctx); | 817 | &num_planes, q->plane_sizes, q->alloc_ctx); |
821 | if (ret) | 818 | if (ret) |
822 | return ret; | 819 | return ret; |
823 | 820 | ||
824 | /* Finally, allocate buffers and video memory */ | 821 | /* Finally, allocate buffers and video memory */ |
825 | ret = __vb2_queue_alloc(q, create->memory, num_buffers, | 822 | ret = __vb2_queue_alloc(q, create->memory, num_buffers, |
826 | num_planes); | 823 | num_planes); |
827 | if (ret == 0) { | 824 | if (ret == 0) { |
828 | dprintk(1, "Memory allocation failed\n"); | 825 | dprintk(1, "Memory allocation failed\n"); |
829 | return -ENOMEM; | 826 | return -ENOMEM; |
830 | } | 827 | } |
831 | 828 | ||
832 | allocated_buffers = ret; | 829 | allocated_buffers = ret; |
833 | 830 | ||
834 | /* | 831 | /* |
835 | * Check if driver can handle the so far allocated number of buffers. | 832 | * Check if driver can handle the so far allocated number of buffers. |
836 | */ | 833 | */ |
837 | if (ret < num_buffers) { | 834 | if (ret < num_buffers) { |
838 | num_buffers = ret; | 835 | num_buffers = ret; |
839 | 836 | ||
840 | /* | 837 | /* |
841 | * q->num_buffers contains the total number of buffers, that the | 838 | * q->num_buffers contains the total number of buffers, that the |
842 | * queue driver has set up | 839 | * queue driver has set up |
843 | */ | 840 | */ |
844 | ret = call_qop(q, queue_setup, q, &create->format, &num_buffers, | 841 | ret = call_qop(q, queue_setup, q, &create->format, &num_buffers, |
845 | &num_planes, q->plane_sizes, q->alloc_ctx); | 842 | &num_planes, q->plane_sizes, q->alloc_ctx); |
846 | 843 | ||
847 | if (!ret && allocated_buffers < num_buffers) | 844 | if (!ret && allocated_buffers < num_buffers) |
848 | ret = -ENOMEM; | 845 | ret = -ENOMEM; |
849 | 846 | ||
850 | /* | 847 | /* |
851 | * Either the driver has accepted a smaller number of buffers, | 848 | * Either the driver has accepted a smaller number of buffers, |
852 | * or .queue_setup() returned an error | 849 | * or .queue_setup() returned an error |
853 | */ | 850 | */ |
854 | } | 851 | } |
855 | 852 | ||
856 | q->num_buffers += allocated_buffers; | 853 | q->num_buffers += allocated_buffers; |
857 | 854 | ||
858 | if (ret < 0) { | 855 | if (ret < 0) { |
859 | __vb2_queue_free(q, allocated_buffers); | 856 | __vb2_queue_free(q, allocated_buffers); |
860 | return -ENOMEM; | 857 | return -ENOMEM; |
861 | } | 858 | } |
862 | 859 | ||
863 | /* | 860 | /* |
864 | * Return the number of successfully allocated buffers | 861 | * Return the number of successfully allocated buffers |
865 | * to the userspace. | 862 | * to the userspace. |
866 | */ | 863 | */ |
867 | create->count = allocated_buffers; | 864 | create->count = allocated_buffers; |
868 | 865 | ||
869 | return 0; | 866 | return 0; |
870 | } | 867 | } |
871 | 868 | ||
872 | /** | 869 | /** |
873 | * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the | 870 | * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the |
874 | * memory and type values. | 871 | * memory and type values. |
875 | * @q: videobuf2 queue | 872 | * @q: videobuf2 queue |
876 | * @create: creation parameters, passed from userspace to vidioc_create_bufs | 873 | * @create: creation parameters, passed from userspace to vidioc_create_bufs |
877 | * handler in driver | 874 | * handler in driver |
878 | */ | 875 | */ |
879 | int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) | 876 | int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) |
880 | { | 877 | { |
881 | int ret = __verify_memory_type(q, create->memory, create->format.type); | 878 | int ret = __verify_memory_type(q, create->memory, create->format.type); |
882 | 879 | ||
883 | create->index = q->num_buffers; | 880 | create->index = q->num_buffers; |
884 | if (create->count == 0) | 881 | if (create->count == 0) |
885 | return ret != -EBUSY ? ret : 0; | 882 | return ret != -EBUSY ? ret : 0; |
886 | return ret ? ret : __create_bufs(q, create); | 883 | return ret ? ret : __create_bufs(q, create); |
887 | } | 884 | } |
888 | EXPORT_SYMBOL_GPL(vb2_create_bufs); | 885 | EXPORT_SYMBOL_GPL(vb2_create_bufs); |
889 | 886 | ||
890 | /** | 887 | /** |
891 | * vb2_plane_vaddr() - Return a kernel virtual address of a given plane | 888 | * vb2_plane_vaddr() - Return a kernel virtual address of a given plane |
892 | * @vb: vb2_buffer to which the plane in question belongs to | 889 | * @vb: vb2_buffer to which the plane in question belongs to |
893 | * @plane_no: plane number for which the address is to be returned | 890 | * @plane_no: plane number for which the address is to be returned |
894 | * | 891 | * |
895 | * This function returns a kernel virtual address of a given plane if | 892 | * This function returns a kernel virtual address of a given plane if |
896 | * such a mapping exist, NULL otherwise. | 893 | * such a mapping exist, NULL otherwise. |
897 | */ | 894 | */ |
898 | void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) | 895 | void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) |
899 | { | 896 | { |
900 | struct vb2_queue *q = vb->vb2_queue; | 897 | struct vb2_queue *q = vb->vb2_queue; |
901 | 898 | ||
902 | if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) | 899 | if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) |
903 | return NULL; | 900 | return NULL; |
904 | 901 | ||
905 | return call_memop(q, vaddr, vb->planes[plane_no].mem_priv); | 902 | return call_memop(q, vaddr, vb->planes[plane_no].mem_priv); |
906 | 903 | ||
907 | } | 904 | } |
908 | EXPORT_SYMBOL_GPL(vb2_plane_vaddr); | 905 | EXPORT_SYMBOL_GPL(vb2_plane_vaddr); |
909 | 906 | ||
910 | /** | 907 | /** |
911 | * vb2_plane_cookie() - Return allocator specific cookie for the given plane | 908 | * vb2_plane_cookie() - Return allocator specific cookie for the given plane |
912 | * @vb: vb2_buffer to which the plane in question belongs to | 909 | * @vb: vb2_buffer to which the plane in question belongs to |
913 | * @plane_no: plane number for which the cookie is to be returned | 910 | * @plane_no: plane number for which the cookie is to be returned |
914 | * | 911 | * |
915 | * This function returns an allocator specific cookie for a given plane if | 912 | * This function returns an allocator specific cookie for a given plane if |
916 | * available, NULL otherwise. The allocator should provide some simple static | 913 | * available, NULL otherwise. The allocator should provide some simple static |
917 | * inline function, which would convert this cookie to the allocator specific | 914 | * inline function, which would convert this cookie to the allocator specific |
918 | * type that can be used directly by the driver to access the buffer. This can | 915 | * type that can be used directly by the driver to access the buffer. This can |
919 | * be for example physical address, pointer to scatter list or IOMMU mapping. | 916 | * be for example physical address, pointer to scatter list or IOMMU mapping. |
920 | */ | 917 | */ |
921 | void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) | 918 | void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) |
922 | { | 919 | { |
923 | struct vb2_queue *q = vb->vb2_queue; | 920 | struct vb2_queue *q = vb->vb2_queue; |
924 | 921 | ||
925 | if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) | 922 | if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) |
926 | return NULL; | 923 | return NULL; |
927 | 924 | ||
928 | return call_memop(q, cookie, vb->planes[plane_no].mem_priv); | 925 | return call_memop(q, cookie, vb->planes[plane_no].mem_priv); |
929 | } | 926 | } |
930 | EXPORT_SYMBOL_GPL(vb2_plane_cookie); | 927 | EXPORT_SYMBOL_GPL(vb2_plane_cookie); |
931 | 928 | ||
932 | /** | 929 | /** |
933 | * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished | 930 | * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished |
934 | * @vb: vb2_buffer returned from the driver | 931 | * @vb: vb2_buffer returned from the driver |
935 | * @state: either VB2_BUF_STATE_DONE if the operation finished successfully | 932 | * @state: either VB2_BUF_STATE_DONE if the operation finished successfully |
936 | * or VB2_BUF_STATE_ERROR if the operation finished with an error | 933 | * or VB2_BUF_STATE_ERROR if the operation finished with an error |
937 | * | 934 | * |
938 | * This function should be called by the driver after a hardware operation on | 935 | * This function should be called by the driver after a hardware operation on |
939 | * a buffer is finished and the buffer may be returned to userspace. The driver | 936 | * a buffer is finished and the buffer may be returned to userspace. The driver |
940 | * cannot use this buffer anymore until it is queued back to it by videobuf | 937 | * cannot use this buffer anymore until it is queued back to it by videobuf |
941 | * by the means of buf_queue callback. Only buffers previously queued to the | 938 | * by the means of buf_queue callback. Only buffers previously queued to the |
942 | * driver by buf_queue can be passed to this function. | 939 | * driver by buf_queue can be passed to this function. |
943 | */ | 940 | */ |
944 | void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) | 941 | void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) |
945 | { | 942 | { |
946 | struct vb2_queue *q = vb->vb2_queue; | 943 | struct vb2_queue *q = vb->vb2_queue; |
947 | unsigned long flags; | 944 | unsigned long flags; |
948 | unsigned int plane; | 945 | unsigned int plane; |
949 | 946 | ||
950 | if (vb->state != VB2_BUF_STATE_ACTIVE) | 947 | if (vb->state != VB2_BUF_STATE_ACTIVE) |
951 | return; | 948 | return; |
952 | 949 | ||
953 | if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR) | 950 | if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR) |
954 | return; | 951 | return; |
955 | 952 | ||
956 | dprintk(4, "Done processing on buffer %d, state: %d\n", | 953 | dprintk(4, "Done processing on buffer %d, state: %d\n", |
957 | vb->v4l2_buf.index, state); | 954 | vb->v4l2_buf.index, state); |
958 | 955 | ||
959 | /* sync buffers */ | 956 | /* sync buffers */ |
960 | for (plane = 0; plane < vb->num_planes; ++plane) | 957 | for (plane = 0; plane < vb->num_planes; ++plane) |
961 | call_memop(q, finish, vb->planes[plane].mem_priv); | 958 | call_memop(q, finish, vb->planes[plane].mem_priv); |
962 | 959 | ||
963 | /* Add the buffer to the done buffers list */ | 960 | /* Add the buffer to the done buffers list */ |
964 | spin_lock_irqsave(&q->done_lock, flags); | 961 | spin_lock_irqsave(&q->done_lock, flags); |
965 | vb->state = state; | 962 | vb->state = state; |
966 | list_add_tail(&vb->done_entry, &q->done_list); | 963 | list_add_tail(&vb->done_entry, &q->done_list); |
967 | atomic_dec(&q->queued_count); | 964 | atomic_dec(&q->queued_count); |
968 | spin_unlock_irqrestore(&q->done_lock, flags); | 965 | spin_unlock_irqrestore(&q->done_lock, flags); |
969 | 966 | ||
970 | /* Inform any processes that may be waiting for buffers */ | 967 | /* Inform any processes that may be waiting for buffers */ |
971 | wake_up(&q->done_wq); | 968 | wake_up(&q->done_wq); |
972 | } | 969 | } |
973 | EXPORT_SYMBOL_GPL(vb2_buffer_done); | 970 | EXPORT_SYMBOL_GPL(vb2_buffer_done); |
974 | 971 | ||
975 | /** | 972 | /** |
976 | * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a | 973 | * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a |
977 | * v4l2_buffer by the userspace. The caller has already verified that struct | 974 | * v4l2_buffer by the userspace. The caller has already verified that struct |
978 | * v4l2_buffer has a valid number of planes. | 975 | * v4l2_buffer has a valid number of planes. |
979 | */ | 976 | */ |
980 | static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b, | 977 | static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b, |
981 | struct v4l2_plane *v4l2_planes) | 978 | struct v4l2_plane *v4l2_planes) |
982 | { | 979 | { |
983 | unsigned int plane; | 980 | unsigned int plane; |
984 | 981 | ||
985 | if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { | 982 | if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { |
986 | /* Fill in driver-provided information for OUTPUT types */ | 983 | /* Fill in driver-provided information for OUTPUT types */ |
987 | if (V4L2_TYPE_IS_OUTPUT(b->type)) { | 984 | if (V4L2_TYPE_IS_OUTPUT(b->type)) { |
988 | /* | 985 | /* |
989 | * Will have to go up to b->length when API starts | 986 | * Will have to go up to b->length when API starts |
990 | * accepting variable number of planes. | 987 | * accepting variable number of planes. |
991 | */ | 988 | */ |
992 | for (plane = 0; plane < vb->num_planes; ++plane) { | 989 | for (plane = 0; plane < vb->num_planes; ++plane) { |
993 | v4l2_planes[plane].bytesused = | 990 | v4l2_planes[plane].bytesused = |
994 | b->m.planes[plane].bytesused; | 991 | b->m.planes[plane].bytesused; |
995 | v4l2_planes[plane].data_offset = | 992 | v4l2_planes[plane].data_offset = |
996 | b->m.planes[plane].data_offset; | 993 | b->m.planes[plane].data_offset; |
997 | } | 994 | } |
998 | } | 995 | } |
999 | 996 | ||
1000 | if (b->memory == V4L2_MEMORY_USERPTR) { | 997 | if (b->memory == V4L2_MEMORY_USERPTR) { |
1001 | for (plane = 0; plane < vb->num_planes; ++plane) { | 998 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1002 | v4l2_planes[plane].m.userptr = | 999 | v4l2_planes[plane].m.userptr = |
1003 | b->m.planes[plane].m.userptr; | 1000 | b->m.planes[plane].m.userptr; |
1004 | v4l2_planes[plane].length = | 1001 | v4l2_planes[plane].length = |
1005 | b->m.planes[plane].length; | 1002 | b->m.planes[plane].length; |
1006 | } | 1003 | } |
1007 | } | 1004 | } |
1008 | if (b->memory == V4L2_MEMORY_DMABUF) { | 1005 | if (b->memory == V4L2_MEMORY_DMABUF) { |
1009 | for (plane = 0; plane < vb->num_planes; ++plane) { | 1006 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1010 | v4l2_planes[plane].m.fd = | 1007 | v4l2_planes[plane].m.fd = |
1011 | b->m.planes[plane].m.fd; | 1008 | b->m.planes[plane].m.fd; |
1012 | v4l2_planes[plane].length = | 1009 | v4l2_planes[plane].length = |
1013 | b->m.planes[plane].length; | 1010 | b->m.planes[plane].length; |
1014 | v4l2_planes[plane].data_offset = | 1011 | v4l2_planes[plane].data_offset = |
1015 | b->m.planes[plane].data_offset; | 1012 | b->m.planes[plane].data_offset; |
1016 | } | 1013 | } |
1017 | } | 1014 | } |
1018 | } else { | 1015 | } else { |
1019 | /* | 1016 | /* |
1020 | * Single-planar buffers do not use planes array, | 1017 | * Single-planar buffers do not use planes array, |
1021 | * so fill in relevant v4l2_buffer struct fields instead. | 1018 | * so fill in relevant v4l2_buffer struct fields instead. |
1022 | * In videobuf we use our internal V4l2_planes struct for | 1019 | * In videobuf we use our internal V4l2_planes struct for |
1023 | * single-planar buffers as well, for simplicity. | 1020 | * single-planar buffers as well, for simplicity. |
1024 | */ | 1021 | */ |
1025 | if (V4L2_TYPE_IS_OUTPUT(b->type)) { | 1022 | if (V4L2_TYPE_IS_OUTPUT(b->type)) { |
1026 | v4l2_planes[0].bytesused = b->bytesused; | 1023 | v4l2_planes[0].bytesused = b->bytesused; |
1027 | v4l2_planes[0].data_offset = 0; | 1024 | v4l2_planes[0].data_offset = 0; |
1028 | } | 1025 | } |
1029 | 1026 | ||
1030 | if (b->memory == V4L2_MEMORY_USERPTR) { | 1027 | if (b->memory == V4L2_MEMORY_USERPTR) { |
1031 | v4l2_planes[0].m.userptr = b->m.userptr; | 1028 | v4l2_planes[0].m.userptr = b->m.userptr; |
1032 | v4l2_planes[0].length = b->length; | 1029 | v4l2_planes[0].length = b->length; |
1033 | } | 1030 | } |
1034 | 1031 | ||
1035 | if (b->memory == V4L2_MEMORY_DMABUF) { | 1032 | if (b->memory == V4L2_MEMORY_DMABUF) { |
1036 | v4l2_planes[0].m.fd = b->m.fd; | 1033 | v4l2_planes[0].m.fd = b->m.fd; |
1037 | v4l2_planes[0].length = b->length; | 1034 | v4l2_planes[0].length = b->length; |
1038 | v4l2_planes[0].data_offset = 0; | 1035 | v4l2_planes[0].data_offset = 0; |
1039 | } | 1036 | } |
1040 | 1037 | ||
1041 | } | 1038 | } |
1042 | 1039 | ||
1043 | /* Zero flags that the vb2 core handles */ | 1040 | /* Zero flags that the vb2 core handles */ |
1044 | vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS; | 1041 | vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS; |
1045 | if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) != | 1042 | if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) != |
1046 | V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) { | 1043 | V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) { |
1047 | /* | 1044 | /* |
1048 | * Non-COPY timestamps and non-OUTPUT queues will get | 1045 | * Non-COPY timestamps and non-OUTPUT queues will get |
1049 | * their timestamp and timestamp source flags from the | 1046 | * their timestamp and timestamp source flags from the |
1050 | * queue. | 1047 | * queue. |
1051 | */ | 1048 | */ |
1052 | vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; | 1049 | vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; |
1053 | } | 1050 | } |
1054 | 1051 | ||
1055 | if (V4L2_TYPE_IS_OUTPUT(b->type)) { | 1052 | if (V4L2_TYPE_IS_OUTPUT(b->type)) { |
1056 | /* | 1053 | /* |
1057 | * For output buffers mask out the timecode flag: | 1054 | * For output buffers mask out the timecode flag: |
1058 | * this will be handled later in vb2_internal_qbuf(). | 1055 | * this will be handled later in vb2_internal_qbuf(). |
1059 | * The 'field' is valid metadata for this output buffer | 1056 | * The 'field' is valid metadata for this output buffer |
1060 | * and so that needs to be copied here. | 1057 | * and so that needs to be copied here. |
1061 | */ | 1058 | */ |
1062 | vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE; | 1059 | vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE; |
1063 | vb->v4l2_buf.field = b->field; | 1060 | vb->v4l2_buf.field = b->field; |
1064 | } else { | 1061 | } else { |
1065 | /* Zero any output buffer flags as this is a capture buffer */ | 1062 | /* Zero any output buffer flags as this is a capture buffer */ |
1066 | vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS; | 1063 | vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS; |
1067 | } | 1064 | } |
1068 | } | 1065 | } |
1069 | 1066 | ||
1070 | /** | 1067 | /** |
1071 | * __qbuf_userptr() - handle qbuf of a USERPTR buffer | 1068 | * __qbuf_userptr() - handle qbuf of a USERPTR buffer |
1072 | */ | 1069 | */ |
1073 | static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b) | 1070 | static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b) |
1074 | { | 1071 | { |
1075 | struct v4l2_plane planes[VIDEO_MAX_PLANES]; | 1072 | struct v4l2_plane planes[VIDEO_MAX_PLANES]; |
1076 | struct vb2_queue *q = vb->vb2_queue; | 1073 | struct vb2_queue *q = vb->vb2_queue; |
1077 | void *mem_priv; | 1074 | void *mem_priv; |
1078 | unsigned int plane; | 1075 | unsigned int plane; |
1079 | int ret; | 1076 | int ret; |
1080 | int write = !V4L2_TYPE_IS_OUTPUT(q->type); | 1077 | int write = !V4L2_TYPE_IS_OUTPUT(q->type); |
1081 | 1078 | ||
1082 | /* Copy relevant information provided by the userspace */ | 1079 | /* Copy relevant information provided by the userspace */ |
1083 | __fill_vb2_buffer(vb, b, planes); | 1080 | __fill_vb2_buffer(vb, b, planes); |
1084 | 1081 | ||
1085 | for (plane = 0; plane < vb->num_planes; ++plane) { | 1082 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1086 | /* Skip the plane if already verified */ | 1083 | /* Skip the plane if already verified */ |
1087 | if (vb->v4l2_planes[plane].m.userptr && | 1084 | if (vb->v4l2_planes[plane].m.userptr && |
1088 | vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr | 1085 | vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr |
1089 | && vb->v4l2_planes[plane].length == planes[plane].length) | 1086 | && vb->v4l2_planes[plane].length == planes[plane].length) |
1090 | continue; | 1087 | continue; |
1091 | 1088 | ||
1092 | dprintk(3, "qbuf: userspace address for plane %d changed, " | 1089 | dprintk(3, "qbuf: userspace address for plane %d changed, " |
1093 | "reacquiring memory\n", plane); | 1090 | "reacquiring memory\n", plane); |
1094 | 1091 | ||
1095 | /* Check if the provided plane buffer is large enough */ | 1092 | /* Check if the provided plane buffer is large enough */ |
1096 | if (planes[plane].length < q->plane_sizes[plane]) { | 1093 | if (planes[plane].length < q->plane_sizes[plane]) { |
1097 | dprintk(1, "qbuf: provided buffer size %u is less than " | 1094 | dprintk(1, "qbuf: provided buffer size %u is less than " |
1098 | "setup size %u for plane %d\n", | 1095 | "setup size %u for plane %d\n", |
1099 | planes[plane].length, | 1096 | planes[plane].length, |
1100 | q->plane_sizes[plane], plane); | 1097 | q->plane_sizes[plane], plane); |
1101 | ret = -EINVAL; | 1098 | ret = -EINVAL; |
1102 | goto err; | 1099 | goto err; |
1103 | } | 1100 | } |
1104 | 1101 | ||
1105 | /* Release previously acquired memory if present */ | 1102 | /* Release previously acquired memory if present */ |
1106 | if (vb->planes[plane].mem_priv) | 1103 | if (vb->planes[plane].mem_priv) |
1107 | call_memop(q, put_userptr, vb->planes[plane].mem_priv); | 1104 | call_memop(q, put_userptr, vb->planes[plane].mem_priv); |
1108 | 1105 | ||
1109 | vb->planes[plane].mem_priv = NULL; | 1106 | vb->planes[plane].mem_priv = NULL; |
1110 | vb->v4l2_planes[plane].m.userptr = 0; | 1107 | vb->v4l2_planes[plane].m.userptr = 0; |
1111 | vb->v4l2_planes[plane].length = 0; | 1108 | vb->v4l2_planes[plane].length = 0; |
1112 | 1109 | ||
1113 | /* Acquire each plane's memory */ | 1110 | /* Acquire each plane's memory */ |
1114 | mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane], | 1111 | mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane], |
1115 | planes[plane].m.userptr, | 1112 | planes[plane].m.userptr, |
1116 | planes[plane].length, write); | 1113 | planes[plane].length, write); |
1117 | if (IS_ERR_OR_NULL(mem_priv)) { | 1114 | if (IS_ERR_OR_NULL(mem_priv)) { |
1118 | dprintk(1, "qbuf: failed acquiring userspace " | 1115 | dprintk(1, "qbuf: failed acquiring userspace " |
1119 | "memory for plane %d\n", plane); | 1116 | "memory for plane %d\n", plane); |
1120 | ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL; | 1117 | ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL; |
1121 | goto err; | 1118 | goto err; |
1122 | } | 1119 | } |
1123 | vb->planes[plane].mem_priv = mem_priv; | 1120 | vb->planes[plane].mem_priv = mem_priv; |
1124 | } | 1121 | } |
1125 | 1122 | ||
1126 | /* | 1123 | /* |
1127 | * Call driver-specific initialization on the newly acquired buffer, | 1124 | * Call driver-specific initialization on the newly acquired buffer, |
1128 | * if provided. | 1125 | * if provided. |
1129 | */ | 1126 | */ |
1130 | ret = call_qop(q, buf_init, vb); | 1127 | ret = call_qop(q, buf_init, vb); |
1131 | if (ret) { | 1128 | if (ret) { |
1132 | dprintk(1, "qbuf: buffer initialization failed\n"); | 1129 | dprintk(1, "qbuf: buffer initialization failed\n"); |
1133 | goto err; | 1130 | goto err; |
1134 | } | 1131 | } |
1135 | 1132 | ||
1136 | /* | 1133 | /* |
1137 | * Now that everything is in order, copy relevant information | 1134 | * Now that everything is in order, copy relevant information |
1138 | * provided by userspace. | 1135 | * provided by userspace. |
1139 | */ | 1136 | */ |
1140 | for (plane = 0; plane < vb->num_planes; ++plane) | 1137 | for (plane = 0; plane < vb->num_planes; ++plane) |
1141 | vb->v4l2_planes[plane] = planes[plane]; | 1138 | vb->v4l2_planes[plane] = planes[plane]; |
1142 | 1139 | ||
1143 | return 0; | 1140 | return 0; |
1144 | err: | 1141 | err: |
1145 | /* In case of errors, release planes that were already acquired */ | 1142 | /* In case of errors, release planes that were already acquired */ |
1146 | for (plane = 0; plane < vb->num_planes; ++plane) { | 1143 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1147 | if (vb->planes[plane].mem_priv) | 1144 | if (vb->planes[plane].mem_priv) |
1148 | call_memop(q, put_userptr, vb->planes[plane].mem_priv); | 1145 | call_memop(q, put_userptr, vb->planes[plane].mem_priv); |
1149 | vb->planes[plane].mem_priv = NULL; | 1146 | vb->planes[plane].mem_priv = NULL; |
1150 | vb->v4l2_planes[plane].m.userptr = 0; | 1147 | vb->v4l2_planes[plane].m.userptr = 0; |
1151 | vb->v4l2_planes[plane].length = 0; | 1148 | vb->v4l2_planes[plane].length = 0; |
1152 | } | 1149 | } |
1153 | 1150 | ||
1154 | return ret; | 1151 | return ret; |
1155 | } | 1152 | } |
1156 | 1153 | ||
1157 | /** | 1154 | /** |
1158 | * __qbuf_mmap() - handle qbuf of an MMAP buffer | 1155 | * __qbuf_mmap() - handle qbuf of an MMAP buffer |
1159 | */ | 1156 | */ |
1160 | static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b) | 1157 | static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b) |
1161 | { | 1158 | { |
1162 | __fill_vb2_buffer(vb, b, vb->v4l2_planes); | 1159 | __fill_vb2_buffer(vb, b, vb->v4l2_planes); |
1163 | return 0; | 1160 | return 0; |
1164 | } | 1161 | } |
1165 | 1162 | ||
1166 | /** | 1163 | /** |
1167 | * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer | 1164 | * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer |
1168 | */ | 1165 | */ |
1169 | static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b) | 1166 | static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b) |
1170 | { | 1167 | { |
1171 | struct v4l2_plane planes[VIDEO_MAX_PLANES]; | 1168 | struct v4l2_plane planes[VIDEO_MAX_PLANES]; |
1172 | struct vb2_queue *q = vb->vb2_queue; | 1169 | struct vb2_queue *q = vb->vb2_queue; |
1173 | void *mem_priv; | 1170 | void *mem_priv; |
1174 | unsigned int plane; | 1171 | unsigned int plane; |
1175 | int ret; | 1172 | int ret; |
1176 | int write = !V4L2_TYPE_IS_OUTPUT(q->type); | 1173 | int write = !V4L2_TYPE_IS_OUTPUT(q->type); |
1177 | 1174 | ||
1178 | /* Copy relevant information provided by the userspace */ | 1175 | /* Copy relevant information provided by the userspace */ |
1179 | __fill_vb2_buffer(vb, b, planes); | 1176 | __fill_vb2_buffer(vb, b, planes); |
1180 | 1177 | ||
1181 | for (plane = 0; plane < vb->num_planes; ++plane) { | 1178 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1182 | struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd); | 1179 | struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd); |
1183 | 1180 | ||
1184 | if (IS_ERR_OR_NULL(dbuf)) { | 1181 | if (IS_ERR_OR_NULL(dbuf)) { |
1185 | dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n", | 1182 | dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n", |
1186 | plane); | 1183 | plane); |
1187 | ret = -EINVAL; | 1184 | ret = -EINVAL; |
1188 | goto err; | 1185 | goto err; |
1189 | } | 1186 | } |
1190 | 1187 | ||
1191 | /* use DMABUF size if length is not provided */ | 1188 | /* use DMABUF size if length is not provided */ |
1192 | if (planes[plane].length == 0) | 1189 | if (planes[plane].length == 0) |
1193 | planes[plane].length = dbuf->size; | 1190 | planes[plane].length = dbuf->size; |
1194 | 1191 | ||
1195 | if (planes[plane].length < planes[plane].data_offset + | 1192 | if (planes[plane].length < planes[plane].data_offset + |
1196 | q->plane_sizes[plane]) { | 1193 | q->plane_sizes[plane]) { |
1197 | dprintk(1, "qbuf: invalid dmabuf length for plane %d\n", | 1194 | dprintk(1, "qbuf: invalid dmabuf length for plane %d\n", |
1198 | plane); | 1195 | plane); |
1199 | ret = -EINVAL; | 1196 | ret = -EINVAL; |
1200 | goto err; | 1197 | goto err; |
1201 | } | 1198 | } |
1202 | 1199 | ||
1203 | /* Skip the plane if already verified */ | 1200 | /* Skip the plane if already verified */ |
1204 | if (dbuf == vb->planes[plane].dbuf && | 1201 | if (dbuf == vb->planes[plane].dbuf && |
1205 | vb->v4l2_planes[plane].length == planes[plane].length) { | 1202 | vb->v4l2_planes[plane].length == planes[plane].length) { |
1206 | dma_buf_put(dbuf); | 1203 | dma_buf_put(dbuf); |
1207 | continue; | 1204 | continue; |
1208 | } | 1205 | } |
1209 | 1206 | ||
1210 | dprintk(1, "qbuf: buffer for plane %d changed\n", plane); | 1207 | dprintk(1, "qbuf: buffer for plane %d changed\n", plane); |
1211 | 1208 | ||
1212 | /* Release previously acquired memory if present */ | 1209 | /* Release previously acquired memory if present */ |
1213 | __vb2_plane_dmabuf_put(q, &vb->planes[plane]); | 1210 | __vb2_plane_dmabuf_put(q, &vb->planes[plane]); |
1214 | memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane)); | 1211 | memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane)); |
1215 | 1212 | ||
1216 | /* Acquire each plane's memory */ | 1213 | /* Acquire each plane's memory */ |
1217 | mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane], | 1214 | mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane], |
1218 | dbuf, planes[plane].length, write); | 1215 | dbuf, planes[plane].length, write); |
1219 | if (IS_ERR(mem_priv)) { | 1216 | if (IS_ERR(mem_priv)) { |
1220 | dprintk(1, "qbuf: failed to attach dmabuf\n"); | 1217 | dprintk(1, "qbuf: failed to attach dmabuf\n"); |
1221 | ret = PTR_ERR(mem_priv); | 1218 | ret = PTR_ERR(mem_priv); |
1222 | dma_buf_put(dbuf); | 1219 | dma_buf_put(dbuf); |
1223 | goto err; | 1220 | goto err; |
1224 | } | 1221 | } |
1225 | 1222 | ||
1226 | vb->planes[plane].dbuf = dbuf; | 1223 | vb->planes[plane].dbuf = dbuf; |
1227 | vb->planes[plane].mem_priv = mem_priv; | 1224 | vb->planes[plane].mem_priv = mem_priv; |
1228 | } | 1225 | } |
1229 | 1226 | ||
1230 | /* TODO: This pins the buffer(s) with dma_buf_map_attachment()).. but | 1227 | /* TODO: This pins the buffer(s) with dma_buf_map_attachment()).. but |
1231 | * really we want to do this just before the DMA, not while queueing | 1228 | * really we want to do this just before the DMA, not while queueing |
1232 | * the buffer(s).. | 1229 | * the buffer(s).. |
1233 | */ | 1230 | */ |
1234 | for (plane = 0; plane < vb->num_planes; ++plane) { | 1231 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1235 | ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv); | 1232 | ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv); |
1236 | if (ret) { | 1233 | if (ret) { |
1237 | dprintk(1, "qbuf: failed to map dmabuf for plane %d\n", | 1234 | dprintk(1, "qbuf: failed to map dmabuf for plane %d\n", |
1238 | plane); | 1235 | plane); |
1239 | goto err; | 1236 | goto err; |
1240 | } | 1237 | } |
1241 | vb->planes[plane].dbuf_mapped = 1; | 1238 | vb->planes[plane].dbuf_mapped = 1; |
1242 | } | 1239 | } |
1243 | 1240 | ||
1244 | /* | 1241 | /* |
1245 | * Call driver-specific initialization on the newly acquired buffer, | 1242 | * Call driver-specific initialization on the newly acquired buffer, |
1246 | * if provided. | 1243 | * if provided. |
1247 | */ | 1244 | */ |
1248 | ret = call_qop(q, buf_init, vb); | 1245 | ret = call_qop(q, buf_init, vb); |
1249 | if (ret) { | 1246 | if (ret) { |
1250 | dprintk(1, "qbuf: buffer initialization failed\n"); | 1247 | dprintk(1, "qbuf: buffer initialization failed\n"); |
1251 | goto err; | 1248 | goto err; |
1252 | } | 1249 | } |
1253 | 1250 | ||
1254 | /* | 1251 | /* |
1255 | * Now that everything is in order, copy relevant information | 1252 | * Now that everything is in order, copy relevant information |
1256 | * provided by userspace. | 1253 | * provided by userspace. |
1257 | */ | 1254 | */ |
1258 | for (plane = 0; plane < vb->num_planes; ++plane) | 1255 | for (plane = 0; plane < vb->num_planes; ++plane) |
1259 | vb->v4l2_planes[plane] = planes[plane]; | 1256 | vb->v4l2_planes[plane] = planes[plane]; |
1260 | 1257 | ||
1261 | return 0; | 1258 | return 0; |
1262 | err: | 1259 | err: |
1263 | /* In case of errors, release planes that were already acquired */ | 1260 | /* In case of errors, release planes that were already acquired */ |
1264 | __vb2_buf_dmabuf_put(vb); | 1261 | __vb2_buf_dmabuf_put(vb); |
1265 | 1262 | ||
1266 | return ret; | 1263 | return ret; |
1267 | } | 1264 | } |
1268 | 1265 | ||
1269 | /** | 1266 | /** |
1270 | * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing | 1267 | * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing |
1271 | */ | 1268 | */ |
1272 | static void __enqueue_in_driver(struct vb2_buffer *vb) | 1269 | static void __enqueue_in_driver(struct vb2_buffer *vb) |
1273 | { | 1270 | { |
1274 | struct vb2_queue *q = vb->vb2_queue; | 1271 | struct vb2_queue *q = vb->vb2_queue; |
1275 | unsigned int plane; | 1272 | unsigned int plane; |
1276 | 1273 | ||
1277 | vb->state = VB2_BUF_STATE_ACTIVE; | 1274 | vb->state = VB2_BUF_STATE_ACTIVE; |
1278 | atomic_inc(&q->queued_count); | 1275 | atomic_inc(&q->queued_count); |
1279 | 1276 | ||
1280 | /* sync buffers */ | 1277 | /* sync buffers */ |
1281 | for (plane = 0; plane < vb->num_planes; ++plane) | 1278 | for (plane = 0; plane < vb->num_planes; ++plane) |
1282 | call_memop(q, prepare, vb->planes[plane].mem_priv); | 1279 | call_memop(q, prepare, vb->planes[plane].mem_priv); |
1283 | 1280 | ||
1284 | q->ops->buf_queue(vb); | 1281 | q->ops->buf_queue(vb); |
1285 | } | 1282 | } |
1286 | 1283 | ||
/**
 * __buf_prepare() - verify and prepare a buffer before it can be queued
 * @vb:	the buffer to prepare
 * @b:	the v4l2_buffer supplied by userspace
 *
 * Verifies the plane lengths, dispatches on the queue's memory type
 * (MMAP/USERPTR/DMABUF) and then calls the driver's buf_prepare op.
 * On success the buffer ends up in PREPARED state, otherwise it is
 * returned to DEQUEUED.
 */
static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	struct rw_semaphore *mmap_sem;
	int ret;

	ret = __verify_length(vb, b);
	if (ret < 0) {
		dprintk(1, "%s(): plane parameters verification failed: %d\n",
			__func__, ret);
		return ret;
	}

	/* Reset per-frame metadata before (re)filling it */
	vb->state = VB2_BUF_STATE_PREPARING;
	vb->v4l2_buf.timestamp.tv_sec = 0;
	vb->v4l2_buf.timestamp.tv_usec = 0;
	vb->v4l2_buf.sequence = 0;

	switch (q->memory) {
	case V4L2_MEMORY_MMAP:
		ret = __qbuf_mmap(vb, b);
		break;
	case V4L2_MEMORY_USERPTR:
		/*
		 * In case of user pointer buffers vb2 allocators need to get
		 * direct access to userspace pages. This requires getting
		 * the mmap semaphore for read access in the current process
		 * structure. The same semaphore is taken before calling mmap
		 * operation, while both qbuf/prepare_buf and mmap are called
		 * by the driver or v4l2 core with the driver's lock held.
		 * To avoid an AB-BA deadlock (mmap_sem then driver's lock in
		 * mmap and driver's lock then mmap_sem in qbuf/prepare_buf),
		 * the videobuf2 core releases the driver's lock, takes
		 * mmap_sem and then takes the driver's lock again.
		 */
		mmap_sem = &current->mm->mmap_sem;
		call_qop(q, wait_prepare, q);
		down_read(mmap_sem);
		call_qop(q, wait_finish, q);

		ret = __qbuf_userptr(vb, b);

		up_read(mmap_sem);
		break;
	case V4L2_MEMORY_DMABUF:
		ret = __qbuf_dmabuf(vb, b);
		break;
	default:
		WARN(1, "Invalid queue type\n");
		ret = -EINVAL;
	}

	if (!ret)
		ret = call_qop(q, buf_prepare, vb);
	if (ret)
		dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
	vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;

	return ret;
}
1347 | 1344 | ||
1348 | static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b, | 1345 | static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b, |
1349 | const char *opname) | 1346 | const char *opname) |
1350 | { | 1347 | { |
1351 | if (b->type != q->type) { | 1348 | if (b->type != q->type) { |
1352 | dprintk(1, "%s(): invalid buffer type\n", opname); | 1349 | dprintk(1, "%s(): invalid buffer type\n", opname); |
1353 | return -EINVAL; | 1350 | return -EINVAL; |
1354 | } | 1351 | } |
1355 | 1352 | ||
1356 | if (b->index >= q->num_buffers) { | 1353 | if (b->index >= q->num_buffers) { |
1357 | dprintk(1, "%s(): buffer index out of range\n", opname); | 1354 | dprintk(1, "%s(): buffer index out of range\n", opname); |
1358 | return -EINVAL; | 1355 | return -EINVAL; |
1359 | } | 1356 | } |
1360 | 1357 | ||
1361 | if (q->bufs[b->index] == NULL) { | 1358 | if (q->bufs[b->index] == NULL) { |
1362 | /* Should never happen */ | 1359 | /* Should never happen */ |
1363 | dprintk(1, "%s(): buffer is NULL\n", opname); | 1360 | dprintk(1, "%s(): buffer is NULL\n", opname); |
1364 | return -EINVAL; | 1361 | return -EINVAL; |
1365 | } | 1362 | } |
1366 | 1363 | ||
1367 | if (b->memory != q->memory) { | 1364 | if (b->memory != q->memory) { |
1368 | dprintk(1, "%s(): invalid memory type\n", opname); | 1365 | dprintk(1, "%s(): invalid memory type\n", opname); |
1369 | return -EINVAL; | 1366 | return -EINVAL; |
1370 | } | 1367 | } |
1371 | 1368 | ||
1372 | return __verify_planes_array(q->bufs[b->index], b); | 1369 | return __verify_planes_array(q->bufs[b->index], b); |
1373 | } | 1370 | } |
1374 | 1371 | ||
1375 | /** | 1372 | /** |
1376 | * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel | 1373 | * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel |
1377 | * @q: videobuf2 queue | 1374 | * @q: videobuf2 queue |
1378 | * @b: buffer structure passed from userspace to vidioc_prepare_buf | 1375 | * @b: buffer structure passed from userspace to vidioc_prepare_buf |
1379 | * handler in driver | 1376 | * handler in driver |
1380 | * | 1377 | * |
1381 | * Should be called from vidioc_prepare_buf ioctl handler of a driver. | 1378 | * Should be called from vidioc_prepare_buf ioctl handler of a driver. |
1382 | * This function: | 1379 | * This function: |
1383 | * 1) verifies the passed buffer, | 1380 | * 1) verifies the passed buffer, |
1384 | * 2) calls buf_prepare callback in the driver (if provided), in which | 1381 | * 2) calls buf_prepare callback in the driver (if provided), in which |
1385 | * driver-specific buffer initialization can be performed, | 1382 | * driver-specific buffer initialization can be performed, |
1386 | * | 1383 | * |
1387 | * The return values from this function are intended to be directly returned | 1384 | * The return values from this function are intended to be directly returned |
1388 | * from vidioc_prepare_buf handler in driver. | 1385 | * from vidioc_prepare_buf handler in driver. |
1389 | */ | 1386 | */ |
1390 | int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b) | 1387 | int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b) |
1391 | { | 1388 | { |
1392 | struct vb2_buffer *vb; | 1389 | struct vb2_buffer *vb; |
1393 | int ret; | 1390 | int ret; |
1394 | 1391 | ||
1395 | if (q->fileio) { | 1392 | if (q->fileio) { |
1396 | dprintk(1, "%s(): file io in progress\n", __func__); | 1393 | dprintk(1, "%s(): file io in progress\n", __func__); |
1397 | return -EBUSY; | 1394 | return -EBUSY; |
1398 | } | 1395 | } |
1399 | 1396 | ||
1400 | ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf"); | 1397 | ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf"); |
1401 | if (ret) | 1398 | if (ret) |
1402 | return ret; | 1399 | return ret; |
1403 | 1400 | ||
1404 | vb = q->bufs[b->index]; | 1401 | vb = q->bufs[b->index]; |
1405 | if (vb->state != VB2_BUF_STATE_DEQUEUED) { | 1402 | if (vb->state != VB2_BUF_STATE_DEQUEUED) { |
1406 | dprintk(1, "%s(): invalid buffer state %d\n", __func__, | 1403 | dprintk(1, "%s(): invalid buffer state %d\n", __func__, |
1407 | vb->state); | 1404 | vb->state); |
1408 | return -EINVAL; | 1405 | return -EINVAL; |
1409 | } | 1406 | } |
1410 | 1407 | ||
1411 | ret = __buf_prepare(vb, b); | 1408 | ret = __buf_prepare(vb, b); |
1412 | if (!ret) { | 1409 | if (!ret) { |
1413 | /* Fill buffer information for the userspace */ | 1410 | /* Fill buffer information for the userspace */ |
1414 | __fill_v4l2_buffer(vb, b); | 1411 | __fill_v4l2_buffer(vb, b); |
1415 | 1412 | ||
1416 | dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index); | 1413 | dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index); |
1417 | } | 1414 | } |
1418 | return ret; | 1415 | return ret; |
1419 | } | 1416 | } |
1420 | EXPORT_SYMBOL_GPL(vb2_prepare_buf); | 1417 | EXPORT_SYMBOL_GPL(vb2_prepare_buf); |
1421 | 1418 | ||
1422 | /** | 1419 | /** |
1423 | * vb2_start_streaming() - Attempt to start streaming. | 1420 | * vb2_start_streaming() - Attempt to start streaming. |
1424 | * @q: videobuf2 queue | 1421 | * @q: videobuf2 queue |
1425 | * | 1422 | * |
1426 | * If there are not enough buffers, then retry_start_streaming is set to | 1423 | * If there are not enough buffers, then retry_start_streaming is set to |
1427 | * 1 and 0 is returned. The next time a buffer is queued and | 1424 | * 1 and 0 is returned. The next time a buffer is queued and |
1428 | * retry_start_streaming is 1, this function will be called again to | 1425 | * retry_start_streaming is 1, this function will be called again to |
1429 | * retry starting the DMA engine. | 1426 | * retry starting the DMA engine. |
1430 | */ | 1427 | */ |
1431 | static int vb2_start_streaming(struct vb2_queue *q) | 1428 | static int vb2_start_streaming(struct vb2_queue *q) |
1432 | { | 1429 | { |
1433 | int ret; | 1430 | int ret; |
1434 | 1431 | ||
1435 | /* Tell the driver to start streaming */ | 1432 | /* Tell the driver to start streaming */ |
1436 | ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count)); | 1433 | ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count)); |
1437 | 1434 | ||
1438 | /* | 1435 | /* |
1439 | * If there are not enough buffers queued to start streaming, then | 1436 | * If there are not enough buffers queued to start streaming, then |
1440 | * the start_streaming operation will return -ENOBUFS and you have to | 1437 | * the start_streaming operation will return -ENOBUFS and you have to |
1441 | * retry when the next buffer is queued. | 1438 | * retry when the next buffer is queued. |
1442 | */ | 1439 | */ |
1443 | if (ret == -ENOBUFS) { | 1440 | if (ret == -ENOBUFS) { |
1444 | dprintk(1, "qbuf: not enough buffers, retry when more buffers are queued.\n"); | 1441 | dprintk(1, "qbuf: not enough buffers, retry when more buffers are queued.\n"); |
1445 | q->retry_start_streaming = 1; | 1442 | q->retry_start_streaming = 1; |
1446 | return 0; | 1443 | return 0; |
1447 | } | 1444 | } |
1448 | if (ret) | 1445 | if (ret) |
1449 | dprintk(1, "qbuf: driver refused to start streaming\n"); | 1446 | dprintk(1, "qbuf: driver refused to start streaming\n"); |
1450 | else | 1447 | else |
1451 | q->retry_start_streaming = 0; | 1448 | q->retry_start_streaming = 0; |
1452 | return ret; | 1449 | return ret; |
1453 | } | 1450 | } |
1454 | 1451 | ||
1455 | static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b) | 1452 | static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b) |
1456 | { | 1453 | { |
1457 | int ret = vb2_queue_or_prepare_buf(q, b, "qbuf"); | 1454 | int ret = vb2_queue_or_prepare_buf(q, b, "qbuf"); |
1458 | struct vb2_buffer *vb; | 1455 | struct vb2_buffer *vb; |
1459 | 1456 | ||
1460 | if (ret) | 1457 | if (ret) |
1461 | return ret; | 1458 | return ret; |
1462 | 1459 | ||
1463 | vb = q->bufs[b->index]; | 1460 | vb = q->bufs[b->index]; |
1464 | if (vb->state != VB2_BUF_STATE_DEQUEUED) { | 1461 | if (vb->state != VB2_BUF_STATE_DEQUEUED) { |
1465 | dprintk(1, "%s(): invalid buffer state %d\n", __func__, | 1462 | dprintk(1, "%s(): invalid buffer state %d\n", __func__, |
1466 | vb->state); | 1463 | vb->state); |
1467 | return -EINVAL; | 1464 | return -EINVAL; |
1468 | } | 1465 | } |
1469 | 1466 | ||
1470 | switch (vb->state) { | 1467 | switch (vb->state) { |
1471 | case VB2_BUF_STATE_DEQUEUED: | 1468 | case VB2_BUF_STATE_DEQUEUED: |
1472 | ret = __buf_prepare(vb, b); | 1469 | ret = __buf_prepare(vb, b); |
1473 | if (ret) | 1470 | if (ret) |
1474 | return ret; | 1471 | return ret; |
1475 | break; | 1472 | break; |
1476 | case VB2_BUF_STATE_PREPARED: | 1473 | case VB2_BUF_STATE_PREPARED: |
1477 | break; | 1474 | break; |
1478 | case VB2_BUF_STATE_PREPARING: | 1475 | case VB2_BUF_STATE_PREPARING: |
1479 | dprintk(1, "qbuf: buffer still being prepared\n"); | 1476 | dprintk(1, "qbuf: buffer still being prepared\n"); |
1480 | return -EINVAL; | 1477 | return -EINVAL; |
1481 | default: | 1478 | default: |
1482 | dprintk(1, "qbuf: buffer already in use\n"); | 1479 | dprintk(1, "qbuf: buffer already in use\n"); |
1483 | return -EINVAL; | 1480 | return -EINVAL; |
1484 | } | 1481 | } |
1485 | 1482 | ||
1486 | /* | 1483 | /* |
1487 | * Add to the queued buffers list, a buffer will stay on it until | 1484 | * Add to the queued buffers list, a buffer will stay on it until |
1488 | * dequeued in dqbuf. | 1485 | * dequeued in dqbuf. |
1489 | */ | 1486 | */ |
1490 | list_add_tail(&vb->queued_entry, &q->queued_list); | 1487 | list_add_tail(&vb->queued_entry, &q->queued_list); |
1491 | q->waiting_for_buffers = false; | 1488 | q->waiting_for_buffers = false; |
1492 | vb->state = VB2_BUF_STATE_QUEUED; | 1489 | vb->state = VB2_BUF_STATE_QUEUED; |
1493 | if (V4L2_TYPE_IS_OUTPUT(q->type)) { | 1490 | if (V4L2_TYPE_IS_OUTPUT(q->type)) { |
1494 | /* | 1491 | /* |
1495 | * For output buffers copy the timestamp if needed, | 1492 | * For output buffers copy the timestamp if needed, |
1496 | * and the timecode field and flag if needed. | 1493 | * and the timecode field and flag if needed. |
1497 | */ | 1494 | */ |
1498 | if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == | 1495 | if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == |
1499 | V4L2_BUF_FLAG_TIMESTAMP_COPY) | 1496 | V4L2_BUF_FLAG_TIMESTAMP_COPY) |
1500 | vb->v4l2_buf.timestamp = b->timestamp; | 1497 | vb->v4l2_buf.timestamp = b->timestamp; |
1501 | vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE; | 1498 | vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE; |
1502 | if (b->flags & V4L2_BUF_FLAG_TIMECODE) | 1499 | if (b->flags & V4L2_BUF_FLAG_TIMECODE) |
1503 | vb->v4l2_buf.timecode = b->timecode; | 1500 | vb->v4l2_buf.timecode = b->timecode; |
1504 | } | 1501 | } |
1505 | 1502 | ||
1506 | /* | 1503 | /* |
1507 | * If already streaming, give the buffer to driver for processing. | 1504 | * If already streaming, give the buffer to driver for processing. |
1508 | * If not, the buffer will be given to driver on next streamon. | 1505 | * If not, the buffer will be given to driver on next streamon. |
1509 | */ | 1506 | */ |
1510 | if (q->streaming) | 1507 | if (q->streaming) |
1511 | __enqueue_in_driver(vb); | 1508 | __enqueue_in_driver(vb); |
1512 | 1509 | ||
1513 | /* Fill buffer information for the userspace */ | 1510 | /* Fill buffer information for the userspace */ |
1514 | __fill_v4l2_buffer(vb, b); | 1511 | __fill_v4l2_buffer(vb, b); |
1515 | 1512 | ||
1516 | if (q->retry_start_streaming) { | 1513 | if (q->retry_start_streaming) { |
1517 | ret = vb2_start_streaming(q); | 1514 | ret = vb2_start_streaming(q); |
1518 | if (ret) | 1515 | if (ret) |
1519 | return ret; | 1516 | return ret; |
1520 | } | 1517 | } |
1521 | 1518 | ||
1522 | dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index); | 1519 | dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index); |
1523 | return 0; | 1520 | return 0; |
1524 | } | 1521 | } |
1525 | 1522 | ||
1526 | /** | 1523 | /** |
1527 | * vb2_qbuf() - Queue a buffer from userspace | 1524 | * vb2_qbuf() - Queue a buffer from userspace |
1528 | * @q: videobuf2 queue | 1525 | * @q: videobuf2 queue |
1529 | * @b: buffer structure passed from userspace to vidioc_qbuf handler | 1526 | * @b: buffer structure passed from userspace to vidioc_qbuf handler |
1530 | * in driver | 1527 | * in driver |
1531 | * | 1528 | * |
1532 | * Should be called from vidioc_qbuf ioctl handler of a driver. | 1529 | * Should be called from vidioc_qbuf ioctl handler of a driver. |
1533 | * This function: | 1530 | * This function: |
1534 | * 1) verifies the passed buffer, | 1531 | * 1) verifies the passed buffer, |
1535 | * 2) if necessary, calls buf_prepare callback in the driver (if provided), in | 1532 | * 2) if necessary, calls buf_prepare callback in the driver (if provided), in |
1536 | * which driver-specific buffer initialization can be performed, | 1533 | * which driver-specific buffer initialization can be performed, |
1537 | * 3) if streaming is on, queues the buffer in driver by the means of buf_queue | 1534 | * 3) if streaming is on, queues the buffer in driver by the means of buf_queue |
1538 | * callback for processing. | 1535 | * callback for processing. |
1539 | * | 1536 | * |
1540 | * The return values from this function are intended to be directly returned | 1537 | * The return values from this function are intended to be directly returned |
1541 | * from vidioc_qbuf handler in driver. | 1538 | * from vidioc_qbuf handler in driver. |
1542 | */ | 1539 | */ |
1543 | int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b) | 1540 | int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b) |
1544 | { | 1541 | { |
1545 | if (q->fileio) { | 1542 | if (q->fileio) { |
1546 | dprintk(1, "%s(): file io in progress\n", __func__); | 1543 | dprintk(1, "%s(): file io in progress\n", __func__); |
1547 | return -EBUSY; | 1544 | return -EBUSY; |
1548 | } | 1545 | } |
1549 | 1546 | ||
1550 | return vb2_internal_qbuf(q, b); | 1547 | return vb2_internal_qbuf(q, b); |
1551 | } | 1548 | } |
1552 | EXPORT_SYMBOL_GPL(vb2_qbuf); | 1549 | EXPORT_SYMBOL_GPL(vb2_qbuf); |
1553 | 1550 | ||
1554 | /** | 1551 | /** |
1555 | * __vb2_wait_for_done_vb() - wait for a buffer to become available | 1552 | * __vb2_wait_for_done_vb() - wait for a buffer to become available |
1556 | * for dequeuing | 1553 | * for dequeuing |
1557 | * | 1554 | * |
1558 | * Will sleep if required for nonblocking == false. | 1555 | * Will sleep if required for nonblocking == false. |
1559 | */ | 1556 | */ |
/*
 * Loop until a finished buffer appears on done_list or streaming stops.
 * Entered with the driver's lock held; the lock is dropped around the
 * actual sleep via the wait_prepare/wait_finish callbacks.
 *
 * Returns 0 on success, -EINVAL if streaming is (or goes) off, -EAGAIN
 * for a nonblocking caller with no buffer ready, or the negative value
 * returned by wait_event_interruptible() if the sleep was interrupted
 * by a signal.
 */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * All operations on vb_done_list are performed under done_lock
	 * spinlock protection. However, buffers may be removed from
	 * it and returned to userspace only while holding both driver's
	 * lock and the done_lock spinlock. Thus we can be sure that as
	 * long as we hold the driver's lock, the list will remain not
	 * empty if list_empty() check succeeds.
	 */

	for (;;) {
		int ret;

		if (!q->streaming) {
			dprintk(1, "Streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (!list_empty(&q->done_list)) {
			/*
			 * Found a buffer that we were waiting for.
			 */
			break;
		}

		if (nonblocking) {
			dprintk(1, "Nonblocking and no buffers to dequeue, "
					"will not wait\n");
			return -EAGAIN;
		}

		/*
		 * We are streaming and blocking, wait for another buffer to
		 * become ready or for streamoff. Driver's lock is released to
		 * allow streamoff or qbuf to be called while waiting.
		 */
		call_qop(q, wait_prepare, q);

		/*
		 * All locks have been released, it is safe to sleep now.
		 */
		dprintk(3, "Will sleep waiting for buffers\n");
		ret = wait_event_interruptible(q->done_wq,
				!list_empty(&q->done_list) || !q->streaming);

		/*
		 * We need to reevaluate both conditions again after reacquiring
		 * the locks or return an error if one occurred.
		 */
		call_qop(q, wait_finish, q);
		if (ret) {
			/* Sleep was interrupted by a signal: bail out. */
			dprintk(1, "Sleep was interrupted\n");
			return ret;
		}
	}
	return 0;
}
1618 | 1615 | ||
1619 | /** | 1616 | /** |
1620 | * __vb2_get_done_vb() - get a buffer ready for dequeuing | 1617 | * __vb2_get_done_vb() - get a buffer ready for dequeuing |
1621 | * | 1618 | * |
1622 | * Will sleep if required for nonblocking == false. | 1619 | * Will sleep if required for nonblocking == false. |
1623 | */ | 1620 | */ |
/*
 * Fetch the oldest completed buffer from done_list into *vb, sleeping if
 * required (and allowed by @nonblocking).  The buffer is actually removed
 * from done_list only when the user-supplied v4l2_buffer can describe all
 * of its planes; otherwise it stays queued and an error is returned.
 */
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
				struct v4l2_buffer *b, int nonblocking)
{
	unsigned long flags;
	int ret;

	/*
	 * Wait for at least one buffer to become available on the done_list.
	 */
	ret = __vb2_wait_for_done_vb(q, nonblocking);
	if (ret)
		return ret;

	/*
	 * Driver's lock has been held since we last verified that done_list
	 * is not empty, so no need for another list_empty(done_list) check.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
	/*
	 * Only remove the buffer from done_list if v4l2_buffer can handle all
	 * the planes.
	 */
	ret = __verify_planes_array(*vb, b);
	if (!ret)
		list_del(&(*vb)->done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	return ret;
}
1654 | 1651 | ||
1655 | /** | 1652 | /** |
1656 | * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2 | 1653 | * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2 |
1657 | * @q: videobuf2 queue | 1654 | * @q: videobuf2 queue |
1658 | * | 1655 | * |
1659 | * This function will wait until all buffers that have been given to the driver | 1656 | * This function will wait until all buffers that have been given to the driver |
1660 | * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call | 1657 | * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call |
1661 | * wait_prepare, wait_finish pair. It is intended to be called with all locks | 1658 | * wait_prepare, wait_finish pair. It is intended to be called with all locks |
1662 | * taken, for example from stop_streaming() callback. | 1659 | * taken, for example from stop_streaming() callback. |
1663 | */ | 1660 | */ |
1664 | int vb2_wait_for_all_buffers(struct vb2_queue *q) | 1661 | int vb2_wait_for_all_buffers(struct vb2_queue *q) |
1665 | { | 1662 | { |
1666 | if (!q->streaming) { | 1663 | if (!q->streaming) { |
1667 | dprintk(1, "Streaming off, will not wait for buffers\n"); | 1664 | dprintk(1, "Streaming off, will not wait for buffers\n"); |
1668 | return -EINVAL; | 1665 | return -EINVAL; |
1669 | } | 1666 | } |
1670 | 1667 | ||
1671 | if (!q->retry_start_streaming) | 1668 | if (!q->retry_start_streaming) |
1672 | wait_event(q->done_wq, !atomic_read(&q->queued_count)); | 1669 | wait_event(q->done_wq, !atomic_read(&q->queued_count)); |
1673 | return 0; | 1670 | return 0; |
1674 | } | 1671 | } |
1675 | EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers); | 1672 | EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers); |
1676 | 1673 | ||
1677 | /** | 1674 | /** |
1678 | * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state | 1675 | * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state |
1679 | */ | 1676 | */ |
1680 | static void __vb2_dqbuf(struct vb2_buffer *vb) | 1677 | static void __vb2_dqbuf(struct vb2_buffer *vb) |
1681 | { | 1678 | { |
1682 | struct vb2_queue *q = vb->vb2_queue; | 1679 | struct vb2_queue *q = vb->vb2_queue; |
1683 | unsigned int i; | 1680 | unsigned int i; |
1684 | 1681 | ||
1685 | /* nothing to do if the buffer is already dequeued */ | 1682 | /* nothing to do if the buffer is already dequeued */ |
1686 | if (vb->state == VB2_BUF_STATE_DEQUEUED) | 1683 | if (vb->state == VB2_BUF_STATE_DEQUEUED) |
1687 | return; | 1684 | return; |
1688 | 1685 | ||
1689 | vb->state = VB2_BUF_STATE_DEQUEUED; | 1686 | vb->state = VB2_BUF_STATE_DEQUEUED; |
1690 | 1687 | ||
1691 | /* unmap DMABUF buffer */ | 1688 | /* unmap DMABUF buffer */ |
1692 | if (q->memory == V4L2_MEMORY_DMABUF) | 1689 | if (q->memory == V4L2_MEMORY_DMABUF) |
1693 | for (i = 0; i < vb->num_planes; ++i) { | 1690 | for (i = 0; i < vb->num_planes; ++i) { |
1694 | if (!vb->planes[i].dbuf_mapped) | 1691 | if (!vb->planes[i].dbuf_mapped) |
1695 | continue; | 1692 | continue; |
1696 | call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv); | 1693 | call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv); |
1697 | vb->planes[i].dbuf_mapped = 0; | 1694 | vb->planes[i].dbuf_mapped = 0; |
1698 | } | 1695 | } |
1699 | } | 1696 | } |
1700 | 1697 | ||
/*
 * vb2_internal_dqbuf() - common implementation of buffer dequeueing
 * @q:		videobuf2 queue
 * @b:		v4l2_buffer to fill in for userspace
 * @nonblocking: if true, do not sleep when no buffer is ready
 *
 * Takes the first finished buffer off the done list, lets the driver run
 * its buf_finish processing, fills @b for userspace, removes the buffer
 * from the queued list and returns it to the DEQUEUED state.
 */
static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	struct vb2_buffer *vb = NULL;
	int ret;

	if (b->type != q->type) {
		dprintk(1, "dqbuf: invalid buffer type\n");
		return -EINVAL;
	}
	ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
	if (ret < 0)
		return ret;

	/* Give the driver a chance to finalize the buffer before handover. */
	ret = call_qop(q, buf_finish, vb);
	if (ret) {
		dprintk(1, "dqbuf: buffer finish failed\n");
		return ret;
	}

	/* Only DONE and ERROR buffers may be handed back to userspace. */
	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(3, "dqbuf: Returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(3, "dqbuf: Returning done buffer with errors\n");
		break;
	default:
		dprintk(1, "dqbuf: Invalid buffer state\n");
		return -EINVAL;
	}

	/* Fill buffer information for the userspace */
	__fill_v4l2_buffer(vb, b);
	/* Remove from videobuf queue */
	list_del(&vb->queued_entry);
	/* go back to dequeued state */
	__vb2_dqbuf(vb);

	dprintk(1, "dqbuf of buffer %d, with state %d\n",
			vb->v4l2_buf.index, vb->state);

	return 0;
}
1744 | 1741 | ||
1745 | /** | 1742 | /** |
1746 | * vb2_dqbuf() - Dequeue a buffer to the userspace | 1743 | * vb2_dqbuf() - Dequeue a buffer to the userspace |
1747 | * @q: videobuf2 queue | 1744 | * @q: videobuf2 queue |
1748 | * @b: buffer structure passed from userspace to vidioc_dqbuf handler | 1745 | * @b: buffer structure passed from userspace to vidioc_dqbuf handler |
1749 | * in driver | 1746 | * in driver |
1750 | * @nonblocking: if true, this call will not sleep waiting for a buffer if no | 1747 | * @nonblocking: if true, this call will not sleep waiting for a buffer if no |
1751 | * buffers ready for dequeuing are present. Normally the driver | 1748 | * buffers ready for dequeuing are present. Normally the driver |
1752 | * would be passing (file->f_flags & O_NONBLOCK) here | 1749 | * would be passing (file->f_flags & O_NONBLOCK) here |
1753 | * | 1750 | * |
1754 | * Should be called from vidioc_dqbuf ioctl handler of a driver. | 1751 | * Should be called from vidioc_dqbuf ioctl handler of a driver. |
1755 | * This function: | 1752 | * This function: |
1756 | * 1) verifies the passed buffer, | 1753 | * 1) verifies the passed buffer, |
1757 | * 2) calls buf_finish callback in the driver (if provided), in which | 1754 | * 2) calls buf_finish callback in the driver (if provided), in which |
1758 | * driver can perform any additional operations that may be required before | 1755 | * driver can perform any additional operations that may be required before |
1759 | * returning the buffer to userspace, such as cache sync, | 1756 | * returning the buffer to userspace, such as cache sync, |
1760 | * 3) the buffer struct members are filled with relevant information for | 1757 | * 3) the buffer struct members are filled with relevant information for |
1761 | * the userspace. | 1758 | * the userspace. |
1762 | * | 1759 | * |
1763 | * The return values from this function are intended to be directly returned | 1760 | * The return values from this function are intended to be directly returned |
1764 | * from vidioc_dqbuf handler in driver. | 1761 | * from vidioc_dqbuf handler in driver. |
1765 | */ | 1762 | */ |
1766 | int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking) | 1763 | int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking) |
1767 | { | 1764 | { |
1768 | if (q->fileio) { | 1765 | if (q->fileio) { |
1769 | dprintk(1, "dqbuf: file io in progress\n"); | 1766 | dprintk(1, "dqbuf: file io in progress\n"); |
1770 | return -EBUSY; | 1767 | return -EBUSY; |
1771 | } | 1768 | } |
1772 | return vb2_internal_dqbuf(q, b, nonblocking); | 1769 | return vb2_internal_dqbuf(q, b, nonblocking); |
1773 | } | 1770 | } |
1774 | EXPORT_SYMBOL_GPL(vb2_dqbuf); | 1771 | EXPORT_SYMBOL_GPL(vb2_dqbuf); |
1775 | 1772 | ||
1776 | /** | 1773 | /** |
1777 | * __vb2_queue_cancel() - cancel and stop (pause) streaming | 1774 | * __vb2_queue_cancel() - cancel and stop (pause) streaming |
1778 | * | 1775 | * |
1779 | * Removes all queued buffers from driver's queue and all buffers queued by | 1776 | * Removes all queued buffers from driver's queue and all buffers queued by |
1780 | * userspace from videobuf's queue. Returns to state after reqbufs. | 1777 | * userspace from videobuf's queue. Returns to state after reqbufs. |
1781 | */ | 1778 | */ |
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * If start_streaming was deferred (-ENOBUFS), streaming never really
	 * started in the driver, so just clear the pending retry and the
	 * streaming flag without calling stop_streaming below.
	 */
	if (q->retry_start_streaming) {
		q->retry_start_streaming = 0;
		q->streaming = 0;
	}

	/*
	 * Tell driver to stop all transactions and release all queued
	 * buffers.
	 */
	if (q->streaming)
		call_qop(q, stop_streaming, q);
	q->streaming = 0;

	/*
	 * Remove all buffers from videobuf's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->queued_count, 0);
	/* Wake anyone sleeping in dqbuf/poll so they notice streamoff. */
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use.
	 */
	for (i = 0; i < q->num_buffers; ++i)
		__vb2_dqbuf(q->bufs[i]);
}
1817 | 1814 | ||
/*
 * vb2_internal_streamon() - common implementation of stream start
 * @q:		videobuf2 queue
 * @type:	buffer type passed from userspace
 *
 * Validates the request, hands all already-queued buffers to the driver
 * and asks it to start streaming.  On failure the queue is cancelled so
 * all buffers are returned to the DEQUEUED state.
 */
static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	struct vb2_buffer *vb;
	int ret;

	if (type != q->type) {
		dprintk(1, "streamon: invalid stream type\n");
		return -EINVAL;
	}

	/* Starting twice is harmless and reports success. */
	if (q->streaming) {
		dprintk(3, "streamon successful: already streaming\n");
		return 0;
	}

	if (!q->num_buffers) {
		dprintk(1, "streamon: no buffers have been allocated\n");
		return -EINVAL;
	}

	/*
	 * If any buffers were queued before streamon,
	 * we can now pass them to driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/* Tell driver to start streaming. */
	ret = vb2_start_streaming(q);
	if (ret) {
		/* Roll back: reclaim all buffers handed to the driver. */
		__vb2_queue_cancel(q);
		return ret;
	}

	q->streaming = 1;

	dprintk(3, "Streamon successful\n");
	return 0;
}
1857 | 1854 | ||
1858 | /** | 1855 | /** |
1859 | * vb2_streamon - start streaming | 1856 | * vb2_streamon - start streaming |
1860 | * @q: videobuf2 queue | 1857 | * @q: videobuf2 queue |
1861 | * @type: type argument passed from userspace to vidioc_streamon handler | 1858 | * @type: type argument passed from userspace to vidioc_streamon handler |
1862 | * | 1859 | * |
1863 | * Should be called from vidioc_streamon handler of a driver. | 1860 | * Should be called from vidioc_streamon handler of a driver. |
1864 | * This function: | 1861 | * This function: |
1865 | * 1) verifies current state | 1862 | * 1) verifies current state |
1866 | * 2) passes any previously queued buffers to the driver and starts streaming | 1863 | * 2) passes any previously queued buffers to the driver and starts streaming |
1867 | * | 1864 | * |
1868 | * The return values from this function are intended to be directly returned | 1865 | * The return values from this function are intended to be directly returned |
1869 | * from vidioc_streamon handler in the driver. | 1866 | * from vidioc_streamon handler in the driver. |
1870 | */ | 1867 | */ |
1871 | int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type) | 1868 | int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type) |
1872 | { | 1869 | { |
1873 | if (q->fileio) { | 1870 | if (q->fileio) { |
1874 | dprintk(1, "streamon: file io in progress\n"); | 1871 | dprintk(1, "streamon: file io in progress\n"); |
1875 | return -EBUSY; | 1872 | return -EBUSY; |
1876 | } | 1873 | } |
1877 | return vb2_internal_streamon(q, type); | 1874 | return vb2_internal_streamon(q, type); |
1878 | } | 1875 | } |
1879 | EXPORT_SYMBOL_GPL(vb2_streamon); | 1876 | EXPORT_SYMBOL_GPL(vb2_streamon); |
1880 | 1877 | ||
/*
 * vb2_internal_streamoff() - shared STREAMOFF implementation
 * @q:		videobuf2 queue
 * @type:	buffer type passed from userspace (must match q->type)
 *
 * Returns 0 on success or -EINVAL if @type does not match the queue type.
 * Calling this on a queue that is not streaming is a successful no-op.
 */
static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (type != q->type) {
		dprintk(1, "streamoff: invalid stream type\n");
		return -EINVAL;
	}

	/* STREAMOFF on an idle queue succeeds silently (spec-conformant). */
	if (!q->streaming) {
		dprintk(3, "streamoff successful: not streaming\n");
		return 0;
	}

	/*
	 * Cancel will pause streaming and remove all buffers from the driver
	 * and videobuf, effectively returning control over them to userspace.
	 */
	__vb2_queue_cancel(q);
	/*
	 * Capture queues must see at least one QBUF before poll() reports
	 * readiness again; output queues never wait for buffers.
	 */
	q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);

	dprintk(3, "Streamoff successful\n");
	return 0;
}
1903 | 1900 | ||
1904 | /** | 1901 | /** |
1905 | * vb2_streamoff - stop streaming | 1902 | * vb2_streamoff - stop streaming |
1906 | * @q: videobuf2 queue | 1903 | * @q: videobuf2 queue |
1907 | * @type: type argument passed from userspace to vidioc_streamoff handler | 1904 | * @type: type argument passed from userspace to vidioc_streamoff handler |
1908 | * | 1905 | * |
1909 | * Should be called from vidioc_streamoff handler of a driver. | 1906 | * Should be called from vidioc_streamoff handler of a driver. |
1910 | * This function: | 1907 | * This function: |
1911 | * 1) verifies current state, | 1908 | * 1) verifies current state, |
1912 | * 2) stop streaming and dequeues any queued buffers, including those previously | 1909 | * 2) stop streaming and dequeues any queued buffers, including those previously |
1913 | * passed to the driver (after waiting for the driver to finish). | 1910 | * passed to the driver (after waiting for the driver to finish). |
1914 | * | 1911 | * |
1915 | * This call can be used for pausing playback. | 1912 | * This call can be used for pausing playback. |
1916 | * The return values from this function are intended to be directly returned | 1913 | * The return values from this function are intended to be directly returned |
1917 | * from vidioc_streamoff handler in the driver | 1914 | * from vidioc_streamoff handler in the driver |
1918 | */ | 1915 | */ |
1919 | int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type) | 1916 | int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type) |
1920 | { | 1917 | { |
1921 | if (q->fileio) { | 1918 | if (q->fileio) { |
1922 | dprintk(1, "streamoff: file io in progress\n"); | 1919 | dprintk(1, "streamoff: file io in progress\n"); |
1923 | return -EBUSY; | 1920 | return -EBUSY; |
1924 | } | 1921 | } |
1925 | return vb2_internal_streamoff(q, type); | 1922 | return vb2_internal_streamoff(q, type); |
1926 | } | 1923 | } |
1927 | EXPORT_SYMBOL_GPL(vb2_streamoff); | 1924 | EXPORT_SYMBOL_GPL(vb2_streamoff); |
1928 | 1925 | ||
1929 | /** | 1926 | /** |
1930 | * __find_plane_by_offset() - find plane associated with the given offset off | 1927 | * __find_plane_by_offset() - find plane associated with the given offset off |
1931 | */ | 1928 | */ |
1932 | static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off, | 1929 | static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off, |
1933 | unsigned int *_buffer, unsigned int *_plane) | 1930 | unsigned int *_buffer, unsigned int *_plane) |
1934 | { | 1931 | { |
1935 | struct vb2_buffer *vb; | 1932 | struct vb2_buffer *vb; |
1936 | unsigned int buffer, plane; | 1933 | unsigned int buffer, plane; |
1937 | 1934 | ||
1938 | /* | 1935 | /* |
1939 | * Go over all buffers and their planes, comparing the given offset | 1936 | * Go over all buffers and their planes, comparing the given offset |
1940 | * with an offset assigned to each plane. If a match is found, | 1937 | * with an offset assigned to each plane. If a match is found, |
1941 | * return its buffer and plane numbers. | 1938 | * return its buffer and plane numbers. |
1942 | */ | 1939 | */ |
1943 | for (buffer = 0; buffer < q->num_buffers; ++buffer) { | 1940 | for (buffer = 0; buffer < q->num_buffers; ++buffer) { |
1944 | vb = q->bufs[buffer]; | 1941 | vb = q->bufs[buffer]; |
1945 | 1942 | ||
1946 | for (plane = 0; plane < vb->num_planes; ++plane) { | 1943 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1947 | if (vb->v4l2_planes[plane].m.mem_offset == off) { | 1944 | if (vb->v4l2_planes[plane].m.mem_offset == off) { |
1948 | *_buffer = buffer; | 1945 | *_buffer = buffer; |
1949 | *_plane = plane; | 1946 | *_plane = plane; |
1950 | return 0; | 1947 | return 0; |
1951 | } | 1948 | } |
1952 | } | 1949 | } |
1953 | } | 1950 | } |
1954 | 1951 | ||
1955 | return -EINVAL; | 1952 | return -EINVAL; |
1956 | } | 1953 | } |
1957 | 1954 | ||
/**
 * vb2_expbuf() - Export a buffer as a file descriptor
 * @q:		videobuf2 queue
 * @eb:		export buffer structure passed from userspace to vidioc_expbuf
 *		handler in driver
 *
 * Validates the request (MMAP memory model, exporting support in mem_ops,
 * flags, type, buffer and plane indices), then asks the allocator for a
 * dma-buf and installs it into a new file descriptor, returned in eb->fd.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_expbuf handler in driver.
 */
int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
	struct vb2_buffer *vb = NULL;
	struct vb2_plane *vb_plane;
	int ret;
	struct dma_buf *dbuf;

	/* Exporting only makes sense for vb2-owned (MMAP) memory. */
	if (q->memory != V4L2_MEMORY_MMAP) {
		dprintk(1, "Queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/* The allocator must implement the optional get_dmabuf memop. */
	if (!q->mem_ops->get_dmabuf) {
		dprintk(1, "Queue does not support DMA buffer exporting\n");
		return -EINVAL;
	}

	/* Only O_CLOEXEC plus an access mode may be requested. */
	if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
		dprintk(1, "Queue does support only O_CLOEXEC and access mode flags\n");
		return -EINVAL;
	}

	if (eb->type != q->type) {
		dprintk(1, "qbuf: invalid buffer type\n");
		return -EINVAL;
	}

	if (eb->index >= q->num_buffers) {
		dprintk(1, "buffer index out of range\n");
		return -EINVAL;
	}

	vb = q->bufs[eb->index];

	if (eb->plane >= vb->num_planes) {
		dprintk(1, "buffer plane out of range\n");
		return -EINVAL;
	}

	vb_plane = &vb->planes[eb->plane];

	/* Ask the allocator for a dma-buf honouring the requested access mode. */
	dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
	if (IS_ERR_OR_NULL(dbuf)) {
		dprintk(1, "Failed to export buffer %d, plane %d\n",
			eb->index, eb->plane);
		return -EINVAL;
	}

	/*
	 * Wrap the dma-buf in an fd; on failure drop the reference taken
	 * by get_dmabuf so the buffer is not leaked.
	 */
	ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
	if (ret < 0) {
		dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
			eb->index, eb->plane, ret);
		dma_buf_put(dbuf);
		return ret;
	}

	dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
		eb->index, eb->plane, ret);
	eb->fd = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_expbuf);
2030 | 2027 | ||
/**
 * vb2_mmap() - map video buffers into application address space
 * @q:		videobuf2 queue
 * @vma:	vma passed to the mmap file operation handler in the driver
 *
 * Should be called from mmap file operation handler of a driver.
 * This function maps one plane of one of the available video buffers to
 * userspace. To map whole video memory allocated on reqbufs, this function
 * has to be called once per each plane per each buffer previously allocated.
 *
 * When the userspace application calls mmap, it passes to it an offset returned
 * to it earlier by the means of vidioc_querybuf handler. That offset acts as
 * a "cookie", which is then used to identify the plane to be mapped.
 * This function finds a plane with a matching offset and a mapping is performed
 * by the means of a provided memory operation.
 *
 * The return values from this function are intended to be directly returned
 * from the mmap handler in driver.
 */
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
	int ret;
	unsigned long length;

	/* The cookie scheme only exists for vb2-owned (MMAP) memory. */
	if (q->memory != V4L2_MEMORY_MMAP) {
		dprintk(1, "Queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Check memory area access mode.
	 */
	if (!(vma->vm_flags & VM_SHARED)) {
		dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
		return -EINVAL;
	}
	/* Output buffers are written by userspace, capture buffers are read. */
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		if (!(vma->vm_flags & VM_WRITE)) {
			dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
			return -EINVAL;
		}
	} else {
		if (!(vma->vm_flags & VM_READ)) {
			dprintk(1, "Invalid vma flags, VM_READ needed\n");
			return -EINVAL;
		}
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];

	/*
	 * MMAP requires page_aligned buffers.
	 * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
	 * so, we need to do the same here.
	 */
	length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
	if (length < (vma->vm_end - vma->vm_start)) {
		dprintk(1,
			"MMAP invalid, as it would overflow buffer length\n");
		return -EINVAL;
	}

	/* Delegate the actual mapping to the allocator's mmap memop. */
	ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma);
	if (ret)
		return ret;

	dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap);
2111 | 2108 | ||
2112 | #ifndef CONFIG_MMU | 2109 | #ifndef CONFIG_MMU |
2113 | unsigned long vb2_get_unmapped_area(struct vb2_queue *q, | 2110 | unsigned long vb2_get_unmapped_area(struct vb2_queue *q, |
2114 | unsigned long addr, | 2111 | unsigned long addr, |
2115 | unsigned long len, | 2112 | unsigned long len, |
2116 | unsigned long pgoff, | 2113 | unsigned long pgoff, |
2117 | unsigned long flags) | 2114 | unsigned long flags) |
2118 | { | 2115 | { |
2119 | unsigned long off = pgoff << PAGE_SHIFT; | 2116 | unsigned long off = pgoff << PAGE_SHIFT; |
2120 | struct vb2_buffer *vb; | 2117 | struct vb2_buffer *vb; |
2121 | unsigned int buffer, plane; | 2118 | unsigned int buffer, plane; |
2122 | int ret; | 2119 | int ret; |
2123 | 2120 | ||
2124 | if (q->memory != V4L2_MEMORY_MMAP) { | 2121 | if (q->memory != V4L2_MEMORY_MMAP) { |
2125 | dprintk(1, "Queue is not currently set up for mmap\n"); | 2122 | dprintk(1, "Queue is not currently set up for mmap\n"); |
2126 | return -EINVAL; | 2123 | return -EINVAL; |
2127 | } | 2124 | } |
2128 | 2125 | ||
2129 | /* | 2126 | /* |
2130 | * Find the plane corresponding to the offset passed by userspace. | 2127 | * Find the plane corresponding to the offset passed by userspace. |
2131 | */ | 2128 | */ |
2132 | ret = __find_plane_by_offset(q, off, &buffer, &plane); | 2129 | ret = __find_plane_by_offset(q, off, &buffer, &plane); |
2133 | if (ret) | 2130 | if (ret) |
2134 | return ret; | 2131 | return ret; |
2135 | 2132 | ||
2136 | vb = q->bufs[buffer]; | 2133 | vb = q->bufs[buffer]; |
2137 | 2134 | ||
2138 | return (unsigned long)vb2_plane_vaddr(vb, plane); | 2135 | return (unsigned long)vb2_plane_vaddr(vb, plane); |
2139 | } | 2136 | } |
2140 | EXPORT_SYMBOL_GPL(vb2_get_unmapped_area); | 2137 | EXPORT_SYMBOL_GPL(vb2_get_unmapped_area); |
2141 | #endif | 2138 | #endif |
2142 | 2139 | ||
2143 | static int __vb2_init_fileio(struct vb2_queue *q, int read); | 2140 | static int __vb2_init_fileio(struct vb2_queue *q, int read); |
2144 | static int __vb2_cleanup_fileio(struct vb2_queue *q); | 2141 | static int __vb2_cleanup_fileio(struct vb2_queue *q); |
2145 | 2142 | ||
2146 | /** | 2143 | /** |
2147 | * vb2_poll() - implements poll userspace operation | 2144 | * vb2_poll() - implements poll userspace operation |
2148 | * @q: videobuf2 queue | 2145 | * @q: videobuf2 queue |
2149 | * @file: file argument passed to the poll file operation handler | 2146 | * @file: file argument passed to the poll file operation handler |
2150 | * @wait: wait argument passed to the poll file operation handler | 2147 | * @wait: wait argument passed to the poll file operation handler |
2151 | * | 2148 | * |
2152 | * This function implements poll file operation handler for a driver. | 2149 | * This function implements poll file operation handler for a driver. |
2153 | * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will | 2150 | * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will |
2154 | * be informed that the file descriptor of a video device is available for | 2151 | * be informed that the file descriptor of a video device is available for |
2155 | * reading. | 2152 | * reading. |
2156 | * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor | 2153 | * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor |
2157 | * will be reported as available for writing. | 2154 | * will be reported as available for writing. |
2158 | * | 2155 | * |
2159 | * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any | 2156 | * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any |
2160 | * pending events. | 2157 | * pending events. |
2161 | * | 2158 | * |
2162 | * The return values from this function are intended to be directly returned | 2159 | * The return values from this function are intended to be directly returned |
2163 | * from poll handler in driver. | 2160 | * from poll handler in driver. |
2164 | */ | 2161 | */ |
2165 | unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) | 2162 | unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) |
2166 | { | 2163 | { |
2167 | struct video_device *vfd = video_devdata(file); | 2164 | struct video_device *vfd = video_devdata(file); |
2168 | unsigned long req_events = poll_requested_events(wait); | 2165 | unsigned long req_events = poll_requested_events(wait); |
2169 | struct vb2_buffer *vb = NULL; | 2166 | struct vb2_buffer *vb = NULL; |
2170 | unsigned int res = 0; | 2167 | unsigned int res = 0; |
2171 | unsigned long flags; | 2168 | unsigned long flags; |
2172 | 2169 | ||
2173 | if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { | 2170 | if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { |
2174 | struct v4l2_fh *fh = file->private_data; | 2171 | struct v4l2_fh *fh = file->private_data; |
2175 | 2172 | ||
2176 | if (v4l2_event_pending(fh)) | 2173 | if (v4l2_event_pending(fh)) |
2177 | res = POLLPRI; | 2174 | res = POLLPRI; |
2178 | else if (req_events & POLLPRI) | 2175 | else if (req_events & POLLPRI) |
2179 | poll_wait(file, &fh->wait, wait); | 2176 | poll_wait(file, &fh->wait, wait); |
2180 | } | 2177 | } |
2181 | 2178 | ||
2182 | if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM))) | 2179 | if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM))) |
2183 | return res; | 2180 | return res; |
2184 | if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM))) | 2181 | if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM))) |
2185 | return res; | 2182 | return res; |
2186 | 2183 | ||
2187 | /* | 2184 | /* |
2188 | * Start file I/O emulator only if streaming API has not been used yet. | 2185 | * Start file I/O emulator only if streaming API has not been used yet. |
2189 | */ | 2186 | */ |
2190 | if (q->num_buffers == 0 && q->fileio == NULL) { | 2187 | if (q->num_buffers == 0 && q->fileio == NULL) { |
2191 | if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) && | 2188 | if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) && |
2192 | (req_events & (POLLIN | POLLRDNORM))) { | 2189 | (req_events & (POLLIN | POLLRDNORM))) { |
2193 | if (__vb2_init_fileio(q, 1)) | 2190 | if (__vb2_init_fileio(q, 1)) |
2194 | return res | POLLERR; | 2191 | return res | POLLERR; |
2195 | } | 2192 | } |
2196 | if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) && | 2193 | if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) && |
2197 | (req_events & (POLLOUT | POLLWRNORM))) { | 2194 | (req_events & (POLLOUT | POLLWRNORM))) { |
2198 | if (__vb2_init_fileio(q, 0)) | 2195 | if (__vb2_init_fileio(q, 0)) |
2199 | return res | POLLERR; | 2196 | return res | POLLERR; |
2200 | /* | 2197 | /* |
2201 | * Write to OUTPUT queue can be done immediately. | 2198 | * Write to OUTPUT queue can be done immediately. |
2202 | */ | 2199 | */ |
2203 | return res | POLLOUT | POLLWRNORM; | 2200 | return res | POLLOUT | POLLWRNORM; |
2204 | } | 2201 | } |
2205 | } | 2202 | } |
2206 | 2203 | ||
2207 | /* | 2204 | /* |
2208 | * There is nothing to wait for if the queue isn't streaming. | 2205 | * There is nothing to wait for if the queue isn't streaming. |
2209 | */ | 2206 | */ |
2210 | if (!vb2_is_streaming(q)) | 2207 | if (!vb2_is_streaming(q)) |
2211 | return res | POLLERR; | 2208 | return res | POLLERR; |
2212 | /* | 2209 | /* |
2213 | * For compatibility with vb1: if QBUF hasn't been called yet, then | 2210 | * For compatibility with vb1: if QBUF hasn't been called yet, then |
2214 | * return POLLERR as well. This only affects capture queues, output | 2211 | * return POLLERR as well. This only affects capture queues, output |
2215 | * queues will always initialize waiting_for_buffers to false. | 2212 | * queues will always initialize waiting_for_buffers to false. |
2216 | */ | 2213 | */ |
2217 | if (q->waiting_for_buffers) | 2214 | if (q->waiting_for_buffers) |
2218 | return res | POLLERR; | 2215 | return res | POLLERR; |
2219 | 2216 | ||
2220 | if (list_empty(&q->done_list)) | 2217 | if (list_empty(&q->done_list)) |
2221 | poll_wait(file, &q->done_wq, wait); | 2218 | poll_wait(file, &q->done_wq, wait); |
2222 | 2219 | ||
2223 | /* | 2220 | /* |
2224 | * Take first buffer available for dequeuing. | 2221 | * Take first buffer available for dequeuing. |
2225 | */ | 2222 | */ |
2226 | spin_lock_irqsave(&q->done_lock, flags); | 2223 | spin_lock_irqsave(&q->done_lock, flags); |
2227 | if (!list_empty(&q->done_list)) | 2224 | if (!list_empty(&q->done_list)) |
2228 | vb = list_first_entry(&q->done_list, struct vb2_buffer, | 2225 | vb = list_first_entry(&q->done_list, struct vb2_buffer, |
2229 | done_entry); | 2226 | done_entry); |
2230 | spin_unlock_irqrestore(&q->done_lock, flags); | 2227 | spin_unlock_irqrestore(&q->done_lock, flags); |
2231 | 2228 | ||
2232 | if (vb && (vb->state == VB2_BUF_STATE_DONE | 2229 | if (vb && (vb->state == VB2_BUF_STATE_DONE |
2233 | || vb->state == VB2_BUF_STATE_ERROR)) { | 2230 | || vb->state == VB2_BUF_STATE_ERROR)) { |
2234 | return (V4L2_TYPE_IS_OUTPUT(q->type)) ? | 2231 | return (V4L2_TYPE_IS_OUTPUT(q->type)) ? |
2235 | res | POLLOUT | POLLWRNORM : | 2232 | res | POLLOUT | POLLWRNORM : |
2236 | res | POLLIN | POLLRDNORM; | 2233 | res | POLLIN | POLLRDNORM; |
2237 | } | 2234 | } |
2238 | return res; | 2235 | return res; |
2239 | } | 2236 | } |
2240 | EXPORT_SYMBOL_GPL(vb2_poll); | 2237 | EXPORT_SYMBOL_GPL(vb2_poll); |
2241 | 2238 | ||
2242 | /** | 2239 | /** |
2243 | * vb2_queue_init() - initialize a videobuf2 queue | 2240 | * vb2_queue_init() - initialize a videobuf2 queue |
2244 | * @q: videobuf2 queue; this structure should be allocated in driver | 2241 | * @q: videobuf2 queue; this structure should be allocated in driver |
2245 | * | 2242 | * |
2246 | * The vb2_queue structure should be allocated by the driver. The driver is | 2243 | * The vb2_queue structure should be allocated by the driver. The driver is |
2247 | * responsible of clearing it's content and setting initial values for some | 2244 | * responsible of clearing it's content and setting initial values for some |
2248 | * required entries before calling this function. | 2245 | * required entries before calling this function. |
2249 | * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer | 2246 | * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer |
2250 | * to the struct vb2_queue description in include/media/videobuf2-core.h | 2247 | * to the struct vb2_queue description in include/media/videobuf2-core.h |
2251 | * for more information. | 2248 | * for more information. |
2252 | */ | 2249 | */ |
2253 | int vb2_queue_init(struct vb2_queue *q) | 2250 | int vb2_queue_init(struct vb2_queue *q) |
2254 | { | 2251 | { |
2255 | /* | 2252 | /* |
2256 | * Sanity check | 2253 | * Sanity check |
2257 | */ | 2254 | */ |
2258 | if (WARN_ON(!q) || | 2255 | if (WARN_ON(!q) || |
2259 | WARN_ON(!q->ops) || | 2256 | WARN_ON(!q->ops) || |
2260 | WARN_ON(!q->mem_ops) || | 2257 | WARN_ON(!q->mem_ops) || |
2261 | WARN_ON(!q->type) || | 2258 | WARN_ON(!q->type) || |
2262 | WARN_ON(!q->io_modes) || | 2259 | WARN_ON(!q->io_modes) || |
2263 | WARN_ON(!q->ops->queue_setup) || | 2260 | WARN_ON(!q->ops->queue_setup) || |
2264 | WARN_ON(!q->ops->buf_queue) || | 2261 | WARN_ON(!q->ops->buf_queue) || |
2265 | WARN_ON(q->timestamp_flags & | 2262 | WARN_ON(q->timestamp_flags & |
2266 | ~(V4L2_BUF_FLAG_TIMESTAMP_MASK | | 2263 | ~(V4L2_BUF_FLAG_TIMESTAMP_MASK | |
2267 | V4L2_BUF_FLAG_TSTAMP_SRC_MASK))) | 2264 | V4L2_BUF_FLAG_TSTAMP_SRC_MASK))) |
2268 | return -EINVAL; | 2265 | return -EINVAL; |
2269 | 2266 | ||
2270 | /* Warn that the driver should choose an appropriate timestamp type */ | 2267 | /* Warn that the driver should choose an appropriate timestamp type */ |
2271 | WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == | 2268 | WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == |
2272 | V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN); | 2269 | V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN); |
2273 | 2270 | ||
2274 | INIT_LIST_HEAD(&q->queued_list); | 2271 | INIT_LIST_HEAD(&q->queued_list); |
2275 | INIT_LIST_HEAD(&q->done_list); | 2272 | INIT_LIST_HEAD(&q->done_list); |
2276 | spin_lock_init(&q->done_lock); | 2273 | spin_lock_init(&q->done_lock); |
2277 | init_waitqueue_head(&q->done_wq); | 2274 | init_waitqueue_head(&q->done_wq); |
2278 | 2275 | ||
2279 | if (q->buf_struct_size == 0) | 2276 | if (q->buf_struct_size == 0) |
2280 | q->buf_struct_size = sizeof(struct vb2_buffer); | 2277 | q->buf_struct_size = sizeof(struct vb2_buffer); |
2281 | 2278 | ||
2282 | return 0; | 2279 | return 0; |
2283 | } | 2280 | } |
2284 | EXPORT_SYMBOL_GPL(vb2_queue_init); | 2281 | EXPORT_SYMBOL_GPL(vb2_queue_init); |
2285 | 2282 | ||
2286 | /** | 2283 | /** |
2287 | * vb2_queue_release() - stop streaming, release the queue and free memory | 2284 | * vb2_queue_release() - stop streaming, release the queue and free memory |
2288 | * @q: videobuf2 queue | 2285 | * @q: videobuf2 queue |
2289 | * | 2286 | * |
2290 | * This function stops streaming and performs necessary clean ups, including | 2287 | * This function stops streaming and performs necessary clean ups, including |
2291 | * freeing video buffer memory. The driver is responsible for freeing | 2288 | * freeing video buffer memory. The driver is responsible for freeing |
2292 | * the vb2_queue structure itself. | 2289 | * the vb2_queue structure itself. |
2293 | */ | 2290 | */ |
2294 | void vb2_queue_release(struct vb2_queue *q) | 2291 | void vb2_queue_release(struct vb2_queue *q) |
2295 | { | 2292 | { |
2296 | __vb2_cleanup_fileio(q); | 2293 | __vb2_cleanup_fileio(q); |
2297 | __vb2_queue_cancel(q); | 2294 | __vb2_queue_cancel(q); |
2298 | __vb2_queue_free(q, q->num_buffers); | 2295 | __vb2_queue_free(q, q->num_buffers); |
2299 | } | 2296 | } |
2300 | EXPORT_SYMBOL_GPL(vb2_queue_release); | 2297 | EXPORT_SYMBOL_GPL(vb2_queue_release); |
2301 | 2298 | ||
2302 | /** | 2299 | /** |
2303 | * struct vb2_fileio_buf - buffer context used by file io emulator | 2300 | * struct vb2_fileio_buf - buffer context used by file io emulator |
2304 | * | 2301 | * |
2305 | * vb2 provides a compatibility layer and emulator of file io (read and | 2302 | * vb2 provides a compatibility layer and emulator of file io (read and |
2306 | * write) calls on top of streaming API. This structure is used for | 2303 | * write) calls on top of streaming API. This structure is used for |
2307 | * tracking context related to the buffers. | 2304 | * tracking context related to the buffers. |
2308 | */ | 2305 | */ |
2309 | struct vb2_fileio_buf { | 2306 | struct vb2_fileio_buf { |
2310 | void *vaddr; | 2307 | void *vaddr; |
2311 | unsigned int size; | 2308 | unsigned int size; |
2312 | unsigned int pos; | 2309 | unsigned int pos; |
2313 | unsigned int queued:1; | 2310 | unsigned int queued:1; |
2314 | }; | 2311 | }; |
2315 | 2312 | ||
/**
 * struct vb2_fileio_data - queue context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. For proper operation it required
 * this structure to save the driver state between each call of the read
 * or write function.
 */
struct vb2_fileio_data {
	struct v4l2_requestbuffers req;	/* reusable REQBUFS argument (count=0 frees) */
	struct v4l2_buffer b;		/* scratch buffer for internal qbuf/dqbuf */
	struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];	/* per-buffer emulator state */
	unsigned int index;	/* next buffer index; >= num_buffers means "dequeue" */
	unsigned int q_count;	/* number of buffers queued by the emulator */
	unsigned int dq_count;	/* number of buffers dequeued so far */
	unsigned int flags;	/* copy of q->io_flags (VB2_FILEIO_* bits) */
};
2333 | 2330 | ||
/**
 * __vb2_init_fileio() - initialize file io emulator
 * @q:		videobuf2 queue
 * @read:	mode selector (1 means read, 0 means write)
 *
 * Allocates MMAP buffers via vb2_reqbufs(), maps them into kernel space,
 * pre-queues them all for the read case and starts streaming. On success
 * q->fileio is set and 0 is returned; on failure a negative errno is
 * returned and any requested buffers are released again.
 */
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
	struct vb2_fileio_data *fileio;
	int i, ret;
	unsigned int count = 0;

	/*
	 * Sanity check: the queue must advertise the matching io mode.
	 */
	if ((read && !(q->io_modes & VB2_READ)) ||
	    (!read && !(q->io_modes & VB2_WRITE)))
		BUG();

	/*
	 * Check if device supports mapping buffers to kernel virtual space.
	 */
	if (!q->mem_ops->vaddr)
		return -EBUSY;

	/*
	 * Check if streaming api has not been already activated.
	 */
	if (q->streaming || q->num_buffers > 0)
		return -EBUSY;

	/*
	 * Start with count 1, driver can increase it in queue_setup()
	 */
	count = 1;

	dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
		(read) ? "read" : "write", count, q->io_flags);

	fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
	if (fileio == NULL)
		return -ENOMEM;

	fileio->flags = q->io_flags;

	/*
	 * Request buffers and use MMAP type to force driver
	 * to allocate buffers by itself.
	 */
	fileio->req.count = count;
	fileio->req.memory = V4L2_MEMORY_MMAP;
	fileio->req.type = q->type;
	ret = vb2_reqbufs(q, &fileio->req);
	if (ret)
		goto err_kfree;

	/*
	 * Check if plane_count is correct
	 * (multiplane buffers are not supported).
	 */
	if (q->bufs[0]->num_planes != 1) {
		ret = -EBUSY;
		goto err_reqbufs;
	}

	/*
	 * Get kernel address of each buffer.
	 */
	for (i = 0; i < q->num_buffers; i++) {
		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
		if (fileio->bufs[i].vaddr == NULL) {
			ret = -EINVAL;
			goto err_reqbufs;
		}
		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
	}

	/*
	 * Read mode requires pre queuing of all buffers.
	 */
	if (read) {
		/*
		 * Queue all buffers.
		 */
		for (i = 0; i < q->num_buffers; i++) {
			struct v4l2_buffer *b = &fileio->b;
			memset(b, 0, sizeof(*b));
			b->type = q->type;
			b->memory = q->memory;
			b->index = i;
			ret = vb2_qbuf(q, b);
			if (ret)
				goto err_reqbufs;
			fileio->bufs[i].queued = 1;
		}
		/* index == num_buffers makes the first io call dequeue. */
		fileio->index = q->num_buffers;
	}

	/*
	 * Start streaming.
	 */
	ret = vb2_streamon(q, q->type);
	if (ret)
		goto err_reqbufs;

	q->fileio = fileio;

	return ret;

err_reqbufs:
	/* A zero-count REQBUFS releases all buffers allocated above. */
	fileio->req.count = 0;
	vb2_reqbufs(q, &fileio->req);

err_kfree:
	kfree(fileio);
	return ret;
}
2450 | 2447 | ||
/**
 * __vb2_cleanup_fileio() - free resources used by file io emulator
 * @q:		videobuf2 queue
 *
 * Stops streaming, releases the emulator's buffers and frees its state.
 * Safe to call when the emulator was never initialized.
 * Return: always 0.
 */
static int __vb2_cleanup_fileio(struct vb2_queue *q)
{
	struct vb2_fileio_data *fileio = q->fileio;

	if (fileio) {
		/* Streaming must be stopped before the buffers are freed. */
		vb2_internal_streamoff(q, q->type);
		q->fileio = NULL;
		/* A zero-count REQBUFS releases all emulator buffers. */
		fileio->req.count = 0;
		vb2_reqbufs(q, &fileio->req);
		kfree(fileio);
		dprintk(3, "file io emulator closed\n");
	}
	return 0;
}
2469 | 2466 | ||
2470 | /** | 2467 | /** |
2471 | * __vb2_perform_fileio() - perform a single file io (read or write) operation | 2468 | * __vb2_perform_fileio() - perform a single file io (read or write) operation |
2472 | * @q: videobuf2 queue | 2469 | * @q: videobuf2 queue |
2473 | * @data: pointed to target userspace buffer | 2470 | * @data: pointed to target userspace buffer |
2474 | * @count: number of bytes to read or write | 2471 | * @count: number of bytes to read or write |
2475 | * @ppos: file handle position tracking pointer | 2472 | * @ppos: file handle position tracking pointer |
2476 | * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking) | 2473 | * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking) |
2477 | * @read: access mode selector (1 means read, 0 means write) | 2474 | * @read: access mode selector (1 means read, 0 means write) |
2478 | */ | 2475 | */ |
2479 | static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count, | 2476 | static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count, |
2480 | loff_t *ppos, int nonblock, int read) | 2477 | loff_t *ppos, int nonblock, int read) |
2481 | { | 2478 | { |
2482 | struct vb2_fileio_data *fileio; | 2479 | struct vb2_fileio_data *fileio; |
2483 | struct vb2_fileio_buf *buf; | 2480 | struct vb2_fileio_buf *buf; |
2484 | int ret, index; | 2481 | int ret, index; |
2485 | 2482 | ||
2486 | dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n", | 2483 | dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n", |
2487 | read ? "read" : "write", (long)*ppos, count, | 2484 | read ? "read" : "write", (long)*ppos, count, |
2488 | nonblock ? "non" : ""); | 2485 | nonblock ? "non" : ""); |
2489 | 2486 | ||
2490 | if (!data) | 2487 | if (!data) |
2491 | return -EINVAL; | 2488 | return -EINVAL; |
2492 | 2489 | ||
2493 | /* | 2490 | /* |
2494 | * Initialize emulator on first call. | 2491 | * Initialize emulator on first call. |
2495 | */ | 2492 | */ |
2496 | if (!q->fileio) { | 2493 | if (!q->fileio) { |
2497 | ret = __vb2_init_fileio(q, read); | 2494 | ret = __vb2_init_fileio(q, read); |
2498 | dprintk(3, "file io: vb2_init_fileio result: %d\n", ret); | 2495 | dprintk(3, "file io: vb2_init_fileio result: %d\n", ret); |
2499 | if (ret) | 2496 | if (ret) |
2500 | return ret; | 2497 | return ret; |
2501 | } | 2498 | } |
2502 | fileio = q->fileio; | 2499 | fileio = q->fileio; |
2503 | 2500 | ||
2504 | /* | 2501 | /* |
2505 | * Check if we need to dequeue the buffer. | 2502 | * Check if we need to dequeue the buffer. |
2506 | */ | 2503 | */ |
2507 | index = fileio->index; | 2504 | index = fileio->index; |
2508 | if (index >= q->num_buffers) { | 2505 | if (index >= q->num_buffers) { |
2509 | /* | 2506 | /* |
2510 | * Call vb2_dqbuf to get buffer back. | 2507 | * Call vb2_dqbuf to get buffer back. |
2511 | */ | 2508 | */ |
2512 | memset(&fileio->b, 0, sizeof(fileio->b)); | 2509 | memset(&fileio->b, 0, sizeof(fileio->b)); |
2513 | fileio->b.type = q->type; | 2510 | fileio->b.type = q->type; |
2514 | fileio->b.memory = q->memory; | 2511 | fileio->b.memory = q->memory; |
2515 | ret = vb2_internal_dqbuf(q, &fileio->b, nonblock); | 2512 | ret = vb2_internal_dqbuf(q, &fileio->b, nonblock); |
2516 | dprintk(5, "file io: vb2_dqbuf result: %d\n", ret); | 2513 | dprintk(5, "file io: vb2_dqbuf result: %d\n", ret); |
2517 | if (ret) | 2514 | if (ret) |
2518 | return ret; | 2515 | return ret; |
2519 | fileio->dq_count += 1; | 2516 | fileio->dq_count += 1; |
2520 | 2517 | ||
2521 | index = fileio->b.index; | 2518 | index = fileio->b.index; |
2522 | buf = &fileio->bufs[index]; | 2519 | buf = &fileio->bufs[index]; |
2523 | 2520 | ||
2524 | /* | 2521 | /* |
2525 | * Get number of bytes filled by the driver | 2522 | * Get number of bytes filled by the driver |
2526 | */ | 2523 | */ |
2527 | buf->pos = 0; | 2524 | buf->pos = 0; |
2528 | buf->queued = 0; | 2525 | buf->queued = 0; |
2529 | buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0) | 2526 | buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0) |
2530 | : vb2_plane_size(q->bufs[index], 0); | 2527 | : vb2_plane_size(q->bufs[index], 0); |
2531 | } else { | 2528 | } else { |
2532 | buf = &fileio->bufs[index]; | 2529 | buf = &fileio->bufs[index]; |
2533 | } | 2530 | } |
2534 | 2531 | ||
2535 | /* | 2532 | /* |
2536 | * Limit count on last few bytes of the buffer. | 2533 | * Limit count on last few bytes of the buffer. |
2537 | */ | 2534 | */ |
2538 | if (buf->pos + count > buf->size) { | 2535 | if (buf->pos + count > buf->size) { |
2539 | count = buf->size - buf->pos; | 2536 | count = buf->size - buf->pos; |
2540 | dprintk(5, "reducing read count: %zd\n", count); | 2537 | dprintk(5, "reducing read count: %zd\n", count); |
2541 | } | 2538 | } |
2542 | 2539 | ||
2543 | /* | 2540 | /* |
2544 | * Transfer data to userspace. | 2541 | * Transfer data to userspace. |
2545 | */ | 2542 | */ |
2546 | dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n", | 2543 | dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n", |
2547 | count, index, buf->pos); | 2544 | count, index, buf->pos); |
2548 | if (read) | 2545 | if (read) |
2549 | ret = copy_to_user(data, buf->vaddr + buf->pos, count); | 2546 | ret = copy_to_user(data, buf->vaddr + buf->pos, count); |
2550 | else | 2547 | else |
2551 | ret = copy_from_user(buf->vaddr + buf->pos, data, count); | 2548 | ret = copy_from_user(buf->vaddr + buf->pos, data, count); |
2552 | if (ret) { | 2549 | if (ret) { |
2553 | dprintk(3, "file io: error copying data\n"); | 2550 | dprintk(3, "file io: error copying data\n"); |
2554 | return -EFAULT; | 2551 | return -EFAULT; |
2555 | } | 2552 | } |
2556 | 2553 | ||
2557 | /* | 2554 | /* |
2558 | * Update counters. | 2555 | * Update counters. |
2559 | */ | 2556 | */ |
2560 | buf->pos += count; | 2557 | buf->pos += count; |
2561 | *ppos += count; | 2558 | *ppos += count; |
2562 | 2559 | ||
2563 | /* | 2560 | /* |
2564 | * Queue next buffer if required. | 2561 | * Queue next buffer if required. |
2565 | */ | 2562 | */ |
2566 | if (buf->pos == buf->size || | 2563 | if (buf->pos == buf->size || |
2567 | (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) { | 2564 | (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) { |
2568 | /* | 2565 | /* |
2569 | * Check if this is the last buffer to read. | 2566 | * Check if this is the last buffer to read. |
2570 | */ | 2567 | */ |
2571 | if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) && | 2568 | if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) && |
2572 | fileio->dq_count == 1) { | 2569 | fileio->dq_count == 1) { |
2573 | dprintk(3, "file io: read limit reached\n"); | 2570 | dprintk(3, "file io: read limit reached\n"); |
2574 | return __vb2_cleanup_fileio(q); | 2571 | return __vb2_cleanup_fileio(q); |
2575 | } | 2572 | } |
2576 | 2573 | ||
2577 | /* | 2574 | /* |
2578 | * Call vb2_qbuf and give buffer to the driver. | 2575 | * Call vb2_qbuf and give buffer to the driver. |
2579 | */ | 2576 | */ |
2580 | memset(&fileio->b, 0, sizeof(fileio->b)); | 2577 | memset(&fileio->b, 0, sizeof(fileio->b)); |
2581 | fileio->b.type = q->type; | 2578 | fileio->b.type = q->type; |
2582 | fileio->b.memory = q->memory; | 2579 | fileio->b.memory = q->memory; |
2583 | fileio->b.index = index; | 2580 | fileio->b.index = index; |
2584 | fileio->b.bytesused = buf->pos; | 2581 | fileio->b.bytesused = buf->pos; |
2585 | ret = vb2_internal_qbuf(q, &fileio->b); | 2582 | ret = vb2_internal_qbuf(q, &fileio->b); |
2586 | dprintk(5, "file io: vb2_dbuf result: %d\n", ret); | 2583 | dprintk(5, "file io: vb2_dbuf result: %d\n", ret); |
2587 | if (ret) | 2584 | if (ret) |
2588 | return ret; | 2585 | return ret; |
2589 | 2586 | ||
2590 | /* | 2587 | /* |
2591 | * Buffer has been queued, update the status | 2588 | * Buffer has been queued, update the status |
2592 | */ | 2589 | */ |
2593 | buf->pos = 0; | 2590 | buf->pos = 0; |
2594 | buf->queued = 1; | 2591 | buf->queued = 1; |
2595 | buf->size = vb2_plane_size(q->bufs[index], 0); | 2592 | buf->size = vb2_plane_size(q->bufs[index], 0); |
2596 | fileio->q_count += 1; | 2593 | fileio->q_count += 1; |
2597 | if (fileio->index < q->num_buffers) | 2594 | if (fileio->index < q->num_buffers) |
2598 | fileio->index++; | 2595 | fileio->index++; |
2599 | } | 2596 | } |
2600 | 2597 | ||
2601 | /* | 2598 | /* |
2602 | * Return proper number of bytes processed. | 2599 | * Return proper number of bytes processed. |
2603 | */ | 2600 | */ |
2604 | if (ret == 0) | 2601 | if (ret == 0) |
2605 | ret = count; | 2602 | ret = count; |
2606 | return ret; | 2603 | return ret; |
2607 | } | 2604 | } |
2608 | 2605 | ||
2609 | size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, | 2606 | size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, |
2610 | loff_t *ppos, int nonblocking) | 2607 | loff_t *ppos, int nonblocking) |
2611 | { | 2608 | { |
2612 | return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1); | 2609 | return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1); |
2613 | } | 2610 | } |
2614 | EXPORT_SYMBOL_GPL(vb2_read); | 2611 | EXPORT_SYMBOL_GPL(vb2_read); |
2615 | 2612 | ||
2616 | size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, | 2613 | size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, |
2617 | loff_t *ppos, int nonblocking) | 2614 | loff_t *ppos, int nonblocking) |
2618 | { | 2615 | { |
2619 | return __vb2_perform_fileio(q, (char __user *) data, count, | 2616 | return __vb2_perform_fileio(q, (char __user *) data, count, |
2620 | ppos, nonblocking, 0); | 2617 | ppos, nonblocking, 0); |
2621 | } | 2618 | } |
2622 | EXPORT_SYMBOL_GPL(vb2_write); | 2619 | EXPORT_SYMBOL_GPL(vb2_write); |
2623 | 2620 | ||
2624 | 2621 | ||
2625 | /* | 2622 | /* |
2626 | * The following functions are not part of the vb2 core API, but are helper | 2623 | * The following functions are not part of the vb2 core API, but are helper |
2627 | * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations | 2624 | * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations |
2628 | * and struct vb2_ops. | 2625 | * and struct vb2_ops. |
2629 | * They contain boilerplate code that most if not all drivers have to do | 2626 | * They contain boilerplate code that most if not all drivers have to do |
2630 | * and so they simplify the driver code. | 2627 | * and so they simplify the driver code. |
2631 | */ | 2628 | */ |
2632 | 2629 | ||
2633 | /* The queue is busy if there is a owner and you are not that owner. */ | 2630 | /* The queue is busy if there is a owner and you are not that owner. */ |
2634 | static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file) | 2631 | static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file) |
2635 | { | 2632 | { |
2636 | return vdev->queue->owner && vdev->queue->owner != file->private_data; | 2633 | return vdev->queue->owner && vdev->queue->owner != file->private_data; |
2637 | } | 2634 | } |
2638 | 2635 | ||
2639 | /* vb2 ioctl helpers */ | 2636 | /* vb2 ioctl helpers */ |
2640 | 2637 | ||
/* VIDIOC_REQBUFS helper: validates, allocates and tracks queue ownership. */
int vb2_ioctl_reqbufs(struct file *file, void *priv,
			  struct v4l2_requestbuffers *p)
{
	struct video_device *vdev = video_devdata(file);
	/* Validate the memory model and buffer type before touching the queue. */
	int res = __verify_memory_type(vdev->queue, p->memory, p->type);

	if (res)
		return res;
	/* Only the owning file handle (or a first caller) may (re)allocate. */
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	res = __reqbufs(vdev->queue, p);
	/* If count == 0, then the owner has released all buffers and he
	   is no longer owner of the queue. Otherwise we have a new owner. */
	if (res == 0)
		vdev->queue->owner = p->count ? file->private_data : NULL;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
2659 | 2656 | ||
/* VIDIOC_CREATE_BUFS helper: adds buffers and makes the caller the owner. */
int vb2_ioctl_create_bufs(struct file *file, void *priv,
			  struct v4l2_create_buffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);

	/* Report the index the first newly created buffer would receive. */
	p->index = vdev->queue->num_buffers;
	/* If count == 0, then just check if memory and type are valid.
	   Any -EBUSY result from __verify_memory_type can be mapped to 0. */
	if (p->count == 0)
		return res != -EBUSY ? res : 0;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	res = __create_bufs(vdev->queue, p);
	/* A successful CREATE_BUFS makes this file handle the queue owner. */
	if (res == 0)
		vdev->queue->owner = file->private_data;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
2681 | 2678 | ||
2682 | int vb2_ioctl_prepare_buf(struct file *file, void *priv, | 2679 | int vb2_ioctl_prepare_buf(struct file *file, void *priv, |
2683 | struct v4l2_buffer *p) | 2680 | struct v4l2_buffer *p) |
2684 | { | 2681 | { |
2685 | struct video_device *vdev = video_devdata(file); | 2682 | struct video_device *vdev = video_devdata(file); |
2686 | 2683 | ||
2687 | if (vb2_queue_is_busy(vdev, file)) | 2684 | if (vb2_queue_is_busy(vdev, file)) |
2688 | return -EBUSY; | 2685 | return -EBUSY; |
2689 | return vb2_prepare_buf(vdev->queue, p); | 2686 | return vb2_prepare_buf(vdev->queue, p); |
2690 | } | 2687 | } |
2691 | EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf); | 2688 | EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf); |
2692 | 2689 | ||
2693 | int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p) | 2690 | int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p) |
2694 | { | 2691 | { |
2695 | struct video_device *vdev = video_devdata(file); | 2692 | struct video_device *vdev = video_devdata(file); |
2696 | 2693 | ||
2697 | /* No need to call vb2_queue_is_busy(), anyone can query buffers. */ | 2694 | /* No need to call vb2_queue_is_busy(), anyone can query buffers. */ |
2698 | return vb2_querybuf(vdev->queue, p); | 2695 | return vb2_querybuf(vdev->queue, p); |
2699 | } | 2696 | } |
2700 | EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf); | 2697 | EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf); |
2701 | 2698 | ||
2702 | int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p) | 2699 | int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p) |
2703 | { | 2700 | { |
2704 | struct video_device *vdev = video_devdata(file); | 2701 | struct video_device *vdev = video_devdata(file); |
2705 | 2702 | ||
2706 | if (vb2_queue_is_busy(vdev, file)) | 2703 | if (vb2_queue_is_busy(vdev, file)) |
2707 | return -EBUSY; | 2704 | return -EBUSY; |
2708 | return vb2_qbuf(vdev->queue, p); | 2705 | return vb2_qbuf(vdev->queue, p); |
2709 | } | 2706 | } |
2710 | EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf); | 2707 | EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf); |
2711 | 2708 | ||
2712 | int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p) | 2709 | int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p) |
2713 | { | 2710 | { |
2714 | struct video_device *vdev = video_devdata(file); | 2711 | struct video_device *vdev = video_devdata(file); |
2715 | 2712 | ||
2716 | if (vb2_queue_is_busy(vdev, file)) | 2713 | if (vb2_queue_is_busy(vdev, file)) |
2717 | return -EBUSY; | 2714 | return -EBUSY; |
2718 | return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK); | 2715 | return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK); |
2719 | } | 2716 | } |
2720 | EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf); | 2717 | EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf); |
2721 | 2718 | ||
2722 | int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i) | 2719 | int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i) |
2723 | { | 2720 | { |
2724 | struct video_device *vdev = video_devdata(file); | 2721 | struct video_device *vdev = video_devdata(file); |
2725 | 2722 | ||
2726 | if (vb2_queue_is_busy(vdev, file)) | 2723 | if (vb2_queue_is_busy(vdev, file)) |
2727 | return -EBUSY; | 2724 | return -EBUSY; |
2728 | return vb2_streamon(vdev->queue, i); | 2725 | return vb2_streamon(vdev->queue, i); |
2729 | } | 2726 | } |
2730 | EXPORT_SYMBOL_GPL(vb2_ioctl_streamon); | 2727 | EXPORT_SYMBOL_GPL(vb2_ioctl_streamon); |
2731 | 2728 | ||
2732 | int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) | 2729 | int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) |
2733 | { | 2730 | { |
2734 | struct video_device *vdev = video_devdata(file); | 2731 | struct video_device *vdev = video_devdata(file); |
2735 | 2732 | ||
2736 | if (vb2_queue_is_busy(vdev, file)) | 2733 | if (vb2_queue_is_busy(vdev, file)) |
2737 | return -EBUSY; | 2734 | return -EBUSY; |
2738 | return vb2_streamoff(vdev->queue, i); | 2735 | return vb2_streamoff(vdev->queue, i); |
2739 | } | 2736 | } |
2740 | EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff); | 2737 | EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff); |
2741 | 2738 | ||
2742 | int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p) | 2739 | int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p) |
2743 | { | 2740 | { |
2744 | struct video_device *vdev = video_devdata(file); | 2741 | struct video_device *vdev = video_devdata(file); |
2745 | 2742 | ||
2746 | if (vb2_queue_is_busy(vdev, file)) | 2743 | if (vb2_queue_is_busy(vdev, file)) |
2747 | return -EBUSY; | 2744 | return -EBUSY; |
2748 | return vb2_expbuf(vdev->queue, p); | 2745 | return vb2_expbuf(vdev->queue, p); |
2749 | } | 2746 | } |
2750 | EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf); | 2747 | EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf); |
2751 | 2748 | ||
2752 | /* v4l2_file_operations helpers */ | 2749 | /* v4l2_file_operations helpers */ |
2753 | 2750 | ||
2754 | int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma) | 2751 | int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma) |
2755 | { | 2752 | { |
2756 | struct video_device *vdev = video_devdata(file); | 2753 | struct video_device *vdev = video_devdata(file); |
2757 | struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; | 2754 | struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; |
2758 | int err; | 2755 | int err; |
2759 | 2756 | ||
2760 | if (lock && mutex_lock_interruptible(lock)) | 2757 | if (lock && mutex_lock_interruptible(lock)) |
2761 | return -ERESTARTSYS; | 2758 | return -ERESTARTSYS; |
2762 | err = vb2_mmap(vdev->queue, vma); | 2759 | err = vb2_mmap(vdev->queue, vma); |
2763 | if (lock) | 2760 | if (lock) |
2764 | mutex_unlock(lock); | 2761 | mutex_unlock(lock); |
2765 | return err; | 2762 | return err; |
2766 | } | 2763 | } |
2767 | EXPORT_SYMBOL_GPL(vb2_fop_mmap); | 2764 | EXPORT_SYMBOL_GPL(vb2_fop_mmap); |
2768 | 2765 | ||
/*
 * Release helper shared by vb2_fop_release(): if this file handle owns the
 * queue, release the queue (under @lock if provided) and drop ownership,
 * then release the v4l2 file handle.
 */
int _vb2_fop_release(struct file *file, struct mutex *lock)
{
	struct video_device *vdev = video_devdata(file);

	/* Only the queue owner tears the queue down; other handles just
	 * release their v4l2_fh below. */
	if (file->private_data == vdev->queue->owner) {
		if (lock)
			mutex_lock(lock);
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
		if (lock)
			mutex_unlock(lock);
	}
	return v4l2_fh_release(file);
}
EXPORT_SYMBOL_GPL(_vb2_fop_release);
2784 | 2781 | ||
2785 | int vb2_fop_release(struct file *file) | 2782 | int vb2_fop_release(struct file *file) |
2786 | { | 2783 | { |
2787 | struct video_device *vdev = video_devdata(file); | 2784 | struct video_device *vdev = video_devdata(file); |
2788 | struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; | 2785 | struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; |
2789 | 2786 | ||
2790 | return _vb2_fop_release(file, lock); | 2787 | return _vb2_fop_release(file, lock); |
2791 | } | 2788 | } |
2792 | EXPORT_SYMBOL_GPL(vb2_fop_release); | 2789 | EXPORT_SYMBOL_GPL(vb2_fop_release); |
2793 | 2790 | ||
/* file_operations .write helper: locked wrapper around vb2_write(). */
ssize_t vb2_fop_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	/* The queue's own lock takes precedence over the device lock. */
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_write(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	/* The first write() sets up the fileio emulator; from then on this
	 * file handle owns the queue. */
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_write);
2815 | 2812 | ||
2816 | ssize_t vb2_fop_read(struct file *file, char __user *buf, | 2813 | ssize_t vb2_fop_read(struct file *file, char __user *buf, |
2817 | size_t count, loff_t *ppos) | 2814 | size_t count, loff_t *ppos) |
2818 | { | 2815 | { |
2819 | struct video_device *vdev = video_devdata(file); | 2816 | struct video_device *vdev = video_devdata(file); |
2820 | struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; | 2817 | struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; |
2821 | int err = -EBUSY; | 2818 | int err = -EBUSY; |
2822 | 2819 | ||
2823 | if (lock && mutex_lock_interruptible(lock)) | 2820 | if (lock && mutex_lock_interruptible(lock)) |
2824 | return -ERESTARTSYS; | 2821 | return -ERESTARTSYS; |
2825 | if (vb2_queue_is_busy(vdev, file)) | 2822 | if (vb2_queue_is_busy(vdev, file)) |
2826 | goto exit; | 2823 | goto exit; |
2827 | err = vb2_read(vdev->queue, buf, count, ppos, | 2824 | err = vb2_read(vdev->queue, buf, count, ppos, |
2828 | file->f_flags & O_NONBLOCK); | 2825 | file->f_flags & O_NONBLOCK); |
2829 | if (vdev->queue->fileio) | 2826 | if (vdev->queue->fileio) |
2830 | vdev->queue->owner = file->private_data; | 2827 | vdev->queue->owner = file->private_data; |
2831 | exit: | 2828 | exit: |
2832 | if (lock) | 2829 | if (lock) |
2833 | mutex_unlock(lock); | 2830 | mutex_unlock(lock); |
2834 | return err; | 2831 | return err; |
2835 | } | 2832 | } |
2836 | EXPORT_SYMBOL_GPL(vb2_fop_read); | 2833 | EXPORT_SYMBOL_GPL(vb2_fop_read); |
2837 | 2834 | ||
/*
 * vb2_fop_poll() - generic poll() file operation for vb2-based drivers.
 *
 * Locking is conditional: the mutex is only taken when this poll call
 * could itself start file I/O (no buffers allocated yet, fileio not yet
 * running, and the requested events match the queue's direction and
 * supported io_modes).  Otherwise the poll runs lockless to avoid
 * serializing every wakeup.
 *
 * Returns a POLL* event mask; POLLERR when lock acquisition is
 * interrupted.
 */
unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *q = vdev->queue;
	struct mutex *lock = q->lock ? q->lock : vdev->lock;
	unsigned long req_events = poll_requested_events(wait);
	unsigned res;
	void *fileio;
	bool must_lock = false;

	/* Try to be smart: only lock if polling might start fileio,
	   otherwise locking will only introduce unwanted delays. */
	if (q->num_buffers == 0 && q->fileio == NULL) {
		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
				(req_events & (POLLIN | POLLRDNORM)))
			must_lock = true;
		else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
				(req_events & (POLLOUT | POLLWRNORM)))
			must_lock = true;
	}

	/* If locking is needed, but this helper doesn't know how, then you
	   shouldn't be using this helper but you should write your own. */
	WARN_ON(must_lock && !lock);

	if (must_lock && lock && mutex_lock_interruptible(lock))
		return POLLERR;

	/* Snapshot q->fileio before polling so we can detect below whether
	   vb2_poll() started file I/O on behalf of this call. */
	fileio = q->fileio;

	res = vb2_poll(vdev->queue, file, wait);

	/* If fileio was started, then we have a new queue owner. */
	if (must_lock && !fileio && q->fileio)
		q->owner = file->private_data;
	if (must_lock && lock)
		mutex_unlock(lock);
	return res;
}
EXPORT_SYMBOL_GPL(vb2_fop_poll);
2878 | 2875 | ||
2879 | #ifndef CONFIG_MMU | 2876 | #ifndef CONFIG_MMU |
2880 | unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, | 2877 | unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, |
2881 | unsigned long len, unsigned long pgoff, unsigned long flags) | 2878 | unsigned long len, unsigned long pgoff, unsigned long flags) |
2882 | { | 2879 | { |
2883 | struct video_device *vdev = video_devdata(file); | 2880 | struct video_device *vdev = video_devdata(file); |
2884 | struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; | 2881 | struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; |
2885 | int ret; | 2882 | int ret; |
2886 | 2883 | ||
2887 | if (lock && mutex_lock_interruptible(lock)) | 2884 | if (lock && mutex_lock_interruptible(lock)) |
2888 | return -ERESTARTSYS; | 2885 | return -ERESTARTSYS; |
2889 | ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags); | 2886 | ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags); |
2890 | if (lock) | 2887 | if (lock) |
2891 | mutex_unlock(lock); | 2888 | mutex_unlock(lock); |
2892 | return ret; | 2889 | return ret; |
2893 | } | 2890 | } |
2894 | EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area); | 2891 | EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area); |
2895 | #endif | 2892 | #endif |
2896 | 2893 | ||
2897 | /* vb2_ops helpers. Only use if vq->lock is non-NULL. */ | 2894 | /* vb2_ops helpers. Only use if vq->lock is non-NULL. */ |
2898 | 2895 | ||
/*
 * vb2_ops_wait_prepare() - generic wait_prepare vb2 op.
 *
 * Drops vq->lock before vb2 sleeps waiting for a buffer, so other
 * operations on the queue can proceed meanwhile.  Only usable when
 * vq->lock is non-NULL.
 */
void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
2904 | 2901 | ||
/*
 * vb2_ops_wait_finish() - generic wait_finish vb2 op.
 *
 * Re-acquires vq->lock after vb2 finished sleeping, restoring the
 * serialization dropped by vb2_ops_wait_prepare().  Only usable when
 * vq->lock is non-NULL.
 */
void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
2910 | 2907 | ||
/* Module metadata for the videobuf2 core helper library. */
MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");
2914 | 2911 |