Commit 83ae7c5a1b5b5cd4380ff70797e4c5dcfb61a70d
Committed by: Mauro Carvalho Chehab
1 parent: 19b6ef5164
Exists in: smarc-l5.0.0_1.0.0-ga and in 5 other branches
[media] v4l: vb2: add buffer exporting via dmabuf
This patch adds an extension to videobuf2-core. It allows exporting an MMAP buffer as a DMABUF file descriptor.

Signed-off-by: Tomasz Stanislawski <t.stanislaws@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Hans Verkuil <hans.verkuil@cisco.com>
Tested-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Showing 4 changed files with 103 additions and 0 deletions (Inline Diff)
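For context, the new helpers in this commit back the VIDIOC_EXPBUF ioctl, which hands userspace a DMABUF file descriptor for a buffer originally allocated with V4L2_MEMORY_MMAP. A minimal userspace sketch of the resulting call flow (not part of this commit; it assumes the v4l2_exportbuffer layout as eventually merged, and that buffers were already set up via VIDIOC_REQBUFS on the capture queue):

	#include <fcntl.h>		/* O_CLOEXEC */
	#include <stdio.h>		/* perror */
	#include <string.h>		/* memset */
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int export_capture_buffer(int vfd, unsigned int index)
	{
		struct v4l2_exportbuffer expbuf;

		memset(&expbuf, 0, sizeof(expbuf));
		expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		expbuf.index = index;		/* buffer to export */
		expbuf.flags = O_CLOEXEC;	/* flags for the new fd */

		if (ioctl(vfd, VIDIOC_EXPBUF, &expbuf) < 0) {
			perror("VIDIOC_EXPBUF");
			return -1;
		}

		/* expbuf.fd is now a DMABUF file descriptor that can be
		 * handed to another device or process for zero-copy use. */
		return expbuf.fd;
	}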
drivers/media/v4l2-core/v4l2-mem2mem.c
1 | /* | 1 | /* |
2 | * Memory-to-memory device framework for Video for Linux 2 and videobuf. | 2 | * Memory-to-memory device framework for Video for Linux 2 and videobuf. |
3 | * | 3 | * |
4 | * Helper functions for devices that use videobuf buffers for both their | 4 | * Helper functions for devices that use videobuf buffers for both their |
5 | * source and destination. | 5 | * source and destination. |
6 | * | 6 | * |
7 | * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. | 7 | * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. |
8 | * Pawel Osciak, <pawel@osciak.com> | 8 | * Pawel Osciak, <pawel@osciak.com> |
9 | * Marek Szyprowski, <m.szyprowski@samsung.com> | 9 | * Marek Szyprowski, <m.szyprowski@samsung.com> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License as published by the | 12 | * it under the terms of the GNU General Public License as published by the |
13 | * Free Software Foundation; either version 2 of the License, or (at your | 13 | * Free Software Foundation; either version 2 of the License, or (at your |
14 | * option) any later version. | 14 | * option) any later version. |
15 | */ | 15 | */ |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | 19 | ||
20 | #include <media/videobuf2-core.h> | 20 | #include <media/videobuf2-core.h> |
21 | #include <media/v4l2-mem2mem.h> | 21 | #include <media/v4l2-mem2mem.h> |
22 | #include <media/v4l2-dev.h> | 22 | #include <media/v4l2-dev.h> |
23 | #include <media/v4l2-fh.h> | 23 | #include <media/v4l2-fh.h> |
24 | #include <media/v4l2-event.h> | 24 | #include <media/v4l2-event.h> |
25 | 25 | ||
26 | MODULE_DESCRIPTION("Mem to mem device framework for videobuf"); | 26 | MODULE_DESCRIPTION("Mem to mem device framework for videobuf"); |
27 | MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>"); | 27 | MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>"); |
28 | MODULE_LICENSE("GPL"); | 28 | MODULE_LICENSE("GPL"); |
29 | 29 | ||
30 | static bool debug; | 30 | static bool debug; |
31 | module_param(debug, bool, 0644); | 31 | module_param(debug, bool, 0644); |
32 | 32 | ||
33 | #define dprintk(fmt, arg...) \ | 33 | #define dprintk(fmt, arg...) \ |
34 | do { \ | 34 | do { \ |
35 | if (debug) \ | 35 | if (debug) \ |
36 | printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\ | 36 | printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\ |
37 | } while (0) | 37 | } while (0) |
38 | 38 | ||
39 | 39 | ||
40 | /* Instance is already queued on the job_queue */ | 40 | /* Instance is already queued on the job_queue */ |
41 | #define TRANS_QUEUED (1 << 0) | 41 | #define TRANS_QUEUED (1 << 0) |
42 | /* Instance is currently running in hardware */ | 42 | /* Instance is currently running in hardware */ |
43 | #define TRANS_RUNNING (1 << 1) | 43 | #define TRANS_RUNNING (1 << 1) |
44 | 44 | ||
45 | 45 | ||
46 | /* Offset base for buffers on the destination queue - used to distinguish | 46 | /* Offset base for buffers on the destination queue - used to distinguish |
47 | * between source and destination buffers when mmapping - they receive the same | 47 | * between source and destination buffers when mmapping - they receive the same |
48 | * offsets but for different queues */ | 48 | * offsets but for different queues */ |
49 | #define DST_QUEUE_OFF_BASE (1 << 30) | 49 | #define DST_QUEUE_OFF_BASE (1 << 30) |
50 | 50 | ||
51 | 51 | ||
52 | /** | 52 | /** |
53 | * struct v4l2_m2m_dev - per-device context | 53 | * struct v4l2_m2m_dev - per-device context |
54 | * @curr_ctx: currently running instance | 54 | * @curr_ctx: currently running instance |
55 | * @job_queue: instances queued to run | 55 | * @job_queue: instances queued to run |
56 | * @job_spinlock: protects job_queue | 56 | * @job_spinlock: protects job_queue |
57 | * @m2m_ops: driver callbacks | 57 | * @m2m_ops: driver callbacks |
58 | */ | 58 | */ |
59 | struct v4l2_m2m_dev { | 59 | struct v4l2_m2m_dev { |
60 | struct v4l2_m2m_ctx *curr_ctx; | 60 | struct v4l2_m2m_ctx *curr_ctx; |
61 | 61 | ||
62 | struct list_head job_queue; | 62 | struct list_head job_queue; |
63 | spinlock_t job_spinlock; | 63 | spinlock_t job_spinlock; |
64 | 64 | ||
65 | struct v4l2_m2m_ops *m2m_ops; | 65 | struct v4l2_m2m_ops *m2m_ops; |
66 | }; | 66 | }; |
67 | 67 | ||
68 | static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx, | 68 | static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx, |
69 | enum v4l2_buf_type type) | 69 | enum v4l2_buf_type type) |
70 | { | 70 | { |
71 | if (V4L2_TYPE_IS_OUTPUT(type)) | 71 | if (V4L2_TYPE_IS_OUTPUT(type)) |
72 | return &m2m_ctx->out_q_ctx; | 72 | return &m2m_ctx->out_q_ctx; |
73 | else | 73 | else |
74 | return &m2m_ctx->cap_q_ctx; | 74 | return &m2m_ctx->cap_q_ctx; |
75 | } | 75 | } |
76 | 76 | ||
77 | /** | 77 | /** |
78 | * v4l2_m2m_get_vq() - return vb2_queue for the given type | 78 | * v4l2_m2m_get_vq() - return vb2_queue for the given type |
79 | */ | 79 | */ |
80 | struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, | 80 | struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, |
81 | enum v4l2_buf_type type) | 81 | enum v4l2_buf_type type) |
82 | { | 82 | { |
83 | struct v4l2_m2m_queue_ctx *q_ctx; | 83 | struct v4l2_m2m_queue_ctx *q_ctx; |
84 | 84 | ||
85 | q_ctx = get_queue_ctx(m2m_ctx, type); | 85 | q_ctx = get_queue_ctx(m2m_ctx, type); |
86 | if (!q_ctx) | 86 | if (!q_ctx) |
87 | return NULL; | 87 | return NULL; |
88 | 88 | ||
89 | return &q_ctx->q; | 89 | return &q_ctx->q; |
90 | } | 90 | } |
91 | EXPORT_SYMBOL(v4l2_m2m_get_vq); | 91 | EXPORT_SYMBOL(v4l2_m2m_get_vq); |
92 | 92 | ||
93 | /** | 93 | /** |
94 | * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers | 94 | * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers |
95 | */ | 95 | */ |
96 | void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) | 96 | void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) |
97 | { | 97 | { |
98 | struct v4l2_m2m_buffer *b = NULL; | 98 | struct v4l2_m2m_buffer *b = NULL; |
99 | unsigned long flags; | 99 | unsigned long flags; |
100 | 100 | ||
101 | spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); | 101 | spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); |
102 | 102 | ||
103 | if (list_empty(&q_ctx->rdy_queue)) { | 103 | if (list_empty(&q_ctx->rdy_queue)) { |
104 | spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); | 104 | spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); |
105 | return NULL; | 105 | return NULL; |
106 | } | 106 | } |
107 | 107 | ||
108 | b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); | 108 | b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); |
109 | spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); | 109 | spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); |
110 | return &b->vb; | 110 | return &b->vb; |
111 | } | 111 | } |
112 | EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf); | 112 | EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf); |
113 | 113 | ||
114 | /** | 114 | /** |
115 | * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and | 115 | * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and |
116 | * return it | 116 | * return it |
117 | */ | 117 | */ |
118 | void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) | 118 | void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) |
119 | { | 119 | { |
120 | struct v4l2_m2m_buffer *b = NULL; | 120 | struct v4l2_m2m_buffer *b = NULL; |
121 | unsigned long flags; | 121 | unsigned long flags; |
122 | 122 | ||
123 | spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); | 123 | spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); |
124 | if (list_empty(&q_ctx->rdy_queue)) { | 124 | if (list_empty(&q_ctx->rdy_queue)) { |
125 | spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); | 125 | spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); |
126 | return NULL; | 126 | return NULL; |
127 | } | 127 | } |
128 | b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); | 128 | b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); |
129 | list_del(&b->list); | 129 | list_del(&b->list); |
130 | q_ctx->num_rdy--; | 130 | q_ctx->num_rdy--; |
131 | spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); | 131 | spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); |
132 | 132 | ||
133 | return &b->vb; | 133 | return &b->vb; |
134 | } | 134 | } |
135 | EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove); | 135 | EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove); |
136 | 136 | ||
137 | /* | 137 | /* |
138 | * Scheduling handlers | 138 | * Scheduling handlers |
139 | */ | 139 | */ |
140 | 140 | ||
141 | /** | 141 | /** |
142 | * v4l2_m2m_get_curr_priv() - return driver private data for the currently | 142 | * v4l2_m2m_get_curr_priv() - return driver private data for the currently |
143 | * running instance or NULL if no instance is running | 143 | * running instance or NULL if no instance is running |
144 | */ | 144 | */ |
145 | void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev) | 145 | void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev) |
146 | { | 146 | { |
147 | unsigned long flags; | 147 | unsigned long flags; |
148 | void *ret = NULL; | 148 | void *ret = NULL; |
149 | 149 | ||
150 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); | 150 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
151 | if (m2m_dev->curr_ctx) | 151 | if (m2m_dev->curr_ctx) |
152 | ret = m2m_dev->curr_ctx->priv; | 152 | ret = m2m_dev->curr_ctx->priv; |
153 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); | 153 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
154 | 154 | ||
155 | return ret; | 155 | return ret; |
156 | } | 156 | } |
157 | EXPORT_SYMBOL(v4l2_m2m_get_curr_priv); | 157 | EXPORT_SYMBOL(v4l2_m2m_get_curr_priv); |
158 | 158 | ||
159 | /** | 159 | /** |
160 | * v4l2_m2m_try_run() - select next job to perform and run it if possible | 160 | * v4l2_m2m_try_run() - select next job to perform and run it if possible |
161 | * | 161 | * |
162 | * Get next transaction (if present) from the waiting jobs list and run it. | 162 | * Get next transaction (if present) from the waiting jobs list and run it. |
163 | */ | 163 | */ |
164 | static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev) | 164 | static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev) |
165 | { | 165 | { |
166 | unsigned long flags; | 166 | unsigned long flags; |
167 | 167 | ||
168 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); | 168 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
169 | if (NULL != m2m_dev->curr_ctx) { | 169 | if (NULL != m2m_dev->curr_ctx) { |
170 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); | 170 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
171 | dprintk("Another instance is running, won't run now\n"); | 171 | dprintk("Another instance is running, won't run now\n"); |
172 | return; | 172 | return; |
173 | } | 173 | } |
174 | 174 | ||
175 | if (list_empty(&m2m_dev->job_queue)) { | 175 | if (list_empty(&m2m_dev->job_queue)) { |
176 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); | 176 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
177 | dprintk("No job pending\n"); | 177 | dprintk("No job pending\n"); |
178 | return; | 178 | return; |
179 | } | 179 | } |
180 | 180 | ||
181 | m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue, | 181 | m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue, |
182 | struct v4l2_m2m_ctx, queue); | 182 | struct v4l2_m2m_ctx, queue); |
183 | m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING; | 183 | m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING; |
184 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); | 184 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
185 | 185 | ||
186 | m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv); | 186 | m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv); |
187 | } | 187 | } |
188 | 188 | ||
189 | /** | 189 | /** |
190 | * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to | 190 | * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to |
191 | * the pending job queue and add it if so. | 191 | * the pending job queue and add it if so. |
192 | * @m2m_ctx: m2m context assigned to the instance to be checked | 192 | * @m2m_ctx: m2m context assigned to the instance to be checked |
193 | * | 193 | * |
194 | * There are three basic requirements an instance has to meet to be able to run: | 194 | * There are three basic requirements an instance has to meet to be able to run: |
195 | * 1) at least one source buffer has to be queued, | 195 | * 1) at least one source buffer has to be queued, |
196 | * 2) at least one destination buffer has to be queued, | 196 | * 2) at least one destination buffer has to be queued, |
197 | * 3) streaming has to be on. | 197 | * 3) streaming has to be on. |
198 | * | 198 | * |
199 | * There may also be additional, custom requirements. In such case the driver | 199 | * There may also be additional, custom requirements. In such case the driver |
200 | * should supply a custom callback (job_ready in v4l2_m2m_ops) that should | 200 | * should supply a custom callback (job_ready in v4l2_m2m_ops) that should |
201 | * return 1 if the instance is ready. | 201 | * return 1 if the instance is ready. |
202 | * An example of the above could be an instance that requires more than one | 202 | * An example of the above could be an instance that requires more than one |
203 | * src/dst buffer per transaction. | 203 | * src/dst buffer per transaction. |
204 | */ | 204 | */ |
205 | static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) | 205 | static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) |
206 | { | 206 | { |
207 | struct v4l2_m2m_dev *m2m_dev; | 207 | struct v4l2_m2m_dev *m2m_dev; |
208 | unsigned long flags_job, flags; | 208 | unsigned long flags_job, flags; |
209 | 209 | ||
210 | m2m_dev = m2m_ctx->m2m_dev; | 210 | m2m_dev = m2m_ctx->m2m_dev; |
211 | dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx); | 211 | dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx); |
212 | 212 | ||
213 | if (!m2m_ctx->out_q_ctx.q.streaming | 213 | if (!m2m_ctx->out_q_ctx.q.streaming |
214 | || !m2m_ctx->cap_q_ctx.q.streaming) { | 214 | || !m2m_ctx->cap_q_ctx.q.streaming) { |
215 | dprintk("Streaming needs to be on for both queues\n"); | 215 | dprintk("Streaming needs to be on for both queues\n"); |
216 | return; | 216 | return; |
217 | } | 217 | } |
218 | 218 | ||
219 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); | 219 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); |
220 | if (m2m_ctx->job_flags & TRANS_QUEUED) { | 220 | if (m2m_ctx->job_flags & TRANS_QUEUED) { |
221 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); | 221 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); |
222 | dprintk("On job queue already\n"); | 222 | dprintk("On job queue already\n"); |
223 | return; | 223 | return; |
224 | } | 224 | } |
225 | 225 | ||
226 | spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); | 226 | spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); |
227 | if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) { | 227 | if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) { |
228 | spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); | 228 | spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); |
229 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); | 229 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); |
230 | dprintk("No input buffers available\n"); | 230 | dprintk("No input buffers available\n"); |
231 | return; | 231 | return; |
232 | } | 232 | } |
233 | if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) { | 233 | if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) { |
234 | spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); | 234 | spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); |
235 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); | 235 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); |
236 | dprintk("No output buffers available\n"); | 236 | dprintk("No output buffers available\n"); |
237 | return; | 237 | return; |
238 | } | 238 | } |
239 | spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); | 239 | spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); |
240 | 240 | ||
241 | if (m2m_dev->m2m_ops->job_ready | 241 | if (m2m_dev->m2m_ops->job_ready |
242 | && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { | 242 | && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { |
243 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); | 243 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); |
244 | dprintk("Driver not ready\n"); | 244 | dprintk("Driver not ready\n"); |
245 | return; | 245 | return; |
246 | } | 246 | } |
247 | 247 | ||
248 | list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue); | 248 | list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue); |
249 | m2m_ctx->job_flags |= TRANS_QUEUED; | 249 | m2m_ctx->job_flags |= TRANS_QUEUED; |
250 | 250 | ||
251 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); | 251 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); |
252 | 252 | ||
253 | v4l2_m2m_try_run(m2m_dev); | 253 | v4l2_m2m_try_run(m2m_dev); |
254 | } | 254 | } |
255 | 255 | ||
256 | /** | 256 | /** |
257 | * v4l2_m2m_job_finish() - inform the framework that a job has been finished | 257 | * v4l2_m2m_job_finish() - inform the framework that a job has been finished |
258 | * and have it clean up | 258 | * and have it clean up |
259 | * | 259 | * |
260 | * Called by a driver to yield back the device after it has finished with it. | 260 | * Called by a driver to yield back the device after it has finished with it. |
261 | * Should be called as soon as possible after reaching a state which allows | 261 | * Should be called as soon as possible after reaching a state which allows |
262 | * other instances to take control of the device. | 262 | * other instances to take control of the device. |
263 | * | 263 | * |
264 | * This function has to be called only after device_run() callback has been | 264 | * This function has to be called only after device_run() callback has been |
265 | * called on the driver. To prevent recursion, it should not be called directly | 265 | * called on the driver. To prevent recursion, it should not be called directly |
266 | * from the device_run() callback though. | 266 | * from the device_run() callback though. |
267 | */ | 267 | */ |
268 | void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, | 268 | void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, |
269 | struct v4l2_m2m_ctx *m2m_ctx) | 269 | struct v4l2_m2m_ctx *m2m_ctx) |
270 | { | 270 | { |
271 | unsigned long flags; | 271 | unsigned long flags; |
272 | 272 | ||
273 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); | 273 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
274 | if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) { | 274 | if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) { |
275 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); | 275 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
276 | dprintk("Called by an instance not currently running\n"); | 276 | dprintk("Called by an instance not currently running\n"); |
277 | return; | 277 | return; |
278 | } | 278 | } |
279 | 279 | ||
280 | list_del(&m2m_dev->curr_ctx->queue); | 280 | list_del(&m2m_dev->curr_ctx->queue); |
281 | m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); | 281 | m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); |
282 | wake_up(&m2m_dev->curr_ctx->finished); | 282 | wake_up(&m2m_dev->curr_ctx->finished); |
283 | m2m_dev->curr_ctx = NULL; | 283 | m2m_dev->curr_ctx = NULL; |
284 | 284 | ||
285 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); | 285 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
286 | 286 | ||
287 | /* This instance might have more buffers ready, but since we do not | 287 | /* This instance might have more buffers ready, but since we do not |
288 | * allow more than one job on the job_queue per instance, each has | 288 | * allow more than one job on the job_queue per instance, each has |
289 | * to be scheduled separately after the previous one finishes. */ | 289 | * to be scheduled separately after the previous one finishes. */ |
290 | v4l2_m2m_try_schedule(m2m_ctx); | 290 | v4l2_m2m_try_schedule(m2m_ctx); |
291 | v4l2_m2m_try_run(m2m_dev); | 291 | v4l2_m2m_try_run(m2m_dev); |
292 | } | 292 | } |
293 | EXPORT_SYMBOL(v4l2_m2m_job_finish); | 293 | EXPORT_SYMBOL(v4l2_m2m_job_finish); |
294 | 294 | ||
295 | /** | 295 | /** |
296 | * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer | 296 | * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer |
297 | */ | 297 | */ |
298 | int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 298 | int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
299 | struct v4l2_requestbuffers *reqbufs) | 299 | struct v4l2_requestbuffers *reqbufs) |
300 | { | 300 | { |
301 | struct vb2_queue *vq; | 301 | struct vb2_queue *vq; |
302 | 302 | ||
303 | vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type); | 303 | vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type); |
304 | return vb2_reqbufs(vq, reqbufs); | 304 | return vb2_reqbufs(vq, reqbufs); |
305 | } | 305 | } |
306 | EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); | 306 | EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); |
307 | 307 | ||
308 | /** | 308 | /** |
309 | * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer | 309 | * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer |
310 | * | 310 | * |
311 | * See v4l2_m2m_mmap() documentation for details. | 311 | * See v4l2_m2m_mmap() documentation for details. |
312 | */ | 312 | */ |
313 | int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 313 | int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
314 | struct v4l2_buffer *buf) | 314 | struct v4l2_buffer *buf) |
315 | { | 315 | { |
316 | struct vb2_queue *vq; | 316 | struct vb2_queue *vq; |
317 | int ret = 0; | 317 | int ret = 0; |
318 | unsigned int i; | 318 | unsigned int i; |
319 | 319 | ||
320 | vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); | 320 | vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); |
321 | ret = vb2_querybuf(vq, buf); | 321 | ret = vb2_querybuf(vq, buf); |
322 | 322 | ||
323 | /* Adjust MMAP memory offsets for the CAPTURE queue */ | 323 | /* Adjust MMAP memory offsets for the CAPTURE queue */ |
324 | if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) { | 324 | if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) { |
325 | if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) { | 325 | if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) { |
326 | for (i = 0; i < buf->length; ++i) | 326 | for (i = 0; i < buf->length; ++i) |
327 | buf->m.planes[i].m.mem_offset | 327 | buf->m.planes[i].m.mem_offset |
328 | += DST_QUEUE_OFF_BASE; | 328 | += DST_QUEUE_OFF_BASE; |
329 | } else { | 329 | } else { |
330 | buf->m.offset += DST_QUEUE_OFF_BASE; | 330 | buf->m.offset += DST_QUEUE_OFF_BASE; |
331 | } | 331 | } |
332 | } | 332 | } |
333 | 333 | ||
334 | return ret; | 334 | return ret; |
335 | } | 335 | } |
336 | EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); | 336 | EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); |
337 | 337 | ||
338 | /** | 338 | /** |
339 | * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on | 339 | * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on |
340 | * the type | 340 | * the type |
341 | */ | 341 | */ |
342 | int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 342 | int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
343 | struct v4l2_buffer *buf) | 343 | struct v4l2_buffer *buf) |
344 | { | 344 | { |
345 | struct vb2_queue *vq; | 345 | struct vb2_queue *vq; |
346 | int ret; | 346 | int ret; |
347 | 347 | ||
348 | vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); | 348 | vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); |
349 | ret = vb2_qbuf(vq, buf); | 349 | ret = vb2_qbuf(vq, buf); |
350 | if (!ret) | 350 | if (!ret) |
351 | v4l2_m2m_try_schedule(m2m_ctx); | 351 | v4l2_m2m_try_schedule(m2m_ctx); |
352 | 352 | ||
353 | return ret; | 353 | return ret; |
354 | } | 354 | } |
355 | EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf); | 355 | EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf); |
356 | 356 | ||
357 | /** | 357 | /** |
358 | * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on | 358 | * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on |
359 | * the type | 359 | * the type |
360 | */ | 360 | */ |
361 | int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 361 | int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
362 | struct v4l2_buffer *buf) | 362 | struct v4l2_buffer *buf) |
363 | { | 363 | { |
364 | struct vb2_queue *vq; | 364 | struct vb2_queue *vq; |
365 | 365 | ||
366 | vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); | 366 | vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); |
367 | return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); | 367 | return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); |
368 | } | 368 | } |
369 | EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); | 369 | EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); |
370 | 370 | ||
371 | /** | 371 | /** |
372 | * v4l2_m2m_expbuf() - export a source or destination buffer, depending on | ||
373 | * the type | ||
374 | */ | ||
375 | int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | ||
376 | struct v4l2_exportbuffer *eb) | ||
377 | { | ||
378 | struct vb2_queue *vq; | ||
379 | |||
380 | vq = v4l2_m2m_get_vq(m2m_ctx, eb->type); | ||
381 | return vb2_expbuf(vq, eb); | ||
382 | } | ||
383 | EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf); | ||
384 | /** | ||
372 | * v4l2_m2m_streamon() - turn on streaming for a video queue | 385 | * v4l2_m2m_streamon() - turn on streaming for a video queue |
373 | */ | 386 | */ |
374 | int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 387 | int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
375 | enum v4l2_buf_type type) | 388 | enum v4l2_buf_type type) |
376 | { | 389 | { |
377 | struct vb2_queue *vq; | 390 | struct vb2_queue *vq; |
378 | int ret; | 391 | int ret; |
379 | 392 | ||
380 | vq = v4l2_m2m_get_vq(m2m_ctx, type); | 393 | vq = v4l2_m2m_get_vq(m2m_ctx, type); |
381 | ret = vb2_streamon(vq, type); | 394 | ret = vb2_streamon(vq, type); |
382 | if (!ret) | 395 | if (!ret) |
383 | v4l2_m2m_try_schedule(m2m_ctx); | 396 | v4l2_m2m_try_schedule(m2m_ctx); |
384 | 397 | ||
385 | return ret; | 398 | return ret; |
386 | } | 399 | } |
387 | EXPORT_SYMBOL_GPL(v4l2_m2m_streamon); | 400 | EXPORT_SYMBOL_GPL(v4l2_m2m_streamon); |
388 | 401 | ||
389 | /** | 402 | /** |
390 | * v4l2_m2m_streamoff() - turn off streaming for a video queue | 403 | * v4l2_m2m_streamoff() - turn off streaming for a video queue |
391 | */ | 404 | */ |
392 | int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 405 | int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
393 | enum v4l2_buf_type type) | 406 | enum v4l2_buf_type type) |
394 | { | 407 | { |
395 | struct vb2_queue *vq; | 408 | struct vb2_queue *vq; |
396 | 409 | ||
397 | vq = v4l2_m2m_get_vq(m2m_ctx, type); | 410 | vq = v4l2_m2m_get_vq(m2m_ctx, type); |
398 | return vb2_streamoff(vq, type); | 411 | return vb2_streamoff(vq, type); |
399 | } | 412 | } |
400 | EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff); | 413 | EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff); |
401 | 414 | ||
402 | /** | 415 | /** |
403 | * v4l2_m2m_poll() - poll replacement, for destination buffers only | 416 | * v4l2_m2m_poll() - poll replacement, for destination buffers only |
404 | * | 417 | * |
405 | * Call from the driver's poll() function. Will poll both queues. If a buffer | 418 | * Call from the driver's poll() function. Will poll both queues. If a buffer |
406 | * is available to dequeue (with dqbuf) from the source queue, this will | 419 | * is available to dequeue (with dqbuf) from the source queue, this will |
407 | * indicate that a non-blocking write can be performed, while read will be | 420 | * indicate that a non-blocking write can be performed, while read will be |
408 | * returned in case of the destination queue. | 421 | * returned in case of the destination queue. |
409 | */ | 422 | */ |
410 | unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 423 | unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
411 | struct poll_table_struct *wait) | 424 | struct poll_table_struct *wait) |
412 | { | 425 | { |
413 | struct video_device *vfd = video_devdata(file); | 426 | struct video_device *vfd = video_devdata(file); |
414 | unsigned long req_events = poll_requested_events(wait); | 427 | unsigned long req_events = poll_requested_events(wait); |
415 | struct vb2_queue *src_q, *dst_q; | 428 | struct vb2_queue *src_q, *dst_q; |
416 | struct vb2_buffer *src_vb = NULL, *dst_vb = NULL; | 429 | struct vb2_buffer *src_vb = NULL, *dst_vb = NULL; |
417 | unsigned int rc = 0; | 430 | unsigned int rc = 0; |
418 | unsigned long flags; | 431 | unsigned long flags; |
419 | 432 | ||
420 | if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { | 433 | if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { |
421 | struct v4l2_fh *fh = file->private_data; | 434 | struct v4l2_fh *fh = file->private_data; |
422 | 435 | ||
423 | if (v4l2_event_pending(fh)) | 436 | if (v4l2_event_pending(fh)) |
424 | rc = POLLPRI; | 437 | rc = POLLPRI; |
425 | else if (req_events & POLLPRI) | 438 | else if (req_events & POLLPRI) |
426 | poll_wait(file, &fh->wait, wait); | 439 | poll_wait(file, &fh->wait, wait); |
427 | if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM))) | 440 | if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM))) |
428 | return rc; | 441 | return rc; |
429 | } | 442 | } |
430 | 443 | ||
431 | src_q = v4l2_m2m_get_src_vq(m2m_ctx); | 444 | src_q = v4l2_m2m_get_src_vq(m2m_ctx); |
432 | dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); | 445 | dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); |
433 | 446 | ||
434 | /* | 447 | /* |
435 | * There has to be at least one buffer queued on each queued_list, which | 448 | * There has to be at least one buffer queued on each queued_list, which |
436 | * means either in driver already or waiting for driver to claim it | 449 | * means either in driver already or waiting for driver to claim it |
437 | * and start processing. | 450 | * and start processing. |
438 | */ | 451 | */ |
439 | if ((!src_q->streaming || list_empty(&src_q->queued_list)) | 452 | if ((!src_q->streaming || list_empty(&src_q->queued_list)) |
440 | && (!dst_q->streaming || list_empty(&dst_q->queued_list))) { | 453 | && (!dst_q->streaming || list_empty(&dst_q->queued_list))) { |
441 | rc |= POLLERR; | 454 | rc |= POLLERR; |
442 | goto end; | 455 | goto end; |
443 | } | 456 | } |
444 | 457 | ||
445 | if (m2m_ctx->m2m_dev->m2m_ops->unlock) | 458 | if (m2m_ctx->m2m_dev->m2m_ops->unlock) |
446 | m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv); | 459 | m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv); |
447 | 460 | ||
448 | poll_wait(file, &src_q->done_wq, wait); | 461 | poll_wait(file, &src_q->done_wq, wait); |
449 | poll_wait(file, &dst_q->done_wq, wait); | 462 | poll_wait(file, &dst_q->done_wq, wait); |
450 | 463 | ||
451 | if (m2m_ctx->m2m_dev->m2m_ops->lock) | 464 | if (m2m_ctx->m2m_dev->m2m_ops->lock) |
452 | m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv); | 465 | m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv); |
453 | 466 | ||
454 | spin_lock_irqsave(&src_q->done_lock, flags); | 467 | spin_lock_irqsave(&src_q->done_lock, flags); |
455 | if (!list_empty(&src_q->done_list)) | 468 | if (!list_empty(&src_q->done_list)) |
456 | src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer, | 469 | src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer, |
457 | done_entry); | 470 | done_entry); |
458 | if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE | 471 | if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE |
459 | || src_vb->state == VB2_BUF_STATE_ERROR)) | 472 | || src_vb->state == VB2_BUF_STATE_ERROR)) |
460 | rc |= POLLOUT | POLLWRNORM; | 473 | rc |= POLLOUT | POLLWRNORM; |
461 | spin_unlock_irqrestore(&src_q->done_lock, flags); | 474 | spin_unlock_irqrestore(&src_q->done_lock, flags); |
462 | 475 | ||
463 | spin_lock_irqsave(&dst_q->done_lock, flags); | 476 | spin_lock_irqsave(&dst_q->done_lock, flags); |
464 | if (!list_empty(&dst_q->done_list)) | 477 | if (!list_empty(&dst_q->done_list)) |
465 | dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer, | 478 | dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer, |
466 | done_entry); | 479 | done_entry); |
467 | if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE | 480 | if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE |
468 | || dst_vb->state == VB2_BUF_STATE_ERROR)) | 481 | || dst_vb->state == VB2_BUF_STATE_ERROR)) |
469 | rc |= POLLIN | POLLRDNORM; | 482 | rc |= POLLIN | POLLRDNORM; |
470 | spin_unlock_irqrestore(&dst_q->done_lock, flags); | 483 | spin_unlock_irqrestore(&dst_q->done_lock, flags); |
471 | 484 | ||
472 | end: | 485 | end: |
473 | return rc; | 486 | return rc; |
474 | } | 487 | } |
475 | EXPORT_SYMBOL_GPL(v4l2_m2m_poll); | 488 | EXPORT_SYMBOL_GPL(v4l2_m2m_poll); |
476 | 489 | ||
477 | /** | 490 | /** |
478 | * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer | 491 | * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer |
479 | * | 492 | * |
480 | * Call from driver's mmap() function. Will handle mmap() for both queues | 493 | * Call from driver's mmap() function. Will handle mmap() for both queues |
481 | * seamlessly for videobuffer, which will receive normal per-queue offsets and | 494 | * seamlessly for videobuffer, which will receive normal per-queue offsets and |
482 | * proper videobuf queue pointers. The differentiation is made outside videobuf | 495 | * proper videobuf queue pointers. The differentiation is made outside videobuf |
483 | * by adding a predefined offset to buffers from one of the queues and | 496 | * by adding a predefined offset to buffers from one of the queues and |
484 | * subtracting it before passing it back to videobuf. Only drivers (and | 497 | * subtracting it before passing it back to videobuf. Only drivers (and |
485 | * thus applications) receive modified offsets. | 498 | * thus applications) receive modified offsets. |
486 | */ | 499 | */ |
487 | int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 500 | int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
488 | struct vm_area_struct *vma) | 501 | struct vm_area_struct *vma) |
489 | { | 502 | { |
490 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | 503 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; |
491 | struct vb2_queue *vq; | 504 | struct vb2_queue *vq; |
492 | 505 | ||
493 | if (offset < DST_QUEUE_OFF_BASE) { | 506 | if (offset < DST_QUEUE_OFF_BASE) { |
494 | vq = v4l2_m2m_get_src_vq(m2m_ctx); | 507 | vq = v4l2_m2m_get_src_vq(m2m_ctx); |
495 | } else { | 508 | } else { |
496 | vq = v4l2_m2m_get_dst_vq(m2m_ctx); | 509 | vq = v4l2_m2m_get_dst_vq(m2m_ctx); |
497 | vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); | 510 | vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); |
498 | } | 511 | } |
499 | 512 | ||
500 | return vb2_mmap(vq, vma); | 513 | return vb2_mmap(vq, vma); |
501 | } | 514 | } |
502 | EXPORT_SYMBOL(v4l2_m2m_mmap); | 515 | EXPORT_SYMBOL(v4l2_m2m_mmap); |
503 | 516 | ||
504 | /** | 517 | /** |
505 | * v4l2_m2m_init() - initialize per-driver m2m data | 518 | * v4l2_m2m_init() - initialize per-driver m2m data |
506 | * | 519 | * |
507 | * Usually called from driver's probe() function. | 520 | * Usually called from driver's probe() function. |
508 | */ | 521 | */ |
509 | struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops) | 522 | struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops) |
510 | { | 523 | { |
511 | struct v4l2_m2m_dev *m2m_dev; | 524 | struct v4l2_m2m_dev *m2m_dev; |
512 | 525 | ||
513 | if (!m2m_ops || WARN_ON(!m2m_ops->device_run) || | 526 | if (!m2m_ops || WARN_ON(!m2m_ops->device_run) || |
514 | WARN_ON(!m2m_ops->job_abort)) | 527 | WARN_ON(!m2m_ops->job_abort)) |
515 | return ERR_PTR(-EINVAL); | 528 | return ERR_PTR(-EINVAL); |
516 | 529 | ||
517 | m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL); | 530 | m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL); |
518 | if (!m2m_dev) | 531 | if (!m2m_dev) |
519 | return ERR_PTR(-ENOMEM); | 532 | return ERR_PTR(-ENOMEM); |
520 | 533 | ||
521 | m2m_dev->curr_ctx = NULL; | 534 | m2m_dev->curr_ctx = NULL; |
522 | m2m_dev->m2m_ops = m2m_ops; | 535 | m2m_dev->m2m_ops = m2m_ops; |
523 | INIT_LIST_HEAD(&m2m_dev->job_queue); | 536 | INIT_LIST_HEAD(&m2m_dev->job_queue); |
524 | spin_lock_init(&m2m_dev->job_spinlock); | 537 | spin_lock_init(&m2m_dev->job_spinlock); |
525 | 538 | ||
526 | return m2m_dev; | 539 | return m2m_dev; |
527 | } | 540 | } |
528 | EXPORT_SYMBOL_GPL(v4l2_m2m_init); | 541 | EXPORT_SYMBOL_GPL(v4l2_m2m_init); |
529 | 542 | ||
530 | /** | 543 | /** |
531 | * v4l2_m2m_release() - cleans up and frees a m2m_dev structure | 544 | * v4l2_m2m_release() - cleans up and frees a m2m_dev structure |
532 | * | 545 | * |
533 | * Usually called from driver's remove() function. | 546 | * Usually called from driver's remove() function. |
534 | */ | 547 | */ |
535 | void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev) | 548 | void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev) |
536 | { | 549 | { |
537 | kfree(m2m_dev); | 550 | kfree(m2m_dev); |
538 | } | 551 | } |
539 | EXPORT_SYMBOL_GPL(v4l2_m2m_release); | 552 | EXPORT_SYMBOL_GPL(v4l2_m2m_release); |
540 | 553 | ||
541 | /** | 554 | /** |
542 | * v4l2_m2m_ctx_init() - allocate and initialize a m2m context | 555 | * v4l2_m2m_ctx_init() - allocate and initialize a m2m context |
543 | * @priv - driver's instance private data | 556 | * @priv - driver's instance private data |
544 | * @m2m_dev - a previously initialized m2m_dev struct | 557 | * @m2m_dev - a previously initialized m2m_dev struct |
545 | * @vq_init - a callback for queue type-specific initialization function to be | 558 | * @vq_init - a callback for queue type-specific initialization function to be |
546 | * used for initializing videobuf_queues | 559 | * used for initializing videobuf_queues |
547 | * | 560 | * |
548 | * Usually called from driver's open() function. | 561 | * Usually called from driver's open() function. |
549 | */ | 562 | */ |
550 | struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, | 563 | struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, |
551 | void *drv_priv, | 564 | void *drv_priv, |
552 | int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)) | 565 | int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)) |
553 | { | 566 | { |
554 | struct v4l2_m2m_ctx *m2m_ctx; | 567 | struct v4l2_m2m_ctx *m2m_ctx; |
555 | struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx; | 568 | struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx; |
556 | int ret; | 569 | int ret; |
557 | 570 | ||
558 | m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL); | 571 | m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL); |
559 | if (!m2m_ctx) | 572 | if (!m2m_ctx) |
560 | return ERR_PTR(-ENOMEM); | 573 | return ERR_PTR(-ENOMEM); |
561 | 574 | ||
562 | m2m_ctx->priv = drv_priv; | 575 | m2m_ctx->priv = drv_priv; |
563 | m2m_ctx->m2m_dev = m2m_dev; | 576 | m2m_ctx->m2m_dev = m2m_dev; |
564 | init_waitqueue_head(&m2m_ctx->finished); | 577 | init_waitqueue_head(&m2m_ctx->finished); |
565 | 578 | ||
566 | out_q_ctx = &m2m_ctx->out_q_ctx; | 579 | out_q_ctx = &m2m_ctx->out_q_ctx; |
567 | cap_q_ctx = &m2m_ctx->cap_q_ctx; | 580 | cap_q_ctx = &m2m_ctx->cap_q_ctx; |
568 | 581 | ||
569 | INIT_LIST_HEAD(&out_q_ctx->rdy_queue); | 582 | INIT_LIST_HEAD(&out_q_ctx->rdy_queue); |
570 | INIT_LIST_HEAD(&cap_q_ctx->rdy_queue); | 583 | INIT_LIST_HEAD(&cap_q_ctx->rdy_queue); |
571 | spin_lock_init(&out_q_ctx->rdy_spinlock); | 584 | spin_lock_init(&out_q_ctx->rdy_spinlock); |
572 | spin_lock_init(&cap_q_ctx->rdy_spinlock); | 585 | spin_lock_init(&cap_q_ctx->rdy_spinlock); |
573 | 586 | ||
574 | INIT_LIST_HEAD(&m2m_ctx->queue); | 587 | INIT_LIST_HEAD(&m2m_ctx->queue); |
575 | 588 | ||
576 | ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q); | 589 | ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q); |
577 | 590 | ||
578 | if (ret) | 591 | if (ret) |
579 | goto err; | 592 | goto err; |
580 | 593 | ||
581 | return m2m_ctx; | 594 | return m2m_ctx; |
582 | err: | 595 | err: |
583 | kfree(m2m_ctx); | 596 | kfree(m2m_ctx); |
584 | return ERR_PTR(ret); | 597 | return ERR_PTR(ret); |
585 | } | 598 | } |
586 | EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init); | 599 | EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init); |
587 | 600 | ||
588 | /** | 601 | /** |
589 | * v4l2_m2m_ctx_release() - release m2m context | 602 | * v4l2_m2m_ctx_release() - release m2m context |
590 | * | 603 | * |
591 | * Usually called from driver's release() function. | 604 | * Usually called from driver's release() function. |
592 | */ | 605 | */ |
593 | void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx) | 606 | void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx) |
594 | { | 607 | { |
595 | struct v4l2_m2m_dev *m2m_dev; | 608 | struct v4l2_m2m_dev *m2m_dev; |
596 | unsigned long flags; | 609 | unsigned long flags; |
597 | 610 | ||
598 | m2m_dev = m2m_ctx->m2m_dev; | 611 | m2m_dev = m2m_ctx->m2m_dev; |
599 | 612 | ||
600 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); | 613 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
601 | if (m2m_ctx->job_flags & TRANS_RUNNING) { | 614 | if (m2m_ctx->job_flags & TRANS_RUNNING) { |
602 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); | 615 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
603 | m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); | 616 | m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); |
604 | dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx); | 617 | dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx); |
605 | wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING)); | 618 | wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING)); |
606 | } else if (m2m_ctx->job_flags & TRANS_QUEUED) { | 619 | } else if (m2m_ctx->job_flags & TRANS_QUEUED) { |
607 | list_del(&m2m_ctx->queue); | 620 | list_del(&m2m_ctx->queue); |
608 | m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); | 621 | m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); |
609 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); | 622 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
610 | dprintk("m2m_ctx: %p had been on queue and was removed\n", | 623 | dprintk("m2m_ctx: %p had been on queue and was removed\n", |
611 | m2m_ctx); | 624 | m2m_ctx); |
612 | } else { | 625 | } else { |
613 | /* Do nothing, was not on queue/running */ | 626 | /* Do nothing, was not on queue/running */ |
614 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); | 627 | spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); |
615 | } | 628 | } |
616 | 629 | ||
617 | vb2_queue_release(&m2m_ctx->cap_q_ctx.q); | 630 | vb2_queue_release(&m2m_ctx->cap_q_ctx.q); |
618 | vb2_queue_release(&m2m_ctx->out_q_ctx.q); | 631 | vb2_queue_release(&m2m_ctx->out_q_ctx.q); |
619 | 632 | ||
620 | kfree(m2m_ctx); | 633 | kfree(m2m_ctx); |
621 | } | 634 | } |
622 | EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release); | 635 | EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release); |
623 | 636 | ||
624 | /** | 637 | /** |
625 | * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list. | 638 | * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list. |
626 | * | 639 | * |
627 | * Call from buf_queue(), videobuf_queue_ops callback. | 640 | * Call from buf_queue(), videobuf_queue_ops callback. |
628 | */ | 641 | */ |
629 | void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb) | 642 | void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb) |
630 | { | 643 | { |
631 | struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb); | 644 | struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb); |
632 | struct v4l2_m2m_queue_ctx *q_ctx; | 645 | struct v4l2_m2m_queue_ctx *q_ctx; |
633 | unsigned long flags; | 646 | unsigned long flags; |
634 | 647 | ||
635 | q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type); | 648 | q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type); |
636 | if (!q_ctx) | 649 | if (!q_ctx) |
637 | return; | 650 | return; |
638 | 651 | ||
639 | spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); | 652 | spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); |
640 | list_add_tail(&b->list, &q_ctx->rdy_queue); | 653 | list_add_tail(&b->list, &q_ctx->rdy_queue); |
641 | q_ctx->num_rdy++; | 654 | q_ctx->num_rdy++; |
642 | spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); | 655 | spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); |
643 | } | 656 | } |
644 | EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); | 657 | EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); |
645 | 658 | ||
646 | 659 |
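A mem-to-mem driver picks up the new v4l2_m2m_expbuf() helper by routing its vidioc_expbuf handler to it, just as it already does for reqbufs/qbuf/dqbuf. A sketch of that wiring (my_ctx and my_ioctl_ops are hypothetical names, and it assumes the companion patch in this series that adds the vidioc_expbuf hook to v4l2_ioctl_ops):

	#include <media/v4l2-ioctl.h>
	#include <media/v4l2-mem2mem.h>

	struct my_ctx {				/* hypothetical per-open state */
		struct v4l2_m2m_ctx *m2m_ctx;
	};

	static int vidioc_expbuf(struct file *file, void *priv,
				 struct v4l2_exportbuffer *eb)
	{
		struct my_ctx *ctx = priv;	/* simplified; real drivers often
						 * recover this via a v4l2_fh */

		/* Picks the source or destination vb2_queue based on
		 * eb->type, then calls vb2_expbuf() on it. */
		return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
	}

	static const struct v4l2_ioctl_ops my_ioctl_ops = {
		/* ... existing handlers (reqbufs, querybuf, qbuf, dqbuf, ...) */
		.vidioc_expbuf	= vidioc_expbuf,
	};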
drivers/media/v4l2-core/videobuf2-core.c
1 | /* | 1 | /* |
2 | * videobuf2-core.c - V4L2 driver helper framework | 2 | * videobuf2-core.c - V4L2 driver helper framework |
3 | * | 3 | * |
4 | * Copyright (C) 2010 Samsung Electronics | 4 | * Copyright (C) 2010 Samsung Electronics |
5 | * | 5 | * |
6 | * Author: Pawel Osciak <pawel@osciak.com> | 6 | * Author: Pawel Osciak <pawel@osciak.com> |
7 | * Marek Szyprowski <m.szyprowski@samsung.com> | 7 | * Marek Szyprowski <m.szyprowski@samsung.com> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License as published by | 10 | * it under the terms of the GNU General Public License as published by |
11 | * the Free Software Foundation. | 11 | * the Free Software Foundation. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/poll.h> | 18 | #include <linux/poll.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | 21 | ||
22 | #include <media/v4l2-dev.h> | 22 | #include <media/v4l2-dev.h> |
23 | #include <media/v4l2-fh.h> | 23 | #include <media/v4l2-fh.h> |
24 | #include <media/v4l2-event.h> | 24 | #include <media/v4l2-event.h> |
25 | #include <media/videobuf2-core.h> | 25 | #include <media/videobuf2-core.h> |
26 | 26 | ||
27 | static int debug; | 27 | static int debug; |
28 | module_param(debug, int, 0644); | 28 | module_param(debug, int, 0644); |
29 | 29 | ||
30 | #define dprintk(level, fmt, arg...) \ | 30 | #define dprintk(level, fmt, arg...) \ |
31 | do { \ | 31 | do { \ |
32 | if (debug >= level) \ | 32 | if (debug >= level) \ |
33 | printk(KERN_DEBUG "vb2: " fmt, ## arg); \ | 33 | printk(KERN_DEBUG "vb2: " fmt, ## arg); \ |
34 | } while (0) | 34 | } while (0) |
35 | 35 | ||
36 | #define call_memop(q, op, args...) \ | 36 | #define call_memop(q, op, args...) \ |
37 | (((q)->mem_ops->op) ? \ | 37 | (((q)->mem_ops->op) ? \ |
38 | ((q)->mem_ops->op(args)) : 0) | 38 | ((q)->mem_ops->op(args)) : 0) |
39 | 39 | ||
40 | #define call_qop(q, op, args...) \ | 40 | #define call_qop(q, op, args...) \ |
41 | (((q)->ops->op) ? ((q)->ops->op(args)) : 0) | 41 | (((q)->ops->op) ? ((q)->ops->op(args)) : 0) |
42 | 42 | ||
43 | #define V4L2_BUFFER_STATE_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \ | 43 | #define V4L2_BUFFER_STATE_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \ |
44 | V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \ | 44 | V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \ |
45 | V4L2_BUF_FLAG_PREPARED) | 45 | V4L2_BUF_FLAG_PREPARED) |
46 | 46 | ||
47 | /** | 47 | /** |
48 | * __vb2_buf_mem_alloc() - allocate video memory for the given buffer | 48 | * __vb2_buf_mem_alloc() - allocate video memory for the given buffer |
49 | */ | 49 | */ |
50 | static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) | 50 | static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) |
51 | { | 51 | { |
52 | struct vb2_queue *q = vb->vb2_queue; | 52 | struct vb2_queue *q = vb->vb2_queue; |
53 | void *mem_priv; | 53 | void *mem_priv; |
54 | int plane; | 54 | int plane; |
55 | 55 | ||
56 | /* Allocate memory for all planes in this buffer */ | 56 | /* Allocate memory for all planes in this buffer */ |
57 | for (plane = 0; plane < vb->num_planes; ++plane) { | 57 | for (plane = 0; plane < vb->num_planes; ++plane) { |
58 | mem_priv = call_memop(q, alloc, q->alloc_ctx[plane], | 58 | mem_priv = call_memop(q, alloc, q->alloc_ctx[plane], |
59 | q->plane_sizes[plane]); | 59 | q->plane_sizes[plane]); |
60 | if (IS_ERR_OR_NULL(mem_priv)) | 60 | if (IS_ERR_OR_NULL(mem_priv)) |
61 | goto free; | 61 | goto free; |
62 | 62 | ||
63 | /* Associate allocator private data with this plane */ | 63 | /* Associate allocator private data with this plane */ |
64 | vb->planes[plane].mem_priv = mem_priv; | 64 | vb->planes[plane].mem_priv = mem_priv; |
65 | vb->v4l2_planes[plane].length = q->plane_sizes[plane]; | 65 | vb->v4l2_planes[plane].length = q->plane_sizes[plane]; |
66 | } | 66 | } |
67 | 67 | ||
68 | return 0; | 68 | return 0; |
69 | free: | 69 | free: |
70 | /* Free already allocated memory if one of the allocations failed */ | 70 | /* Free already allocated memory if one of the allocations failed */ |
71 | for (; plane > 0; --plane) { | 71 | for (; plane > 0; --plane) { |
72 | call_memop(q, put, vb->planes[plane - 1].mem_priv); | 72 | call_memop(q, put, vb->planes[plane - 1].mem_priv); |
73 | vb->planes[plane - 1].mem_priv = NULL; | 73 | vb->planes[plane - 1].mem_priv = NULL; |
74 | } | 74 | } |
75 | 75 | ||
76 | return -ENOMEM; | 76 | return -ENOMEM; |
77 | } | 77 | } |
78 | 78 | ||
79 | /** | 79 | /** |
80 | * __vb2_buf_mem_free() - free memory of the given buffer | 80 | * __vb2_buf_mem_free() - free memory of the given buffer |
81 | */ | 81 | */ |
82 | static void __vb2_buf_mem_free(struct vb2_buffer *vb) | 82 | static void __vb2_buf_mem_free(struct vb2_buffer *vb) |
83 | { | 83 | { |
84 | struct vb2_queue *q = vb->vb2_queue; | 84 | struct vb2_queue *q = vb->vb2_queue; |
85 | unsigned int plane; | 85 | unsigned int plane; |
86 | 86 | ||
87 | for (plane = 0; plane < vb->num_planes; ++plane) { | 87 | for (plane = 0; plane < vb->num_planes; ++plane) { |
88 | call_memop(q, put, vb->planes[plane].mem_priv); | 88 | call_memop(q, put, vb->planes[plane].mem_priv); |
89 | vb->planes[plane].mem_priv = NULL; | 89 | vb->planes[plane].mem_priv = NULL; |
90 | dprintk(3, "Freed plane %d of buffer %d\n", plane, | 90 | dprintk(3, "Freed plane %d of buffer %d\n", plane, |
91 | vb->v4l2_buf.index); | 91 | vb->v4l2_buf.index); |
92 | } | 92 | } |
93 | } | 93 | } |
94 | 94 | ||
95 | /** | 95 | /** |
96 | * __vb2_buf_userptr_put() - release userspace memory associated with | 96 | * __vb2_buf_userptr_put() - release userspace memory associated with |
97 | * a USERPTR buffer | 97 | * a USERPTR buffer |
98 | */ | 98 | */ |
99 | static void __vb2_buf_userptr_put(struct vb2_buffer *vb) | 99 | static void __vb2_buf_userptr_put(struct vb2_buffer *vb) |
100 | { | 100 | { |
101 | struct vb2_queue *q = vb->vb2_queue; | 101 | struct vb2_queue *q = vb->vb2_queue; |
102 | unsigned int plane; | 102 | unsigned int plane; |
103 | 103 | ||
104 | for (plane = 0; plane < vb->num_planes; ++plane) { | 104 | for (plane = 0; plane < vb->num_planes; ++plane) { |
105 | if (vb->planes[plane].mem_priv) | 105 | if (vb->planes[plane].mem_priv) |
106 | call_memop(q, put_userptr, vb->planes[plane].mem_priv); | 106 | call_memop(q, put_userptr, vb->planes[plane].mem_priv); |
107 | vb->planes[plane].mem_priv = NULL; | 107 | vb->planes[plane].mem_priv = NULL; |
108 | } | 108 | } |
109 | } | 109 | } |
110 | 110 | ||
111 | /** | 111 | /** |
112 | * __vb2_plane_dmabuf_put() - release memory associated with | 112 | * __vb2_plane_dmabuf_put() - release memory associated with |
113 | * a DMABUF shared plane | 113 | * a DMABUF shared plane |
114 | */ | 114 | */ |
115 | static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p) | 115 | static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p) |
116 | { | 116 | { |
117 | if (!p->mem_priv) | 117 | if (!p->mem_priv) |
118 | return; | 118 | return; |
119 | 119 | ||
120 | if (p->dbuf_mapped) | 120 | if (p->dbuf_mapped) |
121 | call_memop(q, unmap_dmabuf, p->mem_priv); | 121 | call_memop(q, unmap_dmabuf, p->mem_priv); |
122 | 122 | ||
123 | call_memop(q, detach_dmabuf, p->mem_priv); | 123 | call_memop(q, detach_dmabuf, p->mem_priv); |
124 | dma_buf_put(p->dbuf); | 124 | dma_buf_put(p->dbuf); |
125 | memset(p, 0, sizeof(*p)); | 125 | memset(p, 0, sizeof(*p)); |
126 | } | 126 | } |
127 | 127 | ||
128 | /** | 128 | /** |
129 | * __vb2_buf_dmabuf_put() - release memory associated with | 129 | * __vb2_buf_dmabuf_put() - release memory associated with |
130 | * a DMABUF shared buffer | 130 | * a DMABUF shared buffer |
131 | */ | 131 | */ |
132 | static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) | 132 | static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) |
133 | { | 133 | { |
134 | struct vb2_queue *q = vb->vb2_queue; | 134 | struct vb2_queue *q = vb->vb2_queue; |
135 | unsigned int plane; | 135 | unsigned int plane; |
136 | 136 | ||
137 | for (plane = 0; plane < vb->num_planes; ++plane) | 137 | for (plane = 0; plane < vb->num_planes; ++plane) |
138 | __vb2_plane_dmabuf_put(q, &vb->planes[plane]); | 138 | __vb2_plane_dmabuf_put(q, &vb->planes[plane]); |
139 | } | 139 | } |
140 | 140 | ||
141 | /** | 141 | /** |
142 | * __setup_offsets() - setup unique offsets ("cookies") for every plane in | 142 | * __setup_offsets() - setup unique offsets ("cookies") for every plane in |
143 | * every buffer on the queue | 143 | * every buffer on the queue |
144 | */ | 144 | */ |
145 | static void __setup_offsets(struct vb2_queue *q, unsigned int n) | 145 | static void __setup_offsets(struct vb2_queue *q, unsigned int n) |
146 | { | 146 | { |
147 | unsigned int buffer, plane; | 147 | unsigned int buffer, plane; |
148 | struct vb2_buffer *vb; | 148 | struct vb2_buffer *vb; |
149 | unsigned long off; | 149 | unsigned long off; |
150 | 150 | ||
151 | if (q->num_buffers) { | 151 | if (q->num_buffers) { |
152 | struct v4l2_plane *p; | 152 | struct v4l2_plane *p; |
153 | vb = q->bufs[q->num_buffers - 1]; | 153 | vb = q->bufs[q->num_buffers - 1]; |
154 | p = &vb->v4l2_planes[vb->num_planes - 1]; | 154 | p = &vb->v4l2_planes[vb->num_planes - 1]; |
155 | off = PAGE_ALIGN(p->m.mem_offset + p->length); | 155 | off = PAGE_ALIGN(p->m.mem_offset + p->length); |
156 | } else { | 156 | } else { |
157 | off = 0; | 157 | off = 0; |
158 | } | 158 | } |
159 | 159 | ||
160 | for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) { | 160 | for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) { |
161 | vb = q->bufs[buffer]; | 161 | vb = q->bufs[buffer]; |
162 | if (!vb) | 162 | if (!vb) |
163 | continue; | 163 | continue; |
164 | 164 | ||
165 | for (plane = 0; plane < vb->num_planes; ++plane) { | 165 | for (plane = 0; plane < vb->num_planes; ++plane) { |
166 | vb->v4l2_planes[plane].length = q->plane_sizes[plane]; | 166 | vb->v4l2_planes[plane].length = q->plane_sizes[plane]; |
167 | vb->v4l2_planes[plane].m.mem_offset = off; | 167 | vb->v4l2_planes[plane].m.mem_offset = off; |
168 | 168 | ||
169 | dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", | 169 | dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", |
170 | buffer, plane, off); | 170 | buffer, plane, off); |
171 | 171 | ||
172 | off += vb->v4l2_planes[plane].length; | 172 | off += vb->v4l2_planes[plane].length; |
173 | off = PAGE_ALIGN(off); | 173 | off = PAGE_ALIGN(off); |
174 | } | 174 | } |
175 | } | 175 | } |
176 | } | 176 | } |
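/*
 * Illustrative userspace sketch, not part of this patch: how the per-plane
 * mmap "cookies" assigned above are consumed.  For example, with PAGE_SIZE
 * 4096 and two single-plane buffers of 614400 bytes each, the offsets come
 * out as 0x00000000 and 0x00096000; each is passed verbatim to mmap() on
 * the video device fd.  map_buffer() and its single-planar CAPTURE setup
 * are assumptions for the sketch.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static void *map_buffer(int fd, unsigned int index)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.index = index;
	if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
		return MAP_FAILED;
	/* buf.m.offset is the cookie computed by __setup_offsets() */
	return mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, buf.m.offset);
}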
177 | 177 | ||
178 | /** | 178 | /** |
179 | * __vb2_queue_alloc() - allocate videobuf buffer structures and (for the MMAP | 179 | * __vb2_queue_alloc() - allocate videobuf buffer structures and (for the MMAP |
180 | * type) video buffer memory for all buffers/planes on the queue and initialize | 180 | * type) video buffer memory for all buffers/planes on the queue and initialize |
181 | * the queue | 181 | * the queue |
182 | * | 182 | * |
183 | * Returns the number of buffers successfully allocated. | 183 | * Returns the number of buffers successfully allocated. |
184 | */ | 184 | */ |
185 | static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory, | 185 | static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory, |
186 | unsigned int num_buffers, unsigned int num_planes) | 186 | unsigned int num_buffers, unsigned int num_planes) |
187 | { | 187 | { |
188 | unsigned int buffer; | 188 | unsigned int buffer; |
189 | struct vb2_buffer *vb; | 189 | struct vb2_buffer *vb; |
190 | int ret; | 190 | int ret; |
191 | 191 | ||
192 | for (buffer = 0; buffer < num_buffers; ++buffer) { | 192 | for (buffer = 0; buffer < num_buffers; ++buffer) { |
193 | /* Allocate videobuf buffer structures */ | 193 | /* Allocate videobuf buffer structures */ |
194 | vb = kzalloc(q->buf_struct_size, GFP_KERNEL); | 194 | vb = kzalloc(q->buf_struct_size, GFP_KERNEL); |
195 | if (!vb) { | 195 | if (!vb) { |
196 | dprintk(1, "Memory alloc for buffer struct failed\n"); | 196 | dprintk(1, "Memory alloc for buffer struct failed\n"); |
197 | break; | 197 | break; |
198 | } | 198 | } |
199 | 199 | ||
200 | /* Length stores number of planes for multiplanar buffers */ | 200 | /* Length stores number of planes for multiplanar buffers */ |
201 | if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) | 201 | if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) |
202 | vb->v4l2_buf.length = num_planes; | 202 | vb->v4l2_buf.length = num_planes; |
203 | 203 | ||
204 | vb->state = VB2_BUF_STATE_DEQUEUED; | 204 | vb->state = VB2_BUF_STATE_DEQUEUED; |
205 | vb->vb2_queue = q; | 205 | vb->vb2_queue = q; |
206 | vb->num_planes = num_planes; | 206 | vb->num_planes = num_planes; |
207 | vb->v4l2_buf.index = q->num_buffers + buffer; | 207 | vb->v4l2_buf.index = q->num_buffers + buffer; |
208 | vb->v4l2_buf.type = q->type; | 208 | vb->v4l2_buf.type = q->type; |
209 | vb->v4l2_buf.memory = memory; | 209 | vb->v4l2_buf.memory = memory; |
210 | 210 | ||
211 | /* Allocate video buffer memory for the MMAP type */ | 211 | /* Allocate video buffer memory for the MMAP type */ |
212 | if (memory == V4L2_MEMORY_MMAP) { | 212 | if (memory == V4L2_MEMORY_MMAP) { |
213 | ret = __vb2_buf_mem_alloc(vb); | 213 | ret = __vb2_buf_mem_alloc(vb); |
214 | if (ret) { | 214 | if (ret) { |
215 | dprintk(1, "Failed allocating memory for " | 215 | dprintk(1, "Failed allocating memory for " |
216 | "buffer %d\n", buffer); | 216 | "buffer %d\n", buffer); |
217 | kfree(vb); | 217 | kfree(vb); |
218 | break; | 218 | break; |
219 | } | 219 | } |
220 | /* | 220 | /* |
221 | * Call the driver-provided buffer initialization | 221 | * Call the driver-provided buffer initialization |
222 | * callback, if given. An error in initialization | 222 | * callback, if given. An error in initialization |
223 | * results in queue setup failure. | 223 | * results in queue setup failure. |
224 | */ | 224 | */ |
225 | ret = call_qop(q, buf_init, vb); | 225 | ret = call_qop(q, buf_init, vb); |
226 | if (ret) { | 226 | if (ret) { |
227 | dprintk(1, "Buffer %d %p initialization" | 227 | dprintk(1, "Buffer %d %p initialization" |
228 | " failed\n", buffer, vb); | 228 | " failed\n", buffer, vb); |
229 | __vb2_buf_mem_free(vb); | 229 | __vb2_buf_mem_free(vb); |
230 | kfree(vb); | 230 | kfree(vb); |
231 | break; | 231 | break; |
232 | } | 232 | } |
233 | } | 233 | } |
234 | 234 | ||
235 | q->bufs[q->num_buffers + buffer] = vb; | 235 | q->bufs[q->num_buffers + buffer] = vb; |
236 | } | 236 | } |
237 | 237 | ||
238 | __setup_offsets(q, buffer); | 238 | __setup_offsets(q, buffer); |
239 | 239 | ||
240 | dprintk(1, "Allocated %d buffers, %d plane(s) each\n", | 240 | dprintk(1, "Allocated %d buffers, %d plane(s) each\n", |
241 | buffer, num_planes); | 241 | buffer, num_planes); |
242 | 242 | ||
243 | return buffer; | 243 | return buffer; |
244 | } | 244 | } |
245 | 245 | ||
246 | /** | 246 | /** |
247 | * __vb2_free_mem() - release all video buffer memory for a given queue | 247 | * __vb2_free_mem() - release all video buffer memory for a given queue |
248 | */ | 248 | */ |
249 | static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers) | 249 | static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers) |
250 | { | 250 | { |
251 | unsigned int buffer; | 251 | unsigned int buffer; |
252 | struct vb2_buffer *vb; | 252 | struct vb2_buffer *vb; |
253 | 253 | ||
254 | for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; | 254 | for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; |
255 | ++buffer) { | 255 | ++buffer) { |
256 | vb = q->bufs[buffer]; | 256 | vb = q->bufs[buffer]; |
257 | if (!vb) | 257 | if (!vb) |
258 | continue; | 258 | continue; |
259 | 259 | ||
260 | /* Free MMAP buffers or release USERPTR buffers */ | 260 | /* Free MMAP buffers or release USERPTR buffers */ |
261 | if (q->memory == V4L2_MEMORY_MMAP) | 261 | if (q->memory == V4L2_MEMORY_MMAP) |
262 | __vb2_buf_mem_free(vb); | 262 | __vb2_buf_mem_free(vb); |
263 | else if (q->memory == V4L2_MEMORY_DMABUF) | 263 | else if (q->memory == V4L2_MEMORY_DMABUF) |
264 | __vb2_buf_dmabuf_put(vb); | 264 | __vb2_buf_dmabuf_put(vb); |
265 | else | 265 | else |
266 | __vb2_buf_userptr_put(vb); | 266 | __vb2_buf_userptr_put(vb); |
267 | } | 267 | } |
268 | } | 268 | } |
269 | 269 | ||
270 | /** | 270 | /** |
271 | * __vb2_queue_free() - free buffers at the end of the queue: video memory and | 271 | * __vb2_queue_free() - free buffers at the end of the queue: video memory and |
272 | * related information. If no buffers are left, return the queue to an | 272 | * related information. If no buffers are left, return the queue to an |
273 | * uninitialized state. Might be called even if the queue has already been freed. | 273 | * uninitialized state. Might be called even if the queue has already been freed. |
274 | */ | 274 | */ |
275 | static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) | 275 | static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) |
276 | { | 276 | { |
277 | unsigned int buffer; | 277 | unsigned int buffer; |
278 | 278 | ||
279 | /* Call driver-provided cleanup function for each buffer, if provided */ | 279 | /* Call driver-provided cleanup function for each buffer, if provided */ |
280 | if (q->ops->buf_cleanup) { | 280 | if (q->ops->buf_cleanup) { |
281 | for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; | 281 | for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; |
282 | ++buffer) { | 282 | ++buffer) { |
283 | if (NULL == q->bufs[buffer]) | 283 | if (NULL == q->bufs[buffer]) |
284 | continue; | 284 | continue; |
285 | q->ops->buf_cleanup(q->bufs[buffer]); | 285 | q->ops->buf_cleanup(q->bufs[buffer]); |
286 | } | 286 | } |
287 | } | 287 | } |
288 | 288 | ||
289 | /* Release video buffer memory */ | 289 | /* Release video buffer memory */ |
290 | __vb2_free_mem(q, buffers); | 290 | __vb2_free_mem(q, buffers); |
291 | 291 | ||
292 | /* Free videobuf buffers */ | 292 | /* Free videobuf buffers */ |
293 | for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; | 293 | for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; |
294 | ++buffer) { | 294 | ++buffer) { |
295 | kfree(q->bufs[buffer]); | 295 | kfree(q->bufs[buffer]); |
296 | q->bufs[buffer] = NULL; | 296 | q->bufs[buffer] = NULL; |
297 | } | 297 | } |
298 | 298 | ||
299 | q->num_buffers -= buffers; | 299 | q->num_buffers -= buffers; |
300 | if (!q->num_buffers) | 300 | if (!q->num_buffers) |
301 | q->memory = 0; | 301 | q->memory = 0; |
302 | INIT_LIST_HEAD(&q->queued_list); | 302 | INIT_LIST_HEAD(&q->queued_list); |
303 | } | 303 | } |
304 | 304 | ||
305 | /** | 305 | /** |
306 | * __verify_planes_array() - verify that the planes array passed in struct | 306 | * __verify_planes_array() - verify that the planes array passed in struct |
307 | * v4l2_buffer from userspace can be safely used | 307 | * v4l2_buffer from userspace can be safely used |
308 | */ | 308 | */ |
309 | static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b) | 309 | static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b) |
310 | { | 310 | { |
311 | if (!V4L2_TYPE_IS_MULTIPLANAR(b->type)) | 311 | if (!V4L2_TYPE_IS_MULTIPLANAR(b->type)) |
312 | return 0; | 312 | return 0; |
313 | 313 | ||
314 | /* Is memory for copying plane information present? */ | 314 | /* Is memory for copying plane information present? */ |
315 | if (NULL == b->m.planes) { | 315 | if (NULL == b->m.planes) { |
316 | dprintk(1, "Multi-planar buffer passed but " | 316 | dprintk(1, "Multi-planar buffer passed but " |
317 | "planes array not provided\n"); | 317 | "planes array not provided\n"); |
318 | return -EINVAL; | 318 | return -EINVAL; |
319 | } | 319 | } |
320 | 320 | ||
321 | if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) { | 321 | if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) { |
322 | dprintk(1, "Incorrect planes array length, " | 322 | dprintk(1, "Incorrect planes array length, " |
323 | "expected %d, got %d\n", vb->num_planes, b->length); | 323 | "expected %d, got %d\n", vb->num_planes, b->length); |
324 | return -EINVAL; | 324 | return -EINVAL; |
325 | } | 325 | } |
326 | 326 | ||
327 | return 0; | 327 | return 0; |
328 | } | 328 | } |
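/*
 * Illustrative userspace sketch, not part of this patch: supplying the
 * planes array that __verify_planes_array() checks for.  For multiplanar
 * buffer types, m.planes must be non-NULL and length must cover at least
 * the buffer's plane count (and at most VIDEO_MAX_PLANES).  query_mplane()
 * is a name assumed for the sketch.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int query_mplane(int fd, unsigned int index)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	memset(planes, 0, sizeof(planes));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	buf.index = index;
	buf.m.planes = planes;		/* would fail with -EINVAL if NULL */
	buf.length = VIDEO_MAX_PLANES;	/* array size in entries, not bytes */
	return ioctl(fd, VIDIOC_QUERYBUF, &buf);
}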
329 | 329 | ||
330 | /** | 330 | /** |
331 | * __buffer_in_use() - return true if the buffer is in use and | 331 | * __buffer_in_use() - return true if the buffer is in use and |
332 | * the queue cannot be freed (by means of a REQBUFS(0) call) | 332 | * the queue cannot be freed (by means of a REQBUFS(0) call) |
333 | */ | 333 | */ |
334 | static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb) | 334 | static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb) |
335 | { | 335 | { |
336 | unsigned int plane; | 336 | unsigned int plane; |
337 | for (plane = 0; plane < vb->num_planes; ++plane) { | 337 | for (plane = 0; plane < vb->num_planes; ++plane) { |
338 | void *mem_priv = vb->planes[plane].mem_priv; | 338 | void *mem_priv = vb->planes[plane].mem_priv; |
339 | /* | 339 | /* |
340 | * If num_users() has not been provided, call_memop | 340 | * If num_users() has not been provided, call_memop |
341 | * will return 0, apparently nobody cares about this | 341 | * will return 0, apparently nobody cares about this |
342 | * case anyway. If num_users() returns more than 1, | 342 | * case anyway. If num_users() returns more than 1, |
343 | * we are not the only user of the plane's memory. | 343 | * we are not the only user of the plane's memory. |
344 | */ | 344 | */ |
345 | if (mem_priv && call_memop(q, num_users, mem_priv) > 1) | 345 | if (mem_priv && call_memop(q, num_users, mem_priv) > 1) |
346 | return true; | 346 | return true; |
347 | } | 347 | } |
348 | return false; | 348 | return false; |
349 | } | 349 | } |
350 | 350 | ||
351 | /** | 351 | /** |
352 | * __buffers_in_use() - return true if any buffers on the queue are in use and | 352 | * __buffers_in_use() - return true if any buffers on the queue are in use and |
353 | * the queue cannot be freed (by means of a REQBUFS(0) call) | 353 | * the queue cannot be freed (by means of a REQBUFS(0) call) |
354 | */ | 354 | */ |
355 | static bool __buffers_in_use(struct vb2_queue *q) | 355 | static bool __buffers_in_use(struct vb2_queue *q) |
356 | { | 356 | { |
357 | unsigned int buffer; | 357 | unsigned int buffer; |
358 | for (buffer = 0; buffer < q->num_buffers; ++buffer) { | 358 | for (buffer = 0; buffer < q->num_buffers; ++buffer) { |
359 | if (__buffer_in_use(q, q->bufs[buffer])) | 359 | if (__buffer_in_use(q, q->bufs[buffer])) |
360 | return true; | 360 | return true; |
361 | } | 361 | } |
362 | return false; | 362 | return false; |
363 | } | 363 | } |
364 | 364 | ||
365 | /** | 365 | /** |
366 | * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be | 366 | * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be |
367 | * returned to userspace | 367 | * returned to userspace |
368 | */ | 368 | */ |
369 | static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) | 369 | static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) |
370 | { | 370 | { |
371 | struct vb2_queue *q = vb->vb2_queue; | 371 | struct vb2_queue *q = vb->vb2_queue; |
372 | 372 | ||
373 | /* Copy back data such as timestamp, flags, etc. */ | 373 | /* Copy back data such as timestamp, flags, etc. */ |
374 | memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m)); | 374 | memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m)); |
375 | b->reserved2 = vb->v4l2_buf.reserved2; | 375 | b->reserved2 = vb->v4l2_buf.reserved2; |
376 | b->reserved = vb->v4l2_buf.reserved; | 376 | b->reserved = vb->v4l2_buf.reserved; |
377 | 377 | ||
378 | if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) { | 378 | if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) { |
379 | /* | 379 | /* |
380 | * Fill in plane-related data if userspace provided an array | 380 | * Fill in plane-related data if userspace provided an array |
381 | * for it. The caller has already verified memory and size. | 381 | * for it. The caller has already verified memory and size. |
382 | */ | 382 | */ |
383 | b->length = vb->num_planes; | 383 | b->length = vb->num_planes; |
384 | memcpy(b->m.planes, vb->v4l2_planes, | 384 | memcpy(b->m.planes, vb->v4l2_planes, |
385 | b->length * sizeof(struct v4l2_plane)); | 385 | b->length * sizeof(struct v4l2_plane)); |
386 | } else { | 386 | } else { |
387 | /* | 387 | /* |
388 | * We use length and offset in v4l2_planes array even for | 388 | * We use length and offset in v4l2_planes array even for |
389 | * single-planar buffers, but userspace does not. | 389 | * single-planar buffers, but userspace does not. |
390 | */ | 390 | */ |
391 | b->length = vb->v4l2_planes[0].length; | 391 | b->length = vb->v4l2_planes[0].length; |
392 | b->bytesused = vb->v4l2_planes[0].bytesused; | 392 | b->bytesused = vb->v4l2_planes[0].bytesused; |
393 | if (q->memory == V4L2_MEMORY_MMAP) | 393 | if (q->memory == V4L2_MEMORY_MMAP) |
394 | b->m.offset = vb->v4l2_planes[0].m.mem_offset; | 394 | b->m.offset = vb->v4l2_planes[0].m.mem_offset; |
395 | else if (q->memory == V4L2_MEMORY_USERPTR) | 395 | else if (q->memory == V4L2_MEMORY_USERPTR) |
396 | b->m.userptr = vb->v4l2_planes[0].m.userptr; | 396 | b->m.userptr = vb->v4l2_planes[0].m.userptr; |
397 | else if (q->memory == V4L2_MEMORY_DMABUF) | 397 | else if (q->memory == V4L2_MEMORY_DMABUF) |
398 | b->m.fd = vb->v4l2_planes[0].m.fd; | 398 | b->m.fd = vb->v4l2_planes[0].m.fd; |
399 | } | 399 | } |
400 | 400 | ||
401 | /* | 401 | /* |
402 | * Clear any buffer state related flags. | 402 | * Clear any buffer state related flags. |
403 | */ | 403 | */ |
404 | b->flags &= ~V4L2_BUFFER_STATE_FLAGS; | 404 | b->flags &= ~V4L2_BUFFER_STATE_FLAGS; |
405 | 405 | ||
406 | switch (vb->state) { | 406 | switch (vb->state) { |
407 | case VB2_BUF_STATE_QUEUED: | 407 | case VB2_BUF_STATE_QUEUED: |
408 | case VB2_BUF_STATE_ACTIVE: | 408 | case VB2_BUF_STATE_ACTIVE: |
409 | b->flags |= V4L2_BUF_FLAG_QUEUED; | 409 | b->flags |= V4L2_BUF_FLAG_QUEUED; |
410 | break; | 410 | break; |
411 | case VB2_BUF_STATE_ERROR: | 411 | case VB2_BUF_STATE_ERROR: |
412 | b->flags |= V4L2_BUF_FLAG_ERROR; | 412 | b->flags |= V4L2_BUF_FLAG_ERROR; |
413 | /* fall through */ | 413 | /* fall through */ |
414 | case VB2_BUF_STATE_DONE: | 414 | case VB2_BUF_STATE_DONE: |
415 | b->flags |= V4L2_BUF_FLAG_DONE; | 415 | b->flags |= V4L2_BUF_FLAG_DONE; |
416 | break; | 416 | break; |
417 | case VB2_BUF_STATE_PREPARED: | 417 | case VB2_BUF_STATE_PREPARED: |
418 | b->flags |= V4L2_BUF_FLAG_PREPARED; | 418 | b->flags |= V4L2_BUF_FLAG_PREPARED; |
419 | break; | 419 | break; |
420 | case VB2_BUF_STATE_DEQUEUED: | 420 | case VB2_BUF_STATE_DEQUEUED: |
421 | /* nothing */ | 421 | /* nothing */ |
422 | break; | 422 | break; |
423 | } | 423 | } |
424 | 424 | ||
425 | if (__buffer_in_use(q, vb)) | 425 | if (__buffer_in_use(q, vb)) |
426 | b->flags |= V4L2_BUF_FLAG_MAPPED; | 426 | b->flags |= V4L2_BUF_FLAG_MAPPED; |
427 | } | 427 | } |
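/*
 * Illustrative userspace sketch, not part of this patch: interpreting the
 * state flags set by __fill_v4l2_buffer() after a dequeue.  A buffer that
 * finished with an error carries both V4L2_BUF_FLAG_DONE and
 * V4L2_BUF_FLAG_ERROR, per the fall-through above.  dequeue() is a name
 * assumed for the sketch.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int dequeue(int fd)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
		return -1;
	if (buf.flags & V4L2_BUF_FLAG_ERROR)
		fprintf(stderr, "buffer %u completed with an error\n",
			buf.index);
	return buf.index;
}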
428 | 428 | ||
429 | /** | 429 | /** |
430 | * vb2_querybuf() - query video buffer information | 430 | * vb2_querybuf() - query video buffer information |
431 | * @q: videobuf queue | 431 | * @q: videobuf queue |
432 | * @b: buffer struct passed from userspace to vidioc_querybuf handler | 432 | * @b: buffer struct passed from userspace to vidioc_querybuf handler |
433 | * in driver | 433 | * in driver |
434 | * | 434 | * |
435 | * Should be called from vidioc_querybuf ioctl handler in driver. | 435 | * Should be called from vidioc_querybuf ioctl handler in driver. |
436 | * This function will verify the passed v4l2_buffer structure and fill in the | 436 | * This function will verify the passed v4l2_buffer structure and fill in the |
437 | * relevant information for userspace. | 437 | * relevant information for userspace. |
438 | * | 438 | * |
439 | * The return values from this function are intended to be directly returned | 439 | * The return values from this function are intended to be directly returned |
440 | * from vidioc_querybuf handler in driver. | 440 | * from vidioc_querybuf handler in driver. |
441 | */ | 441 | */ |
442 | int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b) | 442 | int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b) |
443 | { | 443 | { |
444 | struct vb2_buffer *vb; | 444 | struct vb2_buffer *vb; |
445 | int ret; | 445 | int ret; |
446 | 446 | ||
447 | if (b->type != q->type) { | 447 | if (b->type != q->type) { |
448 | dprintk(1, "querybuf: wrong buffer type\n"); | 448 | dprintk(1, "querybuf: wrong buffer type\n"); |
449 | return -EINVAL; | 449 | return -EINVAL; |
450 | } | 450 | } |
451 | 451 | ||
452 | if (b->index >= q->num_buffers) { | 452 | if (b->index >= q->num_buffers) { |
453 | dprintk(1, "querybuf: buffer index out of range\n"); | 453 | dprintk(1, "querybuf: buffer index out of range\n"); |
454 | return -EINVAL; | 454 | return -EINVAL; |
455 | } | 455 | } |
456 | vb = q->bufs[b->index]; | 456 | vb = q->bufs[b->index]; |
457 | ret = __verify_planes_array(vb, b); | 457 | ret = __verify_planes_array(vb, b); |
458 | if (!ret) | 458 | if (!ret) |
459 | __fill_v4l2_buffer(vb, b); | 459 | __fill_v4l2_buffer(vb, b); |
460 | return ret; | 460 | return ret; |
461 | } | 461 | } |
462 | EXPORT_SYMBOL(vb2_querybuf); | 462 | EXPORT_SYMBOL(vb2_querybuf); |
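/*
 * Illustrative driver-side sketch, not part of this patch: a typical
 * vidioc_querybuf handler simply delegates to vb2_querybuf() and passes
 * its result straight back, as the comment above intends.  "struct my_ctx"
 * and the use of file->private_data to reach it are hypothetical.
 */
struct my_ctx {
	struct vb2_queue queue;
	/* ... other driver state ... */
};

static int my_vidioc_querybuf(struct file *file, void *priv,
			      struct v4l2_buffer *b)
{
	struct my_ctx *ctx = file->private_data;

	return vb2_querybuf(&ctx->queue, b);
}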
463 | 463 | ||
464 | /** | 464 | /** |
465 | * __verify_userptr_ops() - verify that all memory operations required for | 465 | * __verify_userptr_ops() - verify that all memory operations required for |
466 | * USERPTR queue type have been provided | 466 | * USERPTR queue type have been provided |
467 | */ | 467 | */ |
468 | static int __verify_userptr_ops(struct vb2_queue *q) | 468 | static int __verify_userptr_ops(struct vb2_queue *q) |
469 | { | 469 | { |
470 | if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr || | 470 | if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr || |
471 | !q->mem_ops->put_userptr) | 471 | !q->mem_ops->put_userptr) |
472 | return -EINVAL; | 472 | return -EINVAL; |
473 | 473 | ||
474 | return 0; | 474 | return 0; |
475 | } | 475 | } |
476 | 476 | ||
477 | /** | 477 | /** |
478 | * __verify_mmap_ops() - verify that all memory operations required for | 478 | * __verify_mmap_ops() - verify that all memory operations required for |
479 | * MMAP queue type have been provided | 479 | * MMAP queue type have been provided |
480 | */ | 480 | */ |
481 | static int __verify_mmap_ops(struct vb2_queue *q) | 481 | static int __verify_mmap_ops(struct vb2_queue *q) |
482 | { | 482 | { |
483 | if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc || | 483 | if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc || |
484 | !q->mem_ops->put || !q->mem_ops->mmap) | 484 | !q->mem_ops->put || !q->mem_ops->mmap) |
485 | return -EINVAL; | 485 | return -EINVAL; |
486 | 486 | ||
487 | return 0; | 487 | return 0; |
488 | } | 488 | } |
489 | 489 | ||
490 | /** | 490 | /** |
491 | * __verify_dmabuf_ops() - verify that all memory operations required for | 491 | * __verify_dmabuf_ops() - verify that all memory operations required for |
492 | * DMABUF queue type have been provided | 492 | * DMABUF queue type have been provided |
493 | */ | 493 | */ |
494 | static int __verify_dmabuf_ops(struct vb2_queue *q) | 494 | static int __verify_dmabuf_ops(struct vb2_queue *q) |
495 | { | 495 | { |
496 | if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf || | 496 | if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf || |
497 | !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf || | 497 | !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf || |
498 | !q->mem_ops->unmap_dmabuf) | 498 | !q->mem_ops->unmap_dmabuf) |
499 | return -EINVAL; | 499 | return -EINVAL; |
500 | 500 | ||
501 | return 0; | 501 | return 0; |
502 | } | 502 | } |
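/*
 * Illustrative sketch, not part of this patch: the shape of a vb2_mem_ops
 * that satisfies all three __verify_*_ops() checks above.  Every my_*
 * callback is hypothetical; real drivers normally plug in a stock
 * allocator such as videobuf2-dma-contig rather than rolling their own.
 */
static const struct vb2_mem_ops my_mem_ops = {
	.alloc		= my_alloc,		/* required for VB2_MMAP */
	.put		= my_put,
	.mmap		= my_mmap,
	.get_userptr	= my_get_userptr,	/* required for VB2_USERPTR */
	.put_userptr	= my_put_userptr,
	.attach_dmabuf	= my_attach_dmabuf,	/* required for VB2_DMABUF */
	.detach_dmabuf	= my_detach_dmabuf,
	.map_dmabuf	= my_map_dmabuf,
	.unmap_dmabuf	= my_unmap_dmabuf,
};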
503 | 503 | ||
504 | /** | 504 | /** |
505 | * __verify_memory_type() - Check whether the memory type and buffer type | 505 | * __verify_memory_type() - Check whether the memory type and buffer type |
506 | * passed to a buffer operation are compatible with the queue. | 506 | * passed to a buffer operation are compatible with the queue. |
507 | */ | 507 | */ |
508 | static int __verify_memory_type(struct vb2_queue *q, | 508 | static int __verify_memory_type(struct vb2_queue *q, |
509 | enum v4l2_memory memory, enum v4l2_buf_type type) | 509 | enum v4l2_memory memory, enum v4l2_buf_type type) |
510 | { | 510 | { |
511 | if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR && | 511 | if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR && |
512 | memory != V4L2_MEMORY_DMABUF) { | 512 | memory != V4L2_MEMORY_DMABUF) { |
513 | dprintk(1, "reqbufs: unsupported memory type\n"); | 513 | dprintk(1, "reqbufs: unsupported memory type\n"); |
514 | return -EINVAL; | 514 | return -EINVAL; |
515 | } | 515 | } |
516 | 516 | ||
517 | if (type != q->type) { | 517 | if (type != q->type) { |
518 | dprintk(1, "reqbufs: requested type is incorrect\n"); | 518 | dprintk(1, "reqbufs: requested type is incorrect\n"); |
519 | return -EINVAL; | 519 | return -EINVAL; |
520 | } | 520 | } |
521 | 521 | ||
522 | /* | 522 | /* |
523 | * Make sure all the required memory ops for given memory type | 523 | * Make sure all the required memory ops for given memory type |
524 | * are available. | 524 | * are available. |
525 | */ | 525 | */ |
526 | if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) { | 526 | if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) { |
527 | dprintk(1, "reqbufs: MMAP for current setup unsupported\n"); | 527 | dprintk(1, "reqbufs: MMAP for current setup unsupported\n"); |
528 | return -EINVAL; | 528 | return -EINVAL; |
529 | } | 529 | } |
530 | 530 | ||
531 | if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) { | 531 | if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) { |
532 | dprintk(1, "reqbufs: USERPTR for current setup unsupported\n"); | 532 | dprintk(1, "reqbufs: USERPTR for current setup unsupported\n"); |
533 | return -EINVAL; | 533 | return -EINVAL; |
534 | } | 534 | } |
535 | 535 | ||
536 | if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) { | 536 | if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) { |
537 | dprintk(1, "reqbufs: DMABUF for current setup unsupported\n"); | 537 | dprintk(1, "reqbufs: DMABUF for current setup unsupported\n"); |
538 | return -EINVAL; | 538 | return -EINVAL; |
539 | } | 539 | } |
540 | 540 | ||
541 | /* | 541 | /* |
542 | * Place the busy tests at the end: -EBUSY can be ignored when | 542 | * Place the busy tests at the end: -EBUSY can be ignored when |
543 | * create_bufs is called with count == 0, but count == 0 should still | 543 | * create_bufs is called with count == 0, but count == 0 should still |
544 | * do the memory and type validation. | 544 | * do the memory and type validation. |
545 | */ | 545 | */ |
546 | if (q->fileio) { | 546 | if (q->fileio) { |
547 | dprintk(1, "reqbufs: file io in progress\n"); | 547 | dprintk(1, "reqbufs: file io in progress\n"); |
548 | return -EBUSY; | 548 | return -EBUSY; |
549 | } | 549 | } |
550 | return 0; | 550 | return 0; |
551 | } | 551 | } |
552 | 552 | ||
553 | /** | 553 | /** |
554 | * __reqbufs() - Allocate or free buffers in preparation for streaming | 554 | * __reqbufs() - Allocate or free buffers in preparation for streaming |
555 | * @q: videobuf2 queue | 555 | * @q: videobuf2 queue |
556 | * @req: struct passed from userspace to vidioc_reqbufs handler in driver | 556 | * @req: struct passed from userspace to vidioc_reqbufs handler in driver |
557 | * | 557 | * |
558 | * Should be called from vidioc_reqbufs ioctl handler of a driver. | 558 | * Should be called from vidioc_reqbufs ioctl handler of a driver. |
559 | * This function: | 559 | * This function: |
560 | * 1) verifies streaming parameters passed from the userspace, | 560 | * 1) verifies streaming parameters passed from the userspace, |
561 | * 2) sets up the queue, | 561 | * 2) sets up the queue, |
562 | * 3) negotiates number of buffers and planes per buffer with the driver | 562 | * 3) negotiates number of buffers and planes per buffer with the driver |
563 | * to be used during streaming, | 563 | * to be used during streaming, |
564 | * 4) allocates internal buffer structures (struct vb2_buffer), according to | 564 | * 4) allocates internal buffer structures (struct vb2_buffer), according to |
565 | * the agreed parameters, | 565 | * the agreed parameters, |
566 | * 5) for MMAP memory type, allocates actual video memory, using the | 566 | * 5) for MMAP memory type, allocates actual video memory, using the |
567 | * memory handling/allocation routines provided during queue initialization | 567 | * memory handling/allocation routines provided during queue initialization |
568 | * | 568 | * |
569 | * If req->count is 0, all the memory will be freed instead. | 569 | * If req->count is 0, all the memory will be freed instead. |
570 | * If the queue has been allocated previously (by a previous vb2_reqbufs call) | 570 | * If the queue has been allocated previously (by a previous vb2_reqbufs call) |
571 | * and the queue is not busy, memory will be reallocated. | 571 | * and the queue is not busy, memory will be reallocated. |
572 | * | 572 | * |
573 | * The return values from this function are intended to be directly returned | 573 | * The return values from this function are intended to be directly returned |
574 | * from vidioc_reqbufs handler in driver. | 574 | * from vidioc_reqbufs handler in driver. |
575 | */ | 575 | */ |
576 | static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) | 576 | static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) |
577 | { | 577 | { |
578 | unsigned int num_buffers, allocated_buffers, num_planes = 0; | 578 | unsigned int num_buffers, allocated_buffers, num_planes = 0; |
579 | int ret; | 579 | int ret; |
580 | 580 | ||
581 | if (q->streaming) { | 581 | if (q->streaming) { |
582 | dprintk(1, "reqbufs: streaming active\n"); | 582 | dprintk(1, "reqbufs: streaming active\n"); |
583 | return -EBUSY; | 583 | return -EBUSY; |
584 | } | 584 | } |
585 | 585 | ||
586 | if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) { | 586 | if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) { |
587 | /* | 587 | /* |
588 | * Buffers may already be allocated; before freeing or reallocating | 588 | * Buffers may already be allocated; before freeing or reallocating |
589 | * them, first check that they are not in use. | 589 | * them, first check that they are not in use. |
590 | */ | 590 | */ |
591 | if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) { | 591 | if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) { |
592 | dprintk(1, "reqbufs: memory in use, cannot free\n"); | 592 | dprintk(1, "reqbufs: memory in use, cannot free\n"); |
593 | return -EBUSY; | 593 | return -EBUSY; |
594 | } | 594 | } |
595 | 595 | ||
596 | __vb2_queue_free(q, q->num_buffers); | 596 | __vb2_queue_free(q, q->num_buffers); |
597 | 597 | ||
598 | /* | 598 | /* |
599 | * In case of REQBUFS(0) return immediately without calling | 599 | * In case of REQBUFS(0) return immediately without calling |
600 | * driver's queue_setup() callback and allocating resources. | 600 | * driver's queue_setup() callback and allocating resources. |
601 | */ | 601 | */ |
602 | if (req->count == 0) | 602 | if (req->count == 0) |
603 | return 0; | 603 | return 0; |
604 | } | 604 | } |
605 | 605 | ||
606 | /* | 606 | /* |
607 | * Make sure the requested values and current defaults are sane. | 607 | * Make sure the requested values and current defaults are sane. |
608 | */ | 608 | */ |
609 | num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME); | 609 | num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME); |
610 | memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); | 610 | memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); |
611 | memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); | 611 | memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); |
612 | q->memory = req->memory; | 612 | q->memory = req->memory; |
613 | 613 | ||
614 | /* | 614 | /* |
615 | * Ask the driver how many buffers and planes per buffer it requires. | 615 | * Ask the driver how many buffers and planes per buffer it requires. |
616 | * The driver also sets the size and allocator context for each plane. | 616 | * The driver also sets the size and allocator context for each plane. |
617 | */ | 617 | */ |
618 | ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes, | 618 | ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes, |
619 | q->plane_sizes, q->alloc_ctx); | 619 | q->plane_sizes, q->alloc_ctx); |
620 | if (ret) | 620 | if (ret) |
621 | return ret; | 621 | return ret; |
622 | 622 | ||
623 | /* Finally, allocate buffers and video memory */ | 623 | /* Finally, allocate buffers and video memory */ |
624 | ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes); | 624 | ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes); |
625 | if (ret == 0) { | 625 | if (ret == 0) { |
626 | dprintk(1, "Memory allocation failed\n"); | 626 | dprintk(1, "Memory allocation failed\n"); |
627 | return -ENOMEM; | 627 | return -ENOMEM; |
628 | } | 628 | } |
629 | 629 | ||
630 | allocated_buffers = ret; | 630 | allocated_buffers = ret; |
631 | 631 | ||
632 | /* | 632 | /* |
633 | * Check if driver can handle the allocated number of buffers. | 633 | * Check if driver can handle the allocated number of buffers. |
634 | */ | 634 | */ |
635 | if (allocated_buffers < num_buffers) { | 635 | if (allocated_buffers < num_buffers) { |
636 | num_buffers = allocated_buffers; | 636 | num_buffers = allocated_buffers; |
637 | 637 | ||
638 | ret = call_qop(q, queue_setup, q, NULL, &num_buffers, | 638 | ret = call_qop(q, queue_setup, q, NULL, &num_buffers, |
639 | &num_planes, q->plane_sizes, q->alloc_ctx); | 639 | &num_planes, q->plane_sizes, q->alloc_ctx); |
640 | 640 | ||
641 | if (!ret && allocated_buffers < num_buffers) | 641 | if (!ret && allocated_buffers < num_buffers) |
642 | ret = -ENOMEM; | 642 | ret = -ENOMEM; |
643 | 643 | ||
644 | /* | 644 | /* |
645 | * Either the driver has accepted a smaller number of buffers, | 645 | * Either the driver has accepted a smaller number of buffers, |
646 | * or .queue_setup() returned an error | 646 | * or .queue_setup() returned an error |
647 | */ | 647 | */ |
648 | } | 648 | } |
649 | 649 | ||
650 | q->num_buffers = allocated_buffers; | 650 | q->num_buffers = allocated_buffers; |
651 | 651 | ||
652 | if (ret < 0) { | 652 | if (ret < 0) { |
653 | __vb2_queue_free(q, allocated_buffers); | 653 | __vb2_queue_free(q, allocated_buffers); |
654 | return ret; | 654 | return ret; |
655 | } | 655 | } |
656 | 656 | ||
657 | /* | 657 | /* |
658 | * Return the number of successfully allocated buffers | 658 | * Return the number of successfully allocated buffers |
659 | * to the userspace. | 659 | * to the userspace. |
660 | */ | 660 | */ |
661 | req->count = allocated_buffers; | 661 | req->count = allocated_buffers; |
662 | 662 | ||
663 | return 0; | 663 | return 0; |
664 | } | 664 | } |
665 | 665 | ||
666 | /** | 666 | /** |
667 | * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and | 667 | * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and |
668 | * type values. | 668 | * type values. |
669 | * @q: videobuf2 queue | 669 | * @q: videobuf2 queue |
670 | * @req: struct passed from userspace to vidioc_reqbufs handler in driver | 670 | * @req: struct passed from userspace to vidioc_reqbufs handler in driver |
671 | */ | 671 | */ |
672 | int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) | 672 | int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) |
673 | { | 673 | { |
674 | int ret = __verify_memory_type(q, req->memory, req->type); | 674 | int ret = __verify_memory_type(q, req->memory, req->type); |
675 | 675 | ||
676 | return ret ? ret : __reqbufs(q, req); | 676 | return ret ? ret : __reqbufs(q, req); |
677 | } | 677 | } |
678 | EXPORT_SYMBOL_GPL(vb2_reqbufs); | 678 | EXPORT_SYMBOL_GPL(vb2_reqbufs); |
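/*
 * Illustrative driver-side sketch, not part of this patch: as with
 * vb2_querybuf() above, the intended pattern is to return vb2_reqbufs()'s
 * result directly from the ioctl handler.  "my_ctx" is the hypothetical
 * context from the earlier sketch.
 */
static int my_vidioc_reqbufs(struct file *file, void *priv,
			     struct v4l2_requestbuffers *req)
{
	struct my_ctx *ctx = file->private_data;

	return vb2_reqbufs(&ctx->queue, req);
}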
679 | 679 | ||
680 | /** | 680 | /** |
681 | * __create_bufs() - Allocate buffers and any required auxiliary structs | 681 | * __create_bufs() - Allocate buffers and any required auxiliary structs |
682 | * @q: videobuf2 queue | 682 | * @q: videobuf2 queue |
683 | * @create: creation parameters, passed from userspace to vidioc_create_bufs | 683 | * @create: creation parameters, passed from userspace to vidioc_create_bufs |
684 | * handler in driver | 684 | * handler in driver |
685 | * | 685 | * |
686 | * Should be called from vidioc_create_bufs ioctl handler of a driver. | 686 | * Should be called from vidioc_create_bufs ioctl handler of a driver. |
687 | * This function: | 687 | * This function: |
688 | * 1) verifies parameter sanity | 688 | * 1) verifies parameter sanity |
689 | * 2) calls the .queue_setup() queue operation | 689 | * 2) calls the .queue_setup() queue operation |
690 | * 3) performs any necessary memory allocations | 690 | * 3) performs any necessary memory allocations |
691 | * | 691 | * |
692 | * The return values from this function are intended to be directly returned | 692 | * The return values from this function are intended to be directly returned |
693 | * from vidioc_create_bufs handler in driver. | 693 | * from vidioc_create_bufs handler in driver. |
694 | */ | 694 | */ |
695 | static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) | 695 | static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) |
696 | { | 696 | { |
697 | unsigned int num_planes = 0, num_buffers, allocated_buffers; | 697 | unsigned int num_planes = 0, num_buffers, allocated_buffers; |
698 | int ret; | 698 | int ret; |
699 | 699 | ||
700 | if (q->num_buffers == VIDEO_MAX_FRAME) { | 700 | if (q->num_buffers == VIDEO_MAX_FRAME) { |
701 | dprintk(1, "%s(): maximum number of buffers already allocated\n", | 701 | dprintk(1, "%s(): maximum number of buffers already allocated\n", |
702 | __func__); | 702 | __func__); |
703 | return -ENOBUFS; | 703 | return -ENOBUFS; |
704 | } | 704 | } |
705 | 705 | ||
706 | if (!q->num_buffers) { | 706 | if (!q->num_buffers) { |
707 | memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); | 707 | memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); |
708 | memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); | 708 | memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); |
709 | q->memory = create->memory; | 709 | q->memory = create->memory; |
710 | } | 710 | } |
711 | 711 | ||
712 | num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers); | 712 | num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers); |
713 | 713 | ||
714 | /* | 714 | /* |
715 | * Ask the driver whether the requested number of buffers, planes per | 715 | * Ask the driver whether the requested number of buffers, planes per |
716 | * buffer and their sizes are acceptable. | 716 | * buffer and their sizes are acceptable. |
717 | */ | 717 | */ |
718 | ret = call_qop(q, queue_setup, q, &create->format, &num_buffers, | 718 | ret = call_qop(q, queue_setup, q, &create->format, &num_buffers, |
719 | &num_planes, q->plane_sizes, q->alloc_ctx); | 719 | &num_planes, q->plane_sizes, q->alloc_ctx); |
720 | if (ret) | 720 | if (ret) |
721 | return ret; | 721 | return ret; |
722 | 722 | ||
723 | /* Finally, allocate buffers and video memory */ | 723 | /* Finally, allocate buffers and video memory */ |
724 | ret = __vb2_queue_alloc(q, create->memory, num_buffers, | 724 | ret = __vb2_queue_alloc(q, create->memory, num_buffers, |
725 | num_planes); | 725 | num_planes); |
726 | if (ret == 0) { | 726 | if (ret == 0) { |
727 | dprintk(1, "Memory allocation failed\n"); | 727 | dprintk(1, "Memory allocation failed\n"); |
728 | return -ENOMEM; | 728 | return -ENOMEM; |
729 | } | 729 | } |
730 | 730 | ||
731 | allocated_buffers = ret; | 731 | allocated_buffers = ret; |
732 | 732 | ||
733 | /* | 733 | /* |
734 | * Check if driver can handle the so far allocated number of buffers. | 734 | * Check if driver can handle the so far allocated number of buffers. |
735 | */ | 735 | */ |
736 | if (ret < num_buffers) { | 736 | if (ret < num_buffers) { |
737 | num_buffers = ret; | 737 | num_buffers = ret; |
738 | 738 | ||
739 | /* | 739 | /* |
740 | * q->num_buffers contains the total number of buffers that the | 740 | * q->num_buffers contains the total number of buffers that the |
741 | * queue driver has set up. | 741 | * queue driver has set up. |
742 | */ | 742 | */ |
743 | ret = call_qop(q, queue_setup, q, &create->format, &num_buffers, | 743 | ret = call_qop(q, queue_setup, q, &create->format, &num_buffers, |
744 | &num_planes, q->plane_sizes, q->alloc_ctx); | 744 | &num_planes, q->plane_sizes, q->alloc_ctx); |
745 | 745 | ||
746 | if (!ret && allocated_buffers < num_buffers) | 746 | if (!ret && allocated_buffers < num_buffers) |
747 | ret = -ENOMEM; | 747 | ret = -ENOMEM; |
748 | 748 | ||
749 | /* | 749 | /* |
750 | * Either the driver has accepted a smaller number of buffers, | 750 | * Either the driver has accepted a smaller number of buffers, |
751 | * or .queue_setup() returned an error | 751 | * or .queue_setup() returned an error |
752 | */ | 752 | */ |
753 | } | 753 | } |
754 | 754 | ||
755 | q->num_buffers += allocated_buffers; | 755 | q->num_buffers += allocated_buffers; |
756 | 756 | ||
757 | if (ret < 0) { | 757 | if (ret < 0) { |
758 | __vb2_queue_free(q, allocated_buffers); | 758 | __vb2_queue_free(q, allocated_buffers); |
759 | return ret; | 759 | return ret; |
760 | } | 760 | } |
761 | 761 | ||
762 | /* | 762 | /* |
763 | * Return the number of successfully allocated buffers | 763 | * Return the number of successfully allocated buffers |
764 | * to the userspace. | 764 | * to the userspace. |
765 | */ | 765 | */ |
766 | create->count = allocated_buffers; | 766 | create->count = allocated_buffers; |
767 | 767 | ||
768 | return 0; | 768 | return 0; |
769 | } | 769 | } |
770 | 770 | ||
771 | /** | 771 | /** |
772 | * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the | 772 | * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the |
773 | * memory and type values. | 773 | * memory and type values. |
774 | * @q: videobuf2 queue | 774 | * @q: videobuf2 queue |
775 | * @create: creation parameters, passed from userspace to vidioc_create_bufs | 775 | * @create: creation parameters, passed from userspace to vidioc_create_bufs |
776 | * handler in driver | 776 | * handler in driver |
777 | */ | 777 | */ |
778 | int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) | 778 | int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) |
779 | { | 779 | { |
780 | int ret = __verify_memory_type(q, create->memory, create->format.type); | 780 | int ret = __verify_memory_type(q, create->memory, create->format.type); |
781 | 781 | ||
782 | create->index = q->num_buffers; | 782 | create->index = q->num_buffers; |
783 | if (create->count == 0) | 783 | if (create->count == 0) |
784 | return ret != -EBUSY ? ret : 0; | 784 | return ret != -EBUSY ? ret : 0; |
785 | return ret ? ret : __create_bufs(q, create); | 785 | return ret ? ret : __create_bufs(q, create); |
786 | } | 786 | } |
787 | EXPORT_SYMBOL_GPL(vb2_create_bufs); | 787 | EXPORT_SYMBOL_GPL(vb2_create_bufs); |
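/*
 * Illustrative userspace sketch, not part of this patch: adding buffers
 * with VIDIOC_CREATE_BUFS.  On success, create.index holds the index of
 * the first newly created buffer and create.count the number actually
 * added, matching the code above.  add_buffers() is a name assumed for
 * the sketch.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int add_buffers(int fd, unsigned int count)
{
	struct v4l2_create_buffers create;

	memset(&create, 0, sizeof(create));
	create.count = count;
	create.memory = V4L2_MEMORY_MMAP;
	create.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(fd, VIDIOC_G_FMT, &create.format) < 0)
		return -1;
	if (ioctl(fd, VIDIOC_CREATE_BUFS, &create) < 0)
		return -1;
	return create.index;	/* index of the first new buffer */
}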
788 | 788 | ||
789 | /** | 789 | /** |
790 | * vb2_plane_vaddr() - Return a kernel virtual address of a given plane | 790 | * vb2_plane_vaddr() - Return a kernel virtual address of a given plane |
791 | * @vb: vb2_buffer to which the plane in question belongs | 791 | * @vb: vb2_buffer to which the plane in question belongs |
792 | * @plane_no: plane number for which the address is to be returned | 792 | * @plane_no: plane number for which the address is to be returned |
793 | * | 793 | * |
794 | * This function returns a kernel virtual address of a given plane if | 794 | * This function returns a kernel virtual address of a given plane if |
795 | * such a mapping exists, NULL otherwise. | 795 | * such a mapping exists, NULL otherwise. |
796 | */ | 796 | */ |
797 | void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) | 797 | void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) |
798 | { | 798 | { |
799 | struct vb2_queue *q = vb->vb2_queue; | 799 | struct vb2_queue *q = vb->vb2_queue; |
800 | 800 | ||
801 | if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) | 801 | if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) |
802 | return NULL; | 802 | return NULL; |
803 | 803 | ||
804 | return call_memop(q, vaddr, vb->planes[plane_no].mem_priv); | 804 | return call_memop(q, vaddr, vb->planes[plane_no].mem_priv); |
805 | 805 | ||
806 | } | 806 | } |
807 | EXPORT_SYMBOL_GPL(vb2_plane_vaddr); | 807 | EXPORT_SYMBOL_GPL(vb2_plane_vaddr); |
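/*
 * Illustrative driver-side sketch, not part of this patch: using the
 * kernel mapping, when one exists, to pre-fill a plane from the CPU.
 * vb2_plane_size() is the stock helper from videobuf2-core.h;
 * my_clear_plane() is a name assumed for the sketch.
 */
static void my_clear_plane(struct vb2_buffer *vb)
{
	void *vaddr = vb2_plane_vaddr(vb, 0);

	if (vaddr)
		memset(vaddr, 0, vb2_plane_size(vb, 0));
}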
808 | 808 | ||
809 | /** | 809 | /** |
810 | * vb2_plane_cookie() - Return allocator specific cookie for the given plane | 810 | * vb2_plane_cookie() - Return allocator specific cookie for the given plane |
811 | * @vb: vb2_buffer to which the plane in question belongs | 811 | * @vb: vb2_buffer to which the plane in question belongs |
812 | * @plane_no: plane number for which the cookie is to be returned | 812 | * @plane_no: plane number for which the cookie is to be returned |
813 | * | 813 | * |
814 | * This function returns an allocator specific cookie for a given plane if | 814 | * This function returns an allocator specific cookie for a given plane if |
815 | * available, NULL otherwise. The allocator should provide a simple static | 815 | * available, NULL otherwise. The allocator should provide a simple static |
816 | * inline function that converts this cookie to the allocator-specific type | 816 | * inline function that converts this cookie to the allocator-specific type |
817 | * the driver can use directly to access the buffer. This can be, for example, | 817 | * the driver can use directly to access the buffer. This can be, for example, |
818 | * a physical address, a pointer to a scatter list or an IOMMU mapping. | 818 | * a physical address, a pointer to a scatter list or an IOMMU mapping. |
819 | */ | 819 | */ |
820 | void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) | 820 | void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) |
821 | { | 821 | { |
822 | struct vb2_queue *q = vb->vb2_queue; | 822 | struct vb2_queue *q = vb->vb2_queue; |
823 | 823 | ||
824 | if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) | 824 | if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) |
825 | return NULL; | 825 | return NULL; |
826 | 826 | ||
827 | return call_memop(q, cookie, vb->planes[plane_no].mem_priv); | 827 | return call_memop(q, cookie, vb->planes[plane_no].mem_priv); |
828 | } | 828 | } |
829 | EXPORT_SYMBOL_GPL(vb2_plane_cookie); | 829 | EXPORT_SYMBOL_GPL(vb2_plane_cookie); |
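/*
 * Illustrative sketch, not part of this patch: the kind of static inline
 * wrapper the comment above asks allocators to provide.  The cookie
 * layout assumed here (a dma_addr_t behind the returned pointer) is
 * modelled on the dma-contig allocator and is not something this file
 * defines; my_plane_dma_addr() is hypothetical.
 */
static inline dma_addr_t my_plane_dma_addr(struct vb2_buffer *vb,
					   unsigned int plane_no)
{
	dma_addr_t *addr = vb2_plane_cookie(vb, plane_no);

	return addr ? *addr : 0;
}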
830 | 830 | ||
831 | /** | 831 | /** |
832 | * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished | 832 | * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished |
833 | * @vb: vb2_buffer returned from the driver | 833 | * @vb: vb2_buffer returned from the driver |
834 | * @state: either VB2_BUF_STATE_DONE if the operation finished successfully | 834 | * @state: either VB2_BUF_STATE_DONE if the operation finished successfully |
835 | * or VB2_BUF_STATE_ERROR if the operation finished with an error | 835 | * or VB2_BUF_STATE_ERROR if the operation finished with an error |
836 | * | 836 | * |
837 | * This function should be called by the driver after a hardware operation on | 837 | * This function should be called by the driver after a hardware operation on |
838 | * a buffer is finished and the buffer may be returned to userspace. The driver | 838 | * a buffer is finished and the buffer may be returned to userspace. The driver |
839 | * cannot use this buffer anymore until it is queued back to it by videobuf | 839 | * cannot use this buffer anymore until it is queued back to it by videobuf |
840 | * by means of the buf_queue callback. Only buffers previously queued to the | 840 | * by means of the buf_queue callback. Only buffers previously queued to the |
841 | * driver by buf_queue can be passed to this function. | 841 | * driver by buf_queue can be passed to this function. |
842 | */ | 842 | */ |
843 | void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) | 843 | void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) |
844 | { | 844 | { |
845 | struct vb2_queue *q = vb->vb2_queue; | 845 | struct vb2_queue *q = vb->vb2_queue; |
846 | unsigned long flags; | 846 | unsigned long flags; |
847 | unsigned int plane; | 847 | unsigned int plane; |
848 | 848 | ||
849 | if (vb->state != VB2_BUF_STATE_ACTIVE) | 849 | if (vb->state != VB2_BUF_STATE_ACTIVE) |
850 | return; | 850 | return; |
851 | 851 | ||
852 | if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR) | 852 | if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR) |
853 | return; | 853 | return; |
854 | 854 | ||
855 | dprintk(4, "Done processing on buffer %d, state: %d\n", | 855 | dprintk(4, "Done processing on buffer %d, state: %d\n", |
856 | vb->v4l2_buf.index, vb->state); | 856 | vb->v4l2_buf.index, vb->state); |
857 | 857 | ||
858 | /* sync buffers */ | 858 | /* sync buffers */ |
859 | for (plane = 0; plane < vb->num_planes; ++plane) | 859 | for (plane = 0; plane < vb->num_planes; ++plane) |
860 | call_memop(q, finish, vb->planes[plane].mem_priv); | 860 | call_memop(q, finish, vb->planes[plane].mem_priv); |
861 | 861 | ||
862 | /* Add the buffer to the done buffers list */ | 862 | /* Add the buffer to the done buffers list */ |
863 | spin_lock_irqsave(&q->done_lock, flags); | 863 | spin_lock_irqsave(&q->done_lock, flags); |
864 | vb->state = state; | 864 | vb->state = state; |
865 | list_add_tail(&vb->done_entry, &q->done_list); | 865 | list_add_tail(&vb->done_entry, &q->done_list); |
866 | atomic_dec(&q->queued_count); | 866 | atomic_dec(&q->queued_count); |
867 | spin_unlock_irqrestore(&q->done_lock, flags); | 867 | spin_unlock_irqrestore(&q->done_lock, flags); |
868 | 868 | ||
869 | /* Inform any processes that may be waiting for buffers */ | 869 | /* Inform any processes that may be waiting for buffers */ |
870 | wake_up(&q->done_wq); | 870 | wake_up(&q->done_wq); |
871 | } | 871 | } |
872 | EXPORT_SYMBOL_GPL(vb2_buffer_done); | 872 | EXPORT_SYMBOL_GPL(vb2_buffer_done); |
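/*
 * Illustrative driver-side sketch, not part of this patch: completing a
 * buffer from an interrupt handler.  "struct my_dev" and its "active"
 * pointer (remembered by the driver's .buf_queue callback) are
 * hypothetical.  Calling from interrupt context is fine: done_lock is
 * taken with spin_lock_irqsave() above.
 */
#include <linux/interrupt.h>

struct my_dev {
	struct vb2_buffer *active;	/* set by the .buf_queue callback */
};

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;
	struct vb2_buffer *vb = dev->active;

	dev->active = NULL;
	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
	return IRQ_HANDLED;
}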
873 | 873 | ||
874 | /** | 874 | /** |
875 | * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a | 875 | * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a |
876 | * v4l2_buffer by the userspace. The caller has already verified that struct | 876 | * v4l2_buffer by the userspace. The caller has already verified that struct |
877 | * v4l2_buffer has a valid number of planes. | 877 | * v4l2_buffer has a valid number of planes. |
878 | */ | 878 | */ |
879 | static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b, | 879 | static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b, |
880 | struct v4l2_plane *v4l2_planes) | 880 | struct v4l2_plane *v4l2_planes) |
881 | { | 881 | { |
882 | unsigned int plane; | 882 | unsigned int plane; |
883 | 883 | ||
884 | if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { | 884 | if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { |
885 | /* Fill in driver-provided information for OUTPUT types */ | 885 | /* Fill in driver-provided information for OUTPUT types */ |
886 | if (V4L2_TYPE_IS_OUTPUT(b->type)) { | 886 | if (V4L2_TYPE_IS_OUTPUT(b->type)) { |
887 | /* | 887 | /* |
888 | * Will have to go up to b->length when the API starts | 888 | * Will have to go up to b->length when the API starts |
889 | * accepting a variable number of planes. | 889 | * accepting a variable number of planes. |
890 | */ | 890 | */ |
891 | for (plane = 0; plane < vb->num_planes; ++plane) { | 891 | for (plane = 0; plane < vb->num_planes; ++plane) { |
892 | v4l2_planes[plane].bytesused = | 892 | v4l2_planes[plane].bytesused = |
893 | b->m.planes[plane].bytesused; | 893 | b->m.planes[plane].bytesused; |
894 | v4l2_planes[plane].data_offset = | 894 | v4l2_planes[plane].data_offset = |
895 | b->m.planes[plane].data_offset; | 895 | b->m.planes[plane].data_offset; |
896 | } | 896 | } |
897 | } | 897 | } |
898 | 898 | ||
899 | if (b->memory == V4L2_MEMORY_USERPTR) { | 899 | if (b->memory == V4L2_MEMORY_USERPTR) { |
900 | for (plane = 0; plane < vb->num_planes; ++plane) { | 900 | for (plane = 0; plane < vb->num_planes; ++plane) { |
901 | v4l2_planes[plane].m.userptr = | 901 | v4l2_planes[plane].m.userptr = |
902 | b->m.planes[plane].m.userptr; | 902 | b->m.planes[plane].m.userptr; |
903 | v4l2_planes[plane].length = | 903 | v4l2_planes[plane].length = |
904 | b->m.planes[plane].length; | 904 | b->m.planes[plane].length; |
905 | } | 905 | } |
906 | } | 906 | } |
907 | if (b->memory == V4L2_MEMORY_DMABUF) { | 907 | if (b->memory == V4L2_MEMORY_DMABUF) { |
908 | for (plane = 0; plane < vb->num_planes; ++plane) { | 908 | for (plane = 0; plane < vb->num_planes; ++plane) { |
909 | v4l2_planes[plane].m.fd = | 909 | v4l2_planes[plane].m.fd = |
910 | b->m.planes[plane].m.fd; | 910 | b->m.planes[plane].m.fd; |
911 | v4l2_planes[plane].length = | 911 | v4l2_planes[plane].length = |
912 | b->m.planes[plane].length; | 912 | b->m.planes[plane].length; |
913 | v4l2_planes[plane].data_offset = | 913 | v4l2_planes[plane].data_offset = |
914 | b->m.planes[plane].data_offset; | 914 | b->m.planes[plane].data_offset; |
915 | } | 915 | } |
916 | } | 916 | } |
917 | } else { | 917 | } else { |
918 | /* | 918 | /* |
919 | * Single-planar buffers do not use the planes array, | 919 | * Single-planar buffers do not use the planes array, |
920 | * so fill in the relevant v4l2_buffer struct fields instead. | 920 | * so fill in the relevant v4l2_buffer struct fields instead. |
921 | * In videobuf we use our internal v4l2_plane structs for | 921 | * In videobuf we use our internal v4l2_plane structs for |
922 | * single-planar buffers as well, for simplicity. | 922 | * single-planar buffers as well, for simplicity. |
923 | */ | 923 | */ |
924 | if (V4L2_TYPE_IS_OUTPUT(b->type)) | 924 | if (V4L2_TYPE_IS_OUTPUT(b->type)) |
925 | v4l2_planes[0].bytesused = b->bytesused; | 925 | v4l2_planes[0].bytesused = b->bytesused; |
926 | 926 | ||
927 | if (b->memory == V4L2_MEMORY_USERPTR) { | 927 | if (b->memory == V4L2_MEMORY_USERPTR) { |
928 | v4l2_planes[0].m.userptr = b->m.userptr; | 928 | v4l2_planes[0].m.userptr = b->m.userptr; |
929 | v4l2_planes[0].length = b->length; | 929 | v4l2_planes[0].length = b->length; |
930 | } | 930 | } |
931 | 931 | ||
932 | if (b->memory == V4L2_MEMORY_DMABUF) { | 932 | if (b->memory == V4L2_MEMORY_DMABUF) { |
933 | v4l2_planes[0].m.fd = b->m.fd; | 933 | v4l2_planes[0].m.fd = b->m.fd; |
934 | v4l2_planes[0].length = b->length; | 934 | v4l2_planes[0].length = b->length; |
935 | v4l2_planes[0].data_offset = 0; | 935 | v4l2_planes[0].data_offset = 0; |
936 | } | 936 | } |
937 | 937 | ||
938 | } | 938 | } |
939 | 939 | ||
940 | vb->v4l2_buf.field = b->field; | 940 | vb->v4l2_buf.field = b->field; |
941 | vb->v4l2_buf.timestamp = b->timestamp; | 941 | vb->v4l2_buf.timestamp = b->timestamp; |
942 | vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS; | 942 | vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS; |
943 | } | 943 | } |
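/*
 * Illustrative userspace sketch, not part of this patch: the m.fd field
 * consumed above when queueing an imported DMABUF buffer (single-planar
 * case).  "dmabuf_fd" would come from another subsystem, or from the
 * buffer-export path this commit adds on another video device;
 * qbuf_dmabuf() is a name assumed for the sketch.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int qbuf_dmabuf(int fd, unsigned int index, int dmabuf_fd)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_DMABUF;
	buf.index = index;
	buf.m.fd = dmabuf_fd;
	return ioctl(fd, VIDIOC_QBUF, &buf);
}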
944 | 944 | ||
945 | /** | 945 | /** |
946 | * __qbuf_userptr() - handle qbuf of a USERPTR buffer | 946 | * __qbuf_userptr() - handle qbuf of a USERPTR buffer |
947 | */ | 947 | */ |
948 | static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b) | 948 | static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b) |
949 | { | 949 | { |
950 | struct v4l2_plane planes[VIDEO_MAX_PLANES]; | 950 | struct v4l2_plane planes[VIDEO_MAX_PLANES]; |
951 | struct vb2_queue *q = vb->vb2_queue; | 951 | struct vb2_queue *q = vb->vb2_queue; |
952 | void *mem_priv; | 952 | void *mem_priv; |
953 | unsigned int plane; | 953 | unsigned int plane; |
954 | int ret; | 954 | int ret; |
955 | int write = !V4L2_TYPE_IS_OUTPUT(q->type); | 955 | int write = !V4L2_TYPE_IS_OUTPUT(q->type); |
956 | 956 | ||
957 | /* Copy relevant information provided by the userspace */ | 957 | /* Copy relevant information provided by the userspace */ |
958 | __fill_vb2_buffer(vb, b, planes); | 958 | __fill_vb2_buffer(vb, b, planes); |
959 | 959 | ||
960 | for (plane = 0; plane < vb->num_planes; ++plane) { | 960 | for (plane = 0; plane < vb->num_planes; ++plane) { |
961 | /* Skip the plane if already verified */ | 961 | /* Skip the plane if already verified */ |
962 | if (vb->v4l2_planes[plane].m.userptr && | 962 | if (vb->v4l2_planes[plane].m.userptr && |
963 | vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr | 963 | vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr |
964 | && vb->v4l2_planes[plane].length == planes[plane].length) | 964 | && vb->v4l2_planes[plane].length == planes[plane].length) |
965 | continue; | 965 | continue; |
966 | 966 | ||
967 | dprintk(3, "qbuf: userspace address for plane %d changed, " | 967 | dprintk(3, "qbuf: userspace address for plane %d changed, " |
968 | "reacquiring memory\n", plane); | 968 | "reacquiring memory\n", plane); |
969 | 969 | ||
970 | /* Check if the provided plane buffer is large enough */ | 970 | /* Check if the provided plane buffer is large enough */ |
971 | if (planes[plane].length < q->plane_sizes[plane]) { | 971 | if (planes[plane].length < q->plane_sizes[plane]) { |
972 | ret = -EINVAL; | 972 | ret = -EINVAL; |
973 | goto err; | 973 | goto err; |
974 | } | 974 | } |
975 | 975 | ||
976 | /* Release previously acquired memory if present */ | 976 | /* Release previously acquired memory if present */ |
977 | if (vb->planes[plane].mem_priv) | 977 | if (vb->planes[plane].mem_priv) |
978 | call_memop(q, put_userptr, vb->planes[plane].mem_priv); | 978 | call_memop(q, put_userptr, vb->planes[plane].mem_priv); |
979 | 979 | ||
980 | vb->planes[plane].mem_priv = NULL; | 980 | vb->planes[plane].mem_priv = NULL; |
981 | vb->v4l2_planes[plane].m.userptr = 0; | 981 | vb->v4l2_planes[plane].m.userptr = 0; |
982 | vb->v4l2_planes[plane].length = 0; | 982 | vb->v4l2_planes[plane].length = 0; |
983 | 983 | ||
984 | /* Acquire each plane's memory */ | 984 | /* Acquire each plane's memory */ |
985 | mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane], | 985 | mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane], |
986 | planes[plane].m.userptr, | 986 | planes[plane].m.userptr, |
987 | planes[plane].length, write); | 987 | planes[plane].length, write); |
988 | if (IS_ERR_OR_NULL(mem_priv)) { | 988 | if (IS_ERR_OR_NULL(mem_priv)) { |
989 | dprintk(1, "qbuf: failed acquiring userspace " | 989 | dprintk(1, "qbuf: failed acquiring userspace " |
990 | "memory for plane %d\n", plane); | 990 | "memory for plane %d\n", plane); |
991 | ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL; | 991 | ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL; |
992 | goto err; | 992 | goto err; |
993 | } | 993 | } |
994 | vb->planes[plane].mem_priv = mem_priv; | 994 | vb->planes[plane].mem_priv = mem_priv; |
995 | } | 995 | } |
996 | 996 | ||
997 | /* | 997 | /* |
998 | * Call driver-specific initialization on the newly acquired buffer, | 998 | * Call driver-specific initialization on the newly acquired buffer, |
999 | * if provided. | 999 | * if provided. |
1000 | */ | 1000 | */ |
1001 | ret = call_qop(q, buf_init, vb); | 1001 | ret = call_qop(q, buf_init, vb); |
1002 | if (ret) { | 1002 | if (ret) { |
1003 | dprintk(1, "qbuf: buffer initialization failed\n"); | 1003 | dprintk(1, "qbuf: buffer initialization failed\n"); |
1004 | goto err; | 1004 | goto err; |
1005 | } | 1005 | } |
1006 | 1006 | ||
1007 | /* | 1007 | /* |
1008 | * Now that everything is in order, copy relevant information | 1008 | * Now that everything is in order, copy relevant information |
1009 | * provided by userspace. | 1009 | * provided by userspace. |
1010 | */ | 1010 | */ |
1011 | for (plane = 0; plane < vb->num_planes; ++plane) | 1011 | for (plane = 0; plane < vb->num_planes; ++plane) |
1012 | vb->v4l2_planes[plane] = planes[plane]; | 1012 | vb->v4l2_planes[plane] = planes[plane]; |
1013 | 1013 | ||
1014 | return 0; | 1014 | return 0; |
1015 | err: | 1015 | err: |
1016 | /* In case of errors, release planes that were already acquired */ | 1016 | /* In case of errors, release planes that were already acquired */ |
1017 | for (plane = 0; plane < vb->num_planes; ++plane) { | 1017 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1018 | if (vb->planes[plane].mem_priv) | 1018 | if (vb->planes[plane].mem_priv) |
1019 | call_memop(q, put_userptr, vb->planes[plane].mem_priv); | 1019 | call_memop(q, put_userptr, vb->planes[plane].mem_priv); |
1020 | vb->planes[plane].mem_priv = NULL; | 1020 | vb->planes[plane].mem_priv = NULL; |
1021 | vb->v4l2_planes[plane].m.userptr = 0; | 1021 | vb->v4l2_planes[plane].m.userptr = 0; |
1022 | vb->v4l2_planes[plane].length = 0; | 1022 | vb->v4l2_planes[plane].length = 0; |
1023 | } | 1023 | } |
1024 | 1024 | ||
1025 | return ret; | 1025 | return ret; |
1026 | } | 1026 | } |
1027 | 1027 | ||
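For context, a minimal userspace sketch of the USERPTR queueing that __qbuf_userptr() services; the capture buffer type, the caller-allocated memory and the error handling are illustration-only assumptions, not part of this patch:

/* Hypothetical userspace sketch: queue a user-allocated buffer as USERPTR. */
#include <stddef.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int queue_userptr(int video_fd, void *mem, size_t size, unsigned int index)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_USERPTR;
	buf.index = index;
	buf.m.userptr = (unsigned long)mem;	/* verified/reacquired above */
	buf.length = size;

	/* kernel path: vb2_qbuf() -> __buf_prepare() -> __qbuf_userptr() */
	return ioctl(video_fd, VIDIOC_QBUF, &buf);
}

If the same pointer and length are queued again, the "skip the plane if already verified" branch above avoids re-pinning the pages.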
1028 | /** | 1028 | /** |
1029 | * __qbuf_mmap() - handle qbuf of an MMAP buffer | 1029 | * __qbuf_mmap() - handle qbuf of an MMAP buffer |
1030 | */ | 1030 | */ |
1031 | static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b) | 1031 | static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b) |
1032 | { | 1032 | { |
1033 | __fill_vb2_buffer(vb, b, vb->v4l2_planes); | 1033 | __fill_vb2_buffer(vb, b, vb->v4l2_planes); |
1034 | return 0; | 1034 | return 0; |
1035 | } | 1035 | } |
1036 | 1036 | ||
1037 | /** | 1037 | /** |
1038 | * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer | 1038 | * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer |
1039 | */ | 1039 | */ |
1040 | static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b) | 1040 | static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b) |
1041 | { | 1041 | { |
1042 | struct v4l2_plane planes[VIDEO_MAX_PLANES]; | 1042 | struct v4l2_plane planes[VIDEO_MAX_PLANES]; |
1043 | struct vb2_queue *q = vb->vb2_queue; | 1043 | struct vb2_queue *q = vb->vb2_queue; |
1044 | void *mem_priv; | 1044 | void *mem_priv; |
1045 | unsigned int plane; | 1045 | unsigned int plane; |
1046 | int ret; | 1046 | int ret; |
1047 | int write = !V4L2_TYPE_IS_OUTPUT(q->type); | 1047 | int write = !V4L2_TYPE_IS_OUTPUT(q->type); |
1048 | 1048 | ||
1049 | /* Verify and copy relevant information provided by the userspace */ | 1049 | /* Verify and copy relevant information provided by the userspace */ |
1050 | __fill_vb2_buffer(vb, b, planes); | 1050 | __fill_vb2_buffer(vb, b, planes); |
1051 | 1051 | ||
1052 | for (plane = 0; plane < vb->num_planes; ++plane) { | 1052 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1053 | struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd); | 1053 | struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd); |
1054 | 1054 | ||
1055 | if (IS_ERR_OR_NULL(dbuf)) { | 1055 | if (IS_ERR_OR_NULL(dbuf)) { |
1056 | dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n", | 1056 | dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n", |
1057 | plane); | 1057 | plane); |
1058 | ret = -EINVAL; | 1058 | ret = -EINVAL; |
1059 | goto err; | 1059 | goto err; |
1060 | } | 1060 | } |
1061 | 1061 | ||
1062 | /* use DMABUF size if length is not provided */ | 1062 | /* use DMABUF size if length is not provided */ |
1063 | if (planes[plane].length == 0) | 1063 | if (planes[plane].length == 0) |
1064 | planes[plane].length = dbuf->size; | 1064 | planes[plane].length = dbuf->size; |
1065 | 1065 | ||
1066 | if (planes[plane].length < planes[plane].data_offset + | 1066 | if (planes[plane].length < planes[plane].data_offset + |
1067 | q->plane_sizes[plane]) { | 1067 | q->plane_sizes[plane]) { |
1068 | ret = -EINVAL; | 1068 | ret = -EINVAL; |
1069 | goto err; | 1069 | goto err; |
1070 | } | 1070 | } |
1071 | 1071 | ||
1072 | /* Skip the plane if already verified */ | 1072 | /* Skip the plane if already verified */ |
1073 | if (dbuf == vb->planes[plane].dbuf && | 1073 | if (dbuf == vb->planes[plane].dbuf && |
1074 | vb->v4l2_planes[plane].length == planes[plane].length) { | 1074 | vb->v4l2_planes[plane].length == planes[plane].length) { |
1075 | dma_buf_put(dbuf); | 1075 | dma_buf_put(dbuf); |
1076 | continue; | 1076 | continue; |
1077 | } | 1077 | } |
1078 | 1078 | ||
1079 | dprintk(1, "qbuf: buffer for plane %d changed\n", plane); | 1079 | dprintk(1, "qbuf: buffer for plane %d changed\n", plane); |
1080 | 1080 | ||
1081 | /* Release previously acquired memory if present */ | 1081 | /* Release previously acquired memory if present */ |
1082 | __vb2_plane_dmabuf_put(q, &vb->planes[plane]); | 1082 | __vb2_plane_dmabuf_put(q, &vb->planes[plane]); |
1083 | memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane)); | 1083 | memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane)); |
1084 | 1084 | ||
1085 | /* Acquire each plane's memory */ | 1085 | /* Acquire each plane's memory */ |
1086 | mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane], | 1086 | mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane], |
1087 | dbuf, planes[plane].length, write); | 1087 | dbuf, planes[plane].length, write); |
1088 | if (IS_ERR(mem_priv)) { | 1088 | if (IS_ERR(mem_priv)) { |
1089 | dprintk(1, "qbuf: failed to attach dmabuf\n"); | 1089 | dprintk(1, "qbuf: failed to attach dmabuf\n"); |
1090 | ret = PTR_ERR(mem_priv); | 1090 | ret = PTR_ERR(mem_priv); |
1091 | dma_buf_put(dbuf); | 1091 | dma_buf_put(dbuf); |
1092 | goto err; | 1092 | goto err; |
1093 | } | 1093 | } |
1094 | 1094 | ||
1095 | vb->planes[plane].dbuf = dbuf; | 1095 | vb->planes[plane].dbuf = dbuf; |
1096 | vb->planes[plane].mem_priv = mem_priv; | 1096 | vb->planes[plane].mem_priv = mem_priv; |
1097 | } | 1097 | } |
1098 | 1098 | ||
1099 | /* TODO: This pins the buffer(s) with dma_buf_map_attachment(), but | 1099 | /* TODO: This pins the buffer(s) with dma_buf_map_attachment(), but |
1100 | * really we want to do this just before the DMA, not while queueing | 1100 | * really we want to do this just before the DMA, not while queueing |
1101 | * the buffer(s). | 1101 | * the buffer(s). |
1102 | */ | 1102 | */ |
1103 | for (plane = 0; plane < vb->num_planes; ++plane) { | 1103 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1104 | ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv); | 1104 | ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv); |
1105 | if (ret) { | 1105 | if (ret) { |
1106 | dprintk(1, "qbuf: failed to map dmabuf for plane %d\n", | 1106 | dprintk(1, "qbuf: failed to map dmabuf for plane %d\n", |
1107 | plane); | 1107 | plane); |
1108 | goto err; | 1108 | goto err; |
1109 | } | 1109 | } |
1110 | vb->planes[plane].dbuf_mapped = 1; | 1110 | vb->planes[plane].dbuf_mapped = 1; |
1111 | } | 1111 | } |
1112 | 1112 | ||
1113 | /* | 1113 | /* |
1114 | * Call driver-specific initialization on the newly acquired buffer, | 1114 | * Call driver-specific initialization on the newly acquired buffer, |
1115 | * if provided. | 1115 | * if provided. |
1116 | */ | 1116 | */ |
1117 | ret = call_qop(q, buf_init, vb); | 1117 | ret = call_qop(q, buf_init, vb); |
1118 | if (ret) { | 1118 | if (ret) { |
1119 | dprintk(1, "qbuf: buffer initialization failed\n"); | 1119 | dprintk(1, "qbuf: buffer initialization failed\n"); |
1120 | goto err; | 1120 | goto err; |
1121 | } | 1121 | } |
1122 | 1122 | ||
1123 | /* | 1123 | /* |
1124 | * Now that everything is in order, copy relevant information | 1124 | * Now that everything is in order, copy relevant information |
1125 | * provided by userspace. | 1125 | * provided by userspace. |
1126 | */ | 1126 | */ |
1127 | for (plane = 0; plane < vb->num_planes; ++plane) | 1127 | for (plane = 0; plane < vb->num_planes; ++plane) |
1128 | vb->v4l2_planes[plane] = planes[plane]; | 1128 | vb->v4l2_planes[plane] = planes[plane]; |
1129 | 1129 | ||
1130 | return 0; | 1130 | return 0; |
1131 | err: | 1131 | err: |
1132 | /* In case of errors, release planes that were already acquired */ | 1132 | /* In case of errors, release planes that were already acquired */ |
1133 | __vb2_buf_dmabuf_put(vb); | 1133 | __vb2_buf_dmabuf_put(vb); |
1134 | 1134 | ||
1135 | return ret; | 1135 | return ret; |
1136 | } | 1136 | } |
1137 | 1137 | ||
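A comparable sketch for the DMABUF import path handled above, assuming a single-plane capture queue and a dma-buf fd obtained elsewhere (for example from another device, or via the VIDIOC_EXPBUF ioctl added later in this patch); names and error handling are hypothetical:

/* Hypothetical userspace sketch: queue an imported dma-buf fd (single plane). */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int queue_dmabuf(int video_fd, int dmabuf_fd, unsigned int index)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_DMABUF;
	buf.index = index;
	buf.m.fd = dmabuf_fd;	/* dma_buf_get()/attach_dmabuf run in the kernel */
	/* buf.length left at 0: __qbuf_dmabuf() falls back to the dma-buf size */

	return ioctl(video_fd, VIDIOC_QBUF, &buf);
}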
1138 | /** | 1138 | /** |
1139 | * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing | 1139 | * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing |
1140 | */ | 1140 | */ |
1141 | static void __enqueue_in_driver(struct vb2_buffer *vb) | 1141 | static void __enqueue_in_driver(struct vb2_buffer *vb) |
1142 | { | 1142 | { |
1143 | struct vb2_queue *q = vb->vb2_queue; | 1143 | struct vb2_queue *q = vb->vb2_queue; |
1144 | unsigned int plane; | 1144 | unsigned int plane; |
1145 | 1145 | ||
1146 | vb->state = VB2_BUF_STATE_ACTIVE; | 1146 | vb->state = VB2_BUF_STATE_ACTIVE; |
1147 | atomic_inc(&q->queued_count); | 1147 | atomic_inc(&q->queued_count); |
1148 | 1148 | ||
1149 | /* sync buffers */ | 1149 | /* sync buffers */ |
1150 | for (plane = 0; plane < vb->num_planes; ++plane) | 1150 | for (plane = 0; plane < vb->num_planes; ++plane) |
1151 | call_memop(q, prepare, vb->planes[plane].mem_priv); | 1151 | call_memop(q, prepare, vb->planes[plane].mem_priv); |
1152 | 1152 | ||
1153 | q->ops->buf_queue(vb); | 1153 | q->ops->buf_queue(vb); |
1154 | } | 1154 | } |
1155 | 1155 | ||
1156 | static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b) | 1156 | static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b) |
1157 | { | 1157 | { |
1158 | struct vb2_queue *q = vb->vb2_queue; | 1158 | struct vb2_queue *q = vb->vb2_queue; |
1159 | int ret; | 1159 | int ret; |
1160 | 1160 | ||
1161 | switch (q->memory) { | 1161 | switch (q->memory) { |
1162 | case V4L2_MEMORY_MMAP: | 1162 | case V4L2_MEMORY_MMAP: |
1163 | ret = __qbuf_mmap(vb, b); | 1163 | ret = __qbuf_mmap(vb, b); |
1164 | break; | 1164 | break; |
1165 | case V4L2_MEMORY_USERPTR: | 1165 | case V4L2_MEMORY_USERPTR: |
1166 | ret = __qbuf_userptr(vb, b); | 1166 | ret = __qbuf_userptr(vb, b); |
1167 | break; | 1167 | break; |
1168 | case V4L2_MEMORY_DMABUF: | 1168 | case V4L2_MEMORY_DMABUF: |
1169 | ret = __qbuf_dmabuf(vb, b); | 1169 | ret = __qbuf_dmabuf(vb, b); |
1170 | break; | 1170 | break; |
1171 | default: | 1171 | default: |
1172 | WARN(1, "Invalid queue type\n"); | 1172 | WARN(1, "Invalid queue type\n"); |
1173 | ret = -EINVAL; | 1173 | ret = -EINVAL; |
1174 | } | 1174 | } |
1175 | 1175 | ||
1176 | if (!ret) | 1176 | if (!ret) |
1177 | ret = call_qop(q, buf_prepare, vb); | 1177 | ret = call_qop(q, buf_prepare, vb); |
1178 | if (ret) | 1178 | if (ret) |
1179 | dprintk(1, "qbuf: buffer preparation failed: %d\n", ret); | 1179 | dprintk(1, "qbuf: buffer preparation failed: %d\n", ret); |
1180 | else | 1180 | else |
1181 | vb->state = VB2_BUF_STATE_PREPARED; | 1181 | vb->state = VB2_BUF_STATE_PREPARED; |
1182 | 1182 | ||
1183 | return ret; | 1183 | return ret; |
1184 | } | 1184 | } |
1185 | 1185 | ||
1186 | /** | 1186 | /** |
1187 | * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel | 1187 | * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel |
1188 | * @q: videobuf2 queue | 1188 | * @q: videobuf2 queue |
1189 | * @b: buffer structure passed from userspace to vidioc_prepare_buf | 1189 | * @b: buffer structure passed from userspace to vidioc_prepare_buf |
1190 | * handler in driver | 1190 | * handler in driver |
1191 | * | 1191 | * |
1192 | * Should be called from vidioc_prepare_buf ioctl handler of a driver. | 1192 | * Should be called from vidioc_prepare_buf ioctl handler of a driver. |
1193 | * This function: | 1193 | * This function: |
1194 | * 1) verifies the passed buffer, | 1194 | * 1) verifies the passed buffer, |
1195 | * 2) calls buf_prepare callback in the driver (if provided), in which | 1195 | * 2) calls buf_prepare callback in the driver (if provided), in which |
1196 | * driver-specific buffer initialization can be performed, | 1196 | * driver-specific buffer initialization can be performed, |
1197 | * | 1197 | * |
1198 | * The return values from this function are intended to be directly returned | 1198 | * The return values from this function are intended to be directly returned |
1199 | * from vidioc_prepare_buf handler in driver. | 1199 | * from vidioc_prepare_buf handler in driver. |
1200 | */ | 1200 | */ |
1201 | int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b) | 1201 | int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b) |
1202 | { | 1202 | { |
1203 | struct vb2_buffer *vb; | 1203 | struct vb2_buffer *vb; |
1204 | int ret; | 1204 | int ret; |
1205 | 1205 | ||
1206 | if (q->fileio) { | 1206 | if (q->fileio) { |
1207 | dprintk(1, "%s(): file io in progress\n", __func__); | 1207 | dprintk(1, "%s(): file io in progress\n", __func__); |
1208 | return -EBUSY; | 1208 | return -EBUSY; |
1209 | } | 1209 | } |
1210 | 1210 | ||
1211 | if (b->type != q->type) { | 1211 | if (b->type != q->type) { |
1212 | dprintk(1, "%s(): invalid buffer type\n", __func__); | 1212 | dprintk(1, "%s(): invalid buffer type\n", __func__); |
1213 | return -EINVAL; | 1213 | return -EINVAL; |
1214 | } | 1214 | } |
1215 | 1215 | ||
1216 | if (b->index >= q->num_buffers) { | 1216 | if (b->index >= q->num_buffers) { |
1217 | dprintk(1, "%s(): buffer index out of range\n", __func__); | 1217 | dprintk(1, "%s(): buffer index out of range\n", __func__); |
1218 | return -EINVAL; | 1218 | return -EINVAL; |
1219 | } | 1219 | } |
1220 | 1220 | ||
1221 | vb = q->bufs[b->index]; | 1221 | vb = q->bufs[b->index]; |
1222 | if (NULL == vb) { | 1222 | if (NULL == vb) { |
1223 | /* Should never happen */ | 1223 | /* Should never happen */ |
1224 | dprintk(1, "%s(): buffer is NULL\n", __func__); | 1224 | dprintk(1, "%s(): buffer is NULL\n", __func__); |
1225 | return -EINVAL; | 1225 | return -EINVAL; |
1226 | } | 1226 | } |
1227 | 1227 | ||
1228 | if (b->memory != q->memory) { | 1228 | if (b->memory != q->memory) { |
1229 | dprintk(1, "%s(): invalid memory type\n", __func__); | 1229 | dprintk(1, "%s(): invalid memory type\n", __func__); |
1230 | return -EINVAL; | 1230 | return -EINVAL; |
1231 | } | 1231 | } |
1232 | 1232 | ||
1233 | if (vb->state != VB2_BUF_STATE_DEQUEUED) { | 1233 | if (vb->state != VB2_BUF_STATE_DEQUEUED) { |
1234 | dprintk(1, "%s(): invalid buffer state %d\n", __func__, vb->state); | 1234 | dprintk(1, "%s(): invalid buffer state %d\n", __func__, vb->state); |
1235 | return -EINVAL; | 1235 | return -EINVAL; |
1236 | } | 1236 | } |
1237 | ret = __verify_planes_array(vb, b); | 1237 | ret = __verify_planes_array(vb, b); |
1238 | if (ret < 0) | 1238 | if (ret < 0) |
1239 | return ret; | 1239 | return ret; |
1240 | ret = __buf_prepare(vb, b); | 1240 | ret = __buf_prepare(vb, b); |
1241 | if (ret < 0) | 1241 | if (ret < 0) |
1242 | return ret; | 1242 | return ret; |
1243 | 1243 | ||
1244 | __fill_v4l2_buffer(vb, b); | 1244 | __fill_v4l2_buffer(vb, b); |
1245 | 1245 | ||
1246 | return 0; | 1246 | return 0; |
1247 | } | 1247 | } |
1248 | EXPORT_SYMBOL_GPL(vb2_prepare_buf); | 1248 | EXPORT_SYMBOL_GPL(vb2_prepare_buf); |
1249 | 1249 | ||
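Because the return values are meant to be passed straight through, a driver's vidioc_prepare_buf handler can be a thin wrapper. A sketch, assuming a hypothetical per-device struct my_dev that owns the vb2_queue:

/* Sketch only -- struct my_dev is a hypothetical driver context. */
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <media/v4l2-dev.h>
#include <media/videobuf2-core.h>

struct my_dev {
	struct vb2_queue queue;	/* initialized with vb2_queue_init() */
	struct mutex lock;	/* serializes the ioctl path */
};

static int my_vidioc_prepare_buf(struct file *file, void *priv,
				 struct v4l2_buffer *b)
{
	struct my_dev *dev = video_drvdata(file);

	return vb2_prepare_buf(&dev->queue, b);
}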
1250 | /** | 1250 | /** |
1251 | * vb2_qbuf() - Queue a buffer from userspace | 1251 | * vb2_qbuf() - Queue a buffer from userspace |
1252 | * @q: videobuf2 queue | 1252 | * @q: videobuf2 queue |
1253 | * @b: buffer structure passed from userspace to vidioc_qbuf handler | 1253 | * @b: buffer structure passed from userspace to vidioc_qbuf handler |
1254 | * in driver | 1254 | * in driver |
1255 | * | 1255 | * |
1256 | * Should be called from vidioc_qbuf ioctl handler of a driver. | 1256 | * Should be called from vidioc_qbuf ioctl handler of a driver. |
1257 | * This function: | 1257 | * This function: |
1258 | * 1) verifies the passed buffer, | 1258 | * 1) verifies the passed buffer, |
1259 | * 2) if necessary, calls buf_prepare callback in the driver (if provided), in | 1259 | * 2) if necessary, calls buf_prepare callback in the driver (if provided), in |
1260 | * which driver-specific buffer initialization can be performed, | 1260 | * which driver-specific buffer initialization can be performed, |
1261 | * 3) if streaming is on, queues the buffer in the driver by means of the buf_queue | 1261 | * 3) if streaming is on, queues the buffer in the driver by means of the buf_queue |
1262 | * callback for processing. | 1262 | * callback for processing. |
1263 | * | 1263 | * |
1264 | * The return values from this function are intended to be directly returned | 1264 | * The return values from this function are intended to be directly returned |
1265 | * from vidioc_qbuf handler in driver. | 1265 | * from vidioc_qbuf handler in driver. |
1266 | */ | 1266 | */ |
1267 | int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b) | 1267 | int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b) |
1268 | { | 1268 | { |
1269 | struct rw_semaphore *mmap_sem = NULL; | 1269 | struct rw_semaphore *mmap_sem = NULL; |
1270 | struct vb2_buffer *vb; | 1270 | struct vb2_buffer *vb; |
1271 | int ret = 0; | 1271 | int ret = 0; |
1272 | 1272 | ||
1273 | /* | 1273 | /* |
1274 | * In case of user pointer buffers vb2 allocator needs to get direct | 1274 | * In case of user pointer buffers vb2 allocator needs to get direct |
1275 | * access to userspace pages. This requires getting read access on | 1275 | * access to userspace pages. This requires getting read access on |
1276 | * mmap semaphore in the current process structure. The same | 1276 | * mmap semaphore in the current process structure. The same |
1277 | * semaphore is taken before calling mmap operation, while both mmap | 1277 | * semaphore is taken before calling mmap operation, while both mmap |
1278 | * and qbuf are called by the driver or v4l2 core with driver's lock | 1278 | * and qbuf are called by the driver or v4l2 core with driver's lock |
1279 | * held. To avoid an AB-BA deadlock (mmap_sem then driver's lock in | 1279 | * held. To avoid an AB-BA deadlock (mmap_sem then driver's lock in |
1280 | * mmap and driver's lock then mmap_sem in qbuf) the videobuf2 core | 1280 | * mmap and driver's lock then mmap_sem in qbuf) the videobuf2 core |
1281 | * releases the driver's lock, takes mmap_sem and then takes the | 1281 | * releases the driver's lock, takes mmap_sem and then takes the |
1282 | * driver's lock again. | 1282 | * driver's lock again. |
1283 | * | 1283 | * |
1284 | * To avoid a race with other vb2 calls, which might be made after | 1284 | * To avoid a race with other vb2 calls, which might be made after |
1285 | * releasing the driver's lock, this operation is performed at the | 1285 | * releasing the driver's lock, this operation is performed at the |
1286 | * beginning of qbuf processing. This way the queue status is | 1286 | * beginning of qbuf processing. This way the queue status is |
1287 | * consistent after getting driver's lock back. | 1287 | * consistent after getting driver's lock back. |
1288 | */ | 1288 | */ |
1289 | if (q->memory == V4L2_MEMORY_USERPTR) { | 1289 | if (q->memory == V4L2_MEMORY_USERPTR) { |
1290 | mmap_sem = ¤t->mm->mmap_sem; | 1290 | mmap_sem = ¤t->mm->mmap_sem; |
1291 | call_qop(q, wait_prepare, q); | 1291 | call_qop(q, wait_prepare, q); |
1292 | down_read(mmap_sem); | 1292 | down_read(mmap_sem); |
1293 | call_qop(q, wait_finish, q); | 1293 | call_qop(q, wait_finish, q); |
1294 | } | 1294 | } |
1295 | 1295 | ||
1296 | if (q->fileio) { | 1296 | if (q->fileio) { |
1297 | dprintk(1, "qbuf: file io in progress\n"); | 1297 | dprintk(1, "qbuf: file io in progress\n"); |
1298 | ret = -EBUSY; | 1298 | ret = -EBUSY; |
1299 | goto unlock; | 1299 | goto unlock; |
1300 | } | 1300 | } |
1301 | 1301 | ||
1302 | if (b->type != q->type) { | 1302 | if (b->type != q->type) { |
1303 | dprintk(1, "qbuf: invalid buffer type\n"); | 1303 | dprintk(1, "qbuf: invalid buffer type\n"); |
1304 | ret = -EINVAL; | 1304 | ret = -EINVAL; |
1305 | goto unlock; | 1305 | goto unlock; |
1306 | } | 1306 | } |
1307 | 1307 | ||
1308 | if (b->index >= q->num_buffers) { | 1308 | if (b->index >= q->num_buffers) { |
1309 | dprintk(1, "qbuf: buffer index out of range\n"); | 1309 | dprintk(1, "qbuf: buffer index out of range\n"); |
1310 | ret = -EINVAL; | 1310 | ret = -EINVAL; |
1311 | goto unlock; | 1311 | goto unlock; |
1312 | } | 1312 | } |
1313 | 1313 | ||
1314 | vb = q->bufs[b->index]; | 1314 | vb = q->bufs[b->index]; |
1315 | if (NULL == vb) { | 1315 | if (NULL == vb) { |
1316 | /* Should never happen */ | 1316 | /* Should never happen */ |
1317 | dprintk(1, "qbuf: buffer is NULL\n"); | 1317 | dprintk(1, "qbuf: buffer is NULL\n"); |
1318 | ret = -EINVAL; | 1318 | ret = -EINVAL; |
1319 | goto unlock; | 1319 | goto unlock; |
1320 | } | 1320 | } |
1321 | 1321 | ||
1322 | if (b->memory != q->memory) { | 1322 | if (b->memory != q->memory) { |
1323 | dprintk(1, "qbuf: invalid memory type\n"); | 1323 | dprintk(1, "qbuf: invalid memory type\n"); |
1324 | ret = -EINVAL; | 1324 | ret = -EINVAL; |
1325 | goto unlock; | 1325 | goto unlock; |
1326 | } | 1326 | } |
1327 | ret = __verify_planes_array(vb, b); | 1327 | ret = __verify_planes_array(vb, b); |
1328 | if (ret) | 1328 | if (ret) |
1329 | goto unlock; | 1329 | goto unlock; |
1330 | 1330 | ||
1331 | switch (vb->state) { | 1331 | switch (vb->state) { |
1332 | case VB2_BUF_STATE_DEQUEUED: | 1332 | case VB2_BUF_STATE_DEQUEUED: |
1333 | ret = __buf_prepare(vb, b); | 1333 | ret = __buf_prepare(vb, b); |
1334 | if (ret) | 1334 | if (ret) |
1335 | goto unlock; | 1335 | goto unlock; |
1336 | case VB2_BUF_STATE_PREPARED: | 1336 | case VB2_BUF_STATE_PREPARED: |
1337 | break; | 1337 | break; |
1338 | default: | 1338 | default: |
1339 | dprintk(1, "qbuf: buffer already in use\n"); | 1339 | dprintk(1, "qbuf: buffer already in use\n"); |
1340 | ret = -EINVAL; | 1340 | ret = -EINVAL; |
1341 | goto unlock; | 1341 | goto unlock; |
1342 | } | 1342 | } |
1343 | 1343 | ||
1344 | /* | 1344 | /* |
1345 | * Add to the queued buffers list, a buffer will stay on it until | 1345 | * Add to the queued buffers list, a buffer will stay on it until |
1346 | * dequeued in dqbuf. | 1346 | * dequeued in dqbuf. |
1347 | */ | 1347 | */ |
1348 | list_add_tail(&vb->queued_entry, &q->queued_list); | 1348 | list_add_tail(&vb->queued_entry, &q->queued_list); |
1349 | vb->state = VB2_BUF_STATE_QUEUED; | 1349 | vb->state = VB2_BUF_STATE_QUEUED; |
1350 | 1350 | ||
1351 | /* | 1351 | /* |
1352 | * If already streaming, give the buffer to driver for processing. | 1352 | * If already streaming, give the buffer to driver for processing. |
1353 | * If not, the buffer will be given to driver on next streamon. | 1353 | * If not, the buffer will be given to driver on next streamon. |
1354 | */ | 1354 | */ |
1355 | if (q->streaming) | 1355 | if (q->streaming) |
1356 | __enqueue_in_driver(vb); | 1356 | __enqueue_in_driver(vb); |
1357 | 1357 | ||
1358 | /* Fill buffer information for the userspace */ | 1358 | /* Fill buffer information for the userspace */ |
1359 | __fill_v4l2_buffer(vb, b); | 1359 | __fill_v4l2_buffer(vb, b); |
1360 | 1360 | ||
1361 | dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index); | 1361 | dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index); |
1362 | unlock: | 1362 | unlock: |
1363 | if (mmap_sem) | 1363 | if (mmap_sem) |
1364 | up_read(mmap_sem); | 1364 | up_read(mmap_sem); |
1365 | return ret; | 1365 | return ret; |
1366 | } | 1366 | } |
1367 | EXPORT_SYMBOL_GPL(vb2_qbuf); | 1367 | EXPORT_SYMBOL_GPL(vb2_qbuf); |
1368 | 1368 | ||
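The vidioc_qbuf handler follows the same pattern, reusing the hypothetical struct my_dev from the vb2_prepare_buf sketch above:

/* Sketch only: forward VIDIOC_QBUF to vb2. */
static int my_vidioc_qbuf(struct file *file, void *priv,
			  struct v4l2_buffer *b)
{
	struct my_dev *dev = video_drvdata(file);

	return vb2_qbuf(&dev->queue, b);
}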
1369 | /** | 1369 | /** |
1370 | * __vb2_wait_for_done_vb() - wait for a buffer to become available | 1370 | * __vb2_wait_for_done_vb() - wait for a buffer to become available |
1371 | * for dequeuing | 1371 | * for dequeuing |
1372 | * | 1372 | * |
1373 | * May sleep, but only when nonblocking == false. | 1373 | * May sleep, but only when nonblocking == false. |
1374 | */ | 1374 | */ |
1375 | static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) | 1375 | static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) |
1376 | { | 1376 | { |
1377 | /* | 1377 | /* |
1378 | * All operations on vb_done_list are performed under done_lock | 1378 | * All operations on vb_done_list are performed under done_lock |
1379 | * spinlock protection. However, buffers may be removed from | 1379 | * spinlock protection. However, buffers may be removed from |
1380 | * it and returned to userspace only while holding both driver's | 1380 | * it and returned to userspace only while holding both driver's |
1381 | * lock and the done_lock spinlock. Thus we can be sure that as | 1381 | * lock and the done_lock spinlock. Thus we can be sure that as |
1382 | * long as we hold the driver's lock, the list will stay non-empty | 1382 | * long as we hold the driver's lock, the list will stay non-empty |
1383 | * once a list_empty() check has found it non-empty. | 1383 | * once a list_empty() check has found it non-empty. |
1384 | */ | 1384 | */ |
1385 | 1385 | ||
1386 | for (;;) { | 1386 | for (;;) { |
1387 | int ret; | 1387 | int ret; |
1388 | 1388 | ||
1389 | if (!q->streaming) { | 1389 | if (!q->streaming) { |
1390 | dprintk(1, "Streaming off, will not wait for buffers\n"); | 1390 | dprintk(1, "Streaming off, will not wait for buffers\n"); |
1391 | return -EINVAL; | 1391 | return -EINVAL; |
1392 | } | 1392 | } |
1393 | 1393 | ||
1394 | if (!list_empty(&q->done_list)) { | 1394 | if (!list_empty(&q->done_list)) { |
1395 | /* | 1395 | /* |
1396 | * Found a buffer that we were waiting for. | 1396 | * Found a buffer that we were waiting for. |
1397 | */ | 1397 | */ |
1398 | break; | 1398 | break; |
1399 | } | 1399 | } |
1400 | 1400 | ||
1401 | if (nonblocking) { | 1401 | if (nonblocking) { |
1402 | dprintk(1, "Nonblocking and no buffers to dequeue, " | 1402 | dprintk(1, "Nonblocking and no buffers to dequeue, " |
1403 | "will not wait\n"); | 1403 | "will not wait\n"); |
1404 | return -EAGAIN; | 1404 | return -EAGAIN; |
1405 | } | 1405 | } |
1406 | 1406 | ||
1407 | /* | 1407 | /* |
1408 | * We are streaming and blocking, wait for another buffer to | 1408 | * We are streaming and blocking, wait for another buffer to |
1409 | * become ready or for streamoff. Driver's lock is released to | 1409 | * become ready or for streamoff. Driver's lock is released to |
1410 | * allow streamoff or qbuf to be called while waiting. | 1410 | * allow streamoff or qbuf to be called while waiting. |
1411 | */ | 1411 | */ |
1412 | call_qop(q, wait_prepare, q); | 1412 | call_qop(q, wait_prepare, q); |
1413 | 1413 | ||
1414 | /* | 1414 | /* |
1415 | * All locks have been released, it is safe to sleep now. | 1415 | * All locks have been released, it is safe to sleep now. |
1416 | */ | 1416 | */ |
1417 | dprintk(3, "Will sleep waiting for buffers\n"); | 1417 | dprintk(3, "Will sleep waiting for buffers\n"); |
1418 | ret = wait_event_interruptible(q->done_wq, | 1418 | ret = wait_event_interruptible(q->done_wq, |
1419 | !list_empty(&q->done_list) || !q->streaming); | 1419 | !list_empty(&q->done_list) || !q->streaming); |
1420 | 1420 | ||
1421 | /* | 1421 | /* |
1422 | * We need to reevaluate both conditions after reacquiring | 1422 | * We need to reevaluate both conditions after reacquiring |
1423 | * the locks, or return an error if one occurred. | 1423 | * the locks, or return an error if one occurred. |
1424 | */ | 1424 | */ |
1425 | call_qop(q, wait_finish, q); | 1425 | call_qop(q, wait_finish, q); |
1426 | if (ret) { | 1426 | if (ret) { |
1427 | dprintk(1, "Sleep was interrupted\n"); | 1427 | dprintk(1, "Sleep was interrupted\n"); |
1428 | return ret; | 1428 | return ret; |
1429 | } | 1429 | } |
1430 | } | 1430 | } |
1431 | return 0; | 1431 | return 0; |
1432 | } | 1432 | } |
1433 | 1433 | ||
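The wait_prepare/wait_finish pair invoked around the sleep above is, in most drivers, just an unlock/lock of the mutex that serializes the ioctl path. A sketch under that assumption, reusing the hypothetical struct my_dev from the vb2_prepare_buf sketch above:

/* Sketch only: queue ops released/reacquired while vb2 sleeps. */
static void my_wait_prepare(struct vb2_queue *q)
{
	struct my_dev *dev = vb2_get_drv_priv(q);

	mutex_unlock(&dev->lock);	/* the same lock the ioctl path holds */
}

static void my_wait_finish(struct vb2_queue *q)
{
	struct my_dev *dev = vb2_get_drv_priv(q);

	mutex_lock(&dev->lock);
}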
1434 | /** | 1434 | /** |
1435 | * __vb2_get_done_vb() - get a buffer ready for dequeuing | 1435 | * __vb2_get_done_vb() - get a buffer ready for dequeuing |
1436 | * | 1436 | * |
1437 | * May sleep, but only when nonblocking == false. | 1437 | * May sleep, but only when nonblocking == false. |
1438 | */ | 1438 | */ |
1439 | static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, | 1439 | static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, |
1440 | struct v4l2_buffer *b, int nonblocking) | 1440 | struct v4l2_buffer *b, int nonblocking) |
1441 | { | 1441 | { |
1442 | unsigned long flags; | 1442 | unsigned long flags; |
1443 | int ret; | 1443 | int ret; |
1444 | 1444 | ||
1445 | /* | 1445 | /* |
1446 | * Wait for at least one buffer to become available on the done_list. | 1446 | * Wait for at least one buffer to become available on the done_list. |
1447 | */ | 1447 | */ |
1448 | ret = __vb2_wait_for_done_vb(q, nonblocking); | 1448 | ret = __vb2_wait_for_done_vb(q, nonblocking); |
1449 | if (ret) | 1449 | if (ret) |
1450 | return ret; | 1450 | return ret; |
1451 | 1451 | ||
1452 | /* | 1452 | /* |
1453 | * Driver's lock has been held since we last verified that done_list | 1453 | * Driver's lock has been held since we last verified that done_list |
1454 | * is not empty, so no need for another list_empty(done_list) check. | 1454 | * is not empty, so no need for another list_empty(done_list) check. |
1455 | */ | 1455 | */ |
1456 | spin_lock_irqsave(&q->done_lock, flags); | 1456 | spin_lock_irqsave(&q->done_lock, flags); |
1457 | *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry); | 1457 | *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry); |
1458 | /* | 1458 | /* |
1459 | * Only remove the buffer from done_list if v4l2_buffer can handle all | 1459 | * Only remove the buffer from done_list if v4l2_buffer can handle all |
1460 | * the planes. | 1460 | * the planes. |
1461 | */ | 1461 | */ |
1462 | ret = __verify_planes_array(*vb, b); | 1462 | ret = __verify_planes_array(*vb, b); |
1463 | if (!ret) | 1463 | if (!ret) |
1464 | list_del(&(*vb)->done_entry); | 1464 | list_del(&(*vb)->done_entry); |
1465 | spin_unlock_irqrestore(&q->done_lock, flags); | 1465 | spin_unlock_irqrestore(&q->done_lock, flags); |
1466 | 1466 | ||
1467 | return ret; | 1467 | return ret; |
1468 | } | 1468 | } |
1469 | 1469 | ||
1470 | /** | 1470 | /** |
1471 | * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2 | 1471 | * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2 |
1472 | * @q: videobuf2 queue | 1472 | * @q: videobuf2 queue |
1473 | * | 1473 | * |
1474 | * This function will wait until all buffers that have been given to the driver | 1474 | * This function will wait until all buffers that have been given to the driver |
1475 | * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call | 1475 | * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call |
1476 | * the wait_prepare/wait_finish pair. It is intended to be called with all locks | 1476 | * the wait_prepare/wait_finish pair. It is intended to be called with all locks |
1477 | * taken, for example from stop_streaming() callback. | 1477 | * taken, for example from stop_streaming() callback. |
1478 | */ | 1478 | */ |
1479 | int vb2_wait_for_all_buffers(struct vb2_queue *q) | 1479 | int vb2_wait_for_all_buffers(struct vb2_queue *q) |
1480 | { | 1480 | { |
1481 | if (!q->streaming) { | 1481 | if (!q->streaming) { |
1482 | dprintk(1, "Streaming off, will not wait for buffers\n"); | 1482 | dprintk(1, "Streaming off, will not wait for buffers\n"); |
1483 | return -EINVAL; | 1483 | return -EINVAL; |
1484 | } | 1484 | } |
1485 | 1485 | ||
1486 | wait_event(q->done_wq, !atomic_read(&q->queued_count)); | 1486 | wait_event(q->done_wq, !atomic_read(&q->queued_count)); |
1487 | return 0; | 1487 | return 0; |
1488 | } | 1488 | } |
1489 | EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers); | 1489 | EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers); |
1490 | 1490 | ||
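A typical caller is a stop_streaming callback that halts the hardware and then drains the outstanding buffers. A sketch in which my_hw_stop() stands in for device-specific code; it assumes the int-returning stop_streaming signature of this kernel version and reuses the hypothetical struct my_dev from above:

/* Sketch only: all locks are held here, as the comment above requires. */
static int my_stop_streaming(struct vb2_queue *q)
{
	struct my_dev *dev = vb2_get_drv_priv(q);

	my_hw_stop(dev);	/* hypothetical: abort DMA and hand buffers
				 * back to vb2 with vb2_buffer_done() */
	return vb2_wait_for_all_buffers(q);
}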
1491 | /** | 1491 | /** |
1492 | * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state | 1492 | * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state |
1493 | */ | 1493 | */ |
1494 | static void __vb2_dqbuf(struct vb2_buffer *vb) | 1494 | static void __vb2_dqbuf(struct vb2_buffer *vb) |
1495 | { | 1495 | { |
1496 | struct vb2_queue *q = vb->vb2_queue; | 1496 | struct vb2_queue *q = vb->vb2_queue; |
1497 | unsigned int i; | 1497 | unsigned int i; |
1498 | 1498 | ||
1499 | /* nothing to do if the buffer is already dequeued */ | 1499 | /* nothing to do if the buffer is already dequeued */ |
1500 | if (vb->state == VB2_BUF_STATE_DEQUEUED) | 1500 | if (vb->state == VB2_BUF_STATE_DEQUEUED) |
1501 | return; | 1501 | return; |
1502 | 1502 | ||
1503 | vb->state = VB2_BUF_STATE_DEQUEUED; | 1503 | vb->state = VB2_BUF_STATE_DEQUEUED; |
1504 | 1504 | ||
1505 | /* unmap DMABUF buffer */ | 1505 | /* unmap DMABUF buffer */ |
1506 | if (q->memory == V4L2_MEMORY_DMABUF) | 1506 | if (q->memory == V4L2_MEMORY_DMABUF) |
1507 | for (i = 0; i < vb->num_planes; ++i) { | 1507 | for (i = 0; i < vb->num_planes; ++i) { |
1508 | if (!vb->planes[i].dbuf_mapped) | 1508 | if (!vb->planes[i].dbuf_mapped) |
1509 | continue; | 1509 | continue; |
1510 | call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv); | 1510 | call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv); |
1511 | vb->planes[i].dbuf_mapped = 0; | 1511 | vb->planes[i].dbuf_mapped = 0; |
1512 | } | 1512 | } |
1513 | } | 1513 | } |
1514 | 1514 | ||
1515 | /** | 1515 | /** |
1516 | * vb2_dqbuf() - Dequeue a buffer to the userspace | 1516 | * vb2_dqbuf() - Dequeue a buffer to the userspace |
1517 | * @q: videobuf2 queue | 1517 | * @q: videobuf2 queue |
1518 | * @b: buffer structure passed from userspace to vidioc_dqbuf handler | 1518 | * @b: buffer structure passed from userspace to vidioc_dqbuf handler |
1519 | * in driver | 1519 | * in driver |
1520 | * @nonblocking: if true, this call will not sleep waiting for a buffer if no | 1520 | * @nonblocking: if true, this call will not sleep waiting for a buffer if no |
1521 | * buffers ready for dequeuing are present. Normally the driver | 1521 | * buffers ready for dequeuing are present. Normally the driver |
1522 | * would be passing (file->f_flags & O_NONBLOCK) here | 1522 | * would be passing (file->f_flags & O_NONBLOCK) here |
1523 | * | 1523 | * |
1524 | * Should be called from vidioc_dqbuf ioctl handler of a driver. | 1524 | * Should be called from vidioc_dqbuf ioctl handler of a driver. |
1525 | * This function: | 1525 | * This function: |
1526 | * 1) verifies the passed buffer, | 1526 | * 1) verifies the passed buffer, |
1527 | * 2) calls buf_finish callback in the driver (if provided), in which | 1527 | * 2) calls buf_finish callback in the driver (if provided), in which |
1528 | * driver can perform any additional operations that may be required before | 1528 | * driver can perform any additional operations that may be required before |
1529 | * returning the buffer to userspace, such as cache sync, | 1529 | * returning the buffer to userspace, such as cache sync, |
1530 | * 3) the buffer struct members are filled with relevant information for | 1530 | * 3) the buffer struct members are filled with relevant information for |
1531 | * the userspace. | 1531 | * the userspace. |
1532 | * | 1532 | * |
1533 | * The return values from this function are intended to be directly returned | 1533 | * The return values from this function are intended to be directly returned |
1534 | * from vidioc_dqbuf handler in driver. | 1534 | * from vidioc_dqbuf handler in driver. |
1535 | */ | 1535 | */ |
1536 | int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking) | 1536 | int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking) |
1537 | { | 1537 | { |
1538 | struct vb2_buffer *vb = NULL; | 1538 | struct vb2_buffer *vb = NULL; |
1539 | int ret; | 1539 | int ret; |
1540 | 1540 | ||
1541 | if (q->fileio) { | 1541 | if (q->fileio) { |
1542 | dprintk(1, "dqbuf: file io in progress\n"); | 1542 | dprintk(1, "dqbuf: file io in progress\n"); |
1543 | return -EBUSY; | 1543 | return -EBUSY; |
1544 | } | 1544 | } |
1545 | 1545 | ||
1546 | if (b->type != q->type) { | 1546 | if (b->type != q->type) { |
1547 | dprintk(1, "dqbuf: invalid buffer type\n"); | 1547 | dprintk(1, "dqbuf: invalid buffer type\n"); |
1548 | return -EINVAL; | 1548 | return -EINVAL; |
1549 | } | 1549 | } |
1550 | ret = __vb2_get_done_vb(q, &vb, b, nonblocking); | 1550 | ret = __vb2_get_done_vb(q, &vb, b, nonblocking); |
1551 | if (ret < 0) | 1551 | if (ret < 0) |
1552 | return ret; | 1552 | return ret; |
1553 | 1553 | ||
1554 | ret = call_qop(q, buf_finish, vb); | 1554 | ret = call_qop(q, buf_finish, vb); |
1555 | if (ret) { | 1555 | if (ret) { |
1556 | dprintk(1, "dqbuf: buffer finish failed\n"); | 1556 | dprintk(1, "dqbuf: buffer finish failed\n"); |
1557 | return ret; | 1557 | return ret; |
1558 | } | 1558 | } |
1559 | 1559 | ||
1560 | switch (vb->state) { | 1560 | switch (vb->state) { |
1561 | case VB2_BUF_STATE_DONE: | 1561 | case VB2_BUF_STATE_DONE: |
1562 | dprintk(3, "dqbuf: Returning done buffer\n"); | 1562 | dprintk(3, "dqbuf: Returning done buffer\n"); |
1563 | break; | 1563 | break; |
1564 | case VB2_BUF_STATE_ERROR: | 1564 | case VB2_BUF_STATE_ERROR: |
1565 | dprintk(3, "dqbuf: Returning done buffer with errors\n"); | 1565 | dprintk(3, "dqbuf: Returning done buffer with errors\n"); |
1566 | break; | 1566 | break; |
1567 | default: | 1567 | default: |
1568 | dprintk(1, "dqbuf: Invalid buffer state\n"); | 1568 | dprintk(1, "dqbuf: Invalid buffer state\n"); |
1569 | return -EINVAL; | 1569 | return -EINVAL; |
1570 | } | 1570 | } |
1571 | 1571 | ||
1572 | /* Fill buffer information for the userspace */ | 1572 | /* Fill buffer information for the userspace */ |
1573 | __fill_v4l2_buffer(vb, b); | 1573 | __fill_v4l2_buffer(vb, b); |
1574 | /* Remove from videobuf queue */ | 1574 | /* Remove from videobuf queue */ |
1575 | list_del(&vb->queued_entry); | 1575 | list_del(&vb->queued_entry); |
1576 | /* go back to dequeued state */ | 1576 | /* go back to dequeued state */ |
1577 | __vb2_dqbuf(vb); | 1577 | __vb2_dqbuf(vb); |
1578 | 1578 | ||
1579 | dprintk(1, "dqbuf of buffer %d, with state %d\n", | 1579 | dprintk(1, "dqbuf of buffer %d, with state %d\n", |
1580 | vb->v4l2_buf.index, vb->state); | 1580 | vb->v4l2_buf.index, vb->state); |
1581 | 1581 | ||
1582 | return 0; | 1582 | return 0; |
1583 | } | 1583 | } |
1584 | EXPORT_SYMBOL_GPL(vb2_dqbuf); | 1584 | EXPORT_SYMBOL_GPL(vb2_dqbuf); |
1585 | 1585 | ||
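A driver's vidioc_dqbuf handler would normally forward O_NONBLOCK as the documentation above recommends; a sketch with the same hypothetical struct my_dev:

/* Sketch only: forward VIDIOC_DQBUF to vb2, honouring O_NONBLOCK. */
static int my_vidioc_dqbuf(struct file *file, void *priv,
			   struct v4l2_buffer *b)
{
	struct my_dev *dev = video_drvdata(file);

	return vb2_dqbuf(&dev->queue, b, file->f_flags & O_NONBLOCK);
}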
1586 | /** | 1586 | /** |
1587 | * __vb2_queue_cancel() - cancel and stop (pause) streaming | 1587 | * __vb2_queue_cancel() - cancel and stop (pause) streaming |
1588 | * | 1588 | * |
1589 | * Removes all queued buffers from driver's queue and all buffers queued by | 1589 | * Removes all queued buffers from driver's queue and all buffers queued by |
1590 | * userspace from videobuf's queue. Returns to state after reqbufs. | 1590 | * userspace from videobuf's queue. Returns to state after reqbufs. |
1591 | */ | 1591 | */ |
1592 | static void __vb2_queue_cancel(struct vb2_queue *q) | 1592 | static void __vb2_queue_cancel(struct vb2_queue *q) |
1593 | { | 1593 | { |
1594 | unsigned int i; | 1594 | unsigned int i; |
1595 | 1595 | ||
1596 | /* | 1596 | /* |
1597 | * Tell driver to stop all transactions and release all queued | 1597 | * Tell driver to stop all transactions and release all queued |
1598 | * buffers. | 1598 | * buffers. |
1599 | */ | 1599 | */ |
1600 | if (q->streaming) | 1600 | if (q->streaming) |
1601 | call_qop(q, stop_streaming, q); | 1601 | call_qop(q, stop_streaming, q); |
1602 | q->streaming = 0; | 1602 | q->streaming = 0; |
1603 | 1603 | ||
1604 | /* | 1604 | /* |
1605 | * Remove all buffers from videobuf's list... | 1605 | * Remove all buffers from videobuf's list... |
1606 | */ | 1606 | */ |
1607 | INIT_LIST_HEAD(&q->queued_list); | 1607 | INIT_LIST_HEAD(&q->queued_list); |
1608 | /* | 1608 | /* |
1609 | * ...and done list; userspace will not receive any buffers it | 1609 | * ...and done list; userspace will not receive any buffers it |
1610 | * has not already dequeued before initiating cancel. | 1610 | * has not already dequeued before initiating cancel. |
1611 | */ | 1611 | */ |
1612 | INIT_LIST_HEAD(&q->done_list); | 1612 | INIT_LIST_HEAD(&q->done_list); |
1613 | atomic_set(&q->queued_count, 0); | 1613 | atomic_set(&q->queued_count, 0); |
1614 | wake_up_all(&q->done_wq); | 1614 | wake_up_all(&q->done_wq); |
1615 | 1615 | ||
1616 | /* | 1616 | /* |
1617 | * Reinitialize all buffers for next use. | 1617 | * Reinitialize all buffers for next use. |
1618 | */ | 1618 | */ |
1619 | for (i = 0; i < q->num_buffers; ++i) | 1619 | for (i = 0; i < q->num_buffers; ++i) |
1620 | __vb2_dqbuf(q->bufs[i]); | 1620 | __vb2_dqbuf(q->bufs[i]); |
1621 | } | 1621 | } |
1622 | 1622 | ||
1623 | /** | 1623 | /** |
1624 | * vb2_streamon - start streaming | 1624 | * vb2_streamon - start streaming |
1625 | * @q: videobuf2 queue | 1625 | * @q: videobuf2 queue |
1626 | * @type: type argument passed from userspace to vidioc_streamon handler | 1626 | * @type: type argument passed from userspace to vidioc_streamon handler |
1627 | * | 1627 | * |
1628 | * Should be called from vidioc_streamon handler of a driver. | 1628 | * Should be called from vidioc_streamon handler of a driver. |
1629 | * This function: | 1629 | * This function: |
1630 | * 1) verifies current state | 1630 | * 1) verifies current state |
1631 | * 2) passes any previously queued buffers to the driver and starts streaming | 1631 | * 2) passes any previously queued buffers to the driver and starts streaming |
1632 | * | 1632 | * |
1633 | * The return values from this function are intended to be directly returned | 1633 | * The return values from this function are intended to be directly returned |
1634 | * from vidioc_streamon handler in the driver. | 1634 | * from vidioc_streamon handler in the driver. |
1635 | */ | 1635 | */ |
1636 | int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type) | 1636 | int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type) |
1637 | { | 1637 | { |
1638 | struct vb2_buffer *vb; | 1638 | struct vb2_buffer *vb; |
1639 | int ret; | 1639 | int ret; |
1640 | 1640 | ||
1641 | if (q->fileio) { | 1641 | if (q->fileio) { |
1642 | dprintk(1, "streamon: file io in progress\n"); | 1642 | dprintk(1, "streamon: file io in progress\n"); |
1643 | return -EBUSY; | 1643 | return -EBUSY; |
1644 | } | 1644 | } |
1645 | 1645 | ||
1646 | if (type != q->type) { | 1646 | if (type != q->type) { |
1647 | dprintk(1, "streamon: invalid stream type\n"); | 1647 | dprintk(1, "streamon: invalid stream type\n"); |
1648 | return -EINVAL; | 1648 | return -EINVAL; |
1649 | } | 1649 | } |
1650 | 1650 | ||
1651 | if (q->streaming) { | 1651 | if (q->streaming) { |
1652 | dprintk(1, "streamon: already streaming\n"); | 1652 | dprintk(1, "streamon: already streaming\n"); |
1653 | return -EBUSY; | 1653 | return -EBUSY; |
1654 | } | 1654 | } |
1655 | 1655 | ||
1656 | /* | 1656 | /* |
1657 | * If any buffers were queued before streamon, | 1657 | * If any buffers were queued before streamon, |
1658 | * we can now pass them to driver for processing. | 1658 | * we can now pass them to driver for processing. |
1659 | */ | 1659 | */ |
1660 | list_for_each_entry(vb, &q->queued_list, queued_entry) | 1660 | list_for_each_entry(vb, &q->queued_list, queued_entry) |
1661 | __enqueue_in_driver(vb); | 1661 | __enqueue_in_driver(vb); |
1662 | 1662 | ||
1663 | /* | 1663 | /* |
1664 | * Let driver notice that streaming state has been enabled. | 1664 | * Let driver notice that streaming state has been enabled. |
1665 | */ | 1665 | */ |
1666 | ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count)); | 1666 | ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count)); |
1667 | if (ret) { | 1667 | if (ret) { |
1668 | dprintk(1, "streamon: driver refused to start streaming\n"); | 1668 | dprintk(1, "streamon: driver refused to start streaming\n"); |
1669 | __vb2_queue_cancel(q); | 1669 | __vb2_queue_cancel(q); |
1670 | return ret; | 1670 | return ret; |
1671 | } | 1671 | } |
1672 | 1672 | ||
1673 | q->streaming = 1; | 1673 | q->streaming = 1; |
1674 | 1674 | ||
1675 | dprintk(3, "Streamon successful\n"); | 1675 | dprintk(3, "Streamon successful\n"); |
1676 | return 0; | 1676 | return 0; |
1677 | } | 1677 | } |
1678 | EXPORT_SYMBOL_GPL(vb2_streamon); | 1678 | EXPORT_SYMBOL_GPL(vb2_streamon); |
1679 | 1679 | ||
1680 | 1680 | ||
1681 | /** | 1681 | /** |
1682 | * vb2_streamoff - stop streaming | 1682 | * vb2_streamoff - stop streaming |
1683 | * @q: videobuf2 queue | 1683 | * @q: videobuf2 queue |
1684 | * @type: type argument passed from userspace to vidioc_streamoff handler | 1684 | * @type: type argument passed from userspace to vidioc_streamoff handler |
1685 | * | 1685 | * |
1686 | * Should be called from vidioc_streamoff handler of a driver. | 1686 | * Should be called from vidioc_streamoff handler of a driver. |
1687 | * This function: | 1687 | * This function: |
1688 | * 1) verifies current state, | 1688 | * 1) verifies current state, |
1689 | * 2) stops streaming and dequeues any queued buffers, including those previously | 1689 | * 2) stops streaming and dequeues any queued buffers, including those previously |
1690 | * passed to the driver (after waiting for the driver to finish). | 1690 | * passed to the driver (after waiting for the driver to finish). |
1691 | * | 1691 | * |
1692 | * This call can be used for pausing playback. | 1692 | * This call can be used for pausing playback. |
1693 | * The return values from this function are intended to be directly returned | 1693 | * The return values from this function are intended to be directly returned |
1694 | * from vidioc_streamoff handler in the driver. | 1694 | * from vidioc_streamoff handler in the driver. |
1695 | */ | 1695 | */ |
1696 | int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type) | 1696 | int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type) |
1697 | { | 1697 | { |
1698 | if (q->fileio) { | 1698 | if (q->fileio) { |
1699 | dprintk(1, "streamoff: file io in progress\n"); | 1699 | dprintk(1, "streamoff: file io in progress\n"); |
1700 | return -EBUSY; | 1700 | return -EBUSY; |
1701 | } | 1701 | } |
1702 | 1702 | ||
1703 | if (type != q->type) { | 1703 | if (type != q->type) { |
1704 | dprintk(1, "streamoff: invalid stream type\n"); | 1704 | dprintk(1, "streamoff: invalid stream type\n"); |
1705 | return -EINVAL; | 1705 | return -EINVAL; |
1706 | } | 1706 | } |
1707 | 1707 | ||
1708 | if (!q->streaming) { | 1708 | if (!q->streaming) { |
1709 | dprintk(1, "streamoff: not streaming\n"); | 1709 | dprintk(1, "streamoff: not streaming\n"); |
1710 | return -EINVAL; | 1710 | return -EINVAL; |
1711 | } | 1711 | } |
1712 | 1712 | ||
1713 | /* | 1713 | /* |
1714 | * Cancel will pause streaming and remove all buffers from the driver | 1714 | * Cancel will pause streaming and remove all buffers from the driver |
1715 | * and videobuf, effectively returning control over them to userspace. | 1715 | * and videobuf, effectively returning control over them to userspace. |
1716 | */ | 1716 | */ |
1717 | __vb2_queue_cancel(q); | 1717 | __vb2_queue_cancel(q); |
1718 | 1718 | ||
1719 | dprintk(3, "Streamoff successful\n"); | 1719 | dprintk(3, "Streamoff successful\n"); |
1720 | return 0; | 1720 | return 0; |
1721 | } | 1721 | } |
1722 | EXPORT_SYMBOL_GPL(vb2_streamoff); | 1722 | EXPORT_SYMBOL_GPL(vb2_streamoff); |
1723 | 1723 | ||
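Both stream-control handlers also reduce to forwarding calls in the common case; a combined sketch under the same hypothetical my_dev assumption:

/* Sketch only: forward VIDIOC_STREAMON/STREAMOFF to vb2. */
static int my_vidioc_streamon(struct file *file, void *priv,
			      enum v4l2_buf_type type)
{
	struct my_dev *dev = video_drvdata(file);

	return vb2_streamon(&dev->queue, type);
}

static int my_vidioc_streamoff(struct file *file, void *priv,
			       enum v4l2_buf_type type)
{
	struct my_dev *dev = video_drvdata(file);

	return vb2_streamoff(&dev->queue, type);
}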
1724 | /** | 1724 | /** |
1725 | * __find_plane_by_offset() - find plane associated with the given offset off | 1725 | * __find_plane_by_offset() - find plane associated with the given offset off |
1726 | */ | 1726 | */ |
1727 | static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off, | 1727 | static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off, |
1728 | unsigned int *_buffer, unsigned int *_plane) | 1728 | unsigned int *_buffer, unsigned int *_plane) |
1729 | { | 1729 | { |
1730 | struct vb2_buffer *vb; | 1730 | struct vb2_buffer *vb; |
1731 | unsigned int buffer, plane; | 1731 | unsigned int buffer, plane; |
1732 | 1732 | ||
1733 | /* | 1733 | /* |
1734 | * Go over all buffers and their planes, comparing the given offset | 1734 | * Go over all buffers and their planes, comparing the given offset |
1735 | * with an offset assigned to each plane. If a match is found, | 1735 | * with an offset assigned to each plane. If a match is found, |
1736 | * return its buffer and plane numbers. | 1736 | * return its buffer and plane numbers. |
1737 | */ | 1737 | */ |
1738 | for (buffer = 0; buffer < q->num_buffers; ++buffer) { | 1738 | for (buffer = 0; buffer < q->num_buffers; ++buffer) { |
1739 | vb = q->bufs[buffer]; | 1739 | vb = q->bufs[buffer]; |
1740 | 1740 | ||
1741 | for (plane = 0; plane < vb->num_planes; ++plane) { | 1741 | for (plane = 0; plane < vb->num_planes; ++plane) { |
1742 | if (vb->v4l2_planes[plane].m.mem_offset == off) { | 1742 | if (vb->v4l2_planes[plane].m.mem_offset == off) { |
1743 | *_buffer = buffer; | 1743 | *_buffer = buffer; |
1744 | *_plane = plane; | 1744 | *_plane = plane; |
1745 | return 0; | 1745 | return 0; |
1746 | } | 1746 | } |
1747 | } | 1747 | } |
1748 | } | 1748 | } |
1749 | 1749 | ||
1750 | return -EINVAL; | 1750 | return -EINVAL; |
1751 | } | 1751 | } |
1752 | 1752 | ||
1753 | /** | 1753 | /** |
1754 | * vb2_expbuf() - Export a buffer as a file descriptor | ||
1755 | * @q: videobuf2 queue | ||
1756 | * @eb: export buffer structure passed from userspace to vidioc_expbuf | ||
1757 | * handler in driver | ||
1758 | * | ||
1759 | * The return values from this function are intended to be directly returned | ||
1760 | * from vidioc_expbuf handler in driver. | ||
1761 | */ | ||
1762 | int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb) | ||
1763 | { | ||
1764 | struct vb2_buffer *vb = NULL; | ||
1765 | struct vb2_plane *vb_plane; | ||
1766 | int ret; | ||
1767 | struct dma_buf *dbuf; | ||
1768 | |||
1769 | if (q->memory != V4L2_MEMORY_MMAP) { | ||
1770 | dprintk(1, "Queue is not currently set up for mmap\n"); | ||
1771 | return -EINVAL; | ||
1772 | } | ||
1773 | |||
1774 | if (!q->mem_ops->get_dmabuf) { | ||
1775 | dprintk(1, "Queue does not support DMA buffer exporting\n"); | ||
1776 | return -EINVAL; | ||
1777 | } | ||
1778 | |||
1779 | if (eb->flags & ~O_CLOEXEC) { | ||
1780 | dprintk(1, "Queue supports only the O_CLOEXEC flag\n"); | ||
1781 | return -EINVAL; | ||
1782 | } | ||
1783 | |||
1784 | if (eb->type != q->type) { | ||
1785 | dprintk(1, "expbuf: invalid buffer type\n"); | ||
1786 | return -EINVAL; | ||
1787 | } | ||
1788 | |||
1789 | if (eb->index >= q->num_buffers) { | ||
1790 | dprintk(1, "buffer index out of range\n"); | ||
1791 | return -EINVAL; | ||
1792 | } | ||
1793 | |||
1794 | vb = q->bufs[eb->index]; | ||
1795 | |||
1796 | if (eb->plane >= vb->num_planes) { | ||
1797 | dprintk(1, "buffer plane out of range\n"); | ||
1798 | return -EINVAL; | ||
1799 | } | ||
1800 | |||
1801 | vb_plane = &vb->planes[eb->plane]; | ||
1802 | |||
1803 | dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv); | ||
1804 | if (IS_ERR_OR_NULL(dbuf)) { | ||
1805 | dprintk(1, "Failed to export buffer %d, plane %d\n", | ||
1806 | eb->index, eb->plane); | ||
1807 | return -EINVAL; | ||
1808 | } | ||
1809 | |||
1810 | ret = dma_buf_fd(dbuf, eb->flags); | ||
1811 | if (ret < 0) { | ||
1812 | dprintk(3, "buffer %d, plane %d failed to export (%d)\n", | ||
1813 | eb->index, eb->plane, ret); | ||
1814 | dma_buf_put(dbuf); | ||
1815 | return ret; | ||
1816 | } | ||
1817 | |||
1818 | dprintk(3, "buffer %d, plane %d exported as file descriptor %d\n", | ||
1819 | eb->index, eb->plane, ret); | ||
1820 | eb->fd = ret; | ||
1821 | |||
1822 | return 0; | ||
1823 | } | ||
1824 | EXPORT_SYMBOL_GPL(vb2_expbuf); | ||
1825 | |||
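Since vb2_expbuf() backs the VIDIOC_EXPBUF ioctl this series introduces, a hedged userspace sketch of exporting plane 0 of an MMAP buffer as a dma-buf fd (the capture type, single-plane layout and error handling are assumptions):

/* Hypothetical userspace sketch: export an MMAP buffer as a dma-buf fd. */
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int export_buffer(int video_fd, unsigned int index)
{
	struct v4l2_exportbuffer expbuf;

	memset(&expbuf, 0, sizeof(expbuf));
	expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	expbuf.index = index;
	expbuf.plane = 0;
	expbuf.flags = O_CLOEXEC;	/* the only flag vb2_expbuf() accepts */

	if (ioctl(video_fd, VIDIOC_EXPBUF, &expbuf) < 0)
		return -1;

	/* the fd can be queued on another device with V4L2_MEMORY_DMABUF */
	return expbuf.fd;
}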
1826 | /** | ||
1754 | * vb2_mmap() - map video buffers into application address space | 1827 | * vb2_mmap() - map video buffers into application address space |
1755 | * @q: videobuf2 queue | 1828 | * @q: videobuf2 queue |
1756 | * @vma: vma passed to the mmap file operation handler in the driver | 1829 | * @vma: vma passed to the mmap file operation handler in the driver |
1757 | * | 1830 | * |
1758 | * Should be called from mmap file operation handler of a driver. | 1831 | * Should be called from mmap file operation handler of a driver. |
1759 | * This function maps one plane of one of the available video buffers to | 1832 | * This function maps one plane of one of the available video buffers to |
1760 | * userspace. To map whole video memory allocated on reqbufs, this function | 1833 | * userspace. To map whole video memory allocated on reqbufs, this function |
1761 | * has to be called once per each plane per each buffer previously allocated. | 1834 | * has to be called once per each plane per each buffer previously allocated. |
1762 | * | 1835 | * |
1763 | * When the userspace application calls mmap, it passes the offset returned | 1836 | * When the userspace application calls mmap, it passes the offset returned |
1764 | * to it earlier by means of the vidioc_querybuf handler. That offset acts as | 1837 | * to it earlier by means of the vidioc_querybuf handler. That offset acts as |
1765 | * a "cookie", which is then used to identify the plane to be mapped. | 1838 | * a "cookie", which is then used to identify the plane to be mapped. |
1766 | * This function finds a plane with a matching offset, and the mapping is | 1839 | * This function finds a plane with a matching offset, and the mapping is |
1767 | * performed by means of the provided memory operation. | 1840 | * performed by means of the provided memory operation. |
1768 | * | 1841 | * |
1769 | * The return values from this function are intended to be directly returned | 1842 | * The return values from this function are intended to be directly returned |
1770 | * from the mmap handler in driver. | 1843 | * from the mmap handler in driver. |
1771 | */ | 1844 | */ |
1772 | int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) | 1845 | int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) |
1773 | { | 1846 | { |
1774 | unsigned long off = vma->vm_pgoff << PAGE_SHIFT; | 1847 | unsigned long off = vma->vm_pgoff << PAGE_SHIFT; |
1775 | struct vb2_buffer *vb; | 1848 | struct vb2_buffer *vb; |
1776 | unsigned int buffer, plane; | 1849 | unsigned int buffer, plane; |
1777 | int ret; | 1850 | int ret; |
1778 | 1851 | ||
1779 | if (q->memory != V4L2_MEMORY_MMAP) { | 1852 | if (q->memory != V4L2_MEMORY_MMAP) { |
1780 | dprintk(1, "Queue is not currently set up for mmap\n"); | 1853 | dprintk(1, "Queue is not currently set up for mmap\n"); |
1781 | return -EINVAL; | 1854 | return -EINVAL; |
1782 | } | 1855 | } |
1783 | 1856 | ||
1784 | /* | 1857 | /* |
1785 | * Check memory area access mode. | 1858 | * Check memory area access mode. |
1786 | */ | 1859 | */ |
1787 | if (!(vma->vm_flags & VM_SHARED)) { | 1860 | if (!(vma->vm_flags & VM_SHARED)) { |
1788 | dprintk(1, "Invalid vma flags, VM_SHARED needed\n"); | 1861 | dprintk(1, "Invalid vma flags, VM_SHARED needed\n"); |
1789 | return -EINVAL; | 1862 | return -EINVAL; |
1790 | } | 1863 | } |
1791 | if (V4L2_TYPE_IS_OUTPUT(q->type)) { | 1864 | if (V4L2_TYPE_IS_OUTPUT(q->type)) { |
1792 | if (!(vma->vm_flags & VM_WRITE)) { | 1865 | if (!(vma->vm_flags & VM_WRITE)) { |
1793 | dprintk(1, "Invalid vma flags, VM_WRITE needed\n"); | 1866 | dprintk(1, "Invalid vma flags, VM_WRITE needed\n"); |
1794 | return -EINVAL; | 1867 | return -EINVAL; |
1795 | } | 1868 | } |
1796 | } else { | 1869 | } else { |
1797 | if (!(vma->vm_flags & VM_READ)) { | 1870 | if (!(vma->vm_flags & VM_READ)) { |
1798 | dprintk(1, "Invalid vma flags, VM_READ needed\n"); | 1871 | dprintk(1, "Invalid vma flags, VM_READ needed\n"); |
1799 | return -EINVAL; | 1872 | return -EINVAL; |
1800 | } | 1873 | } |
1801 | } | 1874 | } |
1802 | 1875 | ||
1803 | /* | 1876 | /* |
1804 | * Find the plane corresponding to the offset passed by userspace. | 1877 | * Find the plane corresponding to the offset passed by userspace. |
1805 | */ | 1878 | */ |
1806 | ret = __find_plane_by_offset(q, off, &buffer, &plane); | 1879 | ret = __find_plane_by_offset(q, off, &buffer, &plane); |
1807 | if (ret) | 1880 | if (ret) |
1808 | return ret; | 1881 | return ret; |
1809 | 1882 | ||
1810 | vb = q->bufs[buffer]; | 1883 | vb = q->bufs[buffer]; |
1811 | 1884 | ||
1812 | ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma); | 1885 | ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma); |
1813 | if (ret) | 1886 | if (ret) |
1814 | return ret; | 1887 | return ret; |
1815 | 1888 | ||
1816 | dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane); | 1889 | dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane); |
1817 | return 0; | 1890 | return 0; |
1818 | } | 1891 | } |
1819 | EXPORT_SYMBOL_GPL(vb2_mmap); | 1892 | EXPORT_SYMBOL_GPL(vb2_mmap); |
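
In a driver, the whole mmap file operation then reduces to a single call. A minimal sketch, assuming a hypothetical my_dev structure that embeds the vb2_queue (none of these names are part of this patch):

#include <media/v4l2-dev.h>
#include <media/videobuf2-core.h>

struct my_dev {
	struct vb2_queue queue;
	/* ... other per-device state ... */
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = video_drvdata(file);

	/* vb2 validates the offset "cookie" and the VM_SHARED/READ/WRITE
	 * flags before calling the allocator's mmap memop. */
	return vb2_mmap(&dev->queue, vma);
}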
1820 | 1893 | ||
1821 | #ifndef CONFIG_MMU | 1894 | #ifndef CONFIG_MMU |
1822 | unsigned long vb2_get_unmapped_area(struct vb2_queue *q, | 1895 | unsigned long vb2_get_unmapped_area(struct vb2_queue *q, |
1823 | unsigned long addr, | 1896 | unsigned long addr, |
1824 | unsigned long len, | 1897 | unsigned long len, |
1825 | unsigned long pgoff, | 1898 | unsigned long pgoff, |
1826 | unsigned long flags) | 1899 | unsigned long flags) |
1827 | { | 1900 | { |
1828 | unsigned long off = pgoff << PAGE_SHIFT; | 1901 | unsigned long off = pgoff << PAGE_SHIFT; |
1829 | struct vb2_buffer *vb; | 1902 | struct vb2_buffer *vb; |
1830 | unsigned int buffer, plane; | 1903 | unsigned int buffer, plane; |
1831 | int ret; | 1904 | int ret; |
1832 | 1905 | ||
1833 | if (q->memory != V4L2_MEMORY_MMAP) { | 1906 | if (q->memory != V4L2_MEMORY_MMAP) { |
1834 | dprintk(1, "Queue is not currently set up for mmap\n"); | 1907 | dprintk(1, "Queue is not currently set up for mmap\n"); |
1835 | return -EINVAL; | 1908 | return -EINVAL; |
1836 | } | 1909 | } |
1837 | 1910 | ||
1838 | /* | 1911 | /* |
1839 | * Find the plane corresponding to the offset passed by userspace. | 1912 | * Find the plane corresponding to the offset passed by userspace. |
1840 | */ | 1913 | */ |
1841 | ret = __find_plane_by_offset(q, off, &buffer, &plane); | 1914 | ret = __find_plane_by_offset(q, off, &buffer, &plane); |
1842 | if (ret) | 1915 | if (ret) |
1843 | return ret; | 1916 | return ret; |
1844 | 1917 | ||
1845 | vb = q->bufs[buffer]; | 1918 | vb = q->bufs[buffer]; |
1846 | 1919 | ||
1847 | return (unsigned long)vb2_plane_vaddr(vb, plane); | 1920 | return (unsigned long)vb2_plane_vaddr(vb, plane); |
1848 | } | 1921 | } |
1849 | EXPORT_SYMBOL_GPL(vb2_get_unmapped_area); | 1922 | EXPORT_SYMBOL_GPL(vb2_get_unmapped_area); |
1850 | #endif | 1923 | #endif |
1851 | 1924 | ||
1852 | static int __vb2_init_fileio(struct vb2_queue *q, int read); | 1925 | static int __vb2_init_fileio(struct vb2_queue *q, int read); |
1853 | static int __vb2_cleanup_fileio(struct vb2_queue *q); | 1926 | static int __vb2_cleanup_fileio(struct vb2_queue *q); |
1854 | 1927 | ||
1855 | /** | 1928 | /** |
1856 | * vb2_poll() - implements poll userspace operation | 1929 | * vb2_poll() - implements poll userspace operation |
1857 | * @q: videobuf2 queue | 1930 | * @q: videobuf2 queue |
1858 | * @file: file argument passed to the poll file operation handler | 1931 | * @file: file argument passed to the poll file operation handler |
1859 | * @wait: wait argument passed to the poll file operation handler | 1932 | * @wait: wait argument passed to the poll file operation handler |
1860 | * | 1933 | * |
1861 | * This function implements poll file operation handler for a driver. | 1934 | * This function implements poll file operation handler for a driver. |
1862 | * For CAPTURE queues, if a buffer is ready to be dequeued, userspace will | 1935 | * For CAPTURE queues, if a buffer is ready to be dequeued, userspace will |
1863 | * be informed that the file descriptor of a video device is available for | 1936 | * be informed that the file descriptor of a video device is available for |
1864 | * reading. | 1937 | * reading. |
1865 | * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor | 1938 | * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor |
1866 | * will be reported as available for writing. | 1939 | * will be reported as available for writing. |
1867 | * | 1940 | * |
1868 | * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any | 1941 | * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any |
1869 | * pending events. | 1942 | * pending events. |
1870 | * | 1943 | * |
1871 | * The return values from this function are intended to be directly returned | 1944 | * The return values from this function are intended to be directly returned |
1872 | * from the poll handler in the driver. | 1945 | * from the poll handler in the driver. |
1873 | */ | 1946 | */ |
1874 | unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) | 1947 | unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) |
1875 | { | 1948 | { |
1876 | struct video_device *vfd = video_devdata(file); | 1949 | struct video_device *vfd = video_devdata(file); |
1877 | unsigned long req_events = poll_requested_events(wait); | 1950 | unsigned long req_events = poll_requested_events(wait); |
1878 | struct vb2_buffer *vb = NULL; | 1951 | struct vb2_buffer *vb = NULL; |
1879 | unsigned int res = 0; | 1952 | unsigned int res = 0; |
1880 | unsigned long flags; | 1953 | unsigned long flags; |
1881 | 1954 | ||
1882 | if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { | 1955 | if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { |
1883 | struct v4l2_fh *fh = file->private_data; | 1956 | struct v4l2_fh *fh = file->private_data; |
1884 | 1957 | ||
1885 | if (v4l2_event_pending(fh)) | 1958 | if (v4l2_event_pending(fh)) |
1886 | res = POLLPRI; | 1959 | res = POLLPRI; |
1887 | else if (req_events & POLLPRI) | 1960 | else if (req_events & POLLPRI) |
1888 | poll_wait(file, &fh->wait, wait); | 1961 | poll_wait(file, &fh->wait, wait); |
1889 | } | 1962 | } |
1890 | 1963 | ||
1891 | /* | 1964 | /* |
1892 | * Start file I/O emulator only if streaming API has not been used yet. | 1965 | * Start file I/O emulator only if streaming API has not been used yet. |
1893 | */ | 1966 | */ |
1894 | if (q->num_buffers == 0 && q->fileio == NULL) { | 1967 | if (q->num_buffers == 0 && q->fileio == NULL) { |
1895 | if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) && | 1968 | if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) && |
1896 | (req_events & (POLLIN | POLLRDNORM))) { | 1969 | (req_events & (POLLIN | POLLRDNORM))) { |
1897 | if (__vb2_init_fileio(q, 1)) | 1970 | if (__vb2_init_fileio(q, 1)) |
1898 | return res | POLLERR; | 1971 | return res | POLLERR; |
1899 | } | 1972 | } |
1900 | if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) && | 1973 | if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) && |
1901 | (req_events & (POLLOUT | POLLWRNORM))) { | 1974 | (req_events & (POLLOUT | POLLWRNORM))) { |
1902 | if (__vb2_init_fileio(q, 0)) | 1975 | if (__vb2_init_fileio(q, 0)) |
1903 | return res | POLLERR; | 1976 | return res | POLLERR; |
1904 | /* | 1977 | /* |
1905 | * A write to an OUTPUT queue can be done immediately. | 1978 | * A write to an OUTPUT queue can be done immediately. |
1906 | */ | 1979 | */ |
1907 | return res | POLLOUT | POLLWRNORM; | 1980 | return res | POLLOUT | POLLWRNORM; |
1908 | } | 1981 | } |
1909 | } | 1982 | } |
1910 | 1983 | ||
1911 | /* | 1984 | /* |
1912 | * There is nothing to wait for if no buffers have been queued yet. | 1985 | * There is nothing to wait for if no buffers have been queued yet. |
1913 | */ | 1986 | */ |
1914 | if (list_empty(&q->queued_list)) | 1987 | if (list_empty(&q->queued_list)) |
1915 | return res | POLLERR; | 1988 | return res | POLLERR; |
1916 | 1989 | ||
1917 | poll_wait(file, &q->done_wq, wait); | 1990 | poll_wait(file, &q->done_wq, wait); |
1918 | 1991 | ||
1919 | /* | 1992 | /* |
1920 | * Take first buffer available for dequeuing. | 1993 | * Take first buffer available for dequeuing. |
1921 | */ | 1994 | */ |
1922 | spin_lock_irqsave(&q->done_lock, flags); | 1995 | spin_lock_irqsave(&q->done_lock, flags); |
1923 | if (!list_empty(&q->done_list)) | 1996 | if (!list_empty(&q->done_list)) |
1924 | vb = list_first_entry(&q->done_list, struct vb2_buffer, | 1997 | vb = list_first_entry(&q->done_list, struct vb2_buffer, |
1925 | done_entry); | 1998 | done_entry); |
1926 | spin_unlock_irqrestore(&q->done_lock, flags); | 1999 | spin_unlock_irqrestore(&q->done_lock, flags); |
1927 | 2000 | ||
1928 | if (vb && (vb->state == VB2_BUF_STATE_DONE | 2001 | if (vb && (vb->state == VB2_BUF_STATE_DONE |
1929 | || vb->state == VB2_BUF_STATE_ERROR)) { | 2002 | || vb->state == VB2_BUF_STATE_ERROR)) { |
1930 | return (V4L2_TYPE_IS_OUTPUT(q->type)) ? | 2003 | return (V4L2_TYPE_IS_OUTPUT(q->type)) ? |
1931 | res | POLLOUT | POLLWRNORM : | 2004 | res | POLLOUT | POLLWRNORM : |
1932 | res | POLLIN | POLLRDNORM; | 2005 | res | POLLIN | POLLRDNORM; |
1933 | } | 2006 | } |
1934 | return res; | 2007 | return res; |
1935 | } | 2008 | } |
1936 | EXPORT_SYMBOL_GPL(vb2_poll); | 2009 | EXPORT_SYMBOL_GPL(vb2_poll); |
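
A driver that cannot use the vb2_fop_poll() helper further down (for example because it needs custom locking) may call vb2_poll() directly. A sketch, reusing the hypothetical my_dev from above:

static unsigned int my_poll(struct file *file, poll_table *wait)
{
	struct my_dev *dev = video_drvdata(file);

	/* vb2_poll() also reports pending events as POLLPRI when the
	 * driver uses struct v4l2_fh. */
	return vb2_poll(&dev->queue, file, wait);
}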
1937 | 2010 | ||
1938 | /** | 2011 | /** |
1939 | * vb2_queue_init() - initialize a videobuf2 queue | 2012 | * vb2_queue_init() - initialize a videobuf2 queue |
1940 | * @q: videobuf2 queue; this structure should be allocated in driver | 2013 | * @q: videobuf2 queue; this structure should be allocated in driver |
1941 | * | 2014 | * |
1942 | * The vb2_queue structure should be allocated by the driver. The driver is | 2015 | * The vb2_queue structure should be allocated by the driver. The driver is |
1943 | * responsible for clearing its contents and setting initial values for some | 2016 | * responsible for clearing its contents and setting initial values for some |
1944 | * required entries before calling this function. | 2017 | * required entries before calling this function. |
1945 | * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer | 2018 | * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer |
1946 | * to the struct vb2_queue description in include/media/videobuf2-core.h | 2019 | * to the struct vb2_queue description in include/media/videobuf2-core.h |
1947 | * for more information. | 2020 | * for more information. |
1948 | */ | 2021 | */ |
1949 | int vb2_queue_init(struct vb2_queue *q) | 2022 | int vb2_queue_init(struct vb2_queue *q) |
1950 | { | 2023 | { |
1951 | /* | 2024 | /* |
1952 | * Sanity check | 2025 | * Sanity check |
1953 | */ | 2026 | */ |
1954 | if (WARN_ON(!q) || | 2027 | if (WARN_ON(!q) || |
1955 | WARN_ON(!q->ops) || | 2028 | WARN_ON(!q->ops) || |
1956 | WARN_ON(!q->mem_ops) || | 2029 | WARN_ON(!q->mem_ops) || |
1957 | WARN_ON(!q->type) || | 2030 | WARN_ON(!q->type) || |
1958 | WARN_ON(!q->io_modes) || | 2031 | WARN_ON(!q->io_modes) || |
1959 | WARN_ON(!q->ops->queue_setup) || | 2032 | WARN_ON(!q->ops->queue_setup) || |
1960 | WARN_ON(!q->ops->buf_queue)) | 2033 | WARN_ON(!q->ops->buf_queue)) |
1961 | return -EINVAL; | 2034 | return -EINVAL; |
1962 | 2035 | ||
1963 | INIT_LIST_HEAD(&q->queued_list); | 2036 | INIT_LIST_HEAD(&q->queued_list); |
1964 | INIT_LIST_HEAD(&q->done_list); | 2037 | INIT_LIST_HEAD(&q->done_list); |
1965 | spin_lock_init(&q->done_lock); | 2038 | spin_lock_init(&q->done_lock); |
1966 | init_waitqueue_head(&q->done_wq); | 2039 | init_waitqueue_head(&q->done_wq); |
1967 | 2040 | ||
1968 | if (q->buf_struct_size == 0) | 2041 | if (q->buf_struct_size == 0) |
1969 | q->buf_struct_size = sizeof(struct vb2_buffer); | 2042 | q->buf_struct_size = sizeof(struct vb2_buffer); |
1970 | 2043 | ||
1971 | return 0; | 2044 | return 0; |
1972 | } | 2045 | } |
1973 | EXPORT_SYMBOL_GPL(vb2_queue_init); | 2046 | EXPORT_SYMBOL_GPL(vb2_queue_init); |
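
A sketch of the setup a driver performs before the call, using the vmalloc allocator as an example; my_vb2_ops and struct my_buffer are hypothetical driver-side definitions:

#include <media/videobuf2-vmalloc.h>

static int my_init_queue(struct my_dev *dev)
{
	struct vb2_queue *q = &dev->queue;

	memset(q, 0, sizeof(*q));		/* the driver must clear the struct */
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_READ;	/* allowed I/O methods */
	q->drv_priv = dev;
	q->ops = &my_vb2_ops;			/* must set queue_setup/buf_queue */
	q->mem_ops = &vb2_vmalloc_memops;	/* or dma-contig, dma-sg, ... */
	q->buf_struct_size = sizeof(struct my_buffer);

	return vb2_queue_init(q);
}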
1974 | 2047 | ||
1975 | /** | 2048 | /** |
1976 | * vb2_queue_release() - stop streaming, release the queue and free memory | 2049 | * vb2_queue_release() - stop streaming, release the queue and free memory |
1977 | * @q: videobuf2 queue | 2050 | * @q: videobuf2 queue |
1978 | * | 2051 | * |
1979 | * This function stops streaming and performs the necessary cleanup, including | 2052 | * This function stops streaming and performs the necessary cleanup, including |
1980 | * freeing video buffer memory. The driver is responsible for freeing | 2053 | * freeing video buffer memory. The driver is responsible for freeing |
1981 | * the vb2_queue structure itself. | 2054 | * the vb2_queue structure itself. |
1982 | */ | 2055 | */ |
1983 | void vb2_queue_release(struct vb2_queue *q) | 2056 | void vb2_queue_release(struct vb2_queue *q) |
1984 | { | 2057 | { |
1985 | __vb2_cleanup_fileio(q); | 2058 | __vb2_cleanup_fileio(q); |
1986 | __vb2_queue_cancel(q); | 2059 | __vb2_queue_cancel(q); |
1987 | __vb2_queue_free(q, q->num_buffers); | 2060 | __vb2_queue_free(q, q->num_buffers); |
1988 | } | 2061 | } |
1989 | EXPORT_SYMBOL_GPL(vb2_queue_release); | 2062 | EXPORT_SYMBOL_GPL(vb2_queue_release); |
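
A sketch of a release handler built on it, leaving out the owner check that the vb2_fop_release() helper further down performs:

static int my_release(struct file *file)
{
	struct my_dev *dev = video_drvdata(file);

	/* Stops streaming and frees all buffer memory; the vb2_queue is
	 * embedded in my_dev, so there is nothing else to free here. */
	vb2_queue_release(&dev->queue);
	return v4l2_fh_release(file);
}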
1990 | 2063 | ||
1991 | /** | 2064 | /** |
1992 | * struct vb2_fileio_buf - buffer context used by file io emulator | 2065 | * struct vb2_fileio_buf - buffer context used by file io emulator |
1993 | * | 2066 | * |
1994 | * vb2 provides a compatibility layer and emulator of file io (read and | 2067 | * vb2 provides a compatibility layer and emulator of file io (read and |
1995 | * write) calls on top of the streaming API. This structure is used for | 2068 | * write) calls on top of the streaming API. This structure is used for |
1996 | * tracking context related to the buffers. | 2069 | * tracking context related to the buffers. |
1997 | */ | 2070 | */ |
1998 | struct vb2_fileio_buf { | 2071 | struct vb2_fileio_buf { |
1999 | void *vaddr; | 2072 | void *vaddr; |
2000 | unsigned int size; | 2073 | unsigned int size; |
2001 | unsigned int pos; | 2074 | unsigned int pos; |
2002 | unsigned int queued:1; | 2075 | unsigned int queued:1; |
2003 | }; | 2076 | }; |
2004 | 2077 | ||
2005 | /** | 2078 | /** |
2006 | * struct vb2_fileio_data - queue context used by file io emulator | 2079 | * struct vb2_fileio_data - queue context used by file io emulator |
2007 | * | 2080 | * |
2008 | * vb2 provides a compatibility layer and emulator of file io (read and | 2081 | * vb2 provides a compatibility layer and emulator of file io (read and |
2009 | * write) calls on top of the streaming API. For proper operation it requires | 2082 | * write) calls on top of the streaming API. For proper operation it requires |
2010 | * this structure to save the driver state between calls to the read | 2083 | * this structure to save the driver state between calls to the read |
2011 | * or write function. | 2084 | * or write function. |
2012 | */ | 2085 | */ |
2013 | struct vb2_fileio_data { | 2086 | struct vb2_fileio_data { |
2014 | struct v4l2_requestbuffers req; | 2087 | struct v4l2_requestbuffers req; |
2015 | struct v4l2_buffer b; | 2088 | struct v4l2_buffer b; |
2016 | struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME]; | 2089 | struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME]; |
2017 | unsigned int index; | 2090 | unsigned int index; |
2018 | unsigned int q_count; | 2091 | unsigned int q_count; |
2019 | unsigned int dq_count; | 2092 | unsigned int dq_count; |
2020 | unsigned int flags; | 2093 | unsigned int flags; |
2021 | }; | 2094 | }; |
2022 | 2095 | ||
2023 | /** | 2096 | /** |
2024 | * __vb2_init_fileio() - initialize file io emulator | 2097 | * __vb2_init_fileio() - initialize file io emulator |
2025 | * @q: videobuf2 queue | 2098 | * @q: videobuf2 queue |
2026 | * @read: mode selector (1 means read, 0 means write) | 2099 | * @read: mode selector (1 means read, 0 means write) |
2027 | */ | 2100 | */ |
2028 | static int __vb2_init_fileio(struct vb2_queue *q, int read) | 2101 | static int __vb2_init_fileio(struct vb2_queue *q, int read) |
2029 | { | 2102 | { |
2030 | struct vb2_fileio_data *fileio; | 2103 | struct vb2_fileio_data *fileio; |
2031 | int i, ret; | 2104 | int i, ret; |
2032 | unsigned int count = 0; | 2105 | unsigned int count = 0; |
2033 | 2106 | ||
2034 | /* | 2107 | /* |
2035 | * Sanity check | 2108 | * Sanity check |
2036 | */ | 2109 | */ |
2037 | if ((read && !(q->io_modes & VB2_READ)) || | 2110 | if ((read && !(q->io_modes & VB2_READ)) || |
2038 | (!read && !(q->io_modes & VB2_WRITE))) | 2111 | (!read && !(q->io_modes & VB2_WRITE))) |
2039 | BUG(); | 2112 | BUG(); |
2040 | 2113 | ||
2041 | /* | 2114 | /* |
2042 | * Check if device supports mapping buffers to kernel virtual space. | 2115 | * Check if device supports mapping buffers to kernel virtual space. |
2043 | */ | 2116 | */ |
2044 | if (!q->mem_ops->vaddr) | 2117 | if (!q->mem_ops->vaddr) |
2045 | return -EBUSY; | 2118 | return -EBUSY; |
2046 | 2119 | ||
2047 | /* | 2120 | /* |
2048 | * Check that the streaming API has not already been activated. | 2121 | * Check that the streaming API has not already been activated. |
2049 | */ | 2122 | */ |
2050 | if (q->streaming || q->num_buffers > 0) | 2123 | if (q->streaming || q->num_buffers > 0) |
2051 | return -EBUSY; | 2124 | return -EBUSY; |
2052 | 2125 | ||
2053 | /* | 2126 | /* |
2054 | * Start with count 1, driver can increase it in queue_setup() | 2127 | * Start with count 1, driver can increase it in queue_setup() |
2055 | */ | 2128 | */ |
2056 | count = 1; | 2129 | count = 1; |
2057 | 2130 | ||
2058 | dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n", | 2131 | dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n", |
2059 | (read) ? "read" : "write", count, q->io_flags); | 2132 | (read) ? "read" : "write", count, q->io_flags); |
2060 | 2133 | ||
2061 | fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL); | 2134 | fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL); |
2062 | if (fileio == NULL) | 2135 | if (fileio == NULL) |
2063 | return -ENOMEM; | 2136 | return -ENOMEM; |
2064 | 2137 | ||
2065 | fileio->flags = q->io_flags; | 2138 | fileio->flags = q->io_flags; |
2066 | 2139 | ||
2067 | /* | 2140 | /* |
2068 | * Request buffers and use MMAP type to force driver | 2141 | * Request buffers and use MMAP type to force driver |
2069 | * to allocate buffers by itself. | 2142 | * to allocate buffers by itself. |
2070 | */ | 2143 | */ |
2071 | fileio->req.count = count; | 2144 | fileio->req.count = count; |
2072 | fileio->req.memory = V4L2_MEMORY_MMAP; | 2145 | fileio->req.memory = V4L2_MEMORY_MMAP; |
2073 | fileio->req.type = q->type; | 2146 | fileio->req.type = q->type; |
2074 | ret = vb2_reqbufs(q, &fileio->req); | 2147 | ret = vb2_reqbufs(q, &fileio->req); |
2075 | if (ret) | 2148 | if (ret) |
2076 | goto err_kfree; | 2149 | goto err_kfree; |
2077 | 2150 | ||
2078 | /* | 2151 | /* |
2079 | * Check if plane_count is correct | 2152 | * Check if plane_count is correct |
2080 | * (multiplane buffers are not supported). | 2153 | * (multiplane buffers are not supported). |
2081 | */ | 2154 | */ |
2082 | if (q->bufs[0]->num_planes != 1) { | 2155 | if (q->bufs[0]->num_planes != 1) { |
2083 | ret = -EBUSY; | 2156 | ret = -EBUSY; |
2084 | goto err_reqbufs; | 2157 | goto err_reqbufs; |
2085 | } | 2158 | } |
2086 | 2159 | ||
2087 | /* | 2160 | /* |
2088 | * Get kernel address of each buffer. | 2161 | * Get kernel address of each buffer. |
2089 | */ | 2162 | */ |
2090 | for (i = 0; i < q->num_buffers; i++) { | 2163 | for (i = 0; i < q->num_buffers; i++) { |
2091 | fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0); | 2164 | fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0); |
2092 | if (fileio->bufs[i].vaddr == NULL) | 2165 | if (fileio->bufs[i].vaddr == NULL) |
2093 | goto err_reqbufs; | 2166 | goto err_reqbufs; |
2094 | fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0); | 2167 | fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0); |
2095 | } | 2168 | } |
2096 | 2169 | ||
2097 | /* | 2170 | /* |
2098 | * Read mode requires pre-queuing of all buffers. | 2171 | * Read mode requires pre-queuing of all buffers. |
2099 | */ | 2172 | */ |
2100 | if (read) { | 2173 | if (read) { |
2101 | /* | 2174 | /* |
2102 | * Queue all buffers. | 2175 | * Queue all buffers. |
2103 | */ | 2176 | */ |
2104 | for (i = 0; i < q->num_buffers; i++) { | 2177 | for (i = 0; i < q->num_buffers; i++) { |
2105 | struct v4l2_buffer *b = &fileio->b; | 2178 | struct v4l2_buffer *b = &fileio->b; |
2106 | memset(b, 0, sizeof(*b)); | 2179 | memset(b, 0, sizeof(*b)); |
2107 | b->type = q->type; | 2180 | b->type = q->type; |
2108 | b->memory = q->memory; | 2181 | b->memory = q->memory; |
2109 | b->index = i; | 2182 | b->index = i; |
2110 | ret = vb2_qbuf(q, b); | 2183 | ret = vb2_qbuf(q, b); |
2111 | if (ret) | 2184 | if (ret) |
2112 | goto err_reqbufs; | 2185 | goto err_reqbufs; |
2113 | fileio->bufs[i].queued = 1; | 2186 | fileio->bufs[i].queued = 1; |
2114 | } | 2187 | } |
2115 | 2188 | ||
2116 | /* | 2189 | /* |
2117 | * Start streaming. | 2190 | * Start streaming. |
2118 | */ | 2191 | */ |
2119 | ret = vb2_streamon(q, q->type); | 2192 | ret = vb2_streamon(q, q->type); |
2120 | if (ret) | 2193 | if (ret) |
2121 | goto err_reqbufs; | 2194 | goto err_reqbufs; |
2122 | } | 2195 | } |
2123 | 2196 | ||
2124 | q->fileio = fileio; | 2197 | q->fileio = fileio; |
2125 | 2198 | ||
2126 | return ret; | 2199 | return ret; |
2127 | 2200 | ||
2128 | err_reqbufs: | 2201 | err_reqbufs: |
2129 | fileio->req.count = 0; | 2202 | fileio->req.count = 0; |
2130 | vb2_reqbufs(q, &fileio->req); | 2203 | vb2_reqbufs(q, &fileio->req); |
2131 | 2204 | ||
2132 | err_kfree: | 2205 | err_kfree: |
2133 | kfree(fileio); | 2206 | kfree(fileio); |
2134 | return ret; | 2207 | return ret; |
2135 | } | 2208 | } |
2136 | 2209 | ||
2137 | /** | 2210 | /** |
2138 | * __vb2_cleanup_fileio() - free resources used by the file io emulator | 2211 | * __vb2_cleanup_fileio() - free resources used by the file io emulator |
2139 | * @q: videobuf2 queue | 2212 | * @q: videobuf2 queue |
2140 | */ | 2213 | */ |
2141 | static int __vb2_cleanup_fileio(struct vb2_queue *q) | 2214 | static int __vb2_cleanup_fileio(struct vb2_queue *q) |
2142 | { | 2215 | { |
2143 | struct vb2_fileio_data *fileio = q->fileio; | 2216 | struct vb2_fileio_data *fileio = q->fileio; |
2144 | 2217 | ||
2145 | if (fileio) { | 2218 | if (fileio) { |
2146 | /* | 2219 | /* |
2147 | * Hack fileio context to enable direct calls to vb2 ioctl | 2220 | * Hack fileio context to enable direct calls to vb2 ioctl |
2148 | * interface. | 2221 | * interface. |
2149 | */ | 2222 | */ |
2150 | q->fileio = NULL; | 2223 | q->fileio = NULL; |
2151 | 2224 | ||
2152 | vb2_streamoff(q, q->type); | 2225 | vb2_streamoff(q, q->type); |
2153 | fileio->req.count = 0; | 2226 | fileio->req.count = 0; |
2154 | vb2_reqbufs(q, &fileio->req); | 2227 | vb2_reqbufs(q, &fileio->req); |
2155 | kfree(fileio); | 2228 | kfree(fileio); |
2156 | dprintk(3, "file io emulator closed\n"); | 2229 | dprintk(3, "file io emulator closed\n"); |
2157 | } | 2230 | } |
2158 | return 0; | 2231 | return 0; |
2159 | } | 2232 | } |
2160 | 2233 | ||
2161 | /** | 2234 | /** |
2162 | * __vb2_perform_fileio() - perform a single file io (read or write) operation | 2235 | * __vb2_perform_fileio() - perform a single file io (read or write) operation |
2163 | * @q: videobuf2 queue | 2236 | * @q: videobuf2 queue |
2164 | * @data: pointer to the target userspace buffer | 2237 | * @data: pointer to the target userspace buffer |
2165 | * @count: number of bytes to read or write | 2238 | * @count: number of bytes to read or write |
2166 | * @ppos: file handle position tracking pointer | 2239 | * @ppos: file handle position tracking pointer |
2167 | * @nonblock: mode selector (1 means nonblocking, 0 means blocking) | 2240 | * @nonblock: mode selector (1 means nonblocking, 0 means blocking) |
2168 | * @read: access mode selector (1 means read, 0 means write) | 2241 | * @read: access mode selector (1 means read, 0 means write) |
2169 | */ | 2242 | */ |
2170 | static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count, | 2243 | static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count, |
2171 | loff_t *ppos, int nonblock, int read) | 2244 | loff_t *ppos, int nonblock, int read) |
2172 | { | 2245 | { |
2173 | struct vb2_fileio_data *fileio; | 2246 | struct vb2_fileio_data *fileio; |
2174 | struct vb2_fileio_buf *buf; | 2247 | struct vb2_fileio_buf *buf; |
2175 | int ret, index; | 2248 | int ret, index; |
2176 | 2249 | ||
2177 | dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n", | 2250 | dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n", |
2178 | read ? "read" : "write", (long)*ppos, count, | 2251 | read ? "read" : "write", (long)*ppos, count, |
2179 | nonblock ? "non" : ""); | 2252 | nonblock ? "non" : ""); |
2180 | 2253 | ||
2181 | if (!data) | 2254 | if (!data) |
2182 | return -EINVAL; | 2255 | return -EINVAL; |
2183 | 2256 | ||
2184 | /* | 2257 | /* |
2185 | * Initialize emulator on first call. | 2258 | * Initialize emulator on first call. |
2186 | */ | 2259 | */ |
2187 | if (!q->fileio) { | 2260 | if (!q->fileio) { |
2188 | ret = __vb2_init_fileio(q, read); | 2261 | ret = __vb2_init_fileio(q, read); |
2189 | dprintk(3, "file io: vb2_init_fileio result: %d\n", ret); | 2262 | dprintk(3, "file io: vb2_init_fileio result: %d\n", ret); |
2190 | if (ret) | 2263 | if (ret) |
2191 | return ret; | 2264 | return ret; |
2192 | } | 2265 | } |
2193 | fileio = q->fileio; | 2266 | fileio = q->fileio; |
2194 | 2267 | ||
2195 | /* | 2268 | /* |
2196 | * Hack fileio context to enable direct calls to vb2 ioctl interface. | 2269 | * Hack fileio context to enable direct calls to vb2 ioctl interface. |
2197 | * The pointer will be restored before returning from this function. | 2270 | * The pointer will be restored before returning from this function. |
2198 | */ | 2271 | */ |
2199 | q->fileio = NULL; | 2272 | q->fileio = NULL; |
2200 | 2273 | ||
2201 | index = fileio->index; | 2274 | index = fileio->index; |
2202 | buf = &fileio->bufs[index]; | 2275 | buf = &fileio->bufs[index]; |
2203 | 2276 | ||
2204 | /* | 2277 | /* |
2205 | * Check if we need to dequeue the buffer. | 2278 | * Check if we need to dequeue the buffer. |
2206 | */ | 2279 | */ |
2207 | if (buf->queued) { | 2280 | if (buf->queued) { |
2208 | struct vb2_buffer *vb; | 2281 | struct vb2_buffer *vb; |
2209 | 2282 | ||
2210 | /* | 2283 | /* |
2211 | * Call vb2_dqbuf to get buffer back. | 2284 | * Call vb2_dqbuf to get buffer back. |
2212 | */ | 2285 | */ |
2213 | memset(&fileio->b, 0, sizeof(fileio->b)); | 2286 | memset(&fileio->b, 0, sizeof(fileio->b)); |
2214 | fileio->b.type = q->type; | 2287 | fileio->b.type = q->type; |
2215 | fileio->b.memory = q->memory; | 2288 | fileio->b.memory = q->memory; |
2216 | fileio->b.index = index; | 2289 | fileio->b.index = index; |
2217 | ret = vb2_dqbuf(q, &fileio->b, nonblock); | 2290 | ret = vb2_dqbuf(q, &fileio->b, nonblock); |
2218 | dprintk(5, "file io: vb2_dqbuf result: %d\n", ret); | 2291 | dprintk(5, "file io: vb2_dqbuf result: %d\n", ret); |
2219 | if (ret) | 2292 | if (ret) |
2220 | goto end; | 2293 | goto end; |
2221 | fileio->dq_count += 1; | 2294 | fileio->dq_count += 1; |
2222 | 2295 | ||
2223 | /* | 2296 | /* |
2224 | * Get number of bytes filled by the driver | 2297 | * Get number of bytes filled by the driver |
2225 | */ | 2298 | */ |
2226 | vb = q->bufs[index]; | 2299 | vb = q->bufs[index]; |
2227 | buf->size = vb2_get_plane_payload(vb, 0); | 2300 | buf->size = vb2_get_plane_payload(vb, 0); |
2228 | buf->queued = 0; | 2301 | buf->queued = 0; |
2229 | } | 2302 | } |
2230 | 2303 | ||
2231 | /* | 2304 | /* |
2232 | * Limit count on last few bytes of the buffer. | 2305 | * Limit count on last few bytes of the buffer. |
2233 | */ | 2306 | */ |
2234 | if (buf->pos + count > buf->size) { | 2307 | if (buf->pos + count > buf->size) { |
2235 | count = buf->size - buf->pos; | 2308 | count = buf->size - buf->pos; |
2236 | dprintk(5, "reducing read count: %zd\n", count); | 2309 | dprintk(5, "reducing read count: %zd\n", count); |
2237 | } | 2310 | } |
2238 | 2311 | ||
2239 | /* | 2312 | /* |
2240 | * Transfer data to userspace. | 2313 | * Transfer data to userspace. |
2241 | */ | 2314 | */ |
2242 | dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n", | 2315 | dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n", |
2243 | count, index, buf->pos); | 2316 | count, index, buf->pos); |
2244 | if (read) | 2317 | if (read) |
2245 | ret = copy_to_user(data, buf->vaddr + buf->pos, count); | 2318 | ret = copy_to_user(data, buf->vaddr + buf->pos, count); |
2246 | else | 2319 | else |
2247 | ret = copy_from_user(buf->vaddr + buf->pos, data, count); | 2320 | ret = copy_from_user(buf->vaddr + buf->pos, data, count); |
2248 | if (ret) { | 2321 | if (ret) { |
2249 | dprintk(3, "file io: error copying data\n"); | 2322 | dprintk(3, "file io: error copying data\n"); |
2250 | ret = -EFAULT; | 2323 | ret = -EFAULT; |
2251 | goto end; | 2324 | goto end; |
2252 | } | 2325 | } |
2253 | 2326 | ||
2254 | /* | 2327 | /* |
2255 | * Update counters. | 2328 | * Update counters. |
2256 | */ | 2329 | */ |
2257 | buf->pos += count; | 2330 | buf->pos += count; |
2258 | *ppos += count; | 2331 | *ppos += count; |
2259 | 2332 | ||
2260 | /* | 2333 | /* |
2261 | * Queue next buffer if required. | 2334 | * Queue next buffer if required. |
2262 | */ | 2335 | */ |
2263 | if (buf->pos == buf->size || | 2336 | if (buf->pos == buf->size || |
2264 | (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) { | 2337 | (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) { |
2265 | /* | 2338 | /* |
2266 | * Check if this is the last buffer to read. | 2339 | * Check if this is the last buffer to read. |
2267 | */ | 2340 | */ |
2268 | if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) && | 2341 | if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) && |
2269 | fileio->dq_count == 1) { | 2342 | fileio->dq_count == 1) { |
2270 | dprintk(3, "file io: read limit reached\n"); | 2343 | dprintk(3, "file io: read limit reached\n"); |
2271 | /* | 2344 | /* |
2272 | * Restore fileio pointer and release the context. | 2345 | * Restore fileio pointer and release the context. |
2273 | */ | 2346 | */ |
2274 | q->fileio = fileio; | 2347 | q->fileio = fileio; |
2275 | return __vb2_cleanup_fileio(q); | 2348 | return __vb2_cleanup_fileio(q); |
2276 | } | 2349 | } |
2277 | 2350 | ||
2278 | /* | 2351 | /* |
2279 | * Call vb2_qbuf and give buffer to the driver. | 2352 | * Call vb2_qbuf and give buffer to the driver. |
2280 | */ | 2353 | */ |
2281 | memset(&fileio->b, 0, sizeof(fileio->b)); | 2354 | memset(&fileio->b, 0, sizeof(fileio->b)); |
2282 | fileio->b.type = q->type; | 2355 | fileio->b.type = q->type; |
2283 | fileio->b.memory = q->memory; | 2356 | fileio->b.memory = q->memory; |
2284 | fileio->b.index = index; | 2357 | fileio->b.index = index; |
2285 | fileio->b.bytesused = buf->pos; | 2358 | fileio->b.bytesused = buf->pos; |
2286 | ret = vb2_qbuf(q, &fileio->b); | 2359 | ret = vb2_qbuf(q, &fileio->b); |
2287 | dprintk(5, "file io: vb2_qbuf result: %d\n", ret); | 2360 | dprintk(5, "file io: vb2_qbuf result: %d\n", ret); |
2288 | if (ret) | 2361 | if (ret) |
2289 | goto end; | 2362 | goto end; |
2290 | 2363 | ||
2291 | /* | 2364 | /* |
2292 | * Buffer has been queued, update the status | 2365 | * Buffer has been queued, update the status |
2293 | */ | 2366 | */ |
2294 | buf->pos = 0; | 2367 | buf->pos = 0; |
2295 | buf->queued = 1; | 2368 | buf->queued = 1; |
2296 | buf->size = q->bufs[0]->v4l2_planes[0].length; | 2369 | buf->size = q->bufs[0]->v4l2_planes[0].length; |
2297 | fileio->q_count += 1; | 2370 | fileio->q_count += 1; |
2298 | 2371 | ||
2299 | /* | 2372 | /* |
2300 | * Switch to the next buffer | 2373 | * Switch to the next buffer |
2301 | */ | 2374 | */ |
2302 | fileio->index = (index + 1) % q->num_buffers; | 2375 | fileio->index = (index + 1) % q->num_buffers; |
2303 | 2376 | ||
2304 | /* | 2377 | /* |
2305 | * Start streaming if required. | 2378 | * Start streaming if required. |
2306 | */ | 2379 | */ |
2307 | if (!read && !q->streaming) { | 2380 | if (!read && !q->streaming) { |
2308 | ret = vb2_streamon(q, q->type); | 2381 | ret = vb2_streamon(q, q->type); |
2309 | if (ret) | 2382 | if (ret) |
2310 | goto end; | 2383 | goto end; |
2311 | } | 2384 | } |
2312 | } | 2385 | } |
2313 | 2386 | ||
2314 | /* | 2387 | /* |
2315 | * Return proper number of bytes processed. | 2388 | * Return proper number of bytes processed. |
2316 | */ | 2389 | */ |
2317 | if (ret == 0) | 2390 | if (ret == 0) |
2318 | ret = count; | 2391 | ret = count; |
2319 | end: | 2392 | end: |
2320 | /* | 2393 | /* |
2321 | * Restore the fileio context and block vb2 ioctl interface. | 2394 | * Restore the fileio context and block vb2 ioctl interface. |
2322 | */ | 2395 | */ |
2323 | q->fileio = fileio; | 2396 | q->fileio = fileio; |
2324 | return ret; | 2397 | return ret; |
2325 | } | 2398 | } |
2326 | 2399 | ||
2327 | size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, | 2400 | size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, |
2328 | loff_t *ppos, int nonblocking) | 2401 | loff_t *ppos, int nonblocking) |
2329 | { | 2402 | { |
2330 | return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1); | 2403 | return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1); |
2331 | } | 2404 | } |
2332 | EXPORT_SYMBOL_GPL(vb2_read); | 2405 | EXPORT_SYMBOL_GPL(vb2_read); |
2333 | 2406 | ||
2334 | size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count, | 2407 | size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count, |
2335 | loff_t *ppos, int nonblocking) | 2408 | loff_t *ppos, int nonblocking) |
2336 | { | 2409 | { |
2337 | return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0); | 2410 | return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0); |
2338 | } | 2411 | } |
2339 | EXPORT_SYMBOL_GPL(vb2_write); | 2412 | EXPORT_SYMBOL_GPL(vb2_write); |
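
Forwarding from a driver's read file operation is then a one-liner; a sketch (serialization against other queue users is deliberately ignored here, which is exactly what the vb2_fop_read() helper below adds):

static ssize_t my_read(struct file *file, char __user *buf,
		       size_t count, loff_t *ppos)
{
	struct my_dev *dev = video_drvdata(file);

	return vb2_read(&dev->queue, buf, count, ppos,
			file->f_flags & O_NONBLOCK);
}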
2340 | 2413 | ||
2341 | 2414 | ||
2342 | /* | 2415 | /* |
2343 | * The following functions are not part of the vb2 core API, but are helper | 2416 | * The following functions are not part of the vb2 core API, but are helper |
2344 | * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations | 2417 | * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations |
2345 | * and struct vb2_ops. | 2418 | * and struct vb2_ops. |
2346 | * They contain boilerplate code that most if not all drivers have to do | 2419 | * They contain boilerplate code that most if not all drivers have to do |
2347 | * and so they simplify the driver code. | 2420 | * and so they simplify the driver code. |
2348 | */ | 2421 | */ |
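
With these helpers the boilerplate collapses into table entries. A sketch of the relevant parts, assuming the V4L2 core dispatches VIDIOC_EXPBUF through a vidioc_expbuf op (added elsewhere in this series); the my_* names are hypothetical:

static const struct v4l2_ioctl_ops my_ioctl_ops = {
	/* ... format ioctls ... */
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_expbuf		= vb2_ioctl_expbuf,	/* new below */
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
};

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read		= vb2_fop_read,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};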
2349 | 2422 | ||
2350 | /* The queue is busy if there is an owner and you are not that owner. */ | 2423 | /* The queue is busy if there is an owner and you are not that owner. */ |
2351 | static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file) | 2424 | static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file) |
2352 | { | 2425 | { |
2353 | return vdev->queue->owner && vdev->queue->owner != file->private_data; | 2426 | return vdev->queue->owner && vdev->queue->owner != file->private_data; |
2354 | } | 2427 | } |
2355 | 2428 | ||
2356 | /* vb2 ioctl helpers */ | 2429 | /* vb2 ioctl helpers */ |
2357 | 2430 | ||
2358 | int vb2_ioctl_reqbufs(struct file *file, void *priv, | 2431 | int vb2_ioctl_reqbufs(struct file *file, void *priv, |
2359 | struct v4l2_requestbuffers *p) | 2432 | struct v4l2_requestbuffers *p) |
2360 | { | 2433 | { |
2361 | struct video_device *vdev = video_devdata(file); | 2434 | struct video_device *vdev = video_devdata(file); |
2362 | int res = __verify_memory_type(vdev->queue, p->memory, p->type); | 2435 | int res = __verify_memory_type(vdev->queue, p->memory, p->type); |
2363 | 2436 | ||
2364 | if (res) | 2437 | if (res) |
2365 | return res; | 2438 | return res; |
2366 | if (vb2_queue_is_busy(vdev, file)) | 2439 | if (vb2_queue_is_busy(vdev, file)) |
2367 | return -EBUSY; | 2440 | return -EBUSY; |
2368 | res = __reqbufs(vdev->queue, p); | 2441 | res = __reqbufs(vdev->queue, p); |
2369 | /* If count == 0, then the owner has released all buffers and is | 2442 | /* If count == 0, then the owner has released all buffers and is |
2370 | no longer the owner of the queue. Otherwise we have a new owner. */ | 2443 | no longer the owner of the queue. Otherwise we have a new owner. */ |
2371 | if (res == 0) | 2444 | if (res == 0) |
2372 | vdev->queue->owner = p->count ? file->private_data : NULL; | 2445 | vdev->queue->owner = p->count ? file->private_data : NULL; |
2373 | return res; | 2446 | return res; |
2374 | } | 2447 | } |
2375 | EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs); | 2448 | EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs); |
2376 | 2449 | ||
2377 | int vb2_ioctl_create_bufs(struct file *file, void *priv, | 2450 | int vb2_ioctl_create_bufs(struct file *file, void *priv, |
2378 | struct v4l2_create_buffers *p) | 2451 | struct v4l2_create_buffers *p) |
2379 | { | 2452 | { |
2380 | struct video_device *vdev = video_devdata(file); | 2453 | struct video_device *vdev = video_devdata(file); |
2381 | int res = __verify_memory_type(vdev->queue, p->memory, p->format.type); | 2454 | int res = __verify_memory_type(vdev->queue, p->memory, p->format.type); |
2382 | 2455 | ||
2383 | p->index = vdev->queue->num_buffers; | 2456 | p->index = vdev->queue->num_buffers; |
2384 | /* If count == 0, then just check if memory and type are valid. | 2457 | /* If count == 0, then just check if memory and type are valid. |
2385 | Any -EBUSY result from __verify_memory_type can be mapped to 0. */ | 2458 | Any -EBUSY result from __verify_memory_type can be mapped to 0. */ |
2386 | if (p->count == 0) | 2459 | if (p->count == 0) |
2387 | return res != -EBUSY ? res : 0; | 2460 | return res != -EBUSY ? res : 0; |
2388 | if (res) | 2461 | if (res) |
2389 | return res; | 2462 | return res; |
2390 | if (vb2_queue_is_busy(vdev, file)) | 2463 | if (vb2_queue_is_busy(vdev, file)) |
2391 | return -EBUSY; | 2464 | return -EBUSY; |
2392 | res = __create_bufs(vdev->queue, p); | 2465 | res = __create_bufs(vdev->queue, p); |
2393 | if (res == 0) | 2466 | if (res == 0) |
2394 | vdev->queue->owner = file->private_data; | 2467 | vdev->queue->owner = file->private_data; |
2395 | return res; | 2468 | return res; |
2396 | } | 2469 | } |
2397 | EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs); | 2470 | EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs); |
2398 | 2471 | ||
2399 | int vb2_ioctl_prepare_buf(struct file *file, void *priv, | 2472 | int vb2_ioctl_prepare_buf(struct file *file, void *priv, |
2400 | struct v4l2_buffer *p) | 2473 | struct v4l2_buffer *p) |
2401 | { | 2474 | { |
2402 | struct video_device *vdev = video_devdata(file); | 2475 | struct video_device *vdev = video_devdata(file); |
2403 | 2476 | ||
2404 | if (vb2_queue_is_busy(vdev, file)) | 2477 | if (vb2_queue_is_busy(vdev, file)) |
2405 | return -EBUSY; | 2478 | return -EBUSY; |
2406 | return vb2_prepare_buf(vdev->queue, p); | 2479 | return vb2_prepare_buf(vdev->queue, p); |
2407 | } | 2480 | } |
2408 | EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf); | 2481 | EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf); |
2409 | 2482 | ||
2410 | int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p) | 2483 | int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p) |
2411 | { | 2484 | { |
2412 | struct video_device *vdev = video_devdata(file); | 2485 | struct video_device *vdev = video_devdata(file); |
2413 | 2486 | ||
2414 | /* No need to call vb2_queue_is_busy(), anyone can query buffers. */ | 2487 | /* No need to call vb2_queue_is_busy(), anyone can query buffers. */ |
2415 | return vb2_querybuf(vdev->queue, p); | 2488 | return vb2_querybuf(vdev->queue, p); |
2416 | } | 2489 | } |
2417 | EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf); | 2490 | EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf); |
2418 | 2491 | ||
2419 | int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p) | 2492 | int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p) |
2420 | { | 2493 | { |
2421 | struct video_device *vdev = video_devdata(file); | 2494 | struct video_device *vdev = video_devdata(file); |
2422 | 2495 | ||
2423 | if (vb2_queue_is_busy(vdev, file)) | 2496 | if (vb2_queue_is_busy(vdev, file)) |
2424 | return -EBUSY; | 2497 | return -EBUSY; |
2425 | return vb2_qbuf(vdev->queue, p); | 2498 | return vb2_qbuf(vdev->queue, p); |
2426 | } | 2499 | } |
2427 | EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf); | 2500 | EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf); |
2428 | 2501 | ||
2429 | int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p) | 2502 | int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p) |
2430 | { | 2503 | { |
2431 | struct video_device *vdev = video_devdata(file); | 2504 | struct video_device *vdev = video_devdata(file); |
2432 | 2505 | ||
2433 | if (vb2_queue_is_busy(vdev, file)) | 2506 | if (vb2_queue_is_busy(vdev, file)) |
2434 | return -EBUSY; | 2507 | return -EBUSY; |
2435 | return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK); | 2508 | return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK); |
2436 | } | 2509 | } |
2437 | EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf); | 2510 | EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf); |
2438 | 2511 | ||
2439 | int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i) | 2512 | int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i) |
2440 | { | 2513 | { |
2441 | struct video_device *vdev = video_devdata(file); | 2514 | struct video_device *vdev = video_devdata(file); |
2442 | 2515 | ||
2443 | if (vb2_queue_is_busy(vdev, file)) | 2516 | if (vb2_queue_is_busy(vdev, file)) |
2444 | return -EBUSY; | 2517 | return -EBUSY; |
2445 | return vb2_streamon(vdev->queue, i); | 2518 | return vb2_streamon(vdev->queue, i); |
2446 | } | 2519 | } |
2447 | EXPORT_SYMBOL_GPL(vb2_ioctl_streamon); | 2520 | EXPORT_SYMBOL_GPL(vb2_ioctl_streamon); |
2448 | 2521 | ||
2449 | int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) | 2522 | int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) |
2450 | { | 2523 | { |
2451 | struct video_device *vdev = video_devdata(file); | 2524 | struct video_device *vdev = video_devdata(file); |
2452 | 2525 | ||
2453 | if (vb2_queue_is_busy(vdev, file)) | 2526 | if (vb2_queue_is_busy(vdev, file)) |
2454 | return -EBUSY; | 2527 | return -EBUSY; |
2455 | return vb2_streamoff(vdev->queue, i); | 2528 | return vb2_streamoff(vdev->queue, i); |
2456 | } | 2529 | } |
2457 | EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff); | 2530 | EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff); |
2531 | |||
2532 | int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p) | ||
2533 | { | ||
2534 | struct video_device *vdev = video_devdata(file); | ||
2535 | |||
2536 | if (vb2_queue_is_busy(vdev, file)) | ||
2537 | return -EBUSY; | ||
2538 | return vb2_expbuf(vdev->queue, p); | ||
2539 | } | ||
2540 | EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf); | ||
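
From userspace, the new export path looks as follows; a sketch based on the struct v4l2_exportbuffer layout introduced by this series (type/index/plane select the plane, fd returns the DMABUF descriptor):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Export plane 0 of MMAP buffer `index` as a dmabuf fd, or -1 on error. */
static int export_buffer(int video_fd, unsigned int index)
{
	struct v4l2_exportbuffer expbuf;

	memset(&expbuf, 0, sizeof(expbuf));
	expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	expbuf.index = index;
	expbuf.plane = 0;

	if (ioctl(video_fd, VIDIOC_EXPBUF, &expbuf) < 0)
		return -1;		/* errno set by the driver */

	return expbuf.fd;	/* importable by other DMABUF-aware devices */
}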
2458 | 2541 | ||
2459 | /* v4l2_file_operations helpers */ | 2542 | /* v4l2_file_operations helpers */ |
2460 | 2543 | ||
2461 | int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma) | 2544 | int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma) |
2462 | { | 2545 | { |
2463 | struct video_device *vdev = video_devdata(file); | 2546 | struct video_device *vdev = video_devdata(file); |
2464 | 2547 | ||
2465 | return vb2_mmap(vdev->queue, vma); | 2548 | return vb2_mmap(vdev->queue, vma); |
2466 | } | 2549 | } |
2467 | EXPORT_SYMBOL_GPL(vb2_fop_mmap); | 2550 | EXPORT_SYMBOL_GPL(vb2_fop_mmap); |
2468 | 2551 | ||
2469 | int vb2_fop_release(struct file *file) | 2552 | int vb2_fop_release(struct file *file) |
2470 | { | 2553 | { |
2471 | struct video_device *vdev = video_devdata(file); | 2554 | struct video_device *vdev = video_devdata(file); |
2472 | 2555 | ||
2473 | if (file->private_data == vdev->queue->owner) { | 2556 | if (file->private_data == vdev->queue->owner) { |
2474 | vb2_queue_release(vdev->queue); | 2557 | vb2_queue_release(vdev->queue); |
2475 | vdev->queue->owner = NULL; | 2558 | vdev->queue->owner = NULL; |
2476 | } | 2559 | } |
2477 | return v4l2_fh_release(file); | 2560 | return v4l2_fh_release(file); |
2478 | } | 2561 | } |
2479 | EXPORT_SYMBOL_GPL(vb2_fop_release); | 2562 | EXPORT_SYMBOL_GPL(vb2_fop_release); |
2480 | 2563 | ||
2481 | ssize_t vb2_fop_write(struct file *file, char __user *buf, | 2564 | ssize_t vb2_fop_write(struct file *file, char __user *buf, |
2482 | size_t count, loff_t *ppos) | 2565 | size_t count, loff_t *ppos) |
2483 | { | 2566 | { |
2484 | struct video_device *vdev = video_devdata(file); | 2567 | struct video_device *vdev = video_devdata(file); |
2485 | struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; | 2568 | struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; |
2486 | int err = -EBUSY; | 2569 | int err = -EBUSY; |
2487 | 2570 | ||
2488 | if (lock && mutex_lock_interruptible(lock)) | 2571 | if (lock && mutex_lock_interruptible(lock)) |
2489 | return -ERESTARTSYS; | 2572 | return -ERESTARTSYS; |
2490 | if (vb2_queue_is_busy(vdev, file)) | 2573 | if (vb2_queue_is_busy(vdev, file)) |
2491 | goto exit; | 2574 | goto exit; |
2492 | err = vb2_write(vdev->queue, buf, count, ppos, | 2575 | err = vb2_write(vdev->queue, buf, count, ppos, |
2493 | file->f_flags & O_NONBLOCK); | 2576 | file->f_flags & O_NONBLOCK); |
2494 | if (vdev->queue->fileio) | 2577 | if (vdev->queue->fileio) |
2495 | vdev->queue->owner = file->private_data; | 2578 | vdev->queue->owner = file->private_data; |
2496 | exit: | 2579 | exit: |
2497 | if (lock) | 2580 | if (lock) |
2498 | mutex_unlock(lock); | 2581 | mutex_unlock(lock); |
2499 | return err; | 2582 | return err; |
2500 | } | 2583 | } |
2501 | EXPORT_SYMBOL_GPL(vb2_fop_write); | 2584 | EXPORT_SYMBOL_GPL(vb2_fop_write); |
2502 | 2585 | ||
2503 | ssize_t vb2_fop_read(struct file *file, char __user *buf, | 2586 | ssize_t vb2_fop_read(struct file *file, char __user *buf, |
2504 | size_t count, loff_t *ppos) | 2587 | size_t count, loff_t *ppos) |
2505 | { | 2588 | { |
2506 | struct video_device *vdev = video_devdata(file); | 2589 | struct video_device *vdev = video_devdata(file); |
2507 | struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; | 2590 | struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; |
2508 | int err = -EBUSY; | 2591 | int err = -EBUSY; |
2509 | 2592 | ||
2510 | if (lock && mutex_lock_interruptible(lock)) | 2593 | if (lock && mutex_lock_interruptible(lock)) |
2511 | return -ERESTARTSYS; | 2594 | return -ERESTARTSYS; |
2512 | if (vb2_queue_is_busy(vdev, file)) | 2595 | if (vb2_queue_is_busy(vdev, file)) |
2513 | goto exit; | 2596 | goto exit; |
2514 | err = vb2_read(vdev->queue, buf, count, ppos, | 2597 | err = vb2_read(vdev->queue, buf, count, ppos, |
2515 | file->f_flags & O_NONBLOCK); | 2598 | file->f_flags & O_NONBLOCK); |
2516 | if (vdev->queue->fileio) | 2599 | if (vdev->queue->fileio) |
2517 | vdev->queue->owner = file->private_data; | 2600 | vdev->queue->owner = file->private_data; |
2518 | exit: | 2601 | exit: |
2519 | if (lock) | 2602 | if (lock) |
2520 | mutex_unlock(lock); | 2603 | mutex_unlock(lock); |
2521 | return err; | 2604 | return err; |
2522 | } | 2605 | } |
2523 | EXPORT_SYMBOL_GPL(vb2_fop_read); | 2606 | EXPORT_SYMBOL_GPL(vb2_fop_read); |
2524 | 2607 | ||
2525 | unsigned int vb2_fop_poll(struct file *file, poll_table *wait) | 2608 | unsigned int vb2_fop_poll(struct file *file, poll_table *wait) |
2526 | { | 2609 | { |
2527 | struct video_device *vdev = video_devdata(file); | 2610 | struct video_device *vdev = video_devdata(file); |
2528 | struct vb2_queue *q = vdev->queue; | 2611 | struct vb2_queue *q = vdev->queue; |
2529 | struct mutex *lock = q->lock ? q->lock : vdev->lock; | 2612 | struct mutex *lock = q->lock ? q->lock : vdev->lock; |
2530 | unsigned long req_events = poll_requested_events(wait); | 2613 | unsigned long req_events = poll_requested_events(wait); |
2531 | unsigned res; | 2614 | unsigned res; |
2532 | void *fileio; | 2615 | void *fileio; |
2533 | bool must_lock = false; | 2616 | bool must_lock = false; |
2534 | 2617 | ||
2535 | /* Try to be smart: only lock if polling might start fileio, | 2618 | /* Try to be smart: only lock if polling might start fileio, |
2536 | otherwise locking will only introduce unwanted delays. */ | 2619 | otherwise locking will only introduce unwanted delays. */ |
2537 | if (q->num_buffers == 0 && q->fileio == NULL) { | 2620 | if (q->num_buffers == 0 && q->fileio == NULL) { |
2538 | if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) && | 2621 | if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) && |
2539 | (req_events & (POLLIN | POLLRDNORM))) | 2622 | (req_events & (POLLIN | POLLRDNORM))) |
2540 | must_lock = true; | 2623 | must_lock = true; |
2541 | else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) && | 2624 | else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) && |
2542 | (req_events & (POLLOUT | POLLWRNORM))) | 2625 | (req_events & (POLLOUT | POLLWRNORM))) |
2543 | must_lock = true; | 2626 | must_lock = true; |
2544 | } | 2627 | } |
2545 | 2628 | ||
2546 | /* If locking is needed, but this helper doesn't know how, then you | 2629 | /* If locking is needed, but this helper doesn't know how, then you |
2547 | shouldn't be using this helper but you should write your own. */ | 2630 | shouldn't be using this helper but you should write your own. */ |
2548 | WARN_ON(must_lock && !lock); | 2631 | WARN_ON(must_lock && !lock); |
2549 | 2632 | ||
2550 | if (must_lock && lock && mutex_lock_interruptible(lock)) | 2633 | if (must_lock && lock && mutex_lock_interruptible(lock)) |
2551 | return POLLERR; | 2634 | return POLLERR; |
2552 | 2635 | ||
2553 | fileio = q->fileio; | 2636 | fileio = q->fileio; |
2554 | 2637 | ||
2555 | res = vb2_poll(vdev->queue, file, wait); | 2638 | res = vb2_poll(vdev->queue, file, wait); |
2556 | 2639 | ||
2557 | /* If fileio was started, then we have a new queue owner. */ | 2640 | /* If fileio was started, then we have a new queue owner. */ |
2558 | if (must_lock && !fileio && q->fileio) | 2641 | if (must_lock && !fileio && q->fileio) |
2559 | q->owner = file->private_data; | 2642 | q->owner = file->private_data; |
2560 | if (must_lock && lock) | 2643 | if (must_lock && lock) |
2561 | mutex_unlock(lock); | 2644 | mutex_unlock(lock); |
2562 | return res; | 2645 | return res; |
2563 | } | 2646 | } |
2564 | EXPORT_SYMBOL_GPL(vb2_fop_poll); | 2647 | EXPORT_SYMBOL_GPL(vb2_fop_poll); |
2565 | 2648 | ||
2566 | #ifndef CONFIG_MMU | 2649 | #ifndef CONFIG_MMU |
2567 | unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, | 2650 | unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, |
2568 | unsigned long len, unsigned long pgoff, unsigned long flags) | 2651 | unsigned long len, unsigned long pgoff, unsigned long flags) |
2569 | { | 2652 | { |
2570 | struct video_device *vdev = video_devdata(file); | 2653 | struct video_device *vdev = video_devdata(file); |
2571 | 2654 | ||
2572 | return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags); | 2655 | return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags); |
2573 | } | 2656 | } |
2574 | EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area); | 2657 | EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area); |
2575 | #endif | 2658 | #endif |
2576 | 2659 | ||
2577 | /* vb2_ops helpers. Only use if vq->lock is non-NULL. */ | 2660 | /* vb2_ops helpers. Only use if vq->lock is non-NULL. */ |
2578 | 2661 | ||
2579 | void vb2_ops_wait_prepare(struct vb2_queue *vq) | 2662 | void vb2_ops_wait_prepare(struct vb2_queue *vq) |
2580 | { | 2663 | { |
2581 | mutex_unlock(vq->lock); | 2664 | mutex_unlock(vq->lock); |
2582 | } | 2665 | } |
2583 | EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare); | 2666 | EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare); |
2584 | 2667 | ||
2585 | void vb2_ops_wait_finish(struct vb2_queue *vq) | 2668 | void vb2_ops_wait_finish(struct vb2_queue *vq) |
2586 | { | 2669 | { |
2587 | mutex_lock(vq->lock); | 2670 | mutex_lock(vq->lock); |
2588 | } | 2671 | } |
2589 | EXPORT_SYMBOL_GPL(vb2_ops_wait_finish); | 2672 | EXPORT_SYMBOL_GPL(vb2_ops_wait_finish); |
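
These two exist so that a driver whose vq->lock is set can plug them straight into its vb2_ops; a sketch with hypothetical my_queue_setup/my_buf_queue callbacks:

static const struct vb2_ops my_vb2_ops = {
	.queue_setup	= my_queue_setup,	/* mandatory */
	.buf_queue	= my_buf_queue,		/* mandatory */
	/* Drop and retake vq->lock around blocking waits inside vb2: */
	.wait_prepare	= vb2_ops_wait_prepare,
	.wait_finish	= vb2_ops_wait_finish,
};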
2590 | 2673 | ||
2591 | MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2"); | 2674 | MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2"); |
2592 | MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski"); | 2675 | MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski"); |
2593 | MODULE_LICENSE("GPL"); | 2676 | MODULE_LICENSE("GPL"); |
2594 | 2677 |
include/media/v4l2-mem2mem.h
1 | /* | 1 | /* |
2 | * Memory-to-memory device framework for Video for Linux 2. | 2 | * Memory-to-memory device framework for Video for Linux 2. |
3 | * | 3 | * |
4 | * Helper functions for devices that use memory buffers for both source | 4 | * Helper functions for devices that use memory buffers for both source |
5 | * and destination. | 5 | * and destination. |
6 | * | 6 | * |
7 | * Copyright (c) 2009 Samsung Electronics Co., Ltd. | 7 | * Copyright (c) 2009 Samsung Electronics Co., Ltd. |
8 | * Pawel Osciak, <pawel@osciak.com> | 8 | * Pawel Osciak, <pawel@osciak.com> |
9 | * Marek Szyprowski, <m.szyprowski@samsung.com> | 9 | * Marek Szyprowski, <m.szyprowski@samsung.com> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License as published by the | 12 | * it under the terms of the GNU General Public License as published by the |
13 | * Free Software Foundation; either version 2 of the | 13 | * Free Software Foundation; either version 2 of the |
14 | * License, or (at your option) any later version | 14 | * License, or (at your option) any later version |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #ifndef _MEDIA_V4L2_MEM2MEM_H | 17 | #ifndef _MEDIA_V4L2_MEM2MEM_H |
18 | #define _MEDIA_V4L2_MEM2MEM_H | 18 | #define _MEDIA_V4L2_MEM2MEM_H |
19 | 19 | ||
20 | #include <media/videobuf2-core.h> | 20 | #include <media/videobuf2-core.h> |
21 | 21 | ||
22 | /** | 22 | /** |
23 | * struct v4l2_m2m_ops - mem-to-mem device driver callbacks | 23 | * struct v4l2_m2m_ops - mem-to-mem device driver callbacks |
24 | * @device_run: required. Begin the actual job (transaction) inside this | 24 | * @device_run: required. Begin the actual job (transaction) inside this |
25 | * callback. | 25 | * callback. |
26 | * The job does NOT have to end before this callback returns | 26 | * The job does NOT have to end before this callback returns |
27 | * (and this will usually be the case). When the job finishes, | 27 | * (and this will usually be the case). When the job finishes, |
28 | * v4l2_m2m_job_finish() has to be called. | 28 | * v4l2_m2m_job_finish() has to be called. |
29 | * @job_ready: optional. Should return 0 if the driver does not have a job | 29 | * @job_ready: optional. Should return 0 if the driver does not have a job |
30 | * fully prepared to run yet (i.e. it will not be able to finish a | 30 | * fully prepared to run yet (i.e. it will not be able to finish a |
31 | * transaction without sleeping). If not provided, it will be | 31 | * transaction without sleeping). If not provided, it will be |
32 | * assumed that one source and one destination buffer are all | 32 | * assumed that one source and one destination buffer are all |
33 | * that is required for the driver to perform one full transaction. | 33 | * that is required for the driver to perform one full transaction. |
34 | * This method may not sleep. | 34 | * This method may not sleep. |
35 | * @job_abort: required. Informs the driver that it has to abort the currently | 35 | * @job_abort: required. Informs the driver that it has to abort the currently |
36 | * running transaction as soon as possible (i.e. as soon as it can | 36 | * running transaction as soon as possible (i.e. as soon as it can |
37 | * stop the device safely; e.g. in the next interrupt handler), | 37 | * stop the device safely; e.g. in the next interrupt handler), |
38 | * even if the transaction would not have been finished by then. | 38 | * even if the transaction would not have been finished by then. |
39 | * After the driver performs the necessary steps, it has to call | 39 | * After the driver performs the necessary steps, it has to call |
40 | * v4l2_m2m_job_finish() (as if the transaction ended normally). | 40 | * v4l2_m2m_job_finish() (as if the transaction ended normally). |
41 | * This function does not have to (and will usually not) wait | 41 | * This function does not have to (and will usually not) wait |
42 | * until the device enters a state in which it can be stopped. | 42 | * until the device enters a state in which it can be stopped. |
43 | */ | 43 | */ |
44 | struct v4l2_m2m_ops { | 44 | struct v4l2_m2m_ops { |
45 | void (*device_run)(void *priv); | 45 | void (*device_run)(void *priv); |
46 | int (*job_ready)(void *priv); | 46 | int (*job_ready)(void *priv); |
47 | void (*job_abort)(void *priv); | 47 | void (*job_abort)(void *priv); |
48 | void (*lock)(void *priv); | 48 | void (*lock)(void *priv); |
49 | void (*unlock)(void *priv); | 49 | void (*unlock)(void *priv); |
50 | }; | 50 | }; |
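For orientation, a minimal ops table might look like the hedged sketch below; every foo_* name is a hypothetical placeholder, not part of this API. Note that device_run typically just programs the hardware and returns, and the job is completed later (usually from the interrupt handler) via v4l2_m2m_job_finish().

    #include <media/v4l2-mem2mem.h>

    /* Hypothetical driver context; real drivers carry much more state. */
    struct foo_ctx {
            struct v4l2_m2m_ctx *m2m_ctx;
    };

    static void foo_hw_start(struct foo_ctx *ctx) { /* program DMA, go */ }
    static void foo_hw_request_stop(struct foo_ctx *ctx) { /* stop ASAP */ }

    static void foo_device_run(void *priv)
    {
            struct foo_ctx *ctx = priv;

            /* Kick the hardware and return; v4l2_m2m_job_finish() is
             * called later, e.g. from the interrupt handler. */
            foo_hw_start(ctx);
    }

    static void foo_job_abort(void *priv)
    {
            /* Ask the hardware to stop as soon as it safely can. */
            foo_hw_request_stop(priv);
    }

    static struct v4l2_m2m_ops foo_m2m_ops = {
            .device_run     = foo_device_run,
            .job_abort      = foo_job_abort,
    };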
51 | 51 | ||
52 | struct v4l2_m2m_dev; | 52 | struct v4l2_m2m_dev; |
53 | 53 | ||
54 | struct v4l2_m2m_queue_ctx { | 54 | struct v4l2_m2m_queue_ctx { |
55 | /* private: internal use only */ | 55 | /* private: internal use only */ |
56 | struct vb2_queue q; | 56 | struct vb2_queue q; |
57 | 57 | ||
58 | /* Queue for buffers ready to be processed as soon as this | 58 | /* Queue for buffers ready to be processed as soon as this |
59 | * instance receives access to the device */ | 59 | * instance receives access to the device */ |
60 | struct list_head rdy_queue; | 60 | struct list_head rdy_queue; |
61 | spinlock_t rdy_spinlock; | 61 | spinlock_t rdy_spinlock; |
62 | u8 num_rdy; | 62 | u8 num_rdy; |
63 | }; | 63 | }; |
64 | 64 | ||
65 | struct v4l2_m2m_ctx { | 65 | struct v4l2_m2m_ctx { |
66 | /* private: internal use only */ | 66 | /* private: internal use only */ |
67 | struct v4l2_m2m_dev *m2m_dev; | 67 | struct v4l2_m2m_dev *m2m_dev; |
68 | 68 | ||
69 | /* Capture (output to memory) queue context */ | 69 | /* Capture (output to memory) queue context */ |
70 | struct v4l2_m2m_queue_ctx cap_q_ctx; | 70 | struct v4l2_m2m_queue_ctx cap_q_ctx; |
71 | 71 | ||
72 | /* Output (input from memory) queue context */ | 72 | /* Output (input from memory) queue context */ |
73 | struct v4l2_m2m_queue_ctx out_q_ctx; | 73 | struct v4l2_m2m_queue_ctx out_q_ctx; |
74 | 74 | ||
75 | /* For device job queue */ | 75 | /* For device job queue */ |
76 | struct list_head queue; | 76 | struct list_head queue; |
77 | unsigned long job_flags; | 77 | unsigned long job_flags; |
78 | wait_queue_head_t finished; | 78 | wait_queue_head_t finished; |
79 | 79 | ||
80 | /* Instance private data */ | 80 | /* Instance private data */ |
81 | void *priv; | 81 | void *priv; |
82 | }; | 82 | }; |
83 | 83 | ||
84 | struct v4l2_m2m_buffer { | 84 | struct v4l2_m2m_buffer { |
85 | struct vb2_buffer vb; | 85 | struct vb2_buffer vb; |
86 | struct list_head list; | 86 | struct list_head list; |
87 | }; | 87 | }; |
88 | 88 | ||
89 | void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev); | 89 | void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev); |
90 | 90 | ||
91 | struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, | 91 | struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, |
92 | enum v4l2_buf_type type); | 92 | enum v4l2_buf_type type); |
93 | 93 | ||
94 | void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, | 94 | void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, |
95 | struct v4l2_m2m_ctx *m2m_ctx); | 95 | struct v4l2_m2m_ctx *m2m_ctx); |
96 | 96 | ||
97 | static inline void | 97 | static inline void |
98 | v4l2_m2m_buf_done(struct vb2_buffer *buf, enum vb2_buffer_state state) | 98 | v4l2_m2m_buf_done(struct vb2_buffer *buf, enum vb2_buffer_state state) |
99 | { | 99 | { |
100 | vb2_buffer_done(buf, state); | 100 | vb2_buffer_done(buf, state); |
101 | } | 101 | } |
102 | 102 | ||
103 | int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 103 | int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
104 | struct v4l2_requestbuffers *reqbufs); | 104 | struct v4l2_requestbuffers *reqbufs); |
105 | 105 | ||
106 | int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 106 | int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
107 | struct v4l2_buffer *buf); | 107 | struct v4l2_buffer *buf); |
108 | 108 | ||
109 | int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 109 | int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
110 | struct v4l2_buffer *buf); | 110 | struct v4l2_buffer *buf); |
111 | int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 111 | int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
112 | struct v4l2_buffer *buf); | 112 | struct v4l2_buffer *buf); |
113 | 113 | ||
114 | int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | ||
115 | struct v4l2_exportbuffer *eb); | ||
116 | |||
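The v4l2_m2m_expbuf() wrapper declared above is the piece an m2m driver forwards its VIDIOC_EXPBUF handler to. A hedged sketch, where fh_to_ctx() and struct foo_ctx stand in for the driver's own helpers:

    static int foo_vidioc_expbuf(struct file *file, void *priv,
                                 struct v4l2_exportbuffer *eb)
    {
            struct foo_ctx *ctx = fh_to_ctx(priv);  /* hypothetical helper */

            return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
    }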
114 | int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 117 | int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
115 | enum v4l2_buf_type type); | 118 | enum v4l2_buf_type type); |
116 | int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 119 | int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
117 | enum v4l2_buf_type type); | 120 | enum v4l2_buf_type type); |
118 | 121 | ||
119 | unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 122 | unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
120 | struct poll_table_struct *wait); | 123 | struct poll_table_struct *wait); |
121 | 124 | ||
122 | int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, | 125 | int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
123 | struct vm_area_struct *vma); | 126 | struct vm_area_struct *vma); |
124 | 127 | ||
125 | struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops); | 128 | struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops); |
126 | void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev); | 129 | void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev); |
127 | 130 | ||
128 | struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, | 131 | struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, |
129 | void *drv_priv, | 132 | void *drv_priv, |
130 | int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)); | 133 | int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)); |
131 | 134 | ||
132 | void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx); | 135 | void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx); |
133 | 136 | ||
134 | void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb); | 137 | void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb); |
135 | 138 | ||
136 | /** | 139 | /** |
137 | * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for | 140 | * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for |
138 | * use | 141 | * use |
139 | */ | 142 | */ |
140 | static inline | 143 | static inline |
141 | unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) | 144 | unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) |
142 | { | 145 | { |
143 | return m2m_ctx->out_q_ctx.num_rdy; | 146 | return m2m_ctx->out_q_ctx.num_rdy; |
144 | } | 147 | } |
145 | 148 | ||
146 | /** | 149 | /** |
147 | * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers | 150 | * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers |
148 | * ready for use | 151 | * ready for use |
149 | */ | 152 | */ |
150 | static inline | 153 | static inline |
151 | unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) | 154 | unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) |
152 | { | 155 | { |
153 | return m2m_ctx->cap_q_ctx.num_rdy; | 156 | return m2m_ctx->cap_q_ctx.num_rdy; |
154 | } | 157 | } |
155 | 158 | ||
156 | void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx); | 159 | void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx); |
157 | 160 | ||
158 | /** | 161 | /** |
159 | * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready | 162 | * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready |
160 | * buffers | 163 | * buffers |
161 | */ | 164 | */ |
162 | static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx) | 165 | static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx) |
163 | { | 166 | { |
164 | return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx); | 167 | return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx); |
165 | } | 168 | } |
166 | 169 | ||
167 | /** | 170 | /** |
168 | * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of | 171 | * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of |
169 | * ready buffers | 172 | * ready buffers |
170 | */ | 173 | */ |
171 | static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx) | 174 | static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx) |
172 | { | 175 | { |
173 | return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx); | 176 | return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx); |
174 | } | 177 | } |
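Building on the next-buffer helpers above, a typical device_run peeks at the next ready source and destination buffers and hands their addresses to the hardware. A sketch assuming the dma-contig allocator (vb2_dma_contig_plane_dma_addr() is its DMA-address accessor; foo_hw_run() is hypothetical):

    #include <media/videobuf2-dma-contig.h>

    static void foo_device_run(void *priv)
    {
            struct foo_ctx *ctx = priv;
            struct vb2_buffer *src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
            struct vb2_buffer *dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);

            /* Hand plane 0 of each buffer to the hardware, start the job. */
            foo_hw_run(ctx, vb2_dma_contig_plane_dma_addr(src, 0),
                            vb2_dma_contig_plane_dma_addr(dst, 0));
    }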
175 | 178 | ||
176 | /** | 179 | /** |
177 | * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers | 180 | * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers |
178 | */ | 181 | */ |
179 | static inline | 182 | static inline |
180 | struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx) | 183 | struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx) |
181 | { | 184 | { |
182 | return &m2m_ctx->out_q_ctx.q; | 185 | return &m2m_ctx->out_q_ctx.q; |
183 | } | 186 | } |
184 | 187 | ||
185 | /** | 188 | /** |
186 | * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers | 189 | * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers |
187 | */ | 190 | */ |
188 | static inline | 191 | static inline |
189 | struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx) | 192 | struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx) |
190 | { | 193 | { |
191 | return &m2m_ctx->cap_q_ctx.q; | 194 | return &m2m_ctx->cap_q_ctx.q; |
192 | } | 195 | } |
193 | 196 | ||
194 | void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx); | 197 | void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx); |
195 | 198 | ||
196 | /** | 199 | /** |
197 | * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready | 200 | * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready |
198 | * buffers and return it | 201 | * buffers and return it |
199 | */ | 202 | */ |
200 | static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) | 203 | static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) |
201 | { | 204 | { |
202 | return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx); | 205 | return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx); |
203 | } | 206 | } |
204 | 207 | ||
205 | /** | 208 | /** |
206 | * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of | 209 | * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of |
207 | * ready buffers and return it | 210 | * ready buffers and return it |
208 | */ | 211 | */ |
209 | static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) | 212 | static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) |
210 | { | 213 | { |
211 | return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx); | 214 | return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx); |
212 | } | 215 | } |
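When the hardware signals completion, the driver takes the processed buffers off the ready lists, marks them done, and finishes the job so the core can schedule the next waiting context. A hedged sketch (struct foo_dev and the interrupt wiring are hypothetical):

    #include <linux/interrupt.h>

    static irqreturn_t foo_irq(int irq, void *data)
    {
            struct foo_dev *dev = data;     /* hypothetical device struct */
            struct foo_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
            struct vb2_buffer *src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
            struct vb2_buffer *dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

            v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
            v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);

            /* Lets the core pick the next queued context, if any. */
            v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
            return IRQ_HANDLED;
    }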
213 | 216 | ||
214 | #endif /* _MEDIA_V4L2_MEM2MEM_H */ | 217 | #endif /* _MEDIA_V4L2_MEM2MEM_H */ |
215 | 218 | ||
216 | 219 |
include/media/videobuf2-core.h
1 | /* | 1 | /* |
2 | * videobuf2-core.h - V4L2 driver helper framework | 2 | * videobuf2-core.h - V4L2 driver helper framework |
3 | * | 3 | * |
4 | * Copyright (C) 2010 Samsung Electronics | 4 | * Copyright (C) 2010 Samsung Electronics |
5 | * | 5 | * |
6 | * Author: Pawel Osciak <pawel@osciak.com> | 6 | * Author: Pawel Osciak <pawel@osciak.com> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
10 | * the Free Software Foundation. | 10 | * the Free Software Foundation. |
11 | */ | 11 | */ |
12 | #ifndef _MEDIA_VIDEOBUF2_CORE_H | 12 | #ifndef _MEDIA_VIDEOBUF2_CORE_H |
13 | #define _MEDIA_VIDEOBUF2_CORE_H | 13 | #define _MEDIA_VIDEOBUF2_CORE_H |
14 | 14 | ||
15 | #include <linux/mm_types.h> | 15 | #include <linux/mm_types.h> |
16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
17 | #include <linux/poll.h> | 17 | #include <linux/poll.h> |
18 | #include <linux/videodev2.h> | 18 | #include <linux/videodev2.h> |
19 | #include <linux/dma-buf.h> | 19 | #include <linux/dma-buf.h> |
20 | 20 | ||
21 | struct vb2_alloc_ctx; | 21 | struct vb2_alloc_ctx; |
22 | struct vb2_fileio_data; | 22 | struct vb2_fileio_data; |
23 | 23 | ||
24 | /** | 24 | /** |
25 | * struct vb2_mem_ops - memory handling/memory allocator operations | 25 | * struct vb2_mem_ops - memory handling/memory allocator operations |
26 | * @alloc: allocate video memory and, optionally, allocator private data, | 26 | * @alloc: allocate video memory and, optionally, allocator private data, |
27 | * return NULL on failure or a pointer to allocator private, | 27 | * return NULL on failure or a pointer to allocator private, |
28 | * per-buffer data on success; the returned private structure | 28 | * per-buffer data on success; the returned private structure |
29 | * will then be passed as buf_priv argument to other ops in this | 29 | * will then be passed as buf_priv argument to other ops in this |
30 | * structure | 30 | * structure |
31 | * @put: inform the allocator that the buffer will no longer be used; | 31 | * @put: inform the allocator that the buffer will no longer be used; |
32 | * usually will result in the allocator freeing the buffer (if | 32 | * usually will result in the allocator freeing the buffer (if |
33 | * no other users of this buffer are present); the buf_priv | 33 | * no other users of this buffer are present); the buf_priv |
34 | * argument is the allocator private per-buffer structure | 34 | * argument is the allocator private per-buffer structure |
35 | * previously returned from the alloc callback | 35 | * previously returned from the alloc callback |
36 | * @get_userptr: acquire userspace memory for a hardware operation; used for | 36 | * @get_userptr: acquire userspace memory for a hardware operation; used for |
37 | * USERPTR memory types; vaddr is the address passed to the | 37 | * USERPTR memory types; vaddr is the address passed to the |
38 | * videobuf layer when queuing a video buffer of USERPTR type; | 38 | * videobuf layer when queuing a video buffer of USERPTR type; |
39 | * should return an allocator private per-buffer structure | 39 | * should return an allocator private per-buffer structure |
40 | * associated with the buffer on success, NULL on failure; | 40 | * associated with the buffer on success, NULL on failure; |
41 | * the returned private structure will then be passed as buf_priv | 41 | * the returned private structure will then be passed as buf_priv |
42 | * argument to other ops in this structure | 42 | * argument to other ops in this structure |
43 | * @put_userptr: inform the allocator that a USERPTR buffer will no longer | 43 | * @put_userptr: inform the allocator that a USERPTR buffer will no longer |
44 | * be used | 44 | * be used |
45 | * @attach_dmabuf: attach a shared struct dma_buf for a hardware operation; | 45 | * @attach_dmabuf: attach a shared struct dma_buf for a hardware operation; |
46 | * used for DMABUF memory types; alloc_ctx is the alloc context, | 46 | * used for DMABUF memory types; alloc_ctx is the alloc context, |
47 | * dbuf is the shared dma_buf; returns NULL on failure or the | 47 | * dbuf is the shared dma_buf; returns NULL on failure or the |
48 | * allocator private per-buffer structure on success; | 48 | * allocator private per-buffer structure on success; |
49 | * this needs to be used for further accesses to the buffer | 49 | * this needs to be used for further accesses to the buffer |
50 | * @detach_dmabuf: inform the exporter of the buffer that the current DMABUF | 50 | * @detach_dmabuf: inform the exporter of the buffer that the current DMABUF |
51 | * buffer is no longer used; the buf_priv argument is the | 51 | * buffer is no longer used; the buf_priv argument is the |
52 | * allocator private per-buffer structure previously returned | 52 | * allocator private per-buffer structure previously returned |
53 | * from the attach_dmabuf callback | 53 | * from the attach_dmabuf callback |
54 | * @map_dmabuf: request for access to the dmabuf from allocator; the allocator | 54 | * @map_dmabuf: request for access to the dmabuf from allocator; the allocator |
55 | * of dmabuf is informed that this driver is going to use the | 55 | * of dmabuf is informed that this driver is going to use the |
56 | * dmabuf | 56 | * dmabuf |
57 | * @unmap_dmabuf: releases access control to the dmabuf - allocator is notified | 57 | * @unmap_dmabuf: releases access control to the dmabuf - allocator is notified |
58 | * that this driver is done using the dmabuf for now | 58 | * that this driver is done using the dmabuf for now |
59 | * @prepare: called every time the buffer is passed from userspace to the | 59 | * @prepare: called every time the buffer is passed from userspace to the |
60 | * driver, useful for cache synchronisation, optional | 60 | * driver, useful for cache synchronisation, optional |
61 | * @finish: called every time the buffer is passed back from the driver | 61 | * @finish: called every time the buffer is passed back from the driver |
62 | * to the userspace, also optional | 62 | * to the userspace, also optional |
63 | * @vaddr: return a kernel virtual address to a given memory buffer | 63 | * @vaddr: return a kernel virtual address to a given memory buffer |
64 | * associated with the passed private structure or NULL if no | 64 | * associated with the passed private structure or NULL if no |
65 | * such mapping exists | 65 | * such mapping exists |
66 | * @cookie: return allocator specific cookie for a given memory buffer | 66 | * @cookie: return allocator specific cookie for a given memory buffer |
67 | * associated with the passed private structure or NULL if not | 67 | * associated with the passed private structure or NULL if not |
68 | * available | 68 | * available |
69 | * @num_users: return the current number of users of a memory buffer; | 69 | * @num_users: return the current number of users of a memory buffer; |
70 | * return 1 if the videobuf layer (or actually the driver using | 70 | * return 1 if the videobuf layer (or actually the driver using |
71 | * it) is the only user | 71 | * it) is the only user |
72 | * @mmap: setup a userspace mapping for a given memory buffer under | 72 | * @mmap: setup a userspace mapping for a given memory buffer under |
73 | * the provided virtual memory region | 73 | * the provided virtual memory region |
74 | * | 74 | * |
75 | * Required ops for USERPTR types: get_userptr, put_userptr. | 75 | * Required ops for USERPTR types: get_userptr, put_userptr. |
76 | * Required ops for MMAP types: alloc, put, num_users, mmap. | 76 | * Required ops for MMAP types: alloc, put, num_users, mmap. |
77 | * Required ops for read/write access types: alloc, put, num_users, vaddr | 77 | * Required ops for read/write access types: alloc, put, num_users, vaddr |
78 | * Required ops for DMABUF types: attach_dmabuf, detach_dmabuf, map_dmabuf, | 78 | * Required ops for DMABUF types: attach_dmabuf, detach_dmabuf, map_dmabuf, |
79 | * unmap_dmabuf. | 79 | * unmap_dmabuf. |
80 | */ | 80 | */ |
81 | struct vb2_mem_ops { | 81 | struct vb2_mem_ops { |
82 | void *(*alloc)(void *alloc_ctx, unsigned long size); | 82 | void *(*alloc)(void *alloc_ctx, unsigned long size); |
83 | void (*put)(void *buf_priv); | 83 | void (*put)(void *buf_priv); |
84 | struct dma_buf *(*get_dmabuf)(void *buf_priv); | ||
84 | 85 | ||
85 | void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr, | 86 | void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr, |
86 | unsigned long size, int write); | 87 | unsigned long size, int write); |
87 | void (*put_userptr)(void *buf_priv); | 88 | void (*put_userptr)(void *buf_priv); |
88 | 89 | ||
89 | void (*prepare)(void *buf_priv); | 90 | void (*prepare)(void *buf_priv); |
90 | void (*finish)(void *buf_priv); | 91 | void (*finish)(void *buf_priv); |
91 | 92 | ||
92 | void *(*attach_dmabuf)(void *alloc_ctx, struct dma_buf *dbuf, | 93 | void *(*attach_dmabuf)(void *alloc_ctx, struct dma_buf *dbuf, |
93 | unsigned long size, int write); | 94 | unsigned long size, int write); |
94 | void (*detach_dmabuf)(void *buf_priv); | 95 | void (*detach_dmabuf)(void *buf_priv); |
95 | int (*map_dmabuf)(void *buf_priv); | 96 | int (*map_dmabuf)(void *buf_priv); |
96 | void (*unmap_dmabuf)(void *buf_priv); | 97 | void (*unmap_dmabuf)(void *buf_priv); |
97 | 98 | ||
98 | void *(*vaddr)(void *buf_priv); | 99 | void *(*vaddr)(void *buf_priv); |
99 | void *(*cookie)(void *buf_priv); | 100 | void *(*cookie)(void *buf_priv); |
100 | 101 | ||
101 | unsigned int (*num_users)(void *buf_priv); | 102 | unsigned int (*num_users)(void *buf_priv); |
102 | 103 | ||
103 | int (*mmap)(void *buf_priv, struct vm_area_struct *vma); | 104 | int (*mmap)(void *buf_priv, struct vm_area_struct *vma); |
104 | }; | 105 | }; |
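Drivers rarely implement vb2_mem_ops themselves; they normally point mem_ops at a stock allocator such as vb2_dma_contig_memops. The new get_dmabuf op is what makes MMAP buffers exportable: roughly, the export path asks the allocator to wrap a plane in a struct dma_buf and then installs it as a file descriptor. A rough sketch of that step, error handling trimmed:

    #include <linux/dma-buf.h>
    #include <linux/err.h>
    #include <linux/fcntl.h>

    static int sketch_export_plane(struct vb2_queue *q, struct vb2_buffer *vb,
                                   unsigned int plane)
    {
            struct dma_buf *dbuf;

            /* Ask the allocator to wrap the plane in a dma_buf... */
            dbuf = q->mem_ops->get_dmabuf(vb->planes[plane].mem_priv);
            if (IS_ERR_OR_NULL(dbuf))
                    return -EINVAL;

            /* ...and hand it to userspace as a file descriptor. */
            return dma_buf_fd(dbuf, O_CLOEXEC);
    }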
105 | 106 | ||
106 | struct vb2_plane { | 107 | struct vb2_plane { |
107 | void *mem_priv; | 108 | void *mem_priv; |
108 | struct dma_buf *dbuf; | 109 | struct dma_buf *dbuf; |
109 | unsigned int dbuf_mapped; | 110 | unsigned int dbuf_mapped; |
110 | }; | 111 | }; |
111 | 112 | ||
112 | /** | 113 | /** |
113 | * enum vb2_io_modes - queue access methods | 114 | * enum vb2_io_modes - queue access methods |
114 | * @VB2_MMAP: driver supports MMAP with streaming API | 115 | * @VB2_MMAP: driver supports MMAP with streaming API |
115 | * @VB2_USERPTR: driver supports USERPTR with streaming API | 116 | * @VB2_USERPTR: driver supports USERPTR with streaming API |
116 | * @VB2_READ: driver supports read() style access | 117 | * @VB2_READ: driver supports read() style access |
117 | * @VB2_WRITE: driver supports write() style access | 118 | * @VB2_WRITE: driver supports write() style access |
118 | * @VB2_DMABUF: driver supports DMABUF with streaming API | 119 | * @VB2_DMABUF: driver supports DMABUF with streaming API |
119 | */ | 120 | */ |
120 | enum vb2_io_modes { | 121 | enum vb2_io_modes { |
121 | VB2_MMAP = (1 << 0), | 122 | VB2_MMAP = (1 << 0), |
122 | VB2_USERPTR = (1 << 1), | 123 | VB2_USERPTR = (1 << 1), |
123 | VB2_READ = (1 << 2), | 124 | VB2_READ = (1 << 2), |
124 | VB2_WRITE = (1 << 3), | 125 | VB2_WRITE = (1 << 3), |
125 | VB2_DMABUF = (1 << 4), | 126 | VB2_DMABUF = (1 << 4), |
126 | }; | 127 | }; |
127 | 128 | ||
128 | /** | 129 | /** |
129 | * enum vb2_fileio_flags - flags for selecting a mode of the file io emulator, | 130 | * enum vb2_fileio_flags - flags for selecting a mode of the file io emulator, |
130 | * by default the 'streaming' style is used by the file io emulator | 131 | * by default the 'streaming' style is used by the file io emulator |
131 | * @VB2_FILEIO_READ_ONCE: report EOF after reading the first buffer | 132 | * @VB2_FILEIO_READ_ONCE: report EOF after reading the first buffer |
132 | * @VB2_FILEIO_WRITE_IMMEDIATELY: queue buffer after each write() call | 133 | * @VB2_FILEIO_WRITE_IMMEDIATELY: queue buffer after each write() call |
133 | */ | 134 | */ |
134 | enum vb2_fileio_flags { | 135 | enum vb2_fileio_flags { |
135 | VB2_FILEIO_READ_ONCE = (1 << 0), | 136 | VB2_FILEIO_READ_ONCE = (1 << 0), |
136 | VB2_FILEIO_WRITE_IMMEDIATELY = (1 << 1), | 137 | VB2_FILEIO_WRITE_IMMEDIATELY = (1 << 1), |
137 | }; | 138 | }; |
138 | 139 | ||
139 | /** | 140 | /** |
140 | * enum vb2_buffer_state - current video buffer state | 141 | * enum vb2_buffer_state - current video buffer state |
141 | * @VB2_BUF_STATE_DEQUEUED: buffer under userspace control | 142 | * @VB2_BUF_STATE_DEQUEUED: buffer under userspace control |
142 | * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver | 143 | * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver |
143 | * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver | 144 | * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver |
144 | * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used | 145 | * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used |
145 | * in a hardware operation | 146 | * in a hardware operation |
146 | * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but | 147 | * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but |
147 | * not yet dequeued to userspace | 148 | * not yet dequeued to userspace |
148 | * @VB2_BUF_STATE_ERROR: same as above, but the operation on the buffer | 149 | * @VB2_BUF_STATE_ERROR: same as above, but the operation on the buffer |
149 | * has ended with an error, which will be reported | 150 | * has ended with an error, which will be reported |
150 | * to the userspace when it is dequeued | 151 | * to the userspace when it is dequeued |
151 | */ | 152 | */ |
152 | enum vb2_buffer_state { | 153 | enum vb2_buffer_state { |
153 | VB2_BUF_STATE_DEQUEUED, | 154 | VB2_BUF_STATE_DEQUEUED, |
154 | VB2_BUF_STATE_PREPARED, | 155 | VB2_BUF_STATE_PREPARED, |
155 | VB2_BUF_STATE_QUEUED, | 156 | VB2_BUF_STATE_QUEUED, |
156 | VB2_BUF_STATE_ACTIVE, | 157 | VB2_BUF_STATE_ACTIVE, |
157 | VB2_BUF_STATE_DONE, | 158 | VB2_BUF_STATE_DONE, |
158 | VB2_BUF_STATE_ERROR, | 159 | VB2_BUF_STATE_ERROR, |
159 | }; | 160 | }; |
160 | 161 | ||
161 | struct vb2_queue; | 162 | struct vb2_queue; |
162 | 163 | ||
163 | /** | 164 | /** |
164 | * struct vb2_buffer - represents a video buffer | 165 | * struct vb2_buffer - represents a video buffer |
165 | * @v4l2_buf: struct v4l2_buffer associated with this buffer; can | 166 | * @v4l2_buf: struct v4l2_buffer associated with this buffer; can |
166 | * be read by the driver and relevant entries can be | 167 | * be read by the driver and relevant entries can be |
167 | * changed by the driver in case of CAPTURE types | 168 | * changed by the driver in case of CAPTURE types |
168 | * (such as timestamp) | 169 | * (such as timestamp) |
169 | * @v4l2_planes: struct v4l2_planes associated with this buffer; can | 170 | * @v4l2_planes: struct v4l2_planes associated with this buffer; can |
170 | * be read by the driver and relevant entries can be | 171 | * be read by the driver and relevant entries can be |
171 | * changed by the driver in case of CAPTURE types | 172 | * changed by the driver in case of CAPTURE types |
172 | * (such as bytesused); NOTE that even for single-planar | 173 | * (such as bytesused); NOTE that even for single-planar |
173 | * types, the v4l2_planes[0] struct should be used | 174 | * types, the v4l2_planes[0] struct should be used |
174 | * instead of v4l2_buf for filling bytesused - drivers | 175 | * instead of v4l2_buf for filling bytesused - drivers |
175 | * should use the vb2_set_plane_payload() function for that | 176 | * should use the vb2_set_plane_payload() function for that |
176 | * @vb2_queue: the queue to which this buffer belongs | 177 | * @vb2_queue: the queue to which this buffer belongs |
177 | * @num_planes: number of planes in the buffer | 178 | * @num_planes: number of planes in the buffer |
179 | * @state: current buffer state; do not change | 180 | * @state: current buffer state; do not change |
180 | * @queued_entry: entry on the queued buffers list, which holds all | 181 | * @queued_entry: entry on the queued buffers list, which holds all |
181 | * buffers queued from userspace | 182 | * buffers queued from userspace |
182 | * @done_entry: entry on the list that stores all buffers ready to | 183 | * @done_entry: entry on the list that stores all buffers ready to |
183 | * be dequeued to userspace | 184 | * be dequeued to userspace |
184 | * @planes: private per-plane information; do not change | 185 | * @planes: private per-plane information; do not change |
185 | */ | 186 | */ |
186 | struct vb2_buffer { | 187 | struct vb2_buffer { |
187 | struct v4l2_buffer v4l2_buf; | 188 | struct v4l2_buffer v4l2_buf; |
188 | struct v4l2_plane v4l2_planes[VIDEO_MAX_PLANES]; | 189 | struct v4l2_plane v4l2_planes[VIDEO_MAX_PLANES]; |
189 | 190 | ||
190 | struct vb2_queue *vb2_queue; | 191 | struct vb2_queue *vb2_queue; |
191 | 192 | ||
192 | unsigned int num_planes; | 193 | unsigned int num_planes; |
193 | 194 | ||
194 | /* Private: internal use only */ | 195 | /* Private: internal use only */ |
195 | enum vb2_buffer_state state; | 196 | enum vb2_buffer_state state; |
196 | 197 | ||
197 | struct list_head queued_entry; | 198 | struct list_head queued_entry; |
198 | struct list_head done_entry; | 199 | struct list_head done_entry; |
199 | 200 | ||
200 | struct vb2_plane planes[VIDEO_MAX_PLANES]; | 201 | struct vb2_plane planes[VIDEO_MAX_PLANES]; |
201 | }; | 202 | }; |
202 | 203 | ||
203 | /** | 204 | /** |
204 | * struct vb2_ops - driver-specific callbacks | 205 | * struct vb2_ops - driver-specific callbacks |
205 | * | 206 | * |
206 | * @queue_setup: called from VIDIOC_REQBUFS and VIDIOC_CREATE_BUFS | 207 | * @queue_setup: called from VIDIOC_REQBUFS and VIDIOC_CREATE_BUFS |
207 | * handlers before memory allocation, or, if | 208 | * handlers before memory allocation, or, if |
208 | * *num_planes != 0, after the allocation to verify a | 209 | * *num_planes != 0, after the allocation to verify a |
209 | * smaller number of buffers. Driver should return | 210 | * smaller number of buffers. Driver should return |
210 | * the required number of buffers in *num_buffers, the | 211 | * the required number of buffers in *num_buffers, the |
211 | * required number of planes per buffer in *num_planes; the | 212 | * required number of planes per buffer in *num_planes; the |
212 | * size of each plane should be set in the sizes[] array | 213 | * size of each plane should be set in the sizes[] array |
213 | * and optional per-plane allocator specific context in the | 214 | * and optional per-plane allocator specific context in the |
214 | * alloc_ctxs[] array. When called from VIDIOC_REQBUFS, | 215 | * alloc_ctxs[] array. When called from VIDIOC_REQBUFS, |
215 | * fmt == NULL, the driver has to use the currently | 216 | * fmt == NULL, the driver has to use the currently |
216 | * configured format and *num_buffers is the total number | 217 | * configured format and *num_buffers is the total number |
217 | * of buffers that are being allocated. When called from | 218 | * of buffers that are being allocated. When called from |
218 | * VIDIOC_CREATE_BUFS, fmt != NULL and it describes the | 219 | * VIDIOC_CREATE_BUFS, fmt != NULL and it describes the |
219 | * target frame format. In this case *num_buffers buffers are | 220 | * target frame format. In this case *num_buffers buffers are |
220 | * allocated in addition to q->num_buffers. | 221 | * allocated in addition to q->num_buffers. |
221 | * @wait_prepare: release any locks taken while calling vb2 functions; | 222 | * @wait_prepare: release any locks taken while calling vb2 functions; |
222 | * it is called before an ioctl needs to wait for a new | 223 | * it is called before an ioctl needs to wait for a new |
223 | * buffer to arrive; required to avoid a deadlock in | 224 | * buffer to arrive; required to avoid a deadlock in |
224 | * blocking access type | 225 | * blocking access type |
225 | * @wait_finish: reacquire all locks released in the previous callback; | 226 | * @wait_finish: reacquire all locks released in the previous callback; |
226 | * required to continue operation after sleeping while | 227 | * required to continue operation after sleeping while |
227 | * waiting for a new buffer to arrive | 228 | * waiting for a new buffer to arrive |
228 | * @buf_init: called once after allocating a buffer (in MMAP case) | 229 | * @buf_init: called once after allocating a buffer (in MMAP case) |
229 | * or after acquiring a new USERPTR buffer; drivers may | 230 | * or after acquiring a new USERPTR buffer; drivers may |
230 | * perform additional buffer-related initialization; | 231 | * perform additional buffer-related initialization; |
231 | * initialization failure (return != 0) will prevent | 232 | * initialization failure (return != 0) will prevent |
232 | * queue setup from completing successfully; optional | 233 | * queue setup from completing successfully; optional |
233 | * @buf_prepare: called every time the buffer is queued from userspace | 234 | * @buf_prepare: called every time the buffer is queued from userspace |
234 | * and from the VIDIOC_PREPARE_BUF ioctl; drivers may | 235 | * and from the VIDIOC_PREPARE_BUF ioctl; drivers may |
235 | * perform any initialization required before each hardware | 236 | * perform any initialization required before each hardware |
236 | * operation in this callback; if an error is returned, the | 237 | * operation in this callback; if an error is returned, the |
237 | * buffer will not be queued in driver; optional | 238 | * buffer will not be queued in driver; optional |
238 | * @buf_finish: called before every dequeue of the buffer back to | 239 | * @buf_finish: called before every dequeue of the buffer back to |
239 | * userspace; drivers may perform any operations required | 240 | * userspace; drivers may perform any operations required |
240 | * before userspace accesses the buffer; optional | 241 | * before userspace accesses the buffer; optional |
241 | * @buf_cleanup: called once before the buffer is freed; drivers may | 242 | * @buf_cleanup: called once before the buffer is freed; drivers may |
242 | * perform any additional cleanup; optional | 243 | * perform any additional cleanup; optional |
243 | * @start_streaming: called once to enter 'streaming' state; the driver may | 244 | * @start_streaming: called once to enter 'streaming' state; the driver may |
244 | * receive buffers with @buf_queue callback before | 245 | * receive buffers with @buf_queue callback before |
245 | * @start_streaming is called; the driver gets the number | 246 | * @start_streaming is called; the driver gets the number |
246 | * of already queued buffers in count parameter; driver | 247 | * of already queued buffers in count parameter; driver |
247 | * can return an error if hardware fails or not enough | 248 | * can return an error if hardware fails or not enough |
248 | * buffers has been queued, in such case all buffers that | 249 | * buffers has been queued, in such case all buffers that |
249 | * have been already given by the @buf_queue callback are | 250 | * have been already given by the @buf_queue callback are |
250 | * invalidated. | 251 | * invalidated. |
251 | * @stop_streaming: called when 'streaming' state must be disabled; driver | 252 | * @stop_streaming: called when 'streaming' state must be disabled; driver |
252 | * should stop any DMA transactions or wait until they | 253 | * should stop any DMA transactions or wait until they |
253 | * finish and give back all buffers it got from buf_queue() | 254 | * finish and give back all buffers it got from buf_queue() |
254 | * callback; may use vb2_wait_for_all_buffers() function | 255 | * callback; may use vb2_wait_for_all_buffers() function |
255 | * @buf_queue: passes buffer vb to the driver; driver may start | 256 | * @buf_queue: passes buffer vb to the driver; driver may start |
256 | * hardware operation on this buffer; driver should give | 257 | * hardware operation on this buffer; driver should give |
257 | * the buffer back by calling vb2_buffer_done() function; | 258 | * the buffer back by calling vb2_buffer_done() function; |
258 | * it is always called after calling STREAMON ioctl; | 259 | * it is always called after calling STREAMON ioctl; |
259 | * might be called before start_streaming callback if user | 260 | * might be called before start_streaming callback if user |
260 | * pre-queued buffers before calling STREAMON | 261 | * pre-queued buffers before calling STREAMON |
261 | */ | 262 | */ |
262 | struct vb2_ops { | 263 | struct vb2_ops { |
263 | int (*queue_setup)(struct vb2_queue *q, const struct v4l2_format *fmt, | 264 | int (*queue_setup)(struct vb2_queue *q, const struct v4l2_format *fmt, |
264 | unsigned int *num_buffers, unsigned int *num_planes, | 265 | unsigned int *num_buffers, unsigned int *num_planes, |
265 | unsigned int sizes[], void *alloc_ctxs[]); | 266 | unsigned int sizes[], void *alloc_ctxs[]); |
266 | 267 | ||
267 | void (*wait_prepare)(struct vb2_queue *q); | 268 | void (*wait_prepare)(struct vb2_queue *q); |
268 | void (*wait_finish)(struct vb2_queue *q); | 269 | void (*wait_finish)(struct vb2_queue *q); |
269 | 270 | ||
270 | int (*buf_init)(struct vb2_buffer *vb); | 271 | int (*buf_init)(struct vb2_buffer *vb); |
271 | int (*buf_prepare)(struct vb2_buffer *vb); | 272 | int (*buf_prepare)(struct vb2_buffer *vb); |
272 | int (*buf_finish)(struct vb2_buffer *vb); | 273 | int (*buf_finish)(struct vb2_buffer *vb); |
273 | void (*buf_cleanup)(struct vb2_buffer *vb); | 274 | void (*buf_cleanup)(struct vb2_buffer *vb); |
274 | 275 | ||
275 | int (*start_streaming)(struct vb2_queue *q, unsigned int count); | 276 | int (*start_streaming)(struct vb2_queue *q, unsigned int count); |
276 | int (*stop_streaming)(struct vb2_queue *q); | 277 | int (*stop_streaming)(struct vb2_queue *q); |
277 | 278 | ||
278 | void (*buf_queue)(struct vb2_buffer *vb); | 279 | void (*buf_queue)(struct vb2_buffer *vb); |
279 | }; | 280 | }; |
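As a concrete example of the first callback, a hypothetical single-planar queue_setup only has to report one plane of the currently configured size plus its allocator context (ctx->sizeimage and ctx->alloc_ctx are assumed driver state):

    static int foo_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
                               unsigned int *num_buffers, unsigned int *num_planes,
                               unsigned int sizes[], void *alloc_ctxs[])
    {
            struct foo_ctx *ctx = vb2_get_drv_priv(q);

            *num_planes = 1;
            sizes[0] = ctx->sizeimage;      /* current format's image size */
            alloc_ctxs[0] = ctx->alloc_ctx; /* e.g. a dma-contig context */
            return 0;
    }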
280 | 281 | ||
281 | struct v4l2_fh; | 282 | struct v4l2_fh; |
282 | 283 | ||
283 | /** | 284 | /** |
284 | * struct vb2_queue - a videobuf queue | 285 | * struct vb2_queue - a videobuf queue |
285 | * | 286 | * |
286 | * @type: queue type (see V4L2_BUF_TYPE_* in linux/videodev2.h) | 287 | * @type: queue type (see V4L2_BUF_TYPE_* in linux/videodev2.h) |
287 | * @io_modes: supported io methods (see vb2_io_modes enum) | 288 | * @io_modes: supported io methods (see vb2_io_modes enum) |
288 | * @io_flags: additional io flags (see vb2_fileio_flags enum) | 289 | * @io_flags: additional io flags (see vb2_fileio_flags enum) |
289 | * @lock: pointer to a mutex that protects the vb2_queue struct. The | 290 | * @lock: pointer to a mutex that protects the vb2_queue struct. The |
290 | * driver can set this to a mutex to let the v4l2 core serialize | 291 | * driver can set this to a mutex to let the v4l2 core serialize |
291 | * the queuing ioctls. If the driver wants to handle locking | 292 | * the queuing ioctls. If the driver wants to handle locking |
292 | * itself, then this should be set to NULL. This lock is not used | 293 | * itself, then this should be set to NULL. This lock is not used |
293 | * by the videobuf2 core API. | 294 | * by the videobuf2 core API. |
294 | * @owner: The filehandle that 'owns' the buffers, i.e. the filehandle | 295 | * @owner: The filehandle that 'owns' the buffers, i.e. the filehandle |
295 | * that called reqbufs, create_buffers or started fileio. | 296 | * that called reqbufs, create_buffers or started fileio. |
296 | * This field is not used by the videobuf2 core API, but it allows | 297 | * This field is not used by the videobuf2 core API, but it allows |
297 | * drivers to easily associate an owner filehandle with the queue. | 298 | * drivers to easily associate an owner filehandle with the queue. |
298 | * @ops: driver-specific callbacks | 299 | * @ops: driver-specific callbacks |
299 | * @mem_ops: memory allocator specific callbacks | 300 | * @mem_ops: memory allocator specific callbacks |
300 | * @drv_priv: driver private data | 301 | * @drv_priv: driver private data |
301 | * @buf_struct_size: size of the driver-specific buffer structure; | 302 | * @buf_struct_size: size of the driver-specific buffer structure; |
302 | * "0" indicates the driver doesn't want to use a custom buffer | 303 | * "0" indicates the driver doesn't want to use a custom buffer |
303 | * structure type, so sizeof(struct vb2_buffer) will be used | 304 | * structure type, so sizeof(struct vb2_buffer) will be used |
304 | * | 305 | * |
305 | * @memory: current memory type used | 306 | * @memory: current memory type used |
306 | * @bufs: videobuf buffer structures | 307 | * @bufs: videobuf buffer structures |
307 | * @num_buffers: number of allocated/used buffers | 308 | * @num_buffers: number of allocated/used buffers |
308 | * @queued_list: list of buffers currently queued from userspace | 309 | * @queued_list: list of buffers currently queued from userspace |
309 | * @queued_count: number of buffers owned by the driver | 310 | * @queued_count: number of buffers owned by the driver |
310 | * @done_list: list of buffers ready to be dequeued to userspace | 311 | * @done_list: list of buffers ready to be dequeued to userspace |
311 | * @done_lock: lock to protect done_list list | 312 | * @done_lock: lock to protect done_list list |
312 | * @done_wq: waitqueue for processes waiting for buffers ready to be dequeued | 313 | * @done_wq: waitqueue for processes waiting for buffers ready to be dequeued |
313 | * @alloc_ctx: memory type/allocator-specific contexts for each plane | 314 | * @alloc_ctx: memory type/allocator-specific contexts for each plane |
314 | * @streaming: current streaming state | 315 | * @streaming: current streaming state |
315 | * @fileio: file io emulator internal data, used only if emulator is active | 316 | * @fileio: file io emulator internal data, used only if emulator is active |
316 | */ | 317 | */ |
317 | struct vb2_queue { | 318 | struct vb2_queue { |
318 | enum v4l2_buf_type type; | 319 | enum v4l2_buf_type type; |
319 | unsigned int io_modes; | 320 | unsigned int io_modes; |
320 | unsigned int io_flags; | 321 | unsigned int io_flags; |
321 | struct mutex *lock; | 322 | struct mutex *lock; |
322 | struct v4l2_fh *owner; | 323 | struct v4l2_fh *owner; |
323 | 324 | ||
324 | const struct vb2_ops *ops; | 325 | const struct vb2_ops *ops; |
325 | const struct vb2_mem_ops *mem_ops; | 326 | const struct vb2_mem_ops *mem_ops; |
326 | void *drv_priv; | 327 | void *drv_priv; |
327 | unsigned int buf_struct_size; | 328 | unsigned int buf_struct_size; |
328 | 329 | ||
329 | /* private: internal use only */ | 330 | /* private: internal use only */ |
330 | enum v4l2_memory memory; | 331 | enum v4l2_memory memory; |
331 | struct vb2_buffer *bufs[VIDEO_MAX_FRAME]; | 332 | struct vb2_buffer *bufs[VIDEO_MAX_FRAME]; |
332 | unsigned int num_buffers; | 333 | unsigned int num_buffers; |
333 | 334 | ||
334 | struct list_head queued_list; | 335 | struct list_head queued_list; |
335 | 336 | ||
336 | atomic_t queued_count; | 337 | atomic_t queued_count; |
337 | struct list_head done_list; | 338 | struct list_head done_list; |
338 | spinlock_t done_lock; | 339 | spinlock_t done_lock; |
339 | wait_queue_head_t done_wq; | 340 | wait_queue_head_t done_wq; |
340 | 341 | ||
341 | void *alloc_ctx[VIDEO_MAX_PLANES]; | 342 | void *alloc_ctx[VIDEO_MAX_PLANES]; |
342 | unsigned int plane_sizes[VIDEO_MAX_PLANES]; | 343 | unsigned int plane_sizes[VIDEO_MAX_PLANES]; |
343 | 344 | ||
344 | unsigned int streaming:1; | 345 | unsigned int streaming:1; |
345 | 346 | ||
346 | struct vb2_fileio_data *fileio; | 347 | struct vb2_fileio_data *fileio; |
347 | }; | 348 | }; |
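Putting the struct to use: a driver fills in the public fields and calls vb2_queue_init(). Note VB2_DMABUF in io_modes; with this patch, an MMAP queue whose allocator implements get_dmabuf can also export its buffers. A sketch with hypothetical foo_* names:

    #include <media/videobuf2-dma-contig.h>

    static int foo_init_queue(struct foo_dev *dev)
    {
            struct vb2_queue *q = &dev->queue;

            memset(q, 0, sizeof(*q));
            q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            q->io_modes = VB2_MMAP | VB2_DMABUF;
            q->drv_priv = dev;
            q->ops = &foo_vb2_ops;                  /* driver's vb2_ops table */
            q->mem_ops = &vb2_dma_contig_memops;    /* stock allocator */
            q->lock = &dev->lock;                   /* serialize ioctls */

            return vb2_queue_init(q);
    }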
348 | 349 | ||
349 | void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no); | 350 | void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no); |
350 | void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no); | 351 | void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no); |
351 | 352 | ||
352 | void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state); | 353 | void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state); |
353 | int vb2_wait_for_all_buffers(struct vb2_queue *q); | 354 | int vb2_wait_for_all_buffers(struct vb2_queue *q); |
354 | 355 | ||
355 | int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b); | 356 | int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b); |
356 | int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req); | 357 | int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req); |
357 | 358 | ||
358 | int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create); | 359 | int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create); |
359 | int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b); | 360 | int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b); |
360 | 361 | ||
361 | int __must_check vb2_queue_init(struct vb2_queue *q); | 362 | int __must_check vb2_queue_init(struct vb2_queue *q); |
362 | 363 | ||
363 | void vb2_queue_release(struct vb2_queue *q); | 364 | void vb2_queue_release(struct vb2_queue *q); |
364 | 365 | ||
365 | int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b); | 366 | int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b); |
367 | int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb); | ||
366 | int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking); | 368 | int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking); |
367 | 369 | ||
368 | int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type); | 370 | int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type); |
369 | int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type); | 371 | int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type); |
370 | 372 | ||
371 | int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma); | 373 | int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma); |
372 | #ifndef CONFIG_MMU | 374 | #ifndef CONFIG_MMU |
373 | unsigned long vb2_get_unmapped_area(struct vb2_queue *q, | 375 | unsigned long vb2_get_unmapped_area(struct vb2_queue *q, |
374 | unsigned long addr, | 376 | unsigned long addr, |
375 | unsigned long len, | 377 | unsigned long len, |
376 | unsigned long pgoff, | 378 | unsigned long pgoff, |
377 | unsigned long flags); | 379 | unsigned long flags); |
378 | #endif | 380 | #endif |
379 | unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait); | 381 | unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait); |
380 | size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, | 382 | size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, |
381 | loff_t *ppos, int nonblock); | 383 | loff_t *ppos, int nonblock); |
382 | size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count, | 384 | size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count, |
383 | loff_t *ppos, int nonblock); | 385 | loff_t *ppos, int nonblock); |
384 | 386 | ||
385 | /** | 387 | /** |
386 | * vb2_is_streaming() - return streaming status of the queue | 388 | * vb2_is_streaming() - return streaming status of the queue |
387 | * @q: videobuf queue | 389 | * @q: videobuf queue |
388 | */ | 390 | */ |
389 | static inline bool vb2_is_streaming(struct vb2_queue *q) | 391 | static inline bool vb2_is_streaming(struct vb2_queue *q) |
390 | { | 392 | { |
391 | return q->streaming; | 393 | return q->streaming; |
392 | } | 394 | } |
393 | 395 | ||
394 | /** | 396 | /** |
395 | * vb2_is_busy() - return busy status of the queue | 397 | * vb2_is_busy() - return busy status of the queue |
396 | * @q: videobuf queue | 398 | * @q: videobuf queue |
397 | * | 399 | * |
398 | * This function checks if queue has any buffers allocated. | 400 | * This function checks if queue has any buffers allocated. |
399 | */ | 401 | */ |
400 | static inline bool vb2_is_busy(struct vb2_queue *q) | 402 | static inline bool vb2_is_busy(struct vb2_queue *q) |
401 | { | 403 | { |
402 | return (q->num_buffers > 0); | 404 | return (q->num_buffers > 0); |
403 | } | 405 | } |
404 | 406 | ||
405 | /** | 407 | /** |
406 | * vb2_get_drv_priv() - return driver private data associated with the queue | 408 | * vb2_get_drv_priv() - return driver private data associated with the queue |
407 | * @q: videobuf queue | 409 | * @q: videobuf queue |
408 | */ | 410 | */ |
409 | static inline void *vb2_get_drv_priv(struct vb2_queue *q) | 411 | static inline void *vb2_get_drv_priv(struct vb2_queue *q) |
410 | { | 412 | { |
411 | return q->drv_priv; | 413 | return q->drv_priv; |
412 | } | 414 | } |
413 | 415 | ||
414 | /** | 416 | /** |
415 | * vb2_set_plane_payload() - set bytesused for the plane plane_no | 417 | * vb2_set_plane_payload() - set bytesused for the plane plane_no |
416 | * @vb: buffer for which plane payload should be set | 418 | * @vb: buffer for which plane payload should be set |
417 | * @plane_no: plane number for which payload should be set | 419 | * @plane_no: plane number for which payload should be set |
418 | * @size: payload in bytes | 420 | * @size: payload in bytes |
419 | */ | 421 | */ |
420 | static inline void vb2_set_plane_payload(struct vb2_buffer *vb, | 422 | static inline void vb2_set_plane_payload(struct vb2_buffer *vb, |
421 | unsigned int plane_no, unsigned long size) | 423 | unsigned int plane_no, unsigned long size) |
422 | { | 424 | { |
423 | if (plane_no < vb->num_planes) | 425 | if (plane_no < vb->num_planes) |
424 | vb->v4l2_planes[plane_no].bytesused = size; | 426 | vb->v4l2_planes[plane_no].bytesused = size; |
425 | } | 427 | } |
426 | 428 | ||
427 | /** | 429 | /** |
428 | * vb2_get_plane_payload() - get bytesused for the plane plane_no | 430 | * vb2_get_plane_payload() - get bytesused for the plane plane_no |
429 | * @vb: buffer for which plane payload should be returned | 431 | * @vb: buffer for which plane payload should be returned |
430 | * @plane_no: plane number for which payload should be returned | 432 | * @plane_no: plane number for which payload should be returned |
432 | */ | 434 | */ |
433 | static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb, | 435 | static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb, |
434 | unsigned int plane_no) | 436 | unsigned int plane_no) |
435 | { | 437 | { |
436 | if (plane_no < vb->num_planes) | 438 | if (plane_no < vb->num_planes) |
437 | return vb->v4l2_planes[plane_no].bytesused; | 439 | return vb->v4l2_planes[plane_no].bytesused; |
438 | return 0; | 440 | return 0; |
439 | } | 441 | } |
440 | 442 | ||
441 | /** | 443 | /** |
442 | * vb2_plane_size() - return plane size in bytes | 444 | * vb2_plane_size() - return plane size in bytes |
443 | * @vb: buffer for which plane size should be returned | 445 | * @vb: buffer for which plane size should be returned |
444 | * @plane_no: plane number for which size should be returned | 446 | * @plane_no: plane number for which size should be returned |
445 | */ | 447 | */ |
446 | static inline unsigned long | 448 | static inline unsigned long |
447 | vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no) | 449 | vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no) |
448 | { | 450 | { |
449 | if (plane_no < vb->num_planes) | 451 | if (plane_no < vb->num_planes) |
450 | return vb->v4l2_planes[plane_no].length; | 452 | return vb->v4l2_planes[plane_no].length; |
451 | return 0; | 453 | return 0; |
452 | } | 454 | } |
453 | 455 | ||
454 | /* | 456 | /* |
455 | * The following functions are not part of the vb2 core API, but are simple | 457 | * The following functions are not part of the vb2 core API, but are simple |
456 | * helper functions that you can use in your struct v4l2_file_operations, | 458 | * helper functions that you can use in your struct v4l2_file_operations, |
457 | * struct v4l2_ioctl_ops and struct vb2_ops. They will serialize if vb2_queue->lock | 459 | * struct v4l2_ioctl_ops and struct vb2_ops. They will serialize if vb2_queue->lock |
458 | * or video_device->lock is set, and they will set and test vb2_queue->owner | 460 | * or video_device->lock is set, and they will set and test vb2_queue->owner |
459 | * to check if the calling filehandle is permitted to do the queuing operation. | 461 | * to check if the calling filehandle is permitted to do the queuing operation. |
460 | */ | 462 | */ |
461 | 463 | ||
462 | /* struct v4l2_ioctl_ops helpers */ | 464 | /* struct v4l2_ioctl_ops helpers */ |
463 | 465 | ||
464 | int vb2_ioctl_reqbufs(struct file *file, void *priv, | 466 | int vb2_ioctl_reqbufs(struct file *file, void *priv, |
465 | struct v4l2_requestbuffers *p); | 467 | struct v4l2_requestbuffers *p); |
466 | int vb2_ioctl_create_bufs(struct file *file, void *priv, | 468 | int vb2_ioctl_create_bufs(struct file *file, void *priv, |
467 | struct v4l2_create_buffers *p); | 469 | struct v4l2_create_buffers *p); |
468 | int vb2_ioctl_prepare_buf(struct file *file, void *priv, | 470 | int vb2_ioctl_prepare_buf(struct file *file, void *priv, |
469 | struct v4l2_buffer *p); | 471 | struct v4l2_buffer *p); |
470 | int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p); | 472 | int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p); |
471 | int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p); | 473 | int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p); |
472 | int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p); | 474 | int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p); |
473 | int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i); | 475 | int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i); |
474 | int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i); | 476 | int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i); |
477 | int vb2_ioctl_expbuf(struct file *file, void *priv, | ||
478 | struct v4l2_exportbuffer *p); | ||
475 | 479 | ||
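With these helpers a driver's ioctl table can be filled almost entirely with stock implementations; the new vb2_ioctl_expbuf() slots in beside the others, assuming the vidioc_expbuf hook that the rest of this patch series adds to struct v4l2_ioctl_ops. A sketch:

    static const struct v4l2_ioctl_ops foo_ioctl_ops = {
            .vidioc_reqbufs         = vb2_ioctl_reqbufs,
            .vidioc_create_bufs     = vb2_ioctl_create_bufs,
            .vidioc_querybuf        = vb2_ioctl_querybuf,
            .vidioc_qbuf            = vb2_ioctl_qbuf,
            .vidioc_dqbuf           = vb2_ioctl_dqbuf,
            .vidioc_expbuf          = vb2_ioctl_expbuf,
            .vidioc_streamon        = vb2_ioctl_streamon,
            .vidioc_streamoff       = vb2_ioctl_streamoff,
    };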
476 | /* struct v4l2_file_operations helpers */ | 480 | /* struct v4l2_file_operations helpers */ |
477 | 481 | ||
478 | int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma); | 482 | int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma); |
479 | int vb2_fop_release(struct file *file); | 483 | int vb2_fop_release(struct file *file); |
480 | ssize_t vb2_fop_write(struct file *file, char __user *buf, | 484 | ssize_t vb2_fop_write(struct file *file, char __user *buf, |
481 | size_t count, loff_t *ppos); | 485 | size_t count, loff_t *ppos); |
482 | ssize_t vb2_fop_read(struct file *file, char __user *buf, | 486 | ssize_t vb2_fop_read(struct file *file, char __user *buf, |
483 | size_t count, loff_t *ppos); | 487 | size_t count, loff_t *ppos); |
484 | unsigned int vb2_fop_poll(struct file *file, poll_table *wait); | 488 | unsigned int vb2_fop_poll(struct file *file, poll_table *wait); |
485 | #ifndef CONFIG_MMU | 489 | #ifndef CONFIG_MMU |
486 | unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, | 490 | unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, |
487 | unsigned long len, unsigned long pgoff, unsigned long flags); | 491 | unsigned long len, unsigned long pgoff, unsigned long flags); |
488 | #endif | 492 | #endif |
489 | 493 | ||
490 | /* struct vb2_ops helpers, only use if vq->lock is non-NULL. */ | 494 | /* struct vb2_ops helpers, only use if vq->lock is non-NULL. */ |
491 | 495 | ||
492 | void vb2_ops_wait_prepare(struct vb2_queue *vq); | 496 | void vb2_ops_wait_prepare(struct vb2_queue *vq); |
493 | void vb2_ops_wait_finish(struct vb2_queue *vq); | 497 | void vb2_ops_wait_finish(struct vb2_queue *vq); |
494 | 498 | ||
495 | #endif /* _MEDIA_VIDEOBUF2_CORE_H */ | 499 | #endif /* _MEDIA_VIDEOBUF2_CORE_H */ |
496 | 500 |