Commit a570fd6ff74f637f907ae98211e03e8ff6327211
Exists in ti-lsk-linux-4.1.y and in 5 other branches
Merge branch 'rpmsg-ti-linux-4.1.y' of git://git.ti.com/rpmsg/rpmsg into ti-linux-4.1.y
TI-Feature: rpmsg
TI-Tree: git://git.ti.com/rpmsg/rpmsg.git
TI-Branch: rpmsg-ti-linux-4.1.y

* 'rpmsg-ti-linux-4.1.y' of git://git.ti.com/rpmsg/rpmsg:
  rpmsg: fill in dma fields for sgs passed to virtio
  virtio_ring: add virtqueue_add_inbuf/outbuf_rpmsg API
  virtio_ring: revise descriptor addition logic for virtio_rpmsg
  virtio_ring: break out vring descriptor setup code
  iommu/omap: Fix debug_read_tlb() to use seq_printf()

Signed-off-by: Texas Instruments Auto Merger <lcpd_integration@list.ti.com>
Showing 4 changed files (side-by-side diff)
drivers/iommu/omap-iommu-debug.c
... | ... | @@ -133,26 +133,18 @@ |
133 | 133 | } |
134 | 134 | |
135 | 135 | static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, |
136 | - char *buf) | |
136 | + struct seq_file *s) | |
137 | 137 | { |
138 | - char *p = buf; | |
139 | - | |
140 | - /* FIXME: Need more detail analysis of cam/ram */ | |
141 | - p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram, | |
142 | - (cr->cam & MMU_CAM_P) ? 1 : 0); | |
143 | - | |
144 | - return p - buf; | |
138 | + return seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram, | |
139 | + (cr->cam & MMU_CAM_P) ? 1 : 0); | |
145 | 140 | } |
146 | 141 | |
147 | -static size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, | |
148 | - ssize_t bytes) | |
142 | +static size_t omap_dump_tlb_entries(struct omap_iommu *obj, struct seq_file *s) | |
149 | 143 | { |
150 | 144 | int i, num; |
151 | 145 | struct cr_regs *cr; |
152 | - char *p = buf; | |
153 | 146 | |
154 | - num = bytes / sizeof(*cr); | |
155 | - num = min(obj->nr_tlb_entries, num); | |
147 | + num = obj->nr_tlb_entries; | |
156 | 148 | |
157 | 149 | cr = kcalloc(num, sizeof(*cr), GFP_KERNEL); |
158 | 150 | if (!cr) |
159 | 151 | |
160 | 152 | |
161 | 153 | |
162 | 154 | |
163 | 155 | |
164 | 156 | |
165 | 157 | |
166 | 158 | |
... | ... | @@ -160,40 +152,28 @@ |
160 | 152 | |
161 | 153 | num = __dump_tlb_entries(obj, cr, num); |
162 | 154 | for (i = 0; i < num; i++) |
163 | - p += iotlb_dump_cr(obj, cr + i, p); | |
155 | + iotlb_dump_cr(obj, cr + i, s); | |
164 | 156 | kfree(cr); |
165 | 157 | |
166 | - return p - buf; | |
158 | + return 0; | |
167 | 159 | } |
168 | 160 | |
169 | -static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, | |
170 | - size_t count, loff_t *ppos) | |
161 | +static int debug_read_tlb(struct seq_file *s, void *data) | |
171 | 162 | { |
172 | - struct omap_iommu *obj = file->private_data; | |
173 | - char *p, *buf; | |
174 | - ssize_t bytes, rest; | |
163 | + struct omap_iommu *obj = s->private; | |
175 | 164 | |
176 | 165 | if (is_omap_iommu_detached(obj)) |
177 | 166 | return -EPERM; |
178 | 167 | |
179 | - buf = kmalloc(count, GFP_KERNEL); | |
180 | - if (!buf) | |
181 | - return -ENOMEM; | |
182 | - p = buf; | |
183 | - | |
184 | 168 | mutex_lock(&iommu_debug_lock); |
185 | 169 | |
186 | - p += sprintf(p, "%8s %8s\n", "cam:", "ram:"); | |
187 | - p += sprintf(p, "-----------------------------------------\n"); | |
188 | - rest = count - (p - buf); | |
189 | - p += omap_dump_tlb_entries(obj, p, rest); | |
170 | + seq_printf(s, "%8s %8s\n", "cam:", "ram:"); | |
171 | + seq_puts(s, "-----------------------------------------\n"); | |
172 | + omap_dump_tlb_entries(obj, s); | |
190 | 173 | |
191 | - bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | |
192 | - | |
193 | 174 | mutex_unlock(&iommu_debug_lock); |
194 | - kfree(buf); | |
195 | 175 | |
196 | - return bytes; | |
176 | + return 0; | |
197 | 177 | } |
198 | 178 | |
199 | 179 | static void dump_ioptable(struct seq_file *s) |
... | ... | @@ -268,7 +248,7 @@ |
268 | 248 | } |
269 | 249 | |
270 | 250 | DEBUG_FOPS_RO(regs); |
271 | -DEBUG_FOPS_RO(tlb); | |
251 | +DEBUG_SEQ_FOPS_RO(tlb); | |
272 | 252 | DEBUG_SEQ_FOPS_RO(pagetable); |
273 | 253 | |
274 | 254 | #define __DEBUG_ADD_FILE(attr, mode) \ |
drivers/rpmsg/virtio_rpmsg_bus.c
... | ... | @@ -214,6 +214,17 @@ |
214 | 214 | kfree(ept); |
215 | 215 | } |
216 | 216 | |
217 | +static inline void rpmsg_sg_init_one(struct virtproc_info *vrp, | |
218 | + struct scatterlist *sg, | |
219 | + void *msg, unsigned int len) | |
220 | +{ | |
221 | + unsigned long offset = msg - vrp->rbufs; | |
222 | + | |
223 | + sg_init_table(sg, 1); | |
224 | + sg_dma_address(sg) = vrp->bufs_dma + offset; | |
225 | + sg_dma_len(sg) = len; | |
226 | +} | |
227 | + | |
217 | 228 | /* for more info, see below documentation of rpmsg_create_ept() */ |
218 | 229 | static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp, |
219 | 230 | struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb, |
220 | 231 | |
... | ... | @@ -825,12 +836,12 @@ |
825 | 836 | msg, sizeof(*msg) + msg->len, true); |
826 | 837 | #endif |
827 | 838 | |
828 | - sg_init_one(&sg, msg, sizeof(*msg) + len); | |
839 | + rpmsg_sg_init_one(vrp, &sg, msg, sizeof(*msg) + len); | |
829 | 840 | |
830 | 841 | mutex_lock(&vrp->tx_lock); |
831 | 842 | |
832 | 843 | /* add message to the remote processor's virtqueue */ |
833 | - err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL); | |
844 | + err = virtqueue_add_outbuf_rpmsg(vrp->svq, &sg, 1, msg, GFP_KERNEL); | |
834 | 845 | if (err) { |
835 | 846 | /* |
836 | 847 | * need to reclaim the buffer here, otherwise it's lost |
837 | 848 | |
... | ... | @@ -917,10 +928,10 @@ |
917 | 928 | dev_warn(dev, "msg received with no recipient\n"); |
918 | 929 | |
919 | 930 | /* publish the real size of the buffer */ |
920 | - sg_init_one(&sg, msg, RPMSG_BUF_SIZE); | |
931 | + rpmsg_sg_init_one(vrp, &sg, msg, RPMSG_BUF_SIZE); | |
921 | 932 | |
922 | 933 | /* add the buffer back to the remote processor's virtqueue */ |
923 | - err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL); | |
934 | + err = virtqueue_add_inbuf_rpmsg(vrp->rvq, &sg, 1, msg, GFP_KERNEL); | |
924 | 935 | if (err < 0) { |
925 | 936 | dev_err(dev, "failed to add a virtqueue buffer: %d\n", err); |
926 | 937 | return err; |
927 | 938 | |
... | ... | @@ -1099,10 +1110,10 @@ |
1099 | 1110 | struct scatterlist sg; |
1100 | 1111 | void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE; |
1101 | 1112 | |
1102 | - sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE); | |
1113 | + rpmsg_sg_init_one(vrp, &sg, cpu_addr, RPMSG_BUF_SIZE); | |
1103 | 1114 | |
1104 | - err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr, | |
1105 | - GFP_KERNEL); | |
1115 | + err = virtqueue_add_inbuf_rpmsg(vrp->rvq, &sg, 1, cpu_addr, | |
1116 | + GFP_KERNEL); | |
1106 | 1117 | WARN_ON(err); /* sanity check; this can't really happen */ |
1107 | 1118 | } |
1108 | 1119 |
drivers/virtio/virtio_ring.c
... | ... | @@ -120,13 +120,26 @@ |
120 | 120 | return desc; |
121 | 121 | } |
122 | 122 | |
123 | +static inline void vring_desc_set(struct virtio_device *vdev, | |
124 | + struct vring_desc *desc, | |
125 | + struct scatterlist *sg, | |
126 | + u16 flags, | |
127 | + bool rpmsg) | |
128 | +{ | |
129 | + desc->flags = cpu_to_virtio16(vdev, flags); | |
130 | + desc->addr = cpu_to_virtio64(vdev, | |
131 | + rpmsg ? sg_dma_address(sg) : sg_phys(sg)); | |
132 | + desc->len = cpu_to_virtio32(vdev, rpmsg ? sg_dma_len(sg) : sg->length); | |
133 | +} | |
134 | + | |
123 | 135 | static inline int virtqueue_add(struct virtqueue *_vq, |
124 | 136 | struct scatterlist *sgs[], |
125 | 137 | unsigned int total_sg, |
126 | 138 | unsigned int out_sgs, |
127 | 139 | unsigned int in_sgs, |
128 | 140 | void *data, |
129 | - gfp_t gfp) | |
141 | + gfp_t gfp, | |
142 | + bool rpmsg) | |
130 | 143 | { |
131 | 144 | struct vring_virtqueue *vq = to_vvq(_vq); |
132 | 145 | struct scatterlist *sg; |
... | ... | @@ -164,7 +177,7 @@ |
164 | 177 | |
165 | 178 | /* If the host supports indirect descriptor tables, and we have multiple |
166 | 179 | * buffers, then go indirect. FIXME: tune this threshold */ |
167 | - if (vq->indirect && total_sg > 1 && vq->vq.num_free) | |
180 | + if (!rpmsg && vq->indirect && total_sg > 1 && vq->vq.num_free) | |
168 | 181 | desc = alloc_indirect(_vq, total_sg, gfp); |
169 | 182 | else |
170 | 183 | desc = NULL; |
171 | 184 | |
... | ... | @@ -205,18 +218,17 @@ |
205 | 218 | |
206 | 219 | for (n = 0; n < out_sgs; n++) { |
207 | 220 | for (sg = sgs[n]; sg; sg = sg_next(sg)) { |
208 | - desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT); | |
209 | - desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg)); | |
210 | - desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); | |
221 | + vring_desc_set(_vq->vdev, desc + i, sg, | |
222 | + VRING_DESC_F_NEXT, rpmsg); | |
211 | 223 | prev = i; |
212 | 224 | i = virtio16_to_cpu(_vq->vdev, desc[i].next); |
213 | 225 | } |
214 | 226 | } |
215 | 227 | for (; n < (out_sgs + in_sgs); n++) { |
216 | 228 | for (sg = sgs[n]; sg; sg = sg_next(sg)) { |
217 | - desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE); | |
218 | - desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg)); | |
219 | - desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); | |
229 | + vring_desc_set(_vq->vdev, desc + i, sg, | |
230 | + VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, | |
231 | + rpmsg); | |
220 | 232 | prev = i; |
221 | 233 | i = virtio16_to_cpu(_vq->vdev, desc[i].next); |
222 | 234 | } |
... | ... | @@ -284,7 +296,8 @@ |
284 | 296 | for (sg = sgs[i]; sg; sg = sg_next(sg)) |
285 | 297 | total_sg++; |
286 | 298 | } |
287 | - return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp); | |
299 | + return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp, | |
300 | + false); | |
288 | 301 | } |
289 | 302 | EXPORT_SYMBOL_GPL(virtqueue_add_sgs); |
290 | 303 | |
291 | 304 | |
... | ... | @@ -306,11 +319,36 @@ |
306 | 319 | void *data, |
307 | 320 | gfp_t gfp) |
308 | 321 | { |
309 | - return virtqueue_add(vq, &sg, num, 1, 0, data, gfp); | |
322 | + return virtqueue_add(vq, &sg, num, 1, 0, data, gfp, false); | |
310 | 323 | } |
311 | 324 | EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); |
312 | 325 | |
313 | 326 | /** |
327 | + * virtqueue_add_outbuf_rpmsg - expose output buffers for virtio_rpmsg | |
328 | + * @vq: the struct virtqueue we're talking about. | |
329 | + * @sg: scatterlist (with dma fields filled in, and terminated properly!) | |
330 | + * @num: the number of entries in @sg readable by other side | |
331 | + * @data: the token identifying the buffer. | |
332 | + * @gfp: how to do memory allocations (if necessary). | |
333 | + * | |
334 | + * Caller must ensure we don't call this with other virtqueue operations | |
335 | + * at the same time (except where noted). Note that the scatterlist is | |
336 | + * non-standard with only the corresponding dma fields filled in, so | |
337 | + * should not be used with any sg operations using traditional buffer | |
338 | + * and length fields. | |
339 | + * | |
340 | + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). | |
341 | + */ | |
342 | +int virtqueue_add_outbuf_rpmsg(struct virtqueue *vq, | |
343 | + struct scatterlist *sg, unsigned int num, | |
344 | + void *data, | |
345 | + gfp_t gfp) | |
346 | +{ | |
347 | + return virtqueue_add(vq, &sg, num, 1, 0, data, gfp, true); | |
348 | +} | |
349 | +EXPORT_SYMBOL_GPL(virtqueue_add_outbuf_rpmsg); | |
350 | + | |
351 | +/** | |
314 | 352 | * virtqueue_add_inbuf - expose input buffers to other end |
315 | 353 | * @vq: the struct virtqueue we're talking about. |
316 | 354 | * @sg: scatterlist (must be well-formed and terminated!) |
317 | 355 | |
... | ... | @@ -328,9 +366,34 @@ |
328 | 366 | void *data, |
329 | 367 | gfp_t gfp) |
330 | 368 | { |
331 | - return virtqueue_add(vq, &sg, num, 0, 1, data, gfp); | |
369 | + return virtqueue_add(vq, &sg, num, 0, 1, data, gfp, false); | |
332 | 370 | } |
333 | 371 | EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); |
372 | + | |
373 | +/** | |
374 | + * virtqueue_add_inbuf_rpmsg - expose input buffers for virtio_rpmsg | |
375 | + * @vq: the struct virtqueue we're talking about. | |
376 | + * @sg: scatterlist (with dma fields filled in, and terminated properly!) | |
377 | + * @num: the number of entries in @sg writable by other side | |
378 | + * @data: the token identifying the buffer. | |
379 | + * @gfp: how to do memory allocations (if necessary). | |
380 | + * | |
381 | + * Caller must ensure we don't call this with other virtqueue operations | |
382 | + * at the same time (except where noted). Note that the scatterlist is | |
383 | + * non-standard with only the corresponding dma fields filled in, so | |
384 | + * should not be used with any sg operations using traditional buffer | |
385 | + * and length fields. | |
386 | + * | |
387 | + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). | |
388 | + */ | |
389 | +int virtqueue_add_inbuf_rpmsg(struct virtqueue *vq, | |
390 | + struct scatterlist *sg, unsigned int num, | |
391 | + void *data, | |
392 | + gfp_t gfp) | |
393 | +{ | |
394 | + return virtqueue_add(vq, &sg, num, 0, 1, data, gfp, true); | |
395 | +} | |
396 | +EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_rpmsg); | |
334 | 397 | |
335 | 398 | /** |
336 | 399 | * virtqueue_kick_prepare - first half of split virtqueue_kick call. |
include/linux/virtio.h
... | ... | @@ -44,6 +44,16 @@ |
44 | 44 | void *data, |
45 | 45 | gfp_t gfp); |
46 | 46 | |
47 | +int virtqueue_add_outbuf_rpmsg(struct virtqueue *vq, | |
48 | + struct scatterlist sg[], unsigned int num, | |
49 | + void *data, | |
50 | + gfp_t gfp); | |
51 | + | |
52 | +int virtqueue_add_inbuf_rpmsg(struct virtqueue *vq, | |
53 | + struct scatterlist sg[], unsigned int num, | |
54 | + void *data, | |
55 | + gfp_t gfp); | |
56 | + | |
47 | 57 | int virtqueue_add_sgs(struct virtqueue *vq, |
48 | 58 | struct scatterlist *sgs[], |
49 | 59 | unsigned int out_sgs, |