Commit 13d7a146c7738f2236689428f38ee7a0df6cf095

Authored by Edgar E. Iglesias
Committed by Suman Anna
1 parent 2b07f54a80

virtio_ring: revise descriptor addition logic for virtio_rpmsg

The virtio core expects the vring buffers to be allocated from
linear address space in general, but this may not always be true
with virtio_rpmsg. The virtio_rpmsg bus allocates the vring buffers
using the dma_alloc_coherent() API, and this API can return virtual
addresses from the vmalloc range if the underlying memory is allocated
from a carveout (physical contiguous memory not mapped into kernel) or
a CMA pool in highmem. For more details, please see the discussion
thread, http://marc.info/?l=linux-arm-kernel&m=142738673019657&w=2.

This patch adds a 'rpmsg' flag to the internal virtqueue_add function
and leverages this flag to revise the descriptor preparation when
adding the buffers. The revised logic uses the sg_dma_address() and
sg_dma_len() helpers instead of relying on sg_phys() and sg->length
fields, so that the remote side sees the physical addresses of the
vring buffers properly. The virtio rpmsg core is expected to prepare
the scatterlist structures with the dma fields filled in properly, and
use a new API (will be added in following patch) to add the virtqueue
buffers.

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
[s-anna@ti.com: rename dma variable to use rpmsg, add commit description]
Signed-off-by: Suman Anna <s-anna@ti.com>

Showing 1 changed file with 15 additions and 10 deletions (side-by-side diff)

drivers/virtio/virtio_ring.c
... ... @@ -123,11 +123,13 @@
123 123 static inline void vring_desc_set(struct virtio_device *vdev,
124 124 struct vring_desc *desc,
125 125 struct scatterlist *sg,
126   - u16 flags)
  126 + u16 flags,
  127 + bool rpmsg)
127 128 {
128 129 desc->flags = cpu_to_virtio16(vdev, flags);
129   - desc->addr = cpu_to_virtio64(vdev, sg_phys(sg));
130   - desc->len = cpu_to_virtio32(vdev, sg->length);
  130 + desc->addr = cpu_to_virtio64(vdev,
  131 + rpmsg ? sg_dma_address(sg) : sg_phys(sg));
  132 + desc->len = cpu_to_virtio32(vdev, rpmsg ? sg_dma_len(sg) : sg->length);
131 133 }
132 134  
133 135 static inline int virtqueue_add(struct virtqueue *_vq,
... ... @@ -136,7 +138,8 @@
136 138 unsigned int out_sgs,
137 139 unsigned int in_sgs,
138 140 void *data,
139   - gfp_t gfp)
  141 + gfp_t gfp,
  142 + bool rpmsg)
140 143 {
141 144 struct vring_virtqueue *vq = to_vvq(_vq);
142 145 struct scatterlist *sg;
... ... @@ -174,7 +177,7 @@
174 177  
175 178 /* If the host supports indirect descriptor tables, and we have multiple
176 179 * buffers, then go indirect. FIXME: tune this threshold */
177   - if (vq->indirect && total_sg > 1 && vq->vq.num_free)
  180 + if (!rpmsg && vq->indirect && total_sg > 1 && vq->vq.num_free)
178 181 desc = alloc_indirect(_vq, total_sg, gfp);
179 182 else
180 183 desc = NULL;
... ... @@ -216,7 +219,7 @@
216 219 for (n = 0; n < out_sgs; n++) {
217 220 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
218 221 vring_desc_set(_vq->vdev, desc + i, sg,
219   - VRING_DESC_F_NEXT);
  222 + VRING_DESC_F_NEXT, rpmsg);
220 223 prev = i;
221 224 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
222 225 }
... ... @@ -224,7 +227,8 @@
224 227 for (; n < (out_sgs + in_sgs); n++) {
225 228 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
226 229 vring_desc_set(_vq->vdev, desc + i, sg,
227   - VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
  230 + VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
  231 + rpmsg);
228 232 prev = i;
229 233 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
230 234 }
... ... @@ -292,7 +296,8 @@
292 296 for (sg = sgs[i]; sg; sg = sg_next(sg))
293 297 total_sg++;
294 298 }
295   - return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
  299 + return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp,
  300 + false);
296 301 }
297 302 EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
298 303  
... ... @@ -314,7 +319,7 @@
314 319 void *data,
315 320 gfp_t gfp)
316 321 {
317   - return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
  322 + return virtqueue_add(vq, &sg, num, 1, 0, data, gfp, false);
318 323 }
319 324 EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
320 325  
... ... @@ -336,7 +341,7 @@
336 341 void *data,
337 342 gfp_t gfp)
338 343 {
339   - return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
  344 + return virtqueue_add(vq, &sg, num, 0, 1, data, gfp, false);
340 345 }
341 346 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
342 347