Blame view
virt/kvm/coalesced_mmio.c
4.12 KB
5f94c1741 KVM: Add coalesce... |
1 2 3 4 |
/* * KVM coalesced MMIO * * Copyright (c) 2008 Bull S.A.S. |
221d059d1 KVM: Update Red H... |
5 |
* Copyright 2009 Red Hat, Inc. and/or its affiliates. |
5f94c1741 KVM: Add coalesce... |
6 7 8 9 10 11 12 13 |
* * Author: Laurent Vivier <Laurent.Vivier@bull.net> * */ #include "iodev.h" #include <linux/kvm_host.h> |
5a0e3ad6a include cleanup: ... |
14 |
#include <linux/slab.h> |
5f94c1741 KVM: Add coalesce... |
15 16 17 |
#include <linux/kvm.h> #include "coalesced_mmio.h" |
d76685c4a KVM: cleanup io_d... |
18 19 20 21 |
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev) { return container_of(dev, struct kvm_coalesced_mmio_dev, dev); } |
bda9020e2 KVM: remove in_ra... |
22 23 |
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev, gpa_t addr, int len) |
5f94c1741 KVM: Add coalesce... |
24 |
{ |
2b3c246a6 KVM: Make coalesc... |
25 26 27 28 |
/* is it in a batchable area ? * (addr,len) is fully included in * (zone->addr, zone->size) */ |
1a214246c KVM: make checks ... |
29 30 31 32 33 34 35 36 37 |
if (len < 0) return 0; if (addr + len < addr) return 0; if (addr < dev->zone.addr) return 0; if (addr + len > dev->zone.addr + dev->zone.size) return 0; return 1; |
5f94c1741 KVM: Add coalesce... |
38 |
} |
c298125f4 KVM: MMIO: Lock c... |
39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 |
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev) { struct kvm_coalesced_mmio_ring *ring; unsigned avail; /* Are we able to batch it ? */ /* last is the first free entry * check if we don't meet the first used entry * there is always one unused entry in the buffer */ ring = dev->kvm->coalesced_mmio_ring; avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX; if (avail == 0) { /* full */ return 0; } return 1; } |
bda9020e2 KVM: remove in_ra... |
59 60 |
static int coalesced_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, const void *val) |
5f94c1741 KVM: Add coalesce... |
61 |
{ |
d76685c4a KVM: cleanup io_d... |
62 |
struct kvm_coalesced_mmio_dev *dev = to_mmio(this); |
5f94c1741 KVM: Add coalesce... |
63 |
struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring; |
c298125f4 KVM: MMIO: Lock c... |
64 |
|
bda9020e2 KVM: remove in_ra... |
65 66 |
if (!coalesced_mmio_in_range(dev, addr, len)) return -EOPNOTSUPP; |
5f94c1741 KVM: Add coalesce... |
67 |
|
2b3c246a6 KVM: Make coalesc... |
68 |
spin_lock(&dev->kvm->ring_lock); |
5f94c1741 KVM: Add coalesce... |
69 |
|
c298125f4 KVM: MMIO: Lock c... |
70 |
if (!coalesced_mmio_has_room(dev)) { |
2b3c246a6 KVM: Make coalesc... |
71 |
spin_unlock(&dev->kvm->ring_lock); |
c298125f4 KVM: MMIO: Lock c... |
72 73 |
return -EOPNOTSUPP; } |
5f94c1741 KVM: Add coalesce... |
74 75 76 77 78 79 80 |
/* copy data in first free entry of the ring */ ring->coalesced_mmio[ring->last].phys_addr = addr; ring->coalesced_mmio[ring->last].len = len; memcpy(ring->coalesced_mmio[ring->last].data, val, len); smp_wmb(); ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX; |
2b3c246a6 KVM: Make coalesc... |
81 |
spin_unlock(&dev->kvm->ring_lock); |
bda9020e2 KVM: remove in_ra... |
82 |
return 0; |
5f94c1741 KVM: Add coalesce... |
83 84 85 86 |
} static void coalesced_mmio_destructor(struct kvm_io_device *this) { |
/*
 * Tear down a coalesced MMIO device: unlink it from the VM's zone list
 * and free it.  Registration/unregistration run under kvm->slots_lock,
 * so the list operation needs no extra locking here.
 */
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}
/*
 * I/O device callbacks for a coalescing zone.  Only writes are handled;
 * no .read callback is provided.
 */
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};
5f94c1741 KVM: Add coalesce... |
96 97 |
int kvm_coalesced_mmio_init(struct kvm *kvm) { |
980da6ce5 KVM: Simplify coa... |
98 |
struct page *page; |
090b7aff2 KVM: make io_bus ... |
99 |
int ret; |
5f94c1741 KVM: Add coalesce... |
100 |
|
980da6ce5 KVM: Simplify coa... |
101 102 103 104 |
ret = -ENOMEM; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) goto out_err; |
980da6ce5 KVM: Simplify coa... |
105 |
|
2b3c246a6 KVM: Make coalesc... |
106 107 |
ret = 0; kvm->coalesced_mmio_ring = page_address(page); |
980da6ce5 KVM: Simplify coa... |
108 |
|
2b3c246a6 KVM: Make coalesc... |
109 110 111 112 113 114 115 |
/* * We're using this spinlock to sync access to the coalesced ring. * The list doesn't need it's own lock since device registration and * unregistration should only happen when kvm->slots_lock is held. */ spin_lock_init(&kvm->ring_lock); INIT_LIST_HEAD(&kvm->coalesced_zones); |
090b7aff2 KVM: make io_bus ... |
116 |
|
980da6ce5 KVM: Simplify coa... |
117 |
out_err: |
090b7aff2 KVM: make io_bus ... |
118 |
return ret; |
5f94c1741 KVM: Add coalesce... |
119 |
} |
980da6ce5 KVM: Simplify coa... |
120 121 122 123 124 |
void kvm_coalesced_mmio_free(struct kvm *kvm) { if (kvm->coalesced_mmio_ring) free_page((unsigned long)kvm->coalesced_mmio_ring); } |
5f94c1741 KVM: Add coalesce... |
125 |
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, |
43db66973 KVM: Fix Codestyl... |
126 |
struct kvm_coalesced_mmio_zone *zone) |
5f94c1741 KVM: Add coalesce... |
127 |
{ |
2b3c246a6 KVM: Make coalesc... |
128 129 |
int ret; struct kvm_coalesced_mmio_dev *dev; |
5f94c1741 KVM: Add coalesce... |
130 |
|
2b3c246a6 KVM: Make coalesc... |
131 132 133 134 135 136 137 |
dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL); if (!dev) return -ENOMEM; kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops); dev->kvm = kvm; dev->zone = *zone; |
5f94c1741 KVM: Add coalesce... |
138 |
|
79fac95ec KVM: convert slot... |
139 |
mutex_lock(&kvm->slots_lock); |
743eeb0b0 KVM: Intelligent ... |
140 141 |
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr, zone->size, &dev->dev); |
2b3c246a6 KVM: Make coalesc... |
142 143 144 145 |
if (ret < 0) goto out_free_dev; list_add_tail(&dev->list, &kvm->coalesced_zones); mutex_unlock(&kvm->slots_lock); |
5f94c1741 KVM: Add coalesce... |
146 |
|
2b3c246a6 KVM: Make coalesc... |
147 |
return ret; |
5f94c1741 KVM: Add coalesce... |
148 |
|
2b3c246a6 KVM: Make coalesc... |
149 |
out_free_dev: |
79fac95ec KVM: convert slot... |
150 |
mutex_unlock(&kvm->slots_lock); |
2b3c246a6 KVM: Make coalesc... |
151 152 153 154 155 |
kfree(dev); if (dev == NULL) return -ENXIO; |
5f94c1741 KVM: Add coalesce... |
156 157 158 159 160 161 |
return 0; } int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, struct kvm_coalesced_mmio_zone *zone) { |
2b3c246a6 KVM: Make coalesc... |
162 |
struct kvm_coalesced_mmio_dev *dev, *tmp; |
5f94c1741 KVM: Add coalesce... |
163 |
|
79fac95ec KVM: convert slot... |
164 |
mutex_lock(&kvm->slots_lock); |
5f94c1741 KVM: Add coalesce... |
165 |
|
2b3c246a6 KVM: Make coalesc... |
166 167 168 169 |
list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) { kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev); kvm_iodevice_destructor(&dev->dev); |
5f94c1741 KVM: Add coalesce... |
170 |
} |
5f94c1741 KVM: Add coalesce... |
171 |
|
79fac95ec KVM: convert slot... |
172 |
mutex_unlock(&kvm->slots_lock); |
5f94c1741 KVM: Add coalesce... |
173 174 175 |
return 0; } |