/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"
d76685c4a KVM: cleanup io_d... |
18 19 20 21 |
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev) { return container_of(dev, struct kvm_coalesced_mmio_dev, dev); } |
bda9020e2 KVM: remove in_ra... |
22 23 |
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev, gpa_t addr, int len) |
5f94c1741 KVM: Add coalesce... |
24 |
{ |
5f94c1741 KVM: Add coalesce... |
25 |
struct kvm_coalesced_mmio_zone *zone; |
105f8d40a KVM: Calculate av... |
26 27 |
struct kvm_coalesced_mmio_ring *ring; unsigned avail; |
5f94c1741 KVM: Add coalesce... |
28 |
int i; |
5f94c1741 KVM: Add coalesce... |
29 30 31 32 33 34 |
/* Are we able to batch it ? */ /* last is the first free entry * check if we don't meet the first used entry * there is always one unused entry in the buffer */ |
105f8d40a KVM: Calculate av... |
35 36 |
ring = dev->kvm->coalesced_mmio_ring; avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX; |
64a2268dc KVM: move coalesc... |
37 |
if (avail < KVM_MAX_VCPUS) { |
5f94c1741 KVM: Add coalesce... |
38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 |
/* full */ return 0; } /* is it in a batchable area ? */ for (i = 0; i < dev->nb_zones; i++) { zone = &dev->zone[i]; /* (addr,len) is fully included in * (zone->addr, zone->size) */ if (zone->addr <= addr && addr + len <= zone->addr + zone->size) return 1; } return 0; } |
bda9020e2 KVM: remove in_ra... |
57 58 |
static int coalesced_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, const void *val) |
5f94c1741 KVM: Add coalesce... |
59 |
{ |
d76685c4a KVM: cleanup io_d... |
60 |
struct kvm_coalesced_mmio_dev *dev = to_mmio(this); |
5f94c1741 KVM: Add coalesce... |
61 |
struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring; |
bda9020e2 KVM: remove in_ra... |
62 63 |
if (!coalesced_mmio_in_range(dev, addr, len)) return -EOPNOTSUPP; |
5f94c1741 KVM: Add coalesce... |
64 |
|
64a2268dc KVM: move coalesc... |
65 |
spin_lock(&dev->lock); |
5f94c1741 KVM: Add coalesce... |
66 67 68 69 70 71 72 73 |
/* copy data in first free entry of the ring */ ring->coalesced_mmio[ring->last].phys_addr = addr; ring->coalesced_mmio[ring->last].len = len; memcpy(ring->coalesced_mmio[ring->last].data, val, len); smp_wmb(); ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX; |
64a2268dc KVM: move coalesc... |
74 |
spin_unlock(&dev->lock); |
bda9020e2 KVM: remove in_ra... |
75 |
return 0; |
5f94c1741 KVM: Add coalesce... |
76 77 78 79 |
} static void coalesced_mmio_destructor(struct kvm_io_device *this) { |
d76685c4a KVM: cleanup io_d... |
80 |
struct kvm_coalesced_mmio_dev *dev = to_mmio(this); |
787a660a4 KVM: Clean up coa... |
81 82 |
kfree(dev); |
5f94c1741 KVM: Add coalesce... |
83 |
} |
d76685c4a KVM: cleanup io_d... |
84 85 |
static const struct kvm_io_device_ops coalesced_mmio_ops = { .write = coalesced_mmio_write, |
d76685c4a KVM: cleanup io_d... |
86 87 |
.destructor = coalesced_mmio_destructor, }; |
5f94c1741 KVM: Add coalesce... |
88 89 90 |
int kvm_coalesced_mmio_init(struct kvm *kvm) { struct kvm_coalesced_mmio_dev *dev; |
980da6ce5 KVM: Simplify coa... |
91 |
struct page *page; |
090b7aff2 KVM: make io_bus ... |
92 |
int ret; |
5f94c1741 KVM: Add coalesce... |
93 |
|
980da6ce5 KVM: Simplify coa... |
94 95 96 97 98 99 100 |
ret = -ENOMEM; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) goto out_err; kvm->coalesced_mmio_ring = page_address(page); ret = -ENOMEM; |
5f94c1741 KVM: Add coalesce... |
101 102 |
dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL); if (!dev) |
980da6ce5 KVM: Simplify coa... |
103 |
goto out_free_page; |
64a2268dc KVM: move coalesc... |
104 |
spin_lock_init(&dev->lock); |
d76685c4a KVM: cleanup io_d... |
105 |
kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops); |
5f94c1741 KVM: Add coalesce... |
106 107 |
dev->kvm = kvm; kvm->coalesced_mmio_dev = dev; |
5f94c1741 KVM: Add coalesce... |
108 |
|
79fac95ec KVM: convert slot... |
109 |
mutex_lock(&kvm->slots_lock); |
e93f8a0f8 KVM: convert io_b... |
110 |
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev); |
79fac95ec KVM: convert slot... |
111 |
mutex_unlock(&kvm->slots_lock); |
090b7aff2 KVM: make io_bus ... |
112 |
if (ret < 0) |
980da6ce5 KVM: Simplify coa... |
113 114 115 |
goto out_free_dev; return ret; |
090b7aff2 KVM: make io_bus ... |
116 |
|
980da6ce5 KVM: Simplify coa... |
117 |
out_free_dev: |
6ce5a090a KVM: coalesced_mm... |
118 |
kvm->coalesced_mmio_dev = NULL; |
980da6ce5 KVM: Simplify coa... |
119 120 |
kfree(dev); out_free_page: |
6ce5a090a KVM: coalesced_mm... |
121 |
kvm->coalesced_mmio_ring = NULL; |
980da6ce5 KVM: Simplify coa... |
122 123 |
__free_page(page); out_err: |
090b7aff2 KVM: make io_bus ... |
124 |
return ret; |
5f94c1741 KVM: Add coalesce... |
125 |
} |
980da6ce5 KVM: Simplify coa... |
126 127 128 129 130 |
void kvm_coalesced_mmio_free(struct kvm *kvm) { if (kvm->coalesced_mmio_ring) free_page((unsigned long)kvm->coalesced_mmio_ring); } |
5f94c1741 KVM: Add coalesce... |
131 |
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, |
43db66973 KVM: Fix Codestyl... |
132 |
struct kvm_coalesced_mmio_zone *zone) |
5f94c1741 KVM: Add coalesce... |
133 134 135 136 |
{ struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev; if (dev == NULL) |
a87fa3551 KVM: fix the errn... |
137 |
return -ENXIO; |
5f94c1741 KVM: Add coalesce... |
138 |
|
79fac95ec KVM: convert slot... |
139 |
mutex_lock(&kvm->slots_lock); |
5f94c1741 KVM: Add coalesce... |
140 |
if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) { |
79fac95ec KVM: convert slot... |
141 |
mutex_unlock(&kvm->slots_lock); |
5f94c1741 KVM: Add coalesce... |
142 143 144 145 146 |
return -ENOBUFS; } dev->zone[dev->nb_zones] = *zone; dev->nb_zones++; |
79fac95ec KVM: convert slot... |
147 |
mutex_unlock(&kvm->slots_lock); |
5f94c1741 KVM: Add coalesce... |
148 149 150 151 152 153 154 155 156 157 158 |
return 0; } int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, struct kvm_coalesced_mmio_zone *zone) { int i; struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev; struct kvm_coalesced_mmio_zone *z; if (dev == NULL) |
a87fa3551 KVM: fix the errn... |
159 |
return -ENXIO; |
5f94c1741 KVM: Add coalesce... |
160 |
|
79fac95ec KVM: convert slot... |
161 |
mutex_lock(&kvm->slots_lock); |
5f94c1741 KVM: Add coalesce... |
162 163 |
i = dev->nb_zones; |
43db66973 KVM: Fix Codestyl... |
164 |
while (i) { |
5f94c1741 KVM: Add coalesce... |
165 166 167 168 169 170 171 172 173 174 175 176 177 |
z = &dev->zone[i - 1]; /* unregister all zones * included in (zone->addr, zone->size) */ if (zone->addr <= z->addr && z->addr + z->size <= zone->addr + zone->size) { dev->nb_zones--; *z = dev->zone[dev->nb_zones]; } i--; } |
79fac95ec KVM: convert slot... |
178 |
mutex_unlock(&kvm->slots_lock); |
5f94c1741 KVM: Add coalesce... |
179 180 181 |
return 0; } |