Blame view
virt/kvm/coalesced_mmio.c
4.15 KB
b24413180 License cleanup: ... |
1 |
// SPDX-License-Identifier: GPL-2.0 |
5f94c1741 KVM: Add coalesce... |
2 3 4 5 |
/* * KVM coalesced MMIO * * Copyright (c) 2008 Bull S.A.S. |
221d059d1 KVM: Update Red H... |
6 |
* Copyright 2009 Red Hat, Inc. and/or its affiliates. |
5f94c1741 KVM: Add coalesce... |
7 8 9 10 |
* * Author: Laurent Vivier <Laurent.Vivier@bull.net> * */ |
af669ac6d KVM: move iodev.h... |
11 |
#include <kvm/iodev.h> |
5f94c1741 KVM: Add coalesce... |
12 13 |
#include <linux/kvm_host.h> |
5a0e3ad6a include cleanup: ... |
14 |
#include <linux/slab.h> |
5f94c1741 KVM: Add coalesce... |
15 16 17 |
#include <linux/kvm.h> #include "coalesced_mmio.h" |
d76685c4a KVM: cleanup io_d... |
18 19 20 21 |
/* Recover the coalesced MMIO device that embeds this generic I/O device. */
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *iodev)
{
	return container_of(iodev, struct kvm_coalesced_mmio_dev, dev);
}
bda9020e2 KVM: remove in_ra... |
22 23 |
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/*
	 * The access is batchable only if (addr, len) is fully contained
	 * in (zone->addr, zone->size).  Reject a negative length and an
	 * address range that wraps around the top of the address space.
	 */
	if (len < 0 || addr + len < addr)
		return 0;

	return (addr >= dev->zone.addr) &&
	       (addr + len <= dev->zone.addr + dev->zone.size);
}
c298125f4 KVM: MMIO: Lock c... |
39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 |
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	unsigned int free_entries;

	/*
	 * ring->last is the first free slot; ring->first is the oldest
	 * used one.  One slot is always kept unused so that a full ring
	 * can be distinguished from an empty one.
	 */
	free_entries = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;

	return free_entries != 0;
}
e32edf4fd KVM: Redesign kvm... |
59 60 61 |
static int coalesced_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr, int len, const void *val) |
5f94c1741 KVM: Add coalesce... |
62 |
{ |
d76685c4a KVM: cleanup io_d... |
63 |
struct kvm_coalesced_mmio_dev *dev = to_mmio(this); |
5f94c1741 KVM: Add coalesce... |
64 |
struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring; |
c298125f4 KVM: MMIO: Lock c... |
65 |
|
bda9020e2 KVM: remove in_ra... |
66 67 |
if (!coalesced_mmio_in_range(dev, addr, len)) return -EOPNOTSUPP; |
5f94c1741 KVM: Add coalesce... |
68 |
|
2b3c246a6 KVM: Make coalesc... |
69 |
spin_lock(&dev->kvm->ring_lock); |
5f94c1741 KVM: Add coalesce... |
70 |
|
c298125f4 KVM: MMIO: Lock c... |
71 |
if (!coalesced_mmio_has_room(dev)) { |
2b3c246a6 KVM: Make coalesc... |
72 |
spin_unlock(&dev->kvm->ring_lock); |
c298125f4 KVM: MMIO: Lock c... |
73 74 |
return -EOPNOTSUPP; } |
5f94c1741 KVM: Add coalesce... |
75 76 77 78 79 80 81 |
/* copy data in first free entry of the ring */ ring->coalesced_mmio[ring->last].phys_addr = addr; ring->coalesced_mmio[ring->last].len = len; memcpy(ring->coalesced_mmio[ring->last].data, val, len); smp_wmb(); ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX; |
2b3c246a6 KVM: Make coalesc... |
82 |
spin_unlock(&dev->kvm->ring_lock); |
bda9020e2 KVM: remove in_ra... |
83 |
return 0; |
5f94c1741 KVM: Add coalesce... |
84 85 86 87 |
} static void coalesced_mmio_destructor(struct kvm_io_device *this) { |
d76685c4a KVM: cleanup io_d... |
88 |
struct kvm_coalesced_mmio_dev *dev = to_mmio(this); |
787a660a4 KVM: Clean up coa... |
89 |
|
2b3c246a6 KVM: Make coalesc... |
90 |
list_del(&dev->list); |
787a660a4 KVM: Clean up coa... |
91 |
kfree(dev); |
5f94c1741 KVM: Add coalesce... |
92 |
} |
d76685c4a KVM: cleanup io_d... |
93 94 |
/*
 * Ops table for a coalesced MMIO zone device: writes are batched into
 * the ring, the destructor tears the per-zone device down.  No .read is
 * provided — reads of a coalesced zone are not handled by this device.
 */
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};
5f94c1741 KVM: Add coalesce... |
97 98 |
int kvm_coalesced_mmio_init(struct kvm *kvm) { |
980da6ce5 KVM: Simplify coa... |
99 |
struct page *page; |
090b7aff2 KVM: make io_bus ... |
100 |
int ret; |
5f94c1741 KVM: Add coalesce... |
101 |
|
980da6ce5 KVM: Simplify coa... |
102 103 104 105 |
ret = -ENOMEM; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) goto out_err; |
980da6ce5 KVM: Simplify coa... |
106 |
|
2b3c246a6 KVM: Make coalesc... |
107 108 |
ret = 0; kvm->coalesced_mmio_ring = page_address(page); |
980da6ce5 KVM: Simplify coa... |
109 |
|
2b3c246a6 KVM: Make coalesc... |
110 111 112 113 114 115 116 |
/* * We're using this spinlock to sync access to the coalesced ring. * The list doesn't need it's own lock since device registration and * unregistration should only happen when kvm->slots_lock is held. */ spin_lock_init(&kvm->ring_lock); INIT_LIST_HEAD(&kvm->coalesced_zones); |
090b7aff2 KVM: make io_bus ... |
117 |
|
980da6ce5 KVM: Simplify coa... |
118 |
out_err: |
090b7aff2 KVM: make io_bus ... |
119 |
return ret; |
5f94c1741 KVM: Add coalesce... |
120 |
} |
980da6ce5 KVM: Simplify coa... |
121 122 123 124 125 |
/*
 * Free the ring page allocated by kvm_coalesced_mmio_init(), if any.
 * Safe to call when init failed or never ran (ring pointer is NULL).
 */
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}
5f94c1741 KVM: Add coalesce... |
126 |
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, |
43db66973 KVM: Fix Codestyl... |
127 |
struct kvm_coalesced_mmio_zone *zone) |
5f94c1741 KVM: Add coalesce... |
128 |
{ |
2b3c246a6 KVM: Make coalesc... |
129 130 |
int ret; struct kvm_coalesced_mmio_dev *dev; |
5f94c1741 KVM: Add coalesce... |
131 |
|
2b3c246a6 KVM: Make coalesc... |
132 133 134 135 136 137 138 |
dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL); if (!dev) return -ENOMEM; kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops); dev->kvm = kvm; dev->zone = *zone; |
5f94c1741 KVM: Add coalesce... |
139 |
|
79fac95ec KVM: convert slot... |
140 |
mutex_lock(&kvm->slots_lock); |
743eeb0b0 KVM: Intelligent ... |
141 142 |
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr, zone->size, &dev->dev); |
2b3c246a6 KVM: Make coalesc... |
143 144 145 146 |
if (ret < 0) goto out_free_dev; list_add_tail(&dev->list, &kvm->coalesced_zones); mutex_unlock(&kvm->slots_lock); |
5f94c1741 KVM: Add coalesce... |
147 |
|
aac5c4226 KVM: return an er... |
148 |
return 0; |
5f94c1741 KVM: Add coalesce... |
149 |
|
2b3c246a6 KVM: Make coalesc... |
150 |
out_free_dev: |
79fac95ec KVM: convert slot... |
151 |
mutex_unlock(&kvm->slots_lock); |
2b3c246a6 KVM: Make coalesc... |
152 |
kfree(dev); |
aac5c4226 KVM: return an er... |
153 |
return ret; |
5f94c1741 KVM: Add coalesce... |
154 155 156 157 158 |
} int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, struct kvm_coalesced_mmio_zone *zone) { |
2b3c246a6 KVM: Make coalesc... |
159 |
struct kvm_coalesced_mmio_dev *dev, *tmp; |
5f94c1741 KVM: Add coalesce... |
160 |
|
79fac95ec KVM: convert slot... |
161 |
mutex_lock(&kvm->slots_lock); |
5f94c1741 KVM: Add coalesce... |
162 |
|
2b3c246a6 KVM: Make coalesc... |
163 164 165 166 |
list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) { kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev); kvm_iodevice_destructor(&dev->dev); |
5f94c1741 KVM: Add coalesce... |
167 |
} |
5f94c1741 KVM: Add coalesce... |
168 |
|
79fac95ec KVM: convert slot... |
169 |
mutex_unlock(&kvm->slots_lock); |
5f94c1741 KVM: Add coalesce... |
170 171 172 |
return 0; } |