Blame view

virt/kvm/eventfd.c 22.6 KB
721eecbf4   Gregory Haskins   KVM: irqfd
1
2
3
4
  /*
   * kvm eventfd support - use eventfd objects to signal various KVM events
   *
   * Copyright 2009 Novell.  All Rights Reserved.
221d059d1   Avi Kivity   KVM: Update Red H...
5
   * Copyright 2010 Red Hat, Inc. and/or its affiliates.
721eecbf4   Gregory Haskins   KVM: irqfd
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
   *
   * Author:
   *	Gregory Haskins <ghaskins@novell.com>
   *
   * This file is free software; you can redistribute it and/or modify
   * it under the terms of version 2 of the GNU General Public License
   * as published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
   * along with this program; if not, write to the Free Software Foundation,
   * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
   */
  
  #include <linux/kvm_host.h>
d34e6b175   Gregory Haskins   KVM: add ioeventf...
25
  #include <linux/kvm.h>
166c9775f   Eric Auger   KVM: create kvm_i...
26
  #include <linux/kvm_irqfd.h>
721eecbf4   Gregory Haskins   KVM: irqfd
27
28
29
30
31
32
33
  #include <linux/workqueue.h>
  #include <linux/syscalls.h>
  #include <linux/wait.h>
  #include <linux/poll.h>
  #include <linux/file.h>
  #include <linux/list.h>
  #include <linux/eventfd.h>
d34e6b175   Gregory Haskins   KVM: add ioeventf...
34
  #include <linux/kernel.h>
719d93cd5   Christian Borntraeger   kvm/irqchip: Spee...
35
  #include <linux/srcu.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
36
  #include <linux/slab.h>
56f89f362   Paul Mackerras   KVM: Don't keep r...
37
  #include <linux/seqlock.h>
9016cfb57   Eric Auger   KVM: eventfd: add...
38
  #include <linux/irqbypass.h>
e4d57e1ee   Paul Mackerras   KVM: Move irq not...
39
  #include <trace/events/kvm.h>
d34e6b175   Gregory Haskins   KVM: add ioeventf...
40

af669ac6d   Andre Przywara   KVM: move iodev.h...
41
  #include <kvm/iodev.h>
721eecbf4   Gregory Haskins   KVM: irqfd
42

297e21053   Paul Mackerras   KVM: Give IRQFD i...
43
  #ifdef CONFIG_HAVE_KVM_IRQFD
721eecbf4   Gregory Haskins   KVM: irqfd
44

36343f6ea   Paolo Bonzini   KVM: fix OOPS on ...
45
  static struct workqueue_struct *irqfd_cleanup_wq;
721eecbf4   Gregory Haskins   KVM: irqfd
46
47
48
49
  
  static void
  irqfd_inject(struct work_struct *work)
  {
166c9775f   Eric Auger   KVM: create kvm_i...
50
51
  	struct kvm_kernel_irqfd *irqfd =
  		container_of(work, struct kvm_kernel_irqfd, inject);
721eecbf4   Gregory Haskins   KVM: irqfd
52
  	struct kvm *kvm = irqfd->kvm;
7a84428af   Alex Williamson   KVM: Add resampli...
53
  	if (!irqfd->resampler) {
aa2fbe6d4   Yang Zhang   KVM: Let ioapic k...
54
55
56
57
  		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
  				false);
  		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
  				false);
7a84428af   Alex Williamson   KVM: Add resampli...
58
59
  	} else
  		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
aa2fbe6d4   Yang Zhang   KVM: Let ioapic k...
60
  			    irqfd->gsi, 1, false);
7a84428af   Alex Williamson   KVM: Add resampli...
61
62
63
64
65
66
67
68
69
70
  }
  
/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	/* One shared de-assert for every irqfd registered on this GSI. */
	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	/* irq_srcu read side protects the RCU walk of resampler->list. */
	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}
  
/*
 * Remove an irqfd from its resampler's notify list; when the list
 * becomes empty, tear down the shared resampler as well.
 */
static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	/* Wait for concurrent irqfd_resampler_ack() list walkers to finish. */
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		/* Last user: unhook the ack notifier and de-assert the line. */
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}
  
/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	struct kvm *kvm = irqfd->kvm;
	u64 cnt;

	/* Make sure irqfd has been initialized in assign path. */
	synchronize_srcu(&kvm->irq_srcu);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}
  
  
  /* assumes kvm->irqfds.lock is held */
  static bool
166c9775f   Eric Auger   KVM: create kvm_i...
154
  irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
721eecbf4   Gregory Haskins   KVM: irqfd
155
156
157
158
159
160
161
162
163
164
  {
  	return list_empty(&irqfd->list) ? false : true;
  }
  
/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	/* list_del_init() makes irqfd_is_active() report false from now on. */
	list_del_init(&irqfd->list);

	/* Defer the actual teardown to the dedicated cleanup workqueue. */
	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}
b97e6de9c   Paolo Bonzini   KVM: x86: merge k...
172
/*
 * Weak default for architectures that cannot inject an interrupt in
 * atomic context.  Returning -EWOULDBLOCK makes irqfd_wakeup() fall
 * back to the schedule_work() slow path.
 */
int __attribute__((weak)) kvm_arch_set_irq_inatomic(
				struct kvm_kernel_irq_routing_entry *irq,
				struct kvm *kvm, int irq_source_id,
				int level,
				bool line_status)
{
	return -EWOULDBLOCK;
}
721eecbf4   Gregory Haskins   KVM: irqfd
180
181
182
183
/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		/* Seqcount retry loop: snapshot a consistent routing entry. */
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (kvm_arch_set_irq_inatomic(&irq, kvm,
					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					      false) == -EWOULDBLOCK)
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}
  
  static void
  irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
  			poll_table *pt)
  {
166c9775f   Eric Auger   KVM: create kvm_i...
236
237
  	struct kvm_kernel_irqfd *irqfd =
  		container_of(pt, struct kvm_kernel_irqfd, pt);
721eecbf4   Gregory Haskins   KVM: irqfd
238
239
  	add_wait_queue(wqh, &irqfd->wait);
  }
bd2b53b20   Michael S. Tsirkin   KVM: fast-path ms...
240
/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	/* Seqcount write side pairs with the retry loop in irqfd_wakeup(). */
	write_seqcount_begin(&irqfd->irq_entry_sc);

	e = entries;
	/*
	 * Only a single-entry routing can be cached for the fast path;
	 * type 0 forces irqfd_wakeup() onto the workqueue slow path.
	 */
	if (n_entries == 1)
		irqfd->irq_entry = *e;
	else
		irqfd->irq_entry.type = 0;

	write_seqcount_end(&irqfd->irq_entry_sc);
}
1a02b2703   Eric Auger   KVM: introduce kv...
258
259
260
261
262
263
264
265
266
267
  #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
  void __attribute__((weak)) kvm_arch_irq_bypass_stop(
  				struct irq_bypass_consumer *cons)
  {
  }
  
  void __attribute__((weak)) kvm_arch_irq_bypass_start(
  				struct irq_bypass_consumer *cons)
  {
  }
f70c20aaf   Feng Wu   KVM: Add an arch ...
268
269
270
271
272
273
274
  
  int  __attribute__((weak)) kvm_arch_update_irqfd_routing(
  				struct kvm *kvm, unsigned int host_irq,
  				uint32_t guest_irq, bool set)
  {
  	return 0;
  }
1a02b2703   Eric Auger   KVM: introduce kv...
275
  #endif
721eecbf4   Gregory Haskins   KVM: irqfd
276
  static int
d4db2935e   Alex Williamson   KVM: Pass kvm_irq...
277
  kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
721eecbf4   Gregory Haskins   KVM: irqfd
278
  {
166c9775f   Eric Auger   KVM: create kvm_i...
279
  	struct kvm_kernel_irqfd *irqfd, *tmp;
cffe78d92   Al Viro   kvm eventfd: swit...
280
  	struct fd f;
7a84428af   Alex Williamson   KVM: Add resampli...
281
  	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
721eecbf4   Gregory Haskins   KVM: irqfd
282
283
  	int ret;
  	unsigned int events;
9957c86d6   Paul Mackerras   KVM: Move all acc...
284
  	int idx;
721eecbf4   Gregory Haskins   KVM: irqfd
285

01c94e64f   Eric Auger   KVM: introduce kv...
286
287
  	if (!kvm_arch_intc_initialized(kvm))
  		return -EAGAIN;
721eecbf4   Gregory Haskins   KVM: irqfd
288
289
290
291
292
  	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
  	if (!irqfd)
  		return -ENOMEM;
  
  	irqfd->kvm = kvm;
d4db2935e   Alex Williamson   KVM: Pass kvm_irq...
293
  	irqfd->gsi = args->gsi;
721eecbf4   Gregory Haskins   KVM: irqfd
294
295
296
  	INIT_LIST_HEAD(&irqfd->list);
  	INIT_WORK(&irqfd->inject, irqfd_inject);
  	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
56f89f362   Paul Mackerras   KVM: Don't keep r...
297
  	seqcount_init(&irqfd->irq_entry_sc);
721eecbf4   Gregory Haskins   KVM: irqfd
298

cffe78d92   Al Viro   kvm eventfd: swit...
299
300
301
302
  	f = fdget(args->fd);
  	if (!f.file) {
  		ret = -EBADF;
  		goto out;
721eecbf4   Gregory Haskins   KVM: irqfd
303
  	}
cffe78d92   Al Viro   kvm eventfd: swit...
304
  	eventfd = eventfd_ctx_fileget(f.file);
721eecbf4   Gregory Haskins   KVM: irqfd
305
306
307
308
309
310
  	if (IS_ERR(eventfd)) {
  		ret = PTR_ERR(eventfd);
  		goto fail;
  	}
  
  	irqfd->eventfd = eventfd;
7a84428af   Alex Williamson   KVM: Add resampli...
311
  	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
166c9775f   Eric Auger   KVM: create kvm_i...
312
  		struct kvm_kernel_irqfd_resampler *resampler;
7a84428af   Alex Williamson   KVM: Add resampli...
313
314
315
316
317
318
319
320
321
322
323
324
325
  
  		resamplefd = eventfd_ctx_fdget(args->resamplefd);
  		if (IS_ERR(resamplefd)) {
  			ret = PTR_ERR(resamplefd);
  			goto fail;
  		}
  
  		irqfd->resamplefd = resamplefd;
  		INIT_LIST_HEAD(&irqfd->resampler_link);
  
  		mutex_lock(&kvm->irqfds.resampler_lock);
  
  		list_for_each_entry(resampler,
49f8a1a53   Alex Williamson   kvm: Fix irqfd re...
326
  				    &kvm->irqfds.resampler_list, link) {
7a84428af   Alex Williamson   KVM: Add resampli...
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
  			if (resampler->notifier.gsi == irqfd->gsi) {
  				irqfd->resampler = resampler;
  				break;
  			}
  		}
  
  		if (!irqfd->resampler) {
  			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
  			if (!resampler) {
  				ret = -ENOMEM;
  				mutex_unlock(&kvm->irqfds.resampler_lock);
  				goto fail;
  			}
  
  			resampler->kvm = kvm;
  			INIT_LIST_HEAD(&resampler->list);
  			resampler->notifier.gsi = irqfd->gsi;
  			resampler->notifier.irq_acked = irqfd_resampler_ack;
  			INIT_LIST_HEAD(&resampler->link);
  
  			list_add(&resampler->link, &kvm->irqfds.resampler_list);
  			kvm_register_irq_ack_notifier(kvm,
  						      &resampler->notifier);
  			irqfd->resampler = resampler;
  		}
  
  		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
719d93cd5   Christian Borntraeger   kvm/irqchip: Spee...
354
  		synchronize_srcu(&kvm->irq_srcu);
7a84428af   Alex Williamson   KVM: Add resampli...
355
356
357
  
  		mutex_unlock(&kvm->irqfds.resampler_lock);
  	}
721eecbf4   Gregory Haskins   KVM: irqfd
358
359
360
361
362
363
  	/*
  	 * Install our own custom wake-up handling so we are notified via
  	 * a callback whenever someone signals the underlying eventfd
  	 */
  	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
  	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);
f1d1c309f   Michael S. Tsirkin   KVM: only allow o...
364
365
366
367
368
369
370
371
372
373
374
  	spin_lock_irq(&kvm->irqfds.lock);
  
  	ret = 0;
  	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
  		if (irqfd->eventfd != tmp->eventfd)
  			continue;
  		/* This fd is used for another irq already. */
  		ret = -EBUSY;
  		spin_unlock_irq(&kvm->irqfds.lock);
  		goto fail;
  	}
9957c86d6   Paul Mackerras   KVM: Move all acc...
375
376
  	idx = srcu_read_lock(&kvm->irq_srcu);
  	irqfd_update(kvm, irqfd);
bd2b53b20   Michael S. Tsirkin   KVM: fast-path ms...
377

721eecbf4   Gregory Haskins   KVM: irqfd
378
  	list_add_tail(&irqfd->list, &kvm->irqfds.items);
721eecbf4   Gregory Haskins   KVM: irqfd
379

684a0b719   Cornelia Huck   KVM: eventfd: Fix...
380
  	spin_unlock_irq(&kvm->irqfds.lock);
721eecbf4   Gregory Haskins   KVM: irqfd
381
382
383
384
  	/*
  	 * Check if there was an event already pending on the eventfd
  	 * before we registered, and trigger it as if we didn't miss it.
  	 */
684a0b719   Cornelia Huck   KVM: eventfd: Fix...
385
  	events = f.file->f_op->poll(f.file, &irqfd->pt);
721eecbf4   Gregory Haskins   KVM: irqfd
386
387
  	if (events & POLLIN)
  		schedule_work(&irqfd->inject);
9016cfb57   Eric Auger   KVM: eventfd: add...
388
  #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
14717e203   Alex Williamson   kvm: Conditionall...
389
390
391
392
393
394
395
396
397
398
  	if (kvm_arch_has_irq_bypass()) {
  		irqfd->consumer.token = (void *)irqfd->eventfd;
  		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
  		irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
  		irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
  		irqfd->consumer.start = kvm_arch_irq_bypass_start;
  		ret = irq_bypass_register_consumer(&irqfd->consumer);
  		if (ret)
  			pr_info("irq bypass consumer (token %p) registration fails: %d
  ",
9016cfb57   Eric Auger   KVM: eventfd: add...
399
  				irqfd->consumer.token, ret);
14717e203   Alex Williamson   kvm: Conditionall...
400
  	}
9016cfb57   Eric Auger   KVM: eventfd: add...
401
  #endif
721eecbf4   Gregory Haskins   KVM: irqfd
402

3a46a033b   Lan Tianyu   KVM/Eventfd: Avoi...
403
  	srcu_read_unlock(&kvm->irq_srcu, idx);
270d5d771   Paolo Bonzini   KVM: irqfd: fix r...
404
405
406
407
408
409
  
  	/*
  	 * do not drop the file until the irqfd is fully initialized, otherwise
  	 * we might race against the POLLHUP
  	 */
  	fdput(f);
721eecbf4   Gregory Haskins   KVM: irqfd
410
411
412
  	return 0;
  
  fail:
7a84428af   Alex Williamson   KVM: Add resampli...
413
414
415
416
417
  	if (irqfd->resampler)
  		irqfd_resampler_shutdown(irqfd);
  
  	if (resamplefd && !IS_ERR(resamplefd))
  		eventfd_ctx_put(resamplefd);
721eecbf4   Gregory Haskins   KVM: irqfd
418
419
  	if (eventfd && !IS_ERR(eventfd))
  		eventfd_ctx_put(eventfd);
cffe78d92   Al Viro   kvm eventfd: swit...
420
  	fdput(f);
721eecbf4   Gregory Haskins   KVM: irqfd
421

cffe78d92   Al Viro   kvm eventfd: swit...
422
  out:
721eecbf4   Gregory Haskins   KVM: irqfd
423
424
425
  	kfree(irqfd);
  	return ret;
  }
c77dcacb3   Paolo Bonzini   KVM: Move more co...
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
  
  bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
  {
  	struct kvm_irq_ack_notifier *kian;
  	int gsi, idx;
  
  	idx = srcu_read_lock(&kvm->irq_srcu);
  	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
  	if (gsi != -1)
  		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
  					 link)
  			if (kian->gsi == gsi) {
  				srcu_read_unlock(&kvm->irq_srcu, idx);
  				return true;
  			}
  
  	srcu_read_unlock(&kvm->irq_srcu, idx);
  
  	return false;
  }
  EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
ba1aefcd6   Andrey Smetanin   kvm/eventfd: fact...
447
/*
 * Invoke every registered ack notifier matching @gsi.
 * NOTE(review): callers in this file hold kvm->irq_srcu (read side)
 * around the list walk — confirm external callers do the same.
 */
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
	struct kvm_irq_ack_notifier *kian;

	hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
				 link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}
  
/*
 * Translate an irqchip/pin ACK to its GSI and fire the ack notifiers,
 * under the irq_srcu read lock.
 */
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}
  
/*
 * Add an ack notifier to the VM's list.  Writers are serialized by
 * irq_lock; readers walk the list under RCU/irq_srcu.
 */
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
  
/*
 * Remove an ack notifier; safe for the caller to free @kian on return.
 */
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				    struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	/* Wait for in-flight irq_srcu readers before kian can be freed. */
	synchronize_srcu(&kvm->irq_srcu);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
914daba86   Alexander Graf   KVM: Distangle ev...
488
  #endif
721eecbf4   Gregory Haskins   KVM: irqfd
489
490
  
/*
 * Per-VM eventfd state initialization, called at VM creation.
 */
void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}
297e21053   Paul Mackerras   KVM: Give IRQFD i...
501
  #ifdef CONFIG_HAVE_KVM_IRQFD
721eecbf4   Gregory Haskins   KVM: irqfd
502
503
504
505
/*
 * shutdown any irqfd's that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}
  
  int
d4db2935e   Alex Williamson   KVM: Pass kvm_irq...
545
  kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
721eecbf4   Gregory Haskins   KVM: irqfd
546
  {
7a84428af   Alex Williamson   KVM: Add resampli...
547
  	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
326cf0334   Alex Williamson   KVM: Sanitize KVM...
548
  		return -EINVAL;
d4db2935e   Alex Williamson   KVM: Pass kvm_irq...
549
550
  	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
  		return kvm_irqfd_deassign(kvm, args);
721eecbf4   Gregory Haskins   KVM: irqfd
551

d4db2935e   Alex Williamson   KVM: Pass kvm_irq...
552
  	return kvm_irqfd_assign(kvm, args);
721eecbf4   Gregory Haskins   KVM: irqfd
553
554
555
556
557
558
559
560
561
  }
  
/*
 * This function is called as the kvm VM fd is being released. Shutdown all
 * irqfds that still remain open
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);

}
  
/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
		/* Refresh each irqfd's cached fast-path routing entry. */
		irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
		if (irqfd->producer) {
			int ret = kvm_arch_update_irqfd_routing(
					irqfd->kvm, irqfd->producer->irq,
					irqfd->gsi, 1);
			WARN_ON(ret);
		}
#endif
	}

	spin_unlock_irq(&kvm->irqfds.lock);
}
36343f6ea   Paolo Bonzini   KVM: fix OOPS on ...
602
603
604
605
606
607
608
609
610
611
612
613
614
  /*
   * create a host-wide workqueue for issuing deferred shutdown requests
   * aggregated from all vm* instances. We need our own isolated
   * queue to ease flushing work items when a VM exits.
   */
  int kvm_irqfd_init(void)
  {
  	irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
  	if (!irqfd_cleanup_wq)
  		return -ENOMEM;
  
  	return 0;
  }
a0f155e96   Cornelia Huck   KVM: Initialize i...
615
/* Tear down the module-wide irqfd cleanup workqueue. */
void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
914daba86   Alexander Graf   KVM: Distangle ev...
619
  #endif
d34e6b175   Gregory Haskins   KVM: add ioeventf...
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
  
/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

struct _ioeventfd {
	struct list_head     list;	/* entry on kvm->ioeventfds */
	u64                  addr;	/* guest address to watch */
	int                  length;	/* access size; 0 = match any length */
	struct eventfd_ctx  *eventfd;	/* signalled on a matching write */
	u64                  datamatch;	/* value to match when !wildcard */
	struct kvm_io_device dev;	/* KVM I/O bus device glue */
	u8                   bus_idx;	/* bus this device is registered on */
	bool                 wildcard;	/* true: any written value matches */
};
  
/* Map a generic kvm_io_device back to its containing _ioeventfd. */
static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}
  
/* Drop the eventfd reference, unlink the entry, and free it. */
static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}
  
  static bool
  ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
  {
  	u64 _val;
f848a5a8d   Michael S. Tsirkin   KVM: support any-...
659
660
661
662
663
664
665
666
667
  	if (addr != p->addr)
  		/* address must be precise for a hit */
  		return false;
  
  	if (!p->length)
  		/* length = 0 means only look at the address, so always a hit */
  		return true;
  
  	if (len != p->length)
d34e6b175   Gregory Haskins   KVM: add ioeventf...
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
  		/* address-range must be precise for a hit */
  		return false;
  
  	if (p->wildcard)
  		/* all else equal, wildcard is always a hit */
  		return true;
  
  	/* otherwise, we have to actually compare the data */
  
  	BUG_ON(!IS_ALIGNED((unsigned long)val, len));
  
  	switch (len) {
  	case 1:
  		_val = *(u8 *)val;
  		break;
  	case 2:
  		_val = *(u16 *)val;
  		break;
  	case 4:
  		_val = *(u32 *)val;
  		break;
  	case 8:
  		_val = *(u64 *)val;
  		break;
  	default:
  		return false;
  	}
  
  	return _val == p->datamatch ? true : false;
  }
  
  /* MMIO/PIO writes trigger an event if the addr/val match */
  static int
e32edf4fd   Nikolay Nikolaev   KVM: Redesign kvm...
701
702
  ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
  		int len, const void *val)
d34e6b175   Gregory Haskins   KVM: add ioeventf...
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
  {
  	struct _ioeventfd *p = to_ioeventfd(this);
  
  	if (!ioeventfd_in_range(p, addr, len, val))
  		return -EOPNOTSUPP;
  
  	eventfd_signal(p->eventfd, 1);
  	return 0;
  }
  
  /*
   * This function is called as KVM is completely shutting down.  We do not
   * need to worry about locking just nuke anything we have as quickly as possible
   */
  static void
  ioeventfd_destructor(struct kvm_io_device *this)
  {
  	struct _ioeventfd *p = to_ioeventfd(this);
  
  	ioeventfd_release(p);
  }
  
/* ioeventfds only react to writes; no .read handler is provided. */
static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};
  
  /* assumes kvm->slots_lock held */
  static bool
  ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
  {
  	struct _ioeventfd *_p;
  
  	list_for_each_entry(_p, &kvm->ioeventfds, list)
05e07f9bd   Michael S. Tsirkin   kvm: fix MMIO/PIO...
737
  		if (_p->bus_idx == p->bus_idx &&
f848a5a8d   Michael S. Tsirkin   KVM: support any-...
738
739
740
741
742
  		    _p->addr == p->addr &&
  		    (!_p->length || !p->length ||
  		     (_p->length == p->length &&
  		      (_p->wildcard || p->wildcard ||
  		       _p->datamatch == p->datamatch))))
d34e6b175   Gregory Haskins   KVM: add ioeventf...
743
744
745
746
  			return true;
  
  	return false;
  }
2b83451b4   Cornelia Huck   KVM: ioeventfd fo...
747
748
749
750
751
752
753
754
  static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
  {
  	if (flags & KVM_IOEVENTFD_FLAG_PIO)
  		return KVM_PIO_BUS;
  	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
  		return KVM_VIRTIO_CCW_NOTIFY_BUS;
  	return KVM_MMIO_BUS;
  }
85da11ca5   Jason Wang   kvm: factor out c...
755
756
757
/*
 * Register one ioeventfd on bus @bus_idx: a guest write to args->addr
 * (args->len wide, optionally matching args->datamatch) signals the
 * eventfd named by args->fd.
 *
 * Returns 0 on success, -EEXIST for an overlapping registration, or a
 * negative errno from fd lookup, allocation or bus registration.
 */
static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				enum kvm_bus bus_idx,
				struct kvm_ioeventfd *args)
{
	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	/* take a reference on the eventfd; dropped on every failure path */
	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	/* slots_lock serializes against other (de)assignments */
	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	/* kfree(NULL) is a no-op, so the allocation-failure path is safe */
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}
  
/*
 * Remove the ioeventfd on bus @bus_idx that exactly matches @args
 * (same eventfd, address, length and wildcard/datamatch setting).
 *
 * Returns 0 if a registration was found and removed, -ENOENT when no
 * match exists, or a negative errno if args->fd is not an eventfd.
 */
static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd        *p, *tmp;
	struct eventfd_ctx       *eventfd;
	struct kvm_io_bus	 *bus;
	int                       ret = -ENOENT;

	/* temporary reference, only used to identify the registration */
	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	/* _safe variant: ioeventfd_release() unlinks the entry we stop on */
	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		/* bus may already be gone during VM destruction */
		bus = kvm_get_bus(kvm, bus_idx);
		if (bus)
			bus->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}
85da11ca5   Jason Wang   kvm: factor out c...
853
854
855
  static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
  {
  	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
eefd6b06b   Jason Wang   kvm: fix double f...
856
857
858
859
  	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
  
  	if (!args->len && bus_idx == KVM_MMIO_BUS)
  		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
85da11ca5   Jason Wang   kvm: factor out c...
860

eefd6b06b   Jason Wang   kvm: fix double f...
861
  	return ret;
85da11ca5   Jason Wang   kvm: factor out c...
862
863
864
865
866
867
  }
  
  static int
  kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
  {
  	enum kvm_bus              bus_idx;
eefd6b06b   Jason Wang   kvm: fix double f...
868
  	int ret;
85da11ca5   Jason Wang   kvm: factor out c...
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
  
  	bus_idx = ioeventfd_bus_from_flags(args->flags);
  	/* must be natural-word sized, or 0 to ignore length */
  	switch (args->len) {
  	case 0:
  	case 1:
  	case 2:
  	case 4:
  	case 8:
  		break;
  	default:
  		return -EINVAL;
  	}
  
  	/* check for range overflow */
  	if (args->addr + args->len < args->addr)
  		return -EINVAL;
  
  	/* check for extra flags that we don't understand */
  	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
  		return -EINVAL;
  
  	/* ioeventfd with no length can't be combined with DATAMATCH */
e9ea5069d   Jason Wang   kvm: add capabili...
892
  	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
85da11ca5   Jason Wang   kvm: factor out c...
893
  		return -EINVAL;
eefd6b06b   Jason Wang   kvm: fix double f...
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
  	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
  	if (ret)
  		goto fail;
  
  	/* When length is ignored, MMIO is also put on a separate bus, for
  	 * faster lookups.
  	 */
  	if (!args->len && bus_idx == KVM_MMIO_BUS) {
  		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
  		if (ret < 0)
  			goto fast_fail;
  	}
  
  	return 0;
  
  fast_fail:
  	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
  fail:
  	return ret;
85da11ca5   Jason Wang   kvm: factor out c...
913
  }
d34e6b175   Gregory Haskins   KVM: add ioeventf...
914
915
916
917
918
919
920
921
  int
  kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
  {
  	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
  		return kvm_deassign_ioeventfd(kvm, args);
  
  	return kvm_assign_ioeventfd(kvm, args);
  }