Commit bda9020e2463ec94db9f97e8615f3bae22069838

Authored by Michael S. Tsirkin
Committed by Avi Kivity
1 parent 6c47469453

KVM: remove in_range from io devices

This changes bus accesses to use the high-level kvm_io_bus_read/kvm_io_bus_write
functions. in_range now becomes unused, so it is removed from device ops in
favor of read/write callbacks that perform range checks internally.

This allows aliasing (mostly for in-kernel virtio), as well as better error
handling by making it possible to pass errors up to userspace.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>

Showing 10 changed files with 152 additions and 208 deletions
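
For orientation, below is a minimal sketch of what a device handler looks like under the new convention. The kvm_io_device_ops structure, the callback signatures, and the -EOPNOTSUPP return value are taken from this patch; the device itself (example_dev with a single 32-bit register) is hypothetical and only illustrates the shape of the in-handler range check.

/* Hypothetical example device -- not part of this patch -- showing the
 * new contract: do the range check yourself, return -EOPNOTSUPP to pass
 * the access on to the next device on the bus, return 0 once handled. */
struct example_dev {
	struct kvm_io_device dev;
	gpa_t base;
	u32 reg;
};

static struct example_dev *to_example(struct kvm_io_device *dev)
{
	return container_of(dev, struct example_dev, dev);
}

static int example_write(struct kvm_io_device *this,
			 gpa_t addr, int len, const void *val)
{
	struct example_dev *e = to_example(this);

	if (addr != e->base || len != 4)
		return -EOPNOTSUPP;	/* not ours: let the bus keep looking */

	e->reg = *(const u32 *)val;
	return 0;			/* handled */
}

static int example_read(struct kvm_io_device *this,
			gpa_t addr, int len, void *val)
{
	struct example_dev *e = to_example(this);

	if (addr != e->base || len != 4)
		return -EOPNOTSUPP;

	*(u32 *)val = e->reg;
	return 0;
}

static const struct kvm_io_device_ops example_ops = {
	.read  = example_read,
	.write = example_write,
	/* no .in_range callback any more */
};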

arch/ia64/kvm/kvm-ia64.c
... ... @@ -210,16 +210,6 @@
210 210  
211 211 }
212 212  
213   -static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
214   - gpa_t addr, int len, int is_write)
215   -{
216   - struct kvm_io_device *dev;
217   -
218   - dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);
219   -
220   - return dev;
221   -}
222   -
223 213 static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
224 214 {
225 215 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
... ... @@ -231,6 +221,7 @@
231 221 {
232 222 struct kvm_mmio_req *p;
233 223 struct kvm_io_device *mmio_dev;
  224 + int r;
234 225  
235 226 p = kvm_get_vcpu_ioreq(vcpu);
236 227  
... ... @@ -247,16 +238,13 @@
247 238 kvm_run->exit_reason = KVM_EXIT_MMIO;
248 239 return 0;
249 240 mmio:
250   - mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
251   - if (mmio_dev) {
252   - if (!p->dir)
253   - kvm_iodevice_write(mmio_dev, p->addr, p->size,
254   - &p->data);
255   - else
256   - kvm_iodevice_read(mmio_dev, p->addr, p->size,
257   - &p->data);
258   -
259   - } else
  241 + if (p->dir)
  242 + r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr,
  243 + p->size, &p->data);
  244 + else
  245 + r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr,
  246 + p->size, &p->data);
  247 + if (r)
260 248 printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
261 249 p->state = STATE_IORESP_READY;
262 250  
arch/x86/kvm/i8254.c
... ... @@ -358,15 +358,23 @@
358 358 return container_of(dev, struct kvm_pit, speaker_dev);
359 359 }
360 360  
361   -static void pit_ioport_write(struct kvm_io_device *this,
362   - gpa_t addr, int len, const void *data)
  361 +static inline int pit_in_range(gpa_t addr)
363 362 {
  363 + return ((addr >= KVM_PIT_BASE_ADDRESS) &&
  364 + (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
  365 +}
  366 +
  367 +static int pit_ioport_write(struct kvm_io_device *this,
  368 + gpa_t addr, int len, const void *data)
  369 +{
364 370 struct kvm_pit *pit = dev_to_pit(this);
365 371 struct kvm_kpit_state *pit_state = &pit->pit_state;
366 372 struct kvm *kvm = pit->kvm;
367 373 int channel, access;
368 374 struct kvm_kpit_channel_state *s;
369 375 u32 val = *(u32 *) data;
  376 + if (!pit_in_range(addr))
  377 + return -EOPNOTSUPP;
370 378  
371 379 val &= 0xff;
372 380 addr &= KVM_PIT_CHANNEL_MASK;
... ... @@ -429,16 +437,19 @@
429 437 }
430 438  
431 439 mutex_unlock(&pit_state->lock);
  440 + return 0;
432 441 }
433 442  
434   -static void pit_ioport_read(struct kvm_io_device *this,
435   - gpa_t addr, int len, void *data)
  443 +static int pit_ioport_read(struct kvm_io_device *this,
  444 + gpa_t addr, int len, void *data)
436 445 {
437 446 struct kvm_pit *pit = dev_to_pit(this);
438 447 struct kvm_kpit_state *pit_state = &pit->pit_state;
439 448 struct kvm *kvm = pit->kvm;
440 449 int ret, count;
441 450 struct kvm_kpit_channel_state *s;
  451 + if (!pit_in_range(addr))
  452 + return -EOPNOTSUPP;
442 453  
443 454 addr &= KVM_PIT_CHANNEL_MASK;
444 455 s = &pit_state->channels[addr];
... ... @@ -493,37 +504,36 @@
493 504 memcpy(data, (char *)&ret, len);
494 505  
495 506 mutex_unlock(&pit_state->lock);
  507 + return 0;
496 508 }
497 509  
498   -static int pit_in_range(struct kvm_io_device *this, gpa_t addr,
499   - int len, int is_write)
  510 +static int speaker_ioport_write(struct kvm_io_device *this,
  511 + gpa_t addr, int len, const void *data)
500 512 {
501   - return ((addr >= KVM_PIT_BASE_ADDRESS) &&
502   - (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
503   -}
504   -
505   -static void speaker_ioport_write(struct kvm_io_device *this,
506   - gpa_t addr, int len, const void *data)
507   -{
508 513 struct kvm_pit *pit = speaker_to_pit(this);
509 514 struct kvm_kpit_state *pit_state = &pit->pit_state;
510 515 struct kvm *kvm = pit->kvm;
511 516 u32 val = *(u32 *) data;
  517 + if (addr != KVM_SPEAKER_BASE_ADDRESS)
  518 + return -EOPNOTSUPP;
512 519  
513 520 mutex_lock(&pit_state->lock);
514 521 pit_state->speaker_data_on = (val >> 1) & 1;
515 522 pit_set_gate(kvm, 2, val & 1);
516 523 mutex_unlock(&pit_state->lock);
  524 + return 0;
517 525 }
518 526  
519   -static void speaker_ioport_read(struct kvm_io_device *this,
520   - gpa_t addr, int len, void *data)
  527 +static int speaker_ioport_read(struct kvm_io_device *this,
  528 + gpa_t addr, int len, void *data)
521 529 {
522 530 struct kvm_pit *pit = speaker_to_pit(this);
523 531 struct kvm_kpit_state *pit_state = &pit->pit_state;
524 532 struct kvm *kvm = pit->kvm;
525 533 unsigned int refresh_clock;
526 534 int ret;
  535 + if (addr != KVM_SPEAKER_BASE_ADDRESS)
  536 + return -EOPNOTSUPP;
527 537  
528 538 /* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
529 539 refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;
530 540  
... ... @@ -535,14 +545,9 @@
535 545 len = sizeof(ret);
536 546 memcpy(data, (char *)&ret, len);
537 547 mutex_unlock(&pit_state->lock);
  548 + return 0;
538 549 }
539 550  
540   -static int speaker_in_range(struct kvm_io_device *this, gpa_t addr,
541   - int len, int is_write)
542   -{
543   - return (addr == KVM_SPEAKER_BASE_ADDRESS);
544   -}
545   -
546 551 void kvm_pit_reset(struct kvm_pit *pit)
547 552 {
548 553 int i;
549 554  
... ... @@ -574,13 +579,11 @@
574 579 static const struct kvm_io_device_ops pit_dev_ops = {
575 580 .read = pit_ioport_read,
576 581 .write = pit_ioport_write,
577   - .in_range = pit_in_range,
578 582 };
579 583  
580 584 static const struct kvm_io_device_ops speaker_dev_ops = {
581 585 .read = speaker_ioport_read,
582 586 .write = speaker_ioport_write,
583   - .in_range = speaker_in_range,
584 587 };
585 588  
586 589 /* Caller must have writers lock on slots_lock */
arch/x86/kvm/i8259.c
... ... @@ -430,8 +430,7 @@
430 430 return s->elcr;
431 431 }
432 432  
433   -static int picdev_in_range(struct kvm_io_device *this, gpa_t addr,
434   - int len, int is_write)
  433 +static int picdev_in_range(gpa_t addr)
435 434 {
436 435 switch (addr) {
437 436 case 0x20:
... ... @@ -451,16 +450,18 @@
451 450 return container_of(dev, struct kvm_pic, dev);
452 451 }
453 452  
454   -static void picdev_write(struct kvm_io_device *this,
  453 +static int picdev_write(struct kvm_io_device *this,
455 454 gpa_t addr, int len, const void *val)
456 455 {
457 456 struct kvm_pic *s = to_pic(this);
458 457 unsigned char data = *(unsigned char *)val;
  458 + if (!picdev_in_range(addr))
  459 + return -EOPNOTSUPP;
459 460  
460 461 if (len != 1) {
461 462 if (printk_ratelimit())
462 463 printk(KERN_ERR "PIC: non byte write\n");
463   - return;
  464 + return 0;
464 465 }
465 466 pic_lock(s);
466 467 switch (addr) {
... ... @@ -476,18 +477,21 @@
476 477 break;
477 478 }
478 479 pic_unlock(s);
  480 + return 0;
479 481 }
480 482  
481   -static void picdev_read(struct kvm_io_device *this,
482   - gpa_t addr, int len, void *val)
  483 +static int picdev_read(struct kvm_io_device *this,
  484 + gpa_t addr, int len, void *val)
483 485 {
484 486 struct kvm_pic *s = to_pic(this);
485 487 unsigned char data = 0;
  488 + if (!picdev_in_range(addr))
  489 + return -EOPNOTSUPP;
486 490  
487 491 if (len != 1) {
488 492 if (printk_ratelimit())
489 493 printk(KERN_ERR "PIC: non byte read\n");
490   - return;
  494 + return 0;
491 495 }
492 496 pic_lock(s);
493 497 switch (addr) {
... ... @@ -504,6 +508,7 @@
504 508 }
505 509 *(unsigned char *)val = data;
506 510 pic_unlock(s);
  511 + return 0;
507 512 }
508 513  
509 514 /*
... ... @@ -526,7 +531,6 @@
526 531 static const struct kvm_io_device_ops picdev_ops = {
527 532 .read = picdev_read,
528 533 .write = picdev_write,
529   - .in_range = picdev_in_range,
530 534 };
531 535  
532 536 struct kvm_pic *kvm_create_pic(struct kvm *kvm)
arch/x86/kvm/lapic.c
... ... @@ -546,18 +546,27 @@
546 546 return container_of(dev, struct kvm_lapic, dev);
547 547 }
548 548  
549   -static void apic_mmio_read(struct kvm_io_device *this,
550   - gpa_t address, int len, void *data)
  549 +static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
551 550 {
  551 + return apic_hw_enabled(apic) &&
  552 + addr >= apic->base_address &&
  553 + addr < apic->base_address + LAPIC_MMIO_LENGTH;
  554 +}
  555 +
  556 +static int apic_mmio_read(struct kvm_io_device *this,
  557 + gpa_t address, int len, void *data)
  558 +{
552 559 struct kvm_lapic *apic = to_lapic(this);
553 560 unsigned int offset = address - apic->base_address;
554 561 unsigned char alignment = offset & 0xf;
555 562 u32 result;
  563 + if (!apic_mmio_in_range(apic, address))
  564 + return -EOPNOTSUPP;
556 565  
557 566 if ((alignment + len) > 4) {
558 567 printk(KERN_ERR "KVM_APIC_READ: alignment error %lx %d",
559 568 (unsigned long)address, len);
560   - return;
  569 + return 0;
561 570 }
562 571 result = __apic_read(apic, offset & ~0xf);
563 572  
... ... @@ -574,6 +583,7 @@
574 583 "should be 1,2, or 4 instead\n", len);
575 584 break;
576 585 }
  586 + return 0;
577 587 }
578 588  
579 589 static void update_divide_count(struct kvm_lapic *apic)
580 590  
... ... @@ -629,13 +639,15 @@
629 639 apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
630 640 }
631 641  
632   -static void apic_mmio_write(struct kvm_io_device *this,
633   - gpa_t address, int len, const void *data)
  642 +static int apic_mmio_write(struct kvm_io_device *this,
  643 + gpa_t address, int len, const void *data)
634 644 {
635 645 struct kvm_lapic *apic = to_lapic(this);
636 646 unsigned int offset = address - apic->base_address;
637 647 unsigned char alignment = offset & 0xf;
638 648 u32 val;
  649 + if (!apic_mmio_in_range(apic, address))
  650 + return -EOPNOTSUPP;
639 651  
640 652 /*
641 653 * APIC register must be aligned on 128-bits boundary.
... ... @@ -646,7 +658,7 @@
646 658 /* Don't shout loud, $infamous_os would cause only noise. */
647 659 apic_debug("apic write: bad size=%d %lx\n",
648 660 len, (long)address);
649   - return;
  661 + return 0;
650 662 }
651 663  
652 664 val = *(u32 *) data;
... ... @@ -729,7 +741,7 @@
729 741 hrtimer_cancel(&apic->lapic_timer.timer);
730 742 apic_set_reg(apic, APIC_TMICT, val);
731 743 start_apic_timer(apic);
732   - return;
  744 + return 0;
733 745  
734 746 case APIC_TDCR:
735 747 if (val & 4)
736 748  
... ... @@ -743,24 +755,9 @@
743 755 offset);
744 756 break;
745 757 }
746   -
  758 + return 0;
747 759 }
748 760  
749   -static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr,
750   - int len, int size)
751   -{
752   - struct kvm_lapic *apic = to_lapic(this);
753   - int ret = 0;
754   -
755   -
756   - if (apic_hw_enabled(apic) &&
757   - (addr >= apic->base_address) &&
758   - (addr < (apic->base_address + LAPIC_MMIO_LENGTH)))
759   - ret = 1;
760   -
761   - return ret;
762   -}
763   -
764 761 void kvm_free_lapic(struct kvm_vcpu *vcpu)
765 762 {
766 763 if (!vcpu->arch.apic)
... ... @@ -938,7 +935,6 @@
938 935 static const struct kvm_io_device_ops apic_mmio_ops = {
939 936 .read = apic_mmio_read,
940 937 .write = apic_mmio_write,
941   - .in_range = apic_mmio_range,
942 938 };
943 939  
944 940 int kvm_create_lapic(struct kvm_vcpu *vcpu)
arch/x86/kvm/x86.c
... ... @@ -2333,35 +2333,23 @@
2333 2333 num_msrs_to_save = j;
2334 2334 }
2335 2335  
2336   -/*
2337   - * Only apic need an MMIO device hook, so shortcut now..
2338   - */
2339   -static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
2340   - gpa_t addr, int len,
2341   - int is_write)
  2336 +static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
  2337 + const void *v)
2342 2338 {
2343   - struct kvm_io_device *dev;
  2339 + if (vcpu->arch.apic &&
  2340 + !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
  2341 + return 0;
2344 2342  
2345   - if (vcpu->arch.apic) {
2346   - dev = &vcpu->arch.apic->dev;
2347   - if (kvm_iodevice_in_range(dev, addr, len, is_write))
2348   - return dev;
2349   - }
2350   - return NULL;
  2343 + return kvm_io_bus_write(&vcpu->kvm->mmio_bus, addr, len, v);
2351 2344 }
2352 2345  
2353   -
2354   -static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
2355   - gpa_t addr, int len,
2356   - int is_write)
  2346 +static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
2357 2347 {
2358   - struct kvm_io_device *dev;
  2348 + if (vcpu->arch.apic &&
  2349 + !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
  2350 + return 0;
2359 2351  
2360   - dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
2361   - if (dev == NULL)
2362   - dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
2363   - is_write);
2364   - return dev;
  2352 + return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
2365 2353 }
2366 2354  
2367 2355 static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
... ... @@ -2430,7 +2418,6 @@
2430 2418 unsigned int bytes,
2431 2419 struct kvm_vcpu *vcpu)
2432 2420 {
2433   - struct kvm_io_device *mmio_dev;
2434 2421 gpa_t gpa;
2435 2422  
2436 2423 if (vcpu->mmio_read_completed) {
2437 2424  
... ... @@ -2455,13 +2442,8 @@
2455 2442 /*
2456 2443 * Is this MMIO handled locally?
2457 2444 */
2458   - mutex_lock(&vcpu->kvm->lock);
2459   - mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
2460   - mutex_unlock(&vcpu->kvm->lock);
2461   - if (mmio_dev) {
2462   - kvm_iodevice_read(mmio_dev, gpa, bytes, val);
  2445 + if (!vcpu_mmio_read(vcpu, gpa, bytes, val))
2463 2446 return X86EMUL_CONTINUE;
2464   - }
2465 2447  
2466 2448 vcpu->mmio_needed = 1;
2467 2449 vcpu->mmio_phys_addr = gpa;
... ... @@ -2488,7 +2470,6 @@
2488 2470 unsigned int bytes,
2489 2471 struct kvm_vcpu *vcpu)
2490 2472 {
2491   - struct kvm_io_device *mmio_dev;
2492 2473 gpa_t gpa;
2493 2474  
2494 2475 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2495 2476  
... ... @@ -2509,13 +2490,8 @@
2509 2490 /*
2510 2491 * Is this MMIO handled locally?
2511 2492 */
2512   - mutex_lock(&vcpu->kvm->lock);
2513   - mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
2514   - mutex_unlock(&vcpu->kvm->lock);
2515   - if (mmio_dev) {
2516   - kvm_iodevice_write(mmio_dev, gpa, bytes, val);
  2493 + if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
2517 2494 return X86EMUL_CONTINUE;
2518   - }
2519 2495  
2520 2496 vcpu->mmio_needed = 1;
2521 2497 vcpu->mmio_phys_addr = gpa;
... ... @@ -2850,48 +2826,40 @@
2850 2826 return 0;
2851 2827 }
2852 2828  
2853   -static void kernel_pio(struct kvm_io_device *pio_dev,
2854   - struct kvm_vcpu *vcpu,
2855   - void *pd)
  2829 +static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
2856 2830 {
2857 2831 /* TODO: String I/O for in kernel device */
  2832 + int r;
2858 2833  
2859 2834 if (vcpu->arch.pio.in)
2860   - kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
2861   - vcpu->arch.pio.size,
2862   - pd);
  2835 + r = kvm_io_bus_read(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
  2836 + vcpu->arch.pio.size, pd);
2863 2837 else
2864   - kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
2865   - vcpu->arch.pio.size,
2866   - pd);
  2838 + r = kvm_io_bus_write(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
  2839 + vcpu->arch.pio.size, pd);
  2840 + return r;
2867 2841 }
2868 2842  
2869   -static void pio_string_write(struct kvm_io_device *pio_dev,
2870   - struct kvm_vcpu *vcpu)
  2843 +static int pio_string_write(struct kvm_vcpu *vcpu)
2871 2844 {
2872 2845 struct kvm_pio_request *io = &vcpu->arch.pio;
2873 2846 void *pd = vcpu->arch.pio_data;
2874   - int i;
  2847 + int i, r = 0;
2875 2848  
2876 2849 for (i = 0; i < io->cur_count; i++) {
2877   - kvm_iodevice_write(pio_dev, io->port,
2878   - io->size,
2879   - pd);
  2850 + if (kvm_io_bus_write(&vcpu->kvm->pio_bus,
  2851 + io->port, io->size, pd)) {
  2852 + r = -EOPNOTSUPP;
  2853 + break;
  2854 + }
2880 2855 pd += io->size;
2881 2856 }
  2857 + return r;
2882 2858 }
2883 2859  
2884   -static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
2885   - gpa_t addr, int len,
2886   - int is_write)
2887   -{
2888   - return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
2889   -}
2890   -
2891 2860 int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2892 2861 int size, unsigned port)
2893 2862 {
2894   - struct kvm_io_device *pio_dev;
2895 2863 unsigned long val;
2896 2864  
2897 2865 vcpu->run->exit_reason = KVM_EXIT_IO;
... ... @@ -2911,11 +2879,7 @@
2911 2879 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2912 2880 memcpy(vcpu->arch.pio_data, &val, 4);
2913 2881  
2914   - mutex_lock(&vcpu->kvm->lock);
2915   - pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
2916   - mutex_unlock(&vcpu->kvm->lock);
2917   - if (pio_dev) {
2918   - kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
  2882 + if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
2919 2883 complete_pio(vcpu);
2920 2884 return 1;
2921 2885 }
... ... @@ -2929,7 +2893,6 @@
2929 2893 {
2930 2894 unsigned now, in_page;
2931 2895 int ret = 0;
2932   - struct kvm_io_device *pio_dev;
2933 2896  
2934 2897 vcpu->run->exit_reason = KVM_EXIT_IO;
2935 2898 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
... ... @@ -2973,12 +2936,6 @@
2973 2936  
2974 2937 vcpu->arch.pio.guest_gva = address;
2975 2938  
2976   - mutex_lock(&vcpu->kvm->lock);
2977   - pio_dev = vcpu_find_pio_dev(vcpu, port,
2978   - vcpu->arch.pio.cur_count,
2979   - !vcpu->arch.pio.in);
2980   - mutex_unlock(&vcpu->kvm->lock);
2981   -
2982 2939 if (!vcpu->arch.pio.in) {
2983 2940 /* string PIO write */
2984 2941 ret = pio_copy_data(vcpu);
2985 2942  
... ... @@ -2986,16 +2943,13 @@
2986 2943 kvm_inject_gp(vcpu, 0);
2987 2944 return 1;
2988 2945 }
2989   - if (ret == 0 && pio_dev) {
2990   - pio_string_write(pio_dev, vcpu);
  2946 + if (ret == 0 && !pio_string_write(vcpu)) {
2991 2947 complete_pio(vcpu);
2992 2948 if (vcpu->arch.pio.count == 0)
2993 2949 ret = 1;
2994 2950 }
2995   - } else if (pio_dev)
2996   - pr_unimpl(vcpu, "no string pio read support yet, "
2997   - "port %x size %d count %ld\n",
2998   - port, size, count);
  2951 + }
  2952 + /* no string PIO read support yet */
2999 2953  
3000 2954 return ret;
3001 2955 }
include/linux/kvm_host.h
... ... @@ -60,8 +60,10 @@
60 60  
61 61 void kvm_io_bus_init(struct kvm_io_bus *bus);
62 62 void kvm_io_bus_destroy(struct kvm_io_bus *bus);
63   -struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
64   - gpa_t addr, int len, int is_write);
  63 +int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len,
  64 + const void *val);
  65 +int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len,
  66 + void *val);
65 67 void __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
66 68 struct kvm_io_device *dev);
67 69 void kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
virt/kvm/coalesced_mmio.c
... ... @@ -19,18 +19,14 @@
19 19 return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
20 20 }
21 21  
22   -static int coalesced_mmio_in_range(struct kvm_io_device *this,
23   - gpa_t addr, int len, int is_write)
  22 +static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
  23 + gpa_t addr, int len)
24 24 {
25   - struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
26 25 struct kvm_coalesced_mmio_zone *zone;
27 26 struct kvm_coalesced_mmio_ring *ring;
28 27 unsigned avail;
29 28 int i;
30 29  
31   - if (!is_write)
32   - return 0;
33   -
34 30 /* Are we able to batch it ? */
35 31  
36 32 /* last is the first free entry
37 33  
... ... @@ -60,11 +56,13 @@
60 56 return 0;
61 57 }
62 58  
63   -static void coalesced_mmio_write(struct kvm_io_device *this,
64   - gpa_t addr, int len, const void *val)
  59 +static int coalesced_mmio_write(struct kvm_io_device *this,
  60 + gpa_t addr, int len, const void *val)
65 61 {
66 62 struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
67 63 struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
  64 + if (!coalesced_mmio_in_range(dev, addr, len))
  65 + return -EOPNOTSUPP;
68 66  
69 67 spin_lock(&dev->lock);
70 68  
... ... @@ -76,6 +74,7 @@
76 74 smp_wmb();
77 75 ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
78 76 spin_unlock(&dev->lock);
  77 + return 0;
79 78 }
80 79  
81 80 static void coalesced_mmio_destructor(struct kvm_io_device *this)
... ... @@ -87,7 +86,6 @@
87 86  
88 87 static const struct kvm_io_device_ops coalesced_mmio_ops = {
89 88 .write = coalesced_mmio_write,
90   - .in_range = coalesced_mmio_in_range,
91 89 .destructor = coalesced_mmio_destructor,
92 90 };
93 91  
virt/kvm/ioapic.c
... ... @@ -227,20 +227,19 @@
227 227 return container_of(dev, struct kvm_ioapic, dev);
228 228 }
229 229  
230   -static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr,
231   - int len, int is_write)
  230 +static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
232 231 {
233   - struct kvm_ioapic *ioapic = to_ioapic(this);
234   -
235 232 return ((addr >= ioapic->base_address &&
236 233 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
237 234 }
238 235  
239   -static void ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
240   - void *val)
  236 +static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
  237 + void *val)
241 238 {
242 239 struct kvm_ioapic *ioapic = to_ioapic(this);
243 240 u32 result;
  241 + if (!ioapic_in_range(ioapic, addr))
  242 + return -EOPNOTSUPP;
244 243  
245 244 ioapic_debug("addr %lx\n", (unsigned long)addr);
246 245 ASSERT(!(addr & 0xf)); /* check alignment */
... ... @@ -273,13 +272,16 @@
273 272 printk(KERN_WARNING "ioapic: wrong length %d\n", len);
274 273 }
275 274 mutex_unlock(&ioapic->kvm->irq_lock);
  275 + return 0;
276 276 }
277 277  
278   -static void ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
279   - const void *val)
  278 +static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
  279 + const void *val)
280 280 {
281 281 struct kvm_ioapic *ioapic = to_ioapic(this);
282 282 u32 data;
  283 + if (!ioapic_in_range(ioapic, addr))
  284 + return -EOPNOTSUPP;
283 285  
284 286 ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
285 287 (void*)addr, len, val);
... ... @@ -290,7 +292,7 @@
290 292 data = *(u32 *) val;
291 293 else {
292 294 printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
293   - return;
  295 + return 0;
294 296 }
295 297  
296 298 addr &= 0xff;
... ... @@ -312,6 +314,7 @@
312 314 break;
313 315 }
314 316 mutex_unlock(&ioapic->kvm->irq_lock);
  317 + return 0;
315 318 }
316 319  
317 320 void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
... ... @@ -329,7 +332,6 @@
329 332 static const struct kvm_io_device_ops ioapic_mmio_ops = {
330 333 .read = ioapic_mmio_read,
331 334 .write = ioapic_mmio_write,
332   - .in_range = ioapic_in_range,
333 335 };
334 336  
335 337 int kvm_ioapic_init(struct kvm *kvm)
virt/kvm/iodev.h
... ... @@ -17,23 +17,24 @@
17 17 #define __KVM_IODEV_H__
18 18  
19 19 #include <linux/kvm_types.h>
  20 +#include <asm/errno.h>
20 21  
21 22 struct kvm_io_device;
22 23  
23 24 /**
24 25 * kvm_io_device_ops are called under kvm slots_lock.
  26 + * read and write handlers return 0 if the transaction has been handled,
  27 + * or non-zero to have it passed to the next device.
25 28 **/
26 29 struct kvm_io_device_ops {
27   - void (*read)(struct kvm_io_device *this,
  30 + int (*read)(struct kvm_io_device *this,
  31 + gpa_t addr,
  32 + int len,
  33 + void *val);
  34 + int (*write)(struct kvm_io_device *this,
28 35 gpa_t addr,
29 36 int len,
30   - void *val);
31   - void (*write)(struct kvm_io_device *this,
32   - gpa_t addr,
33   - int len,
34   - const void *val);
35   - int (*in_range)(struct kvm_io_device *this, gpa_t addr, int len,
36   - int is_write);
  37 + const void *val);
37 38 void (*destructor)(struct kvm_io_device *this);
38 39 };
39 40  
... ... @@ -48,26 +49,16 @@
48 49 dev->ops = ops;
49 50 }
50 51  
51   -static inline void kvm_iodevice_read(struct kvm_io_device *dev,
52   - gpa_t addr,
53   - int len,
54   - void *val)
  52 +static inline int kvm_iodevice_read(struct kvm_io_device *dev,
  53 + gpa_t addr, int l, void *v)
55 54 {
56   - dev->ops->read(dev, addr, len, val);
  55 + return dev->ops->read ? dev->ops->read(dev, addr, l, v) : -EOPNOTSUPP;
57 56 }
58 57  
59   -static inline void kvm_iodevice_write(struct kvm_io_device *dev,
60   - gpa_t addr,
61   - int len,
62   - const void *val)
  58 +static inline int kvm_iodevice_write(struct kvm_io_device *dev,
  59 + gpa_t addr, int l, const void *v)
63 60 {
64   - dev->ops->write(dev, addr, len, val);
65   -}
66   -
67   -static inline int kvm_iodevice_in_range(struct kvm_io_device *dev,
68   - gpa_t addr, int len, int is_write)
69   -{
70   - return dev->ops->in_range(dev, addr, len, is_write);
  61 + return dev->ops->write ? dev->ops->write(dev, addr, l, v) : -EOPNOTSUPP;
71 62 }
72 63  
73 64 static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
virt/kvm/kvm_main.c
... ... @@ -2512,19 +2512,25 @@
2512 2512 }
2513 2513 }
2514 2514  
2515   -struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
2516   - gpa_t addr, int len, int is_write)
  2515 +/* kvm_io_bus_write - called under kvm->slots_lock */
  2516 +int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
  2517 + int len, const void *val)
2517 2518 {
2518 2519 int i;
  2520 + for (i = 0; i < bus->dev_count; i++)
  2521 + if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
  2522 + return 0;
  2523 + return -EOPNOTSUPP;
  2524 +}
2519 2525  
2520   - for (i = 0; i < bus->dev_count; i++) {
2521   - struct kvm_io_device *pos = bus->devs[i];
2522   -
2523   - if (kvm_iodevice_in_range(pos, addr, len, is_write))
2524   - return pos;
2525   - }
2526   -
2527   - return NULL;
  2526 +/* kvm_io_bus_read - called under kvm->slots_lock */
  2527 +int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
  2528 +{
  2529 + int i;
  2530 + for (i = 0; i < bus->dev_count; i++)
  2531 + if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
  2532 + return 0;
  2533 + return -EOPNOTSUPP;
2528 2534 }
2529 2535  
2530 2536 void kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
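
With the bus helpers above, callers no longer look up a matching device and then issue the access; they simply call kvm_io_bus_write/kvm_io_bus_read and check the result. A rough usage sketch follows, assuming the mmio_bus and mmio_* fields shown in the x86.c hunk; the wrapper function itself is hypothetical and elides locking and error reporting.

/* Sketch only: try in-kernel devices first, fall back to a userspace exit. */
static int handle_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa,
			     int len, const void *val)
{
	if (!kvm_io_bus_write(&vcpu->kvm->mmio_bus, gpa, len, val))
		return 0;		/* an in-kernel device claimed it */

	vcpu->mmio_needed = 1;		/* nobody claimed it: go to userspace */
	vcpu->mmio_phys_addr = gpa;
	return 1;
}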