Commit adf094931ffb25ef4b381559918f1a34181a5273
Committed by
Greg Kroah-Hartman
1 parent
238c6d5483
Exists in
master
and in
39 other branches
PM: Simplify the new suspend/hibernation framework for devices
PM: Simplify the new suspend/hibernation framework for devices Following the discussion at the Kernel Summit, simplify the new device PM framework by merging 'struct pm_ops' and 'struct pm_ext_ops' and removing pointers to 'struct pm_ext_ops' from 'struct platform_driver' and 'struct pci_driver'. After this change, the suspend/hibernation callbacks will only reside in 'struct device_driver' as well as at the bus type/ device class/device type level. Accordingly, PCI and platform device drivers are now expected to put their suspend/hibernation callbacks into the 'struct device_driver' embedded in 'struct pci_driver' or 'struct platform_driver', respectively. Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl> Acked-by: Pavel Machek <pavel@suse.cz> Cc: Jesse Barnes <jbarnes@virtuousgeek.org> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Showing 8 changed files with 119 additions and 151 deletions (side-by-side diff)
drivers/base/platform.c
... | ... | @@ -503,8 +503,6 @@ |
503 | 503 | drv->driver.suspend = platform_drv_suspend; |
504 | 504 | if (drv->resume) |
505 | 505 | drv->driver.resume = platform_drv_resume; |
506 | - if (drv->pm) | |
507 | - drv->driver.pm = &drv->pm->base; | |
508 | 506 | return driver_register(&drv->driver); |
509 | 507 | } |
510 | 508 | EXPORT_SYMBOL_GPL(platform_driver_register); |
... | ... | @@ -686,7 +684,10 @@ |
686 | 684 | struct device_driver *drv = dev->driver; |
687 | 685 | int ret = 0; |
688 | 686 | |
689 | - if (drv && drv->pm) { | |
687 | + if (!drv) | |
688 | + return 0; | |
689 | + | |
690 | + if (drv->pm) { | |
690 | 691 | if (drv->pm->suspend) |
691 | 692 | ret = drv->pm->suspend(dev); |
692 | 693 | } else { |
693 | 694 | |
694 | 695 | |
... | ... | @@ -698,16 +699,15 @@ |
698 | 699 | |
699 | 700 | static int platform_pm_suspend_noirq(struct device *dev) |
700 | 701 | { |
701 | - struct platform_driver *pdrv; | |
702 | + struct device_driver *drv = dev->driver; | |
702 | 703 | int ret = 0; |
703 | 704 | |
704 | - if (!dev->driver) | |
705 | + if (!drv) | |
705 | 706 | return 0; |
706 | 707 | |
707 | - pdrv = to_platform_driver(dev->driver); | |
708 | - if (pdrv->pm) { | |
709 | - if (pdrv->pm->suspend_noirq) | |
710 | - ret = pdrv->pm->suspend_noirq(dev); | |
708 | + if (drv->pm) { | |
709 | + if (drv->pm->suspend_noirq) | |
710 | + ret = drv->pm->suspend_noirq(dev); | |
711 | 711 | } else { |
712 | 712 | ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND); |
713 | 713 | } |
... | ... | @@ -720,7 +720,10 @@ |
720 | 720 | struct device_driver *drv = dev->driver; |
721 | 721 | int ret = 0; |
722 | 722 | |
723 | - if (drv && drv->pm) { | |
723 | + if (!drv) | |
724 | + return 0; | |
725 | + | |
726 | + if (drv->pm) { | |
724 | 727 | if (drv->pm->resume) |
725 | 728 | ret = drv->pm->resume(dev); |
726 | 729 | } else { |
727 | 730 | |
728 | 731 | |
... | ... | @@ -732,16 +735,15 @@ |
732 | 735 | |
733 | 736 | static int platform_pm_resume_noirq(struct device *dev) |
734 | 737 | { |
735 | - struct platform_driver *pdrv; | |
738 | + struct device_driver *drv = dev->driver; | |
736 | 739 | int ret = 0; |
737 | 740 | |
738 | - if (!dev->driver) | |
741 | + if (!drv) | |
739 | 742 | return 0; |
740 | 743 | |
741 | - pdrv = to_platform_driver(dev->driver); | |
742 | - if (pdrv->pm) { | |
743 | - if (pdrv->pm->resume_noirq) | |
744 | - ret = pdrv->pm->resume_noirq(dev); | |
744 | + if (drv->pm) { | |
745 | + if (drv->pm->resume_noirq) | |
746 | + ret = drv->pm->resume_noirq(dev); | |
745 | 747 | } else { |
746 | 748 | ret = platform_legacy_resume_early(dev); |
747 | 749 | } |
748 | 750 | |
749 | 751 | |
... | ... | @@ -780,16 +782,15 @@ |
780 | 782 | |
781 | 783 | static int platform_pm_freeze_noirq(struct device *dev) |
782 | 784 | { |
783 | - struct platform_driver *pdrv; | |
785 | + struct device_driver *drv = dev->driver; | |
784 | 786 | int ret = 0; |
785 | 787 | |
786 | - if (!dev->driver) | |
788 | + if (!drv) | |
787 | 789 | return 0; |
788 | 790 | |
789 | - pdrv = to_platform_driver(dev->driver); | |
790 | - if (pdrv->pm) { | |
791 | - if (pdrv->pm->freeze_noirq) | |
792 | - ret = pdrv->pm->freeze_noirq(dev); | |
791 | + if (drv->pm) { | |
792 | + if (drv->pm->freeze_noirq) | |
793 | + ret = drv->pm->freeze_noirq(dev); | |
793 | 794 | } else { |
794 | 795 | ret = platform_legacy_suspend_late(dev, PMSG_FREEZE); |
795 | 796 | } |
... | ... | @@ -802,7 +803,10 @@ |
802 | 803 | struct device_driver *drv = dev->driver; |
803 | 804 | int ret = 0; |
804 | 805 | |
805 | - if (drv && drv->pm) { | |
806 | + if (!drv) | |
807 | + return 0; | |
808 | + | |
809 | + if (drv->pm) { | |
806 | 810 | if (drv->pm->thaw) |
807 | 811 | ret = drv->pm->thaw(dev); |
808 | 812 | } else { |
809 | 813 | |
810 | 814 | |
... | ... | @@ -814,16 +818,15 @@ |
814 | 818 | |
815 | 819 | static int platform_pm_thaw_noirq(struct device *dev) |
816 | 820 | { |
817 | - struct platform_driver *pdrv; | |
821 | + struct device_driver *drv = dev->driver; | |
818 | 822 | int ret = 0; |
819 | 823 | |
820 | - if (!dev->driver) | |
824 | + if (!drv) | |
821 | 825 | return 0; |
822 | 826 | |
823 | - pdrv = to_platform_driver(dev->driver); | |
824 | - if (pdrv->pm) { | |
825 | - if (pdrv->pm->thaw_noirq) | |
826 | - ret = pdrv->pm->thaw_noirq(dev); | |
827 | + if (drv->pm) { | |
828 | + if (drv->pm->thaw_noirq) | |
829 | + ret = drv->pm->thaw_noirq(dev); | |
827 | 830 | } else { |
828 | 831 | ret = platform_legacy_resume_early(dev); |
829 | 832 | } |
... | ... | @@ -836,7 +839,10 @@ |
836 | 839 | struct device_driver *drv = dev->driver; |
837 | 840 | int ret = 0; |
838 | 841 | |
839 | - if (drv && drv->pm) { | |
842 | + if (!drv) | |
843 | + return 0; | |
844 | + | |
845 | + if (drv->pm) { | |
840 | 846 | if (drv->pm->poweroff) |
841 | 847 | ret = drv->pm->poweroff(dev); |
842 | 848 | } else { |
843 | 849 | |
844 | 850 | |
... | ... | @@ -848,16 +854,15 @@ |
848 | 854 | |
849 | 855 | static int platform_pm_poweroff_noirq(struct device *dev) |
850 | 856 | { |
851 | - struct platform_driver *pdrv; | |
857 | + struct device_driver *drv = dev->driver; | |
852 | 858 | int ret = 0; |
853 | 859 | |
854 | - if (!dev->driver) | |
860 | + if (!drv) | |
855 | 861 | return 0; |
856 | 862 | |
857 | - pdrv = to_platform_driver(dev->driver); | |
858 | - if (pdrv->pm) { | |
859 | - if (pdrv->pm->poweroff_noirq) | |
860 | - ret = pdrv->pm->poweroff_noirq(dev); | |
863 | + if (drv->pm) { | |
864 | + if (drv->pm->poweroff_noirq) | |
865 | + ret = drv->pm->poweroff_noirq(dev); | |
861 | 866 | } else { |
862 | 867 | ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE); |
863 | 868 | } |
... | ... | @@ -870,7 +875,10 @@ |
870 | 875 | struct device_driver *drv = dev->driver; |
871 | 876 | int ret = 0; |
872 | 877 | |
873 | - if (drv && drv->pm) { | |
878 | + if (!drv) | |
879 | + return 0; | |
880 | + | |
881 | + if (drv->pm) { | |
874 | 882 | if (drv->pm->restore) |
875 | 883 | ret = drv->pm->restore(dev); |
876 | 884 | } else { |
877 | 885 | |
878 | 886 | |
... | ... | @@ -882,16 +890,15 @@ |
882 | 890 | |
883 | 891 | static int platform_pm_restore_noirq(struct device *dev) |
884 | 892 | { |
885 | - struct platform_driver *pdrv; | |
893 | + struct device_driver *drv = dev->driver; | |
886 | 894 | int ret = 0; |
887 | 895 | |
888 | - if (!dev->driver) | |
896 | + if (!drv) | |
889 | 897 | return 0; |
890 | 898 | |
891 | - pdrv = to_platform_driver(dev->driver); | |
892 | - if (pdrv->pm) { | |
893 | - if (pdrv->pm->restore_noirq) | |
894 | - ret = pdrv->pm->restore_noirq(dev); | |
899 | + if (drv->pm) { | |
900 | + if (drv->pm->restore_noirq) | |
901 | + ret = drv->pm->restore_noirq(dev); | |
895 | 902 | } else { |
896 | 903 | ret = platform_legacy_resume_early(dev); |
897 | 904 | } |
... | ... | @@ -912,17 +919,15 @@ |
912 | 919 | |
913 | 920 | #endif /* !CONFIG_HIBERNATION */ |
914 | 921 | |
915 | -static struct pm_ext_ops platform_pm_ops = { | |
916 | - .base = { | |
917 | - .prepare = platform_pm_prepare, | |
918 | - .complete = platform_pm_complete, | |
919 | - .suspend = platform_pm_suspend, | |
920 | - .resume = platform_pm_resume, | |
921 | - .freeze = platform_pm_freeze, | |
922 | - .thaw = platform_pm_thaw, | |
923 | - .poweroff = platform_pm_poweroff, | |
924 | - .restore = platform_pm_restore, | |
925 | - }, | |
922 | +static struct dev_pm_ops platform_dev_pm_ops = { | |
923 | + .prepare = platform_pm_prepare, | |
924 | + .complete = platform_pm_complete, | |
925 | + .suspend = platform_pm_suspend, | |
926 | + .resume = platform_pm_resume, | |
927 | + .freeze = platform_pm_freeze, | |
928 | + .thaw = platform_pm_thaw, | |
929 | + .poweroff = platform_pm_poweroff, | |
930 | + .restore = platform_pm_restore, | |
926 | 931 | .suspend_noirq = platform_pm_suspend_noirq, |
927 | 932 | .resume_noirq = platform_pm_resume_noirq, |
928 | 933 | .freeze_noirq = platform_pm_freeze_noirq, |
... | ... | @@ -931,7 +936,7 @@ |
931 | 936 | .restore_noirq = platform_pm_restore_noirq, |
932 | 937 | }; |
933 | 938 | |
934 | -#define PLATFORM_PM_OPS_PTR &platform_pm_ops | |
939 | +#define PLATFORM_PM_OPS_PTR (&platform_dev_pm_ops) | |
935 | 940 | |
936 | 941 | #else /* !CONFIG_PM_SLEEP */ |
937 | 942 |
drivers/base/power/main.c
... | ... | @@ -112,7 +112,8 @@ |
112 | 112 | * @ops: PM operations to choose from. |
113 | 113 | * @state: PM transition of the system being carried out. |
114 | 114 | */ |
115 | -static int pm_op(struct device *dev, struct pm_ops *ops, pm_message_t state) | |
115 | +static int pm_op(struct device *dev, struct dev_pm_ops *ops, | |
116 | + pm_message_t state) | |
116 | 117 | { |
117 | 118 | int error = 0; |
118 | 119 | |
... | ... | @@ -174,7 +175,7 @@ |
174 | 175 | * The operation is executed with interrupts disabled by the only remaining |
175 | 176 | * functional CPU in the system. |
176 | 177 | */ |
177 | -static int pm_noirq_op(struct device *dev, struct pm_ext_ops *ops, | |
178 | +static int pm_noirq_op(struct device *dev, struct dev_pm_ops *ops, | |
178 | 179 | pm_message_t state) |
179 | 180 | { |
180 | 181 | int error = 0; |
... | ... | @@ -354,7 +355,7 @@ |
354 | 355 | if (dev->bus) { |
355 | 356 | if (dev->bus->pm) { |
356 | 357 | pm_dev_dbg(dev, state, ""); |
357 | - error = pm_op(dev, &dev->bus->pm->base, state); | |
358 | + error = pm_op(dev, dev->bus->pm, state); | |
358 | 359 | } else if (dev->bus->resume) { |
359 | 360 | pm_dev_dbg(dev, state, "legacy "); |
360 | 361 | error = dev->bus->resume(dev); |
361 | 362 | |
... | ... | @@ -451,9 +452,9 @@ |
451 | 452 | dev->type->pm->complete(dev); |
452 | 453 | } |
453 | 454 | |
454 | - if (dev->bus && dev->bus->pm && dev->bus->pm->base.complete) { | |
455 | + if (dev->bus && dev->bus->pm && dev->bus->pm->complete) { | |
455 | 456 | pm_dev_dbg(dev, state, "completing "); |
456 | - dev->bus->pm->base.complete(dev); | |
457 | + dev->bus->pm->complete(dev); | |
457 | 458 | } |
458 | 459 | |
459 | 460 | up(&dev->sem); |
... | ... | @@ -624,7 +625,7 @@ |
624 | 625 | if (dev->bus) { |
625 | 626 | if (dev->bus->pm) { |
626 | 627 | pm_dev_dbg(dev, state, ""); |
627 | - error = pm_op(dev, &dev->bus->pm->base, state); | |
628 | + error = pm_op(dev, dev->bus->pm, state); | |
628 | 629 | } else if (dev->bus->suspend) { |
629 | 630 | pm_dev_dbg(dev, state, "legacy "); |
630 | 631 | error = dev->bus->suspend(dev, state); |
631 | 632 | |
... | ... | @@ -685,10 +686,10 @@ |
685 | 686 | |
686 | 687 | down(&dev->sem); |
687 | 688 | |
688 | - if (dev->bus && dev->bus->pm && dev->bus->pm->base.prepare) { | |
689 | + if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) { | |
689 | 690 | pm_dev_dbg(dev, state, "preparing "); |
690 | - error = dev->bus->pm->base.prepare(dev); | |
691 | - suspend_report_result(dev->bus->pm->base.prepare, error); | |
691 | + error = dev->bus->pm->prepare(dev); | |
692 | + suspend_report_result(dev->bus->pm->prepare, error); | |
692 | 693 | if (error) |
693 | 694 | goto End; |
694 | 695 | } |
drivers/pci/pci-driver.c
... | ... | @@ -433,8 +433,7 @@ |
433 | 433 | |
434 | 434 | static int pci_pm_suspend_noirq(struct device *dev) |
435 | 435 | { |
436 | - struct pci_dev *pci_dev = to_pci_dev(dev); | |
437 | - struct pci_driver *drv = pci_dev->driver; | |
436 | + struct device_driver *drv = dev->driver; | |
438 | 437 | int error = 0; |
439 | 438 | |
440 | 439 | if (drv && drv->pm) { |
441 | 440 | |
... | ... | @@ -469,11 +468,10 @@ |
469 | 468 | |
470 | 469 | static int pci_pm_resume_noirq(struct device *dev) |
471 | 470 | { |
472 | - struct pci_dev *pci_dev = to_pci_dev(dev); | |
473 | - struct pci_driver *drv = pci_dev->driver; | |
471 | + struct device_driver *drv = dev->driver; | |
474 | 472 | int error = 0; |
475 | 473 | |
476 | - pci_fixup_device(pci_fixup_resume_early, pci_dev); | |
474 | + pci_fixup_device(pci_fixup_resume_early, to_pci_dev(dev)); | |
477 | 475 | |
478 | 476 | if (drv && drv->pm) { |
479 | 477 | if (drv->pm->resume_noirq) |
... | ... | @@ -519,8 +517,7 @@ |
519 | 517 | |
520 | 518 | static int pci_pm_freeze_noirq(struct device *dev) |
521 | 519 | { |
522 | - struct pci_dev *pci_dev = to_pci_dev(dev); | |
523 | - struct pci_driver *drv = pci_dev->driver; | |
520 | + struct device_driver *drv = dev->driver; | |
524 | 521 | int error = 0; |
525 | 522 | |
526 | 523 | if (drv && drv->pm) { |
527 | 524 | |
... | ... | @@ -553,15 +550,14 @@ |
553 | 550 | |
554 | 551 | static int pci_pm_thaw_noirq(struct device *dev) |
555 | 552 | { |
556 | - struct pci_dev *pci_dev = to_pci_dev(dev); | |
557 | - struct pci_driver *drv = pci_dev->driver; | |
553 | + struct device_driver *drv = dev->driver; | |
558 | 554 | int error = 0; |
559 | 555 | |
560 | 556 | if (drv && drv->pm) { |
561 | 557 | if (drv->pm->thaw_noirq) |
562 | 558 | error = drv->pm->thaw_noirq(dev); |
563 | 559 | } else { |
564 | - pci_fixup_device(pci_fixup_resume_early, pci_dev); | |
560 | + pci_fixup_device(pci_fixup_resume_early, to_pci_dev(dev)); | |
565 | 561 | error = pci_legacy_resume_early(dev); |
566 | 562 | } |
567 | 563 | |
... | ... | @@ -589,8 +585,7 @@ |
589 | 585 | |
590 | 586 | static int pci_pm_poweroff_noirq(struct device *dev) |
591 | 587 | { |
592 | - struct pci_dev *pci_dev = to_pci_dev(dev); | |
593 | - struct pci_driver *drv = pci_dev->driver; | |
588 | + struct device_driver *drv = dev->driver; | |
594 | 589 | int error = 0; |
595 | 590 | |
596 | 591 | if (drv && drv->pm) { |
... | ... | @@ -625,7 +620,7 @@ |
625 | 620 | static int pci_pm_restore_noirq(struct device *dev) |
626 | 621 | { |
627 | 622 | struct pci_dev *pci_dev = to_pci_dev(dev); |
628 | - struct pci_driver *drv = pci_dev->driver; | |
623 | + struct device_driver *drv = dev->driver; | |
629 | 624 | int error = 0; |
630 | 625 | |
631 | 626 | pci_fixup_device(pci_fixup_resume, pci_dev); |
... | ... | @@ -654,17 +649,15 @@ |
654 | 649 | |
655 | 650 | #endif /* !CONFIG_HIBERNATION */ |
656 | 651 | |
657 | -struct pm_ext_ops pci_pm_ops = { | |
658 | - .base = { | |
659 | - .prepare = pci_pm_prepare, | |
660 | - .complete = pci_pm_complete, | |
661 | - .suspend = pci_pm_suspend, | |
662 | - .resume = pci_pm_resume, | |
663 | - .freeze = pci_pm_freeze, | |
664 | - .thaw = pci_pm_thaw, | |
665 | - .poweroff = pci_pm_poweroff, | |
666 | - .restore = pci_pm_restore, | |
667 | - }, | |
652 | +struct dev_pm_ops pci_dev_pm_ops = { | |
653 | + .prepare = pci_pm_prepare, | |
654 | + .complete = pci_pm_complete, | |
655 | + .suspend = pci_pm_suspend, | |
656 | + .resume = pci_pm_resume, | |
657 | + .freeze = pci_pm_freeze, | |
658 | + .thaw = pci_pm_thaw, | |
659 | + .poweroff = pci_pm_poweroff, | |
660 | + .restore = pci_pm_restore, | |
668 | 661 | .suspend_noirq = pci_pm_suspend_noirq, |
669 | 662 | .resume_noirq = pci_pm_resume_noirq, |
670 | 663 | .freeze_noirq = pci_pm_freeze_noirq, |
... | ... | @@ -673,7 +666,7 @@ |
673 | 666 | .restore_noirq = pci_pm_restore_noirq, |
674 | 667 | }; |
675 | 668 | |
676 | -#define PCI_PM_OPS_PTR &pci_pm_ops | |
669 | +#define PCI_PM_OPS_PTR (&pci_dev_pm_ops) | |
677 | 670 | |
678 | 671 | #else /* !CONFIG_PM_SLEEP */ |
679 | 672 | |
... | ... | @@ -702,9 +695,6 @@ |
702 | 695 | drv->driver.bus = &pci_bus_type; |
703 | 696 | drv->driver.owner = owner; |
704 | 697 | drv->driver.mod_name = mod_name; |
705 | - | |
706 | - if (drv->pm) | |
707 | - drv->driver.pm = &drv->pm->base; | |
708 | 698 | |
709 | 699 | spin_lock_init(&drv->dynids.lock); |
710 | 700 | INIT_LIST_HEAD(&drv->dynids.list); |
drivers/usb/core/usb.c
... | ... | @@ -286,7 +286,7 @@ |
286 | 286 | return usb_resume(dev); |
287 | 287 | } |
288 | 288 | |
289 | -static struct pm_ops usb_device_pm_ops = { | |
289 | +static struct dev_pm_ops usb_device_pm_ops = { | |
290 | 290 | .prepare = usb_dev_prepare, |
291 | 291 | .complete = usb_dev_complete, |
292 | 292 | .suspend = usb_dev_suspend, |
... | ... | @@ -301,7 +301,7 @@ |
301 | 301 | |
302 | 302 | #define ksuspend_usb_init() 0 |
303 | 303 | #define ksuspend_usb_cleanup() do {} while (0) |
304 | -#define usb_device_pm_ops (*(struct pm_ops *)0) | |
304 | +#define usb_device_pm_ops (*(struct dev_pm_ops *)0) | |
305 | 305 | |
306 | 306 | #endif /* CONFIG_PM */ |
307 | 307 |
include/linux/device.h
... | ... | @@ -65,7 +65,7 @@ |
65 | 65 | int (*resume_early)(struct device *dev); |
66 | 66 | int (*resume)(struct device *dev); |
67 | 67 | |
68 | - struct pm_ext_ops *pm; | |
68 | + struct dev_pm_ops *pm; | |
69 | 69 | |
70 | 70 | struct bus_type_private *p; |
71 | 71 | }; |
... | ... | @@ -133,7 +133,7 @@ |
133 | 133 | int (*resume) (struct device *dev); |
134 | 134 | struct attribute_group **groups; |
135 | 135 | |
136 | - struct pm_ops *pm; | |
136 | + struct dev_pm_ops *pm; | |
137 | 137 | |
138 | 138 | struct driver_private *p; |
139 | 139 | }; |
... | ... | @@ -198,7 +198,7 @@ |
198 | 198 | int (*suspend)(struct device *dev, pm_message_t state); |
199 | 199 | int (*resume)(struct device *dev); |
200 | 200 | |
201 | - struct pm_ops *pm; | |
201 | + struct dev_pm_ops *pm; | |
202 | 202 | struct class_private *p; |
203 | 203 | }; |
204 | 204 | |
... | ... | @@ -291,7 +291,7 @@ |
291 | 291 | int (*suspend)(struct device *dev, pm_message_t state); |
292 | 292 | int (*resume)(struct device *dev); |
293 | 293 | |
294 | - struct pm_ops *pm; | |
294 | + struct dev_pm_ops *pm; | |
295 | 295 | }; |
296 | 296 | |
297 | 297 | /* interface for exporting device attributes */ |
include/linux/pci.h
... | ... | @@ -421,7 +421,6 @@ |
421 | 421 | int (*resume_early) (struct pci_dev *dev); |
422 | 422 | int (*resume) (struct pci_dev *dev); /* Device woken up */ |
423 | 423 | void (*shutdown) (struct pci_dev *dev); |
424 | - struct pm_ext_ops *pm; | |
425 | 424 | struct pci_error_handlers *err_handler; |
426 | 425 | struct device_driver driver; |
427 | 426 | struct pci_dynids dynids; |
include/linux/platform_device.h
include/linux/pm.h
... | ... | @@ -41,7 +41,7 @@ |
41 | 41 | } pm_message_t; |
42 | 42 | |
43 | 43 | /** |
44 | - * struct pm_ops - device PM callbacks | |
44 | + * struct dev_pm_ops - device PM callbacks | |
45 | 45 | * |
46 | 46 | * Several driver power state transitions are externally visible, affecting |
47 | 47 | * the state of pending I/O queues and (for drivers that touch hardware) |
... | ... | @@ -126,46 +126,6 @@ |
126 | 126 | * On most platforms, there are no restrictions on availability of |
127 | 127 | * resources like clocks during @restore(). |
128 | 128 | * |
129 | - * All of the above callbacks, except for @complete(), return error codes. | |
130 | - * However, the error codes returned by the resume operations, @resume(), | |
131 | - * @thaw(), and @restore(), do not cause the PM core to abort the resume | |
132 | - * transition during which they are returned. The error codes returned in | |
133 | - * that cases are only printed by the PM core to the system logs for debugging | |
134 | - * purposes. Still, it is recommended that drivers only return error codes | |
135 | - * from their resume methods in case of an unrecoverable failure (i.e. when the | |
136 | - * device being handled refuses to resume and becomes unusable) to allow us to | |
137 | - * modify the PM core in the future, so that it can avoid attempting to handle | |
138 | - * devices that failed to resume and their children. | |
139 | - * | |
140 | - * It is allowed to unregister devices while the above callbacks are being | |
141 | - * executed. However, it is not allowed to unregister a device from within any | |
142 | - * of its own callbacks. | |
143 | - */ | |
144 | - | |
145 | -struct pm_ops { | |
146 | - int (*prepare)(struct device *dev); | |
147 | - void (*complete)(struct device *dev); | |
148 | - int (*suspend)(struct device *dev); | |
149 | - int (*resume)(struct device *dev); | |
150 | - int (*freeze)(struct device *dev); | |
151 | - int (*thaw)(struct device *dev); | |
152 | - int (*poweroff)(struct device *dev); | |
153 | - int (*restore)(struct device *dev); | |
154 | -}; | |
155 | - | |
156 | -/** | |
157 | - * struct pm_ext_ops - extended device PM callbacks | |
158 | - * | |
159 | - * Some devices require certain operations related to suspend and hibernation | |
160 | - * to be carried out with interrupts disabled. Thus, 'struct pm_ext_ops' below | |
161 | - * is defined, adding callbacks to be executed with interrupts disabled to | |
162 | - * 'struct pm_ops'. | |
163 | - * | |
164 | - * The following callbacks included in 'struct pm_ext_ops' are executed with | |
165 | - * the nonboot CPUs switched off and with interrupts disabled on the only | |
166 | - * functional CPU. They also are executed with the PM core list of devices | |
167 | - * locked, so they must NOT unregister any devices. | |
168 | - * | |
169 | 129 | * @suspend_noirq: Complete the operations of ->suspend() by carrying out any |
170 | 130 | * actions required for suspending the device that need interrupts to be |
171 | 131 | * disabled |
172 | 132 | |
... | ... | @@ -190,18 +150,32 @@ |
190 | 150 | * actions required for restoring the operations of the device that need |
191 | 151 | * interrupts to be disabled |
192 | 152 | * |
193 | - * All of the above callbacks return error codes, but the error codes returned | |
194 | - * by the resume operations, @resume_noirq(), @thaw_noirq(), and | |
195 | - * @restore_noirq(), do not cause the PM core to abort the resume transition | |
196 | - * during which they are returned. The error codes returned in that cases are | |
197 | - * only printed by the PM core to the system logs for debugging purposes. | |
198 | - * Still, as stated above, it is recommended that drivers only return error | |
199 | - * codes from their resume methods if the device being handled fails to resume | |
200 | - * and is not usable any more. | |
153 | + * All of the above callbacks, except for @complete(), return error codes. | |
154 | + * However, the error codes returned by the resume operations, @resume(), | |
155 | + * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do | |
156 | + * not cause the PM core to abort the resume transition during which they are | |
157 | + * returned. The error codes returned in those cases are only printed by the PM |
158 | + * core to the system logs for debugging purposes. Still, it is recommended | |
159 | + * that drivers only return error codes from their resume methods in case of an | |
160 | + * unrecoverable failure (i.e. when the device being handled refuses to resume | |
161 | + * and becomes unusable) to allow us to modify the PM core in the future, so | |
162 | + * that it can avoid attempting to handle devices that failed to resume and | |
163 | + * their children. | |
164 | + * | |
165 | + * It is allowed to unregister devices while the above callbacks are being | |
166 | + * executed. However, it is not allowed to unregister a device from within any | |
167 | + * of its own callbacks. | |
201 | 168 | */ |
202 | 169 | |
203 | -struct pm_ext_ops { | |
204 | - struct pm_ops base; | |
170 | +struct dev_pm_ops { | |
171 | + int (*prepare)(struct device *dev); | |
172 | + void (*complete)(struct device *dev); | |
173 | + int (*suspend)(struct device *dev); | |
174 | + int (*resume)(struct device *dev); | |
175 | + int (*freeze)(struct device *dev); | |
176 | + int (*thaw)(struct device *dev); | |
177 | + int (*poweroff)(struct device *dev); | |
178 | + int (*restore)(struct device *dev); | |
205 | 179 | int (*suspend_noirq)(struct device *dev); |
206 | 180 | int (*resume_noirq)(struct device *dev); |
207 | 181 | int (*freeze_noirq)(struct device *dev); |