Commit 6c9546675864f51506af69eca388e5d922942c56

Authored by Lin Ming
Committed by Jens Axboe
1 parent 6631127469

block: add runtime pm helpers

Add runtime pm helper functions:

void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
  - Initialization function for drivers to call.

int blk_pre_runtime_suspend(struct request_queue *q)
  - If any requests are in the queue, mark last busy and return -EBUSY.
    Otherwise set q->rpm_status to RPM_SUSPENDING and return 0.

void blk_post_runtime_suspend(struct request_queue *q, int err)
  - If the suspend succeeded then set q->rpm_status to RPM_SUSPENDED.
    Otherwise set it to RPM_ACTIVE and mark last busy.

void blk_pre_runtime_resume(struct request_queue *q)
  - Set q->rpm_status to RPM_RESUMING.

void blk_post_runtime_resume(struct request_queue *q, int err)
  - If the resume succeeded then set q->rpm_status to RPM_ACTIVE
    and call __blk_run_queue, then mark last busy and autosuspend.
    Otherwise set q->rpm_status to RPM_SUSPENDED.

The idea and the API were designed by Alan Stern and are described here:
http://marc.info/?l=linux-scsi&m=133727953625963&w=2
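
As a hedged illustration of how the five helpers are meant to plug into a
driver's runtime PM callbacks (this sketch is not part of the commit; the
mydev_* names, struct mydev, and the mydev_hw_suspend()/mydev_hw_resume()
hooks are made-up placeholders):

#include <linux/blkdev.h>
#include <linux/pm_runtime.h>

struct mydev {                          /* hypothetical driver data */
        struct request_queue *queue;
};

static int mydev_runtime_suspend(struct device *dev)
{
        struct mydev *md = dev_get_drvdata(dev);
        int err;

        /* Bail out (and mark last busy) if requests are still queued. */
        err = blk_pre_runtime_suspend(md->queue);
        if (err)
                return err;

        err = mydev_hw_suspend(md);     /* hypothetical hardware hook */
        /* Records RPM_SUSPENDED on success, RPM_ACTIVE + last busy on error. */
        blk_post_runtime_suspend(md->queue, err);
        return err;
}

static int mydev_runtime_resume(struct device *dev)
{
        struct mydev *md = dev_get_drvdata(dev);
        int err;

        blk_pre_runtime_resume(md->queue);
        err = mydev_hw_resume(md);      /* hypothetical hardware hook */
        /* On success this also runs the queue and kicks off autosuspend. */
        blk_post_runtime_resume(md->queue, err);
        return err;
}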

Signed-off-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Aaron Lu <aaron.lu@intel.com>
Acked-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Showing 2 changed files with 171 additions and 0 deletions

block/blk-core.c
... ... @@ -30,6 +30,7 @@
30 30 #include <linux/list_sort.h>
31 31 #include <linux/delay.h>
32 32 #include <linux/ratelimit.h>
  33 +#include <linux/pm_runtime.h>
33 34  
34 35 #define CREATE_TRACE_POINTS
35 36 #include <trace/events/block.h>
... ... @@ -3044,6 +3045,149 @@
3044 3045 current->plug = NULL;
3045 3046 }
3046 3047 EXPORT_SYMBOL(blk_finish_plug);
  3048 +
  3049 +#ifdef CONFIG_PM_RUNTIME
  3050 +/**
  3051 + * blk_pm_runtime_init - Block layer runtime PM initialization routine
  3052 + * @q: the queue of the device
  3053 + * @dev: the device the queue belongs to
  3054 + *
  3055 + * Description:
  3056 + * Initialize runtime-PM-related fields for @q and start auto suspend for
  3057 + * @dev. Drivers that want to take advantage of request-based runtime PM
  3058 + * should call this function after @dev has been initialized and its request
  3059 + * queue @q has been allocated, while runtime PM for it cannot yet happen
  3060 + * (because it is disabled/forbidden or its usage_count > 0). In most cases,
  3061 + * the driver should call this function before any I/O has taken place.
  3062 + *
  3063 + * This function takes care of setting up autosuspend for the device; the
  3064 + * autosuspend delay is set to -1 to make runtime suspend impossible until
  3065 + * an updated value is set either by the user or by the driver. Drivers do
  3066 + * not need to touch any other autosuspend settings.
  3067 + *
  3068 + * Block layer runtime PM is request based, so it only works for drivers
  3069 + * that use requests as their I/O unit rather than operating directly on bios.
  3070 + */
  3071 +void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
  3072 +{
  3073 + q->dev = dev;
  3074 + q->rpm_status = RPM_ACTIVE;
  3075 + pm_runtime_set_autosuspend_delay(q->dev, -1);
  3076 + pm_runtime_use_autosuspend(q->dev);
  3077 +}
  3078 +EXPORT_SYMBOL(blk_pm_runtime_init);
  3079 +
  3080 +/**
  3081 + * blk_pre_runtime_suspend - Pre runtime suspend check
  3082 + * @q: the queue of the device
  3083 + *
  3084 + * Description:
  3085 + * This function checks whether runtime suspend is allowed for the device
  3086 + * by examining whether any requests are pending in the queue. If requests
  3087 + * are pending, the device cannot be runtime suspended; otherwise, the
  3088 + * queue's status will be updated to SUSPENDING and the driver can
  3089 + * proceed to suspend the device.
  3090 + *
  3091 + * If suspend is not allowed, we mark last busy for the device so that the
  3092 + * runtime PM core will try to autosuspend it some time later.
  3093 + *
  3094 + * This function should be called near the start of the device's
  3095 + * runtime_suspend callback.
  3096 + *
  3097 + * Return:
  3098 + * 0 - OK to runtime suspend the device
  3099 + * -EBUSY - Device should not be runtime suspended
  3100 + */
  3101 +int blk_pre_runtime_suspend(struct request_queue *q)
  3102 +{
  3103 + int ret = 0;
  3104 +
  3105 + spin_lock_irq(q->queue_lock);
  3106 + if (q->nr_pending) {
  3107 + ret = -EBUSY;
  3108 + pm_runtime_mark_last_busy(q->dev);
  3109 + } else {
  3110 + q->rpm_status = RPM_SUSPENDING;
  3111 + }
  3112 + spin_unlock_irq(q->queue_lock);
  3113 + return ret;
  3114 +}
  3115 +EXPORT_SYMBOL(blk_pre_runtime_suspend);
  3116 +
  3117 +/**
  3118 + * blk_post_runtime_suspend - Post runtime suspend processing
  3119 + * @q: the queue of the device
  3120 + * @err: return value of the device's runtime_suspend function
  3121 + *
  3122 + * Description:
  3123 + * Update the queue's runtime status according to the return value of the
  3124 + * device's runtime_suspend function. If the suspend failed, also mark the
  3125 + * device as last busy so that the PM core will try to autosuspend it later.
  3126 + *
  3127 + * This function should be called near the end of the device's
  3128 + * runtime_suspend callback.
  3129 + */
  3130 +void blk_post_runtime_suspend(struct request_queue *q, int err)
  3131 +{
  3132 + spin_lock_irq(q->queue_lock);
  3133 + if (!err) {
  3134 + q->rpm_status = RPM_SUSPENDED;
  3135 + } else {
  3136 + q->rpm_status = RPM_ACTIVE;
  3137 + pm_runtime_mark_last_busy(q->dev);
  3138 + }
  3139 + spin_unlock_irq(q->queue_lock);
  3140 +}
  3141 +EXPORT_SYMBOL(blk_post_runtime_suspend);
  3142 +
  3143 +/**
  3144 + * blk_pre_runtime_resume - Pre runtime resume processing
  3145 + * @q: the queue of the device
  3146 + *
  3147 + * Description:
  3148 + * Update the queue's runtime status to RESUMING in preparation for the
  3149 + * runtime resume of the device.
  3150 + *
  3151 + * This function should be called near the start of the device's
  3152 + * runtime_resume callback.
  3153 + */
  3154 +void blk_pre_runtime_resume(struct request_queue *q)
  3155 +{
  3156 + spin_lock_irq(q->queue_lock);
  3157 + q->rpm_status = RPM_RESUMING;
  3158 + spin_unlock_irq(q->queue_lock);
  3159 +}
  3160 +EXPORT_SYMBOL(blk_pre_runtime_resume);
  3161 +
  3162 +/**
  3163 + * blk_post_runtime_resume - Post runtime resume processing
  3164 + * @q: the queue of the device
  3165 + * @err: return value of the device's runtime_resume function
  3166 + *
  3167 + * Description:
  3168 + * Update the queue's runtime status according to the return value of the
  3169 + * device's runtime_resume function. If the resume succeeded, process the
  3170 + * requests that were queued up while the device was suspended or resuming,
  3171 + * and then mark last busy and initiate autosuspend for it.
  3172 + *
  3173 + * This function should be called near the end of the device's
  3174 + * runtime_resume callback.
  3175 + */
  3176 +void blk_post_runtime_resume(struct request_queue *q, int err)
  3177 +{
  3178 + spin_lock_irq(q->queue_lock);
  3179 + if (!err) {
  3180 + q->rpm_status = RPM_ACTIVE;
  3181 + __blk_run_queue(q);
  3182 + pm_runtime_mark_last_busy(q->dev);
  3183 + pm_runtime_autosuspend(q->dev);
  3184 + } else {
  3185 + q->rpm_status = RPM_SUSPENDED;
  3186 + }
  3187 + spin_unlock_irq(q->queue_lock);
  3188 +}
  3189 +EXPORT_SYMBOL(blk_post_runtime_resume);
  3190 +#endif
3047 3191  
3048 3192 int __init blk_dev_init(void)
3049 3193 {
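
Tying this back to the blk_pm_runtime_init() kernel-doc above, a driver's
setup path might look roughly like the sketch below; mydev_setup_runtime_pm()
is hypothetical and the 5000 ms delay is only an example value, since the
helper itself leaves the delay at -1 (autosuspend effectively disabled) until
the driver or the user picks a real one:

#include <linux/blkdev.h>
#include <linux/pm_runtime.h>

static void mydev_setup_runtime_pm(struct request_queue *q, struct device *dev)
{
        /* Call once @dev and @q exist, while runtime PM cannot yet happen
         * and, in most cases, before any I/O has been issued. */
        blk_pm_runtime_init(q, dev);

        /* blk_pm_runtime_init() sets the autosuspend delay to -1, which keeps
         * runtime suspend off; pick a real delay here or let the user set one
         * later (e.g. via the device's power/autosuspend_delay_ms file). */
        pm_runtime_set_autosuspend_delay(dev, 5000);    /* example: 5 seconds */
}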
include/linux/blkdev.h
... ... @@ -361,6 +361,12 @@
361 361 */
362 362 struct kobject kobj;
363 363  
  364 +#ifdef CONFIG_PM_RUNTIME
  365 + struct device *dev;
  366 + int rpm_status;
  367 + unsigned int nr_pending;
  368 +#endif
  369 +
364 370 /*
365 371 * queue settings
366 372 */
... ... @@ -959,6 +965,27 @@
959 965 struct request_queue *blk_alloc_queue(gfp_t);
960 966 struct request_queue *blk_alloc_queue_node(gfp_t, int);
961 967 extern void blk_put_queue(struct request_queue *);
  968 +
  969 +/*
  970 + * block layer runtime pm functions
  971 + */
  972 +#ifdef CONFIG_PM_RUNTIME
  973 +extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
  974 +extern int blk_pre_runtime_suspend(struct request_queue *q);
  975 +extern void blk_post_runtime_suspend(struct request_queue *q, int err);
  976 +extern void blk_pre_runtime_resume(struct request_queue *q);
  977 +extern void blk_post_runtime_resume(struct request_queue *q, int err);
  978 +#else
  979 +static inline void blk_pm_runtime_init(struct request_queue *q,
  980 + struct device *dev) {}
  981 +static inline int blk_pre_runtime_suspend(struct request_queue *q)
  982 +{
  983 + return -ENOSYS;
  984 +}
  985 +static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
  986 +static inline void blk_pre_runtime_resume(struct request_queue *q) {}
  987 +static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
  988 +#endif
962 989  
963 990 /*
964 991 * blk_plug permits building a queue of related requests by holding the I/O