block/blk-pm.c

  // SPDX-License-Identifier: GPL-2.0
  #include <linux/blk-mq.h>
  #include <linux/blk-pm.h>
  #include <linux/blkdev.h>
  #include <linux/pm_runtime.h>
  #include "blk-mq.h"
  #include "blk-mq-tag.h"
  
  /**
   * blk_pm_runtime_init - Block layer runtime PM initialization routine
   * @q: the queue of the device
   * @dev: the device the queue belongs to
   *
   * Description:
   *    Initialize runtime-PM-related fields for @q and start autosuspend for
   *    @dev. Drivers that want to take advantage of request-based runtime PM
   *    should call this function after @dev has been initialized and its
   *    request queue @q has been allocated, but while runtime PM for @dev
   *    cannot happen yet (either because it is disabled/forbidden or because
   *    its usage_count is > 0). In most cases, the driver should call this
   *    function before any I/O has taken place.
   *
   *    This function sets up autosuspend for the device; the autosuspend delay
   *    is set to -1 to make runtime suspend impossible until an updated value
   *    is set by either the user or the driver. Drivers do not need to touch
   *    any other autosuspend settings.
   *
   *    Block layer runtime PM is request based, so it only works for drivers
   *    that use requests as their I/O unit, not for those that use bios
   *    directly.
   */
  void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
  {
  	q->dev = dev;
  	q->rpm_status = RPM_ACTIVE;
  	pm_runtime_set_autosuspend_delay(q->dev, -1);
  	pm_runtime_use_autosuspend(q->dev);
  }
  EXPORT_SYMBOL(blk_pm_runtime_init);
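
  /*
   * A minimal sketch, not part of the original file, of how a driver might
   * wire this up at probe time; my_probe() and my_alloc_queue() are
   * hypothetical placeholders for driver-specific code. The driver initializes
   * block layer runtime PM and then replaces the -1 default set above with a
   * real autosuspend delay:
   *
   *	static int my_probe(struct device *dev)
   *	{
   *		struct request_queue *q = my_alloc_queue(dev);
   *
   *		blk_pm_runtime_init(q, dev);
   *		pm_runtime_set_autosuspend_delay(dev, 5000);
   *		return 0;
   *	}
   */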
  
  /**
   * blk_pre_runtime_suspend - Pre runtime suspend check
   * @q: the queue of the device
   *
   * Description:
   *    This function checks whether runtime suspend is allowed for the device
   *    by examining whether any requests are pending in the queue. If requests
   *    are pending, the device cannot be runtime suspended; otherwise, the
   *    queue's status is updated to SUSPENDING and the driver can proceed to
   *    suspend the device.
   *
   *    If suspending is not allowed, we mark last busy for the device so that
   *    the runtime PM core will try to autosuspend it again some time later.
   *
   *    This function should be called near the start of the device's
   *    runtime_suspend callback.
   *
   * Return:
   *    0		- OK to runtime suspend the device
   *    -EBUSY	- Device should not be runtime suspended
   */
  int blk_pre_runtime_suspend(struct request_queue *q)
  {
  	int ret = 0;
  
  	if (!q->dev)
  		return ret;
  	WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
  	spin_lock_irq(&q->queue_lock);
  	q->rpm_status = RPM_SUSPENDING;
  	spin_unlock_irq(&q->queue_lock);
  	/*
  	 * Increase the pm_only counter before checking whether any
  	 * non-PM blk_queue_enter() calls are in progress, so that no new
  	 * non-PM blk_queue_enter() calls can succeed before the pm_only
  	 * counter is decreased again.
  	 */
  	blk_set_pm_only(q);
  	ret = -EBUSY;
  	/* Switch q_usage_counter from per-cpu to atomic mode. */
  	blk_freeze_queue_start(q);
  	/*
  	 * Wait until atomic mode has been reached. Since that
  	 * involves calling call_rcu(), it is guaranteed that later
  	 * blk_queue_enter() calls see the pm-only state. See also
  	 * http://lwn.net/Articles/573497/.
  	 */
  	percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
  	if (percpu_ref_is_zero(&q->q_usage_counter))
  		ret = 0;
  	/* Switch q_usage_counter back to per-cpu mode. */
  	blk_mq_unfreeze_queue(q);
  	if (ret < 0) {
  		spin_lock_irq(&q->queue_lock);
  		q->rpm_status = RPM_ACTIVE;
  		pm_runtime_mark_last_busy(q->dev);
  		spin_unlock_irq(&q->queue_lock);

  		blk_clear_pm_only(q);
  	}

  	return ret;
  }
  EXPORT_SYMBOL(blk_pre_runtime_suspend);
  
  /**
   * blk_post_runtime_suspend - Post runtime suspend processing
   * @q: the queue of the device
   * @err: return value of the device's runtime_suspend function
   *
   * Description:
   *    Update the queue's runtime status according to the return value of the
   *    device's runtime_suspend function and, if the suspend failed, mark last
   *    busy for the device so that the PM core will try to autosuspend it
   *    again at a later time.
   *
   *    This function should be called near the end of the device's
   *    runtime_suspend callback.
   */
  void blk_post_runtime_suspend(struct request_queue *q, int err)
  {
  	if (!q->dev)
  		return;
  	spin_lock_irq(&q->queue_lock);
  	if (!err) {
  		q->rpm_status = RPM_SUSPENDED;
  	} else {
  		q->rpm_status = RPM_ACTIVE;
  		pm_runtime_mark_last_busy(q->dev);
  	}
  	spin_unlock_irq(&q->queue_lock);
  
  	if (err)
  		blk_clear_pm_only(q);
  }
  EXPORT_SYMBOL(blk_post_runtime_suspend);
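
  /*
   * A minimal sketch of a driver's runtime_suspend callback built on the pair
   * of helpers above; my_runtime_suspend(), struct my_dev and
   * my_hw_power_down() are hypothetical placeholders for driver-specific code.
   * If blk_pre_runtime_suspend() returns -EBUSY, the callback simply
   * propagates it and the PM core retries the suspend later:
   *
   *	static int my_runtime_suspend(struct device *dev)
   *	{
   *		struct my_dev *mdev = dev_get_drvdata(dev);
   *		int err;
   *
   *		err = blk_pre_runtime_suspend(mdev->queue);
   *		if (err)
   *			return err;
   *		err = my_hw_power_down(mdev);
   *		blk_post_runtime_suspend(mdev->queue, err);
   *		return err;
   *	}
   */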
  
  /**
   * blk_pre_runtime_resume - Pre runtime resume processing
   * @q: the queue of the device
   *
   * Description:
   *    Update the queue's runtime status to RESUMING in preparation for the
   *    runtime resume of the device.
   *
   *    This function should be called near the start of the device's
   *    runtime_resume callback.
   */
  void blk_pre_runtime_resume(struct request_queue *q)
  {
  	if (!q->dev)
  		return;
  	spin_lock_irq(&q->queue_lock);
  	q->rpm_status = RPM_RESUMING;
  	spin_unlock_irq(&q->queue_lock);
  }
  EXPORT_SYMBOL(blk_pre_runtime_resume);
  
  /**
   * blk_post_runtime_resume - Post runtime resume processing
   * @q: the queue of the device
   * @err: return value of the device's runtime_resume function
   *
   * Description:
   *    Update the queue's runtime status according to the return value of the
   *    device's runtime_resume function. If the resume was successful,
   *    blk_set_runtime_active() is called to do the real work of restarting
   *    the queue.
   *
   *    This function should be called near the end of the device's
   *    runtime_resume callback.
   */
  void blk_post_runtime_resume(struct request_queue *q, int err)
  {
  	if (!q->dev)
  		return;
  	if (!err) {
  		blk_set_runtime_active(q);
  	} else {
  		spin_lock_irq(&q->queue_lock);
  		q->rpm_status = RPM_SUSPENDED;
  		spin_unlock_irq(&q->queue_lock);
  	}
  }
  EXPORT_SYMBOL(blk_post_runtime_resume);
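
  /*
   * A minimal sketch of the matching runtime_resume callback;
   * my_runtime_resume(), struct my_dev and my_hw_power_up() are hypothetical
   * placeholders. The queue is marked RESUMING before the hardware is powered
   * up, and afterwards it is either restarted (on success) or marked suspended
   * again (on failure):
   *
   *	static int my_runtime_resume(struct device *dev)
   *	{
   *		struct my_dev *mdev = dev_get_drvdata(dev);
   *		int err;
   *
   *		blk_pre_runtime_resume(mdev->queue);
   *		err = my_hw_power_up(mdev);
   *		blk_post_runtime_resume(mdev->queue, err);
   *		return err;
   *	}
   */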
  
  /**
   * blk_set_runtime_active - Force runtime status of the queue to be active
   * @q: the queue of the device
   *
   * If the device is left runtime suspended during system suspend, the resume
   * hook typically resumes the device and corrects its runtime status
   * accordingly. However, that does not affect the queue's runtime PM status,
   * which is still "suspended". This prevents requests from being processed
   * from the queue.
   *
   * This function can be used in a driver's resume hook to correct the
   * queue's runtime PM status and re-enable peeking requests from the queue.
   * It should be called before the first request is added to the queue.
   *
   * This function is also called by blk_post_runtime_resume() for successful
   * runtime resumes.  It does everything necessary to restart the queue.
   */
  void blk_set_runtime_active(struct request_queue *q)
  {
  	int old_status;
  
  	if (!q->dev)
  		return;
  
  	spin_lock_irq(&q->queue_lock);
  	old_status = q->rpm_status;
  	q->rpm_status = RPM_ACTIVE;
  	pm_runtime_mark_last_busy(q->dev);
  	pm_request_autosuspend(q->dev);
  	spin_unlock_irq(&q->queue_lock);
  
  	if (old_status != RPM_ACTIVE)
  		blk_clear_pm_only(q);
  }
  EXPORT_SYMBOL(blk_set_runtime_active);
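
  /*
   * A minimal sketch of a system resume hook that uses this helper;
   * my_resume(), struct my_dev, my_hw_power_up() and the pm_runtime_* sequence
   * shown are hypothetical driver-side code, not mandated by this file. After
   * the hardware has been powered up and the device's runtime status
   * corrected, the queue's runtime PM status must be corrected as well, before
   * the first request is queued:
   *
   *	static int my_resume(struct device *dev)
   *	{
   *		struct my_dev *mdev = dev_get_drvdata(dev);
   *
   *		my_hw_power_up(mdev);
   *		pm_runtime_disable(dev);
   *		pm_runtime_set_active(dev);
   *		pm_runtime_enable(dev);
   *		blk_set_runtime_active(mdev->queue);
   *		return 0;
   *	}
   */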