Blame view

block/blk-timeout.c 6.02 KB
242f9dcb8   Jens Axboe   block: unify requ...
1
2
3
4
5
6
  /*
   * Functions related to generic timeout handling of requests.
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/blkdev.h>
581d4e28d   Jens Axboe   block: add fault ...
7
  #include <linux/fault-inject.h>
242f9dcb8   Jens Axboe   block: unify requ...
8
9
  
  #include "blk.h"
904158376   Christoph Hellwig   block: fix blk_ab...
10
  #include "blk-mq.h"
242f9dcb8   Jens Axboe   block: unify requ...
11

581d4e28d   Jens Axboe   block: add fault ...
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
  #ifdef CONFIG_FAIL_IO_TIMEOUT
  
  static DECLARE_FAULT_ATTR(fail_io_timeout);
  
/*
 * Parse the "fail_io_timeout=" kernel boot parameter into the
 * fail_io_timeout fault attribute.
 */
static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);
  
  int blk_should_fake_timeout(struct request_queue *q)
  {
  	if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
  		return 0;
  
  	return should_fail(&fail_io_timeout, 1);
  }
  
/*
 * Expose the fail_io_timeout fault attribute under debugfs
 * ("fail_io_timeout" directory) so it can be tuned at runtime.
 * Registered as a late initcall.
 */
static int __init fail_io_timeout_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
						NULL, &fail_io_timeout);

	/* Return the error code if creation failed, 0 on success. */
	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_io_timeout_debugfs);
  
/*
 * sysfs show handler: report whether I/O timeout fault injection is
 * enabled on this disk's queue ("1" or "0").
 */
ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

	return sprintf(buf, "%d\n", set != 0);
}
  
/*
 * sysfs store handler: a non-zero value enables fake I/O timeouts on
 * this disk's queue, zero disables them.  Always consumes @count.
 */
ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;

		/*
		 * NOTE(review): simple_strtoul silently yields 0 for
		 * non-numeric input, which disables injection rather
		 * than rejecting the write.
		 */
		val = simple_strtoul(p, &p, 10);
		if (val)
			blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
	}

	return count;
}
  
  #endif /* CONFIG_FAIL_IO_TIMEOUT */
242f9dcb8   Jens Axboe   block: unify requ...
70
71
72
73
74
75
76
/*
 * blk_delete_timer - Delete/cancel timer for a given function.
 * @req:	request that we are canceling timer for
 *
 */
void blk_delete_timer(struct request *req)
{
	/*
	 * Unlinking the request from the queue's pending-timeout list is
	 * sufficient; the per-queue timer is left armed and will simply
	 * find nothing for this request to expire.
	 */
	list_del_init(&req->timeout_list);
}
  
/*
 * Run the driver's timeout handler for @req and act on the verdict:
 * re-arm the request's timer, leave completion to the LLD, or log an
 * unknown return value.  Called after the request has been marked
 * complete (blk_mark_rq_complete) by the caller.
 */
static void blk_rq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	/* Queues without a timeout handler default to re-arming the timer */
	if (q->rq_timed_out_fn)
		ret = q->rq_timed_out_fn(req);
	switch (ret) {
	case BLK_EH_RESET_TIMER:
		/* Re-arm first, then allow the request to complete again */
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_DONE:
		/*
		 * LLD handles this for now but in the future
		 * we can send a request msg to abort the command
		 * and we can move more of the generic scsi eh code to
		 * the blk layer.
		 */
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}
46f92d42e   Christoph Hellwig   blk-mq: unshared ...
106
/*
 * Check one pending request for expiry.  If @rq's deadline has passed,
 * remove it from the timeout list and — unless end_io completion raced
 * with us — invoke the timeout handling.  Otherwise fold its deadline
 * into the caller's earliest-next-wakeup tracking (@next_timeout /
 * @next_set).
 */
static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
			  unsigned int *next_set)
{
	const unsigned long deadline = blk_rq_deadline(rq);

	if (time_after_eq(jiffies, deadline)) {
		list_del_init(&rq->timeout_list);

		/*
		 * Check if we raced with end io completion
		 */
		if (!blk_mark_rq_complete(rq))
			blk_rq_timed_out(rq);
	} else if (!*next_set || time_after(*next_timeout, deadline)) {
		/* Track the soonest deadline among still-pending requests */
		*next_timeout = deadline;
		*next_set = 1;
	}
}
287922eb0   Christoph Hellwig   block: defer time...
124
/*
 * blk_timeout_work - deferred timeout scan for a request queue.
 * @work:	the queue's embedded timeout_work
 *
 * Walks the queue's pending-timeout list under queue_lock, expiring
 * requests whose deadline has passed, and re-arms the queue timer for
 * the earliest deadline still outstanding (rounded up to the next
 * second to batch wakeups).
 */
void blk_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long flags, next = 0;
	struct request *rq, *tmp;
	int next_set = 0;

	spin_lock_irqsave(q->queue_lock, flags);

	/* _safe variant: blk_rq_check_expired may unlink rq from the list */
	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
		blk_rq_check_expired(rq, &next, &next_set);

	if (next_set)
		mod_timer(&q->timeout, round_jiffies_up(next));

	spin_unlock_irqrestore(q->queue_lock, flags);
}
  
/**
 * blk_abort_request -- Request request recovery for the specified command
 * @req:	pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the q's timeout function.
 * LLDDs who implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_req. Must hold queue lock.
 */
void blk_abort_request(struct request *req)
{
	if (req->q->mq_ops) {
		/*
		 * All we need to ensure is that timeout scan takes place
		 * immediately and that scan sees the new timeout value.
		 * No need for fancy synchronizations.
		 */
		blk_rq_set_deadline(req, jiffies);
		kblockd_schedule_work(&req->q->timeout_work);
	} else {
		/* Bail out if completion already claimed the request */
		if (blk_mark_rq_complete(req))
			return;
		blk_delete_timer(req);
		blk_rq_timed_out(req);
	}
}
EXPORT_SYMBOL_GPL(blk_abort_request);
0d2602ca3   Jens Axboe   blk-mq: improve s...
169
170
171
172
173
174
175
176
177
178
  unsigned long blk_rq_timeout(unsigned long timeout)
  {
  	unsigned long maxt;
  
  	maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
  	if (time_after(timeout, maxt))
  		timeout = maxt;
  
  	return timeout;
  }
c4a634f43   Christoph Hellwig   block: fold __blk...
179
180
181
182
183
184
185
186
187
/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:	request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	/* Legacy path manipulates q->timeout_list under queue_lock */
	if (!q->mq_ops)
		lockdep_assert_held(q->queue_lock);

	/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
	if (!q->mq_ops && !q->rq_timed_out_fn)
		return;

	/* The request must not already be on a timeout list */
	BUG_ON(!list_empty(&req->timeout_list));

	/*
	 * Some LLDs, like scsi, peek at the timeout to prevent a
	 * command from being retried forever.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;

	req->rq_flags &= ~RQF_TIMED_OUT;
	blk_rq_set_deadline(req, jiffies + req->timeout);

	/*
	 * Only the non-mq case needs to add the request to a protected list.
	 * For the mq case we simply scan the tag map.
	 */
	if (!q->mq_ops)
		list_add_tail(&req->timeout_list, &req->q->timeout_list);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round up to next nearest
	 * second.
	 */
	expiry = blk_rq_timeout(round_jiffies_up(blk_rq_deadline(req)));

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires)) {
		unsigned long diff = q->timeout.expires - expiry;

		/*
		 * Due to added timer slack to group timers, the timer
		 * will often be a little in front of what we asked for.
		 * So apply some tolerance here too, otherwise we keep
		 * modifying the timer because expires for value X
		 * will be X + something.
		 */
		if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
			mod_timer(&q->timeout, expiry);
	}
}