Blame view

block/blk-timeout.c 5.95 KB
242f9dcb8   Jens Axboe   block: unify requ...
1
2
3
4
5
6
  /*
   * Functions related to generic timeout handling of requests.
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/blkdev.h>
581d4e28d   Jens Axboe   block: add fault ...
7
  #include <linux/fault-inject.h>
242f9dcb8   Jens Axboe   block: unify requ...
8
9
  
  #include "blk.h"
904158376   Christoph Hellwig   block: fix blk_ab...
10
  #include "blk-mq.h"
242f9dcb8   Jens Axboe   block: unify requ...
11

581d4e28d   Jens Axboe   block: add fault ...
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
  #ifdef CONFIG_FAIL_IO_TIMEOUT
  
  static DECLARE_FAULT_ATTR(fail_io_timeout);
  
/*
 * Parse the "fail_io_timeout=" kernel boot parameter and feed it to the
 * generic fault-injection attribute parser.  Returns the value of
 * setup_fault_attr() (non-zero on successful parse, per __setup convention).
 */
static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);
  
  int blk_should_fake_timeout(struct request_queue *q)
  {
  	if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
  		return 0;
  
  	return should_fail(&fail_io_timeout, 1);
  }
  
  static int __init fail_io_timeout_debugfs(void)
  {
dd48c085c   Akinobu Mita   fault-injection: ...
32
33
  	struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
  						NULL, &fail_io_timeout);
8616ebb16   Duan Jiong   block: replace IS...
34
  	return PTR_ERR_OR_ZERO(dir);
581d4e28d   Jens Axboe   block: add fault ...
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
  }
  
  late_initcall(fail_io_timeout_debugfs);
  
  ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
  			  char *buf)
  {
  	struct gendisk *disk = dev_to_disk(dev);
  	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);
  
  	return sprintf(buf, "%d
  ", set != 0);
  }
  
  ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
  			   const char *buf, size_t count)
  {
  	struct gendisk *disk = dev_to_disk(dev);
  	int val;
  
  	if (count) {
  		struct request_queue *q = disk->queue;
  		char *p = (char *) buf;
  
  		val = simple_strtoul(p, &p, 10);
  		spin_lock_irq(q->queue_lock);
  		if (val)
  			queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
  		else
  			queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
  		spin_unlock_irq(q->queue_lock);
  	}
  
  	return count;
  }
  
  #endif /* CONFIG_FAIL_IO_TIMEOUT */
242f9dcb8   Jens Axboe   block: unify requ...
72
73
74
75
76
77
78
/*
 * blk_delete_timer - Delete/cancel timer for a given function.
 * @req:	request that we are canceling timer for
 *
 * Removes @req from its queue's pending-timeout list; list_del_init()
 * re-initializes the entry so a later list_empty() check on it succeeds.
 * The per-queue timer itself is left alone — it will simply find nothing
 * to expire for this request.
 */
void blk_delete_timer(struct request *req)
{
	list_del_init(&req->timeout_list);
}
  
  static void blk_rq_timed_out(struct request *req)
  {
  	struct request_queue *q = req->q;
80bd7181b   Hannes Reinecke   block: check for ...
85
  	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
242f9dcb8   Jens Axboe   block: unify requ...
86

80bd7181b   Hannes Reinecke   block: check for ...
87
88
  	if (q->rq_timed_out_fn)
  		ret = q->rq_timed_out_fn(req);
242f9dcb8   Jens Axboe   block: unify requ...
89
90
  	switch (ret) {
  	case BLK_EH_HANDLED:
320ae51fe   Jens Axboe   blk-mq: new multi...
91
  		/* Can we use req->errors here? */
46f92d42e   Christoph Hellwig   blk-mq: unshared ...
92
  		__blk_complete_request(req);
242f9dcb8   Jens Axboe   block: unify requ...
93
94
  		break;
  	case BLK_EH_RESET_TIMER:
87ee7b112   Jens Axboe   blk-mq: fix race ...
95
  		blk_add_timer(req);
4912aa6c1   Jeff Moyer   block: fix race b...
96
  		blk_clear_rq_complete(req);
242f9dcb8   Jens Axboe   block: unify requ...
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
  		break;
  	case BLK_EH_NOT_HANDLED:
  		/*
  		 * LLD handles this for now but in the future
  		 * we can send a request msg to abort the command
  		 * and we can move more of the generic scsi eh code to
  		 * the blk layer.
  		 */
  		break;
  	default:
  		printk(KERN_ERR "block: bad eh return: %d
  ", ret);
  		break;
  	}
  }
46f92d42e   Christoph Hellwig   blk-mq: unshared ...
112
/*
 * blk_rq_check_expired - expire one request, or fold its deadline into the
 * next timer target
 * @rq:           request on the queue's timeout list
 * @next_timeout: in/out — earliest not-yet-expired deadline seen so far
 * @next_set:     in/out — whether *next_timeout holds a valid value yet
 *
 * Called with the queue lock held from the timeout worker.  If @rq's
 * deadline has passed, it is dropped from the timeout list and timed out —
 * unless the completion path got there first (blk_mark_rq_complete() tells
 * us who won the race).  Otherwise the deadline is merged into the running
 * minimum used to re-arm the queue timer.
 */
static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
			  unsigned int *next_set)
{
	if (time_after_eq(jiffies, rq->deadline)) {
		list_del_init(&rq->timeout_list);

		/*
		 * Check if we raced with end io completion
		 */
		if (!blk_mark_rq_complete(rq))
			blk_rq_timed_out(rq);
	} else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
		/* Track the earliest future deadline for the next timer shot. */
		*next_timeout = rq->deadline;
		*next_set = 1;
	}
}
287922eb0   Christoph Hellwig   block: defer time...
128
/*
 * blk_timeout_work - workqueue handler that expires legacy (non-mq) requests
 * @work: the queue's timeout_work, embedded in struct request_queue
 *
 * Walks the queue's timeout list under the queue lock, timing out any
 * request whose deadline has passed, and re-arms the queue timer for the
 * earliest remaining deadline.  A queue reference is taken first so the
 * queue cannot be torn down while we run; if the queue is already dying,
 * blk_queue_enter() fails and we bail out without touching anything.
 */
void blk_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long flags, next = 0;
	struct request *rq, *tmp;
	int next_set = 0;

	/* preempt=true: don't wait out a frozen queue, just skip the pass. */
	if (blk_queue_enter(q, true))
		return;

	spin_lock_irqsave(q->queue_lock, flags);

	/* _safe variant: blk_rq_check_expired() may unlink rq from the list. */
	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
		blk_rq_check_expired(rq, &next, &next_set);

	/* Re-arm only if some request still has a pending deadline. */
	if (next_set)
		mod_timer(&q->timeout, round_jiffies_up(next));

	spin_unlock_irqrestore(q->queue_lock, flags);
	blk_queue_exit(q);
}
  
/**
 * blk_abort_request -- Request request recovery for the specified command
 * @req:	pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the q's timeout function.
 * LLDDs who implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_req. Must hold queue lock.
 */
void blk_abort_request(struct request *req)
{
	/* Lost the race with normal completion — nothing to abort. */
	if (blk_mark_rq_complete(req))
		return;

	if (req->q->mq_ops) {
		/* blk-mq path: invoke the mq timeout handler directly
		 * (reserved=false). */
		blk_mq_rq_timed_out(req, false);
	} else {
		/* Legacy path: cancel the pending timer, then run the
		 * queue's timeout handling immediately. */
		blk_delete_timer(req);
		blk_rq_timed_out(req);
	}
}
EXPORT_SYMBOL_GPL(blk_abort_request);
0d2602ca3   Jens Axboe   blk-mq: improve s...
171
172
173
174
175
176
177
178
179
180
  unsigned long blk_rq_timeout(unsigned long timeout)
  {
  	unsigned long maxt;
  
  	maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
  	if (time_after(timeout, maxt))
  		timeout = maxt;
  
  	return timeout;
  }
c4a634f43   Christoph Hellwig   block: fold __blk...
181
182
183
184
185
186
187
/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:	request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 *    Queue lock must be held for the non-mq case, mq case doesn't care.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
	if (!q->mq_ops && !q->rq_timed_out_fn)
		return;

	/* The request must not already be on a timeout list. */
	BUG_ON(!list_empty(&req->timeout_list));

	/*
	 * Some LLDs, like scsi, peek at the timeout to prevent a
	 * command from being retried forever.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;

	req->deadline = jiffies + req->timeout;

	/*
	 * Only the non-mq case needs to add the request to a protected list.
	 * For the mq case we simply scan the tag map.
	 */
	if (!q->mq_ops)
		list_add_tail(&req->timeout_list, &req->q->timeout_list);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round up to next nearest
	 * second.
	 */
	expiry = blk_rq_timeout(round_jiffies_up(req->deadline));

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires)) {
		unsigned long diff = q->timeout.expires - expiry;

		/*
		 * Due to added timer slack to group timers, the timer
		 * will often be a little in front of what we asked for.
		 * So apply some tolerance here too, otherwise we keep
		 * modifying the timer because expires for value X
		 * will be X + something.
		 */
		if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
			mod_timer(&q->timeout, expiry);
	}

}