block/blk-timeout.c

  /*
   * Functions related to generic timeout handling of requests.
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/blkdev.h>
  #include <linux/fault-inject.h>
  
  #include "blk.h"
  #include "blk-mq.h"

  #ifdef CONFIG_FAIL_IO_TIMEOUT
  
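/*
 * Fault attribute used to inject spurious request timeouts.  It can be
 * configured at boot via the "fail_io_timeout=" parameter set up below,
 * or at run time through the debugfs directory created in
 * fail_io_timeout_debugfs().  As with other fault-injection parameters,
 * the boot string is parsed by setup_fault_attr() and takes the form
 * <interval>,<probability>,<space>,<times>.
 */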
  static DECLARE_FAULT_ATTR(fail_io_timeout);
  
  static int __init setup_fail_io_timeout(char *str)
  {
  	return setup_fault_attr(&fail_io_timeout, str);
  }
  __setup("fail_io_timeout=", setup_fail_io_timeout);
  
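/*
 * Completion paths call this to decide whether to silently drop a
 * completion, making the request appear to time out later: it returns
 * non-zero only if the queue has QUEUE_FLAG_FAIL_IO set and the fault
 * attribute fires for this request.
 */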
  int blk_should_fake_timeout(struct request_queue *q)
  {
  	if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
  		return 0;
  
  	return should_fail(&fail_io_timeout, 1);
  }
  
  static int __init fail_io_timeout_debugfs(void)
  {
  	struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
  						NULL, &fail_io_timeout);
  	return PTR_ERR_OR_ZERO(dir);
  }
  
  late_initcall(fail_io_timeout_debugfs);
  
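/*
 * part_timeout_show/part_timeout_store back a per-disk sysfs attribute
 * (registered in block/genhd.c, where it is named io-timeout-fail) that
 * exposes QUEUE_FLAG_FAIL_IO.  Writing a non-zero value enables fake
 * timeouts for that disk, e.g.:
 *
 *	echo 1 > /sys/block/<disk>/io-timeout-fail
 */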
  ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
  			  char *buf)
  {
  	struct gendisk *disk = dev_to_disk(dev);
  	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);
  
  	return sprintf(buf, "%d
  ", set != 0);
  }
  
  ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
  			   const char *buf, size_t count)
  {
  	struct gendisk *disk = dev_to_disk(dev);
  	int val;
  
  	if (count) {
  		struct request_queue *q = disk->queue;
  		char *p = (char *) buf;
  
  		val = simple_strtoul(p, &p, 10);
  		spin_lock_irq(q->queue_lock);
  		if (val)
  			queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
  		else
  			queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
  		spin_unlock_irq(q->queue_lock);
  	}
  
  	return count;
  }
  
  #endif /* CONFIG_FAIL_IO_TIMEOUT */
/*
 * blk_delete_timer - Delete/cancel timer for a given request.
 * @req:	request that we are canceling timer for
 */
  void blk_delete_timer(struct request *req)
  {
  	list_del_init(&req->timeout_list);
  }
  
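/*
 * Invoked once a request has been marked timed out.  The queue's
 * rq_timed_out_fn decides what happens next: BLK_EH_HANDLED means error
 * handling is done and the request can be completed, BLK_EH_RESET_TIMER
 * re-arms the timer and lets the request keep running, and
 * BLK_EH_NOT_HANDLED leaves recovery entirely to the low-level driver.
 */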
  static void blk_rq_timed_out(struct request *req)
  {
  	struct request_queue *q = req->q;
  	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

  	if (q->rq_timed_out_fn)
  		ret = q->rq_timed_out_fn(req);
  	switch (ret) {
  	case BLK_EH_HANDLED:
  		__blk_complete_request(req);
  		break;
  	case BLK_EH_RESET_TIMER:
  		blk_add_timer(req);
  		blk_clear_rq_complete(req);
  		break;
  	case BLK_EH_NOT_HANDLED:
  		/*
  		 * LLD handles this for now but in the future
  		 * we can send a request msg to abort the command
  		 * and we can move more of the generic scsi eh code to
  		 * the blk layer.
  		 */
  		break;
  	default:
  		printk(KERN_ERR "block: bad eh return: %d
  ", ret);
  		break;
  	}
  }
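
/*
 * Helper for blk_timeout_work(): if @rq's deadline has passed, try to
 * mark it complete (closing the race with normal end_io completion) and
 * run the timeout handling; otherwise remember the earliest pending
 * deadline in *next_timeout/*next_set so the queue timer can be re-armed.
 */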
  static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
  			  unsigned int *next_set)
  {
  	if (time_after_eq(jiffies, rq->deadline)) {
  		list_del_init(&rq->timeout_list);
  
  		/*
  		 * Check if we raced with end io completion
  		 */
  		if (!blk_mark_rq_complete(rq))
  			blk_rq_timed_out(rq);
  	} else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
  		*next_timeout = rq->deadline;
  		*next_set = 1;
  	}
  }
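
/*
 * The queue's timeout timer handler defers the actual scan to this work
 * item on kblockd.  It walks the non-mq timeout_list under queue_lock,
 * expires overdue requests, and re-arms the timer for the earliest
 * remaining deadline.
 */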
  void blk_timeout_work(struct work_struct *work)
  {
  	struct request_queue *q =
  		container_of(work, struct request_queue, timeout_work);
  	unsigned long flags, next = 0;
  	struct request *rq, *tmp;
  	int next_set = 0;
  
  	spin_lock_irqsave(q->queue_lock, flags);
  	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
  		blk_rq_check_expired(rq, &next, &next_set);

  	if (next_set)
  		mod_timer(&q->timeout, round_jiffies_up(next));
  
  	spin_unlock_irqrestore(q->queue_lock, flags);
  }
  
/**
 * blk_abort_request -- Request recovery for the specified command
 * @req:	pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the q's timeout function.
 * LLDDs who implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request. Must hold queue lock.
 */
  void blk_abort_request(struct request *req)
  {
  	if (blk_mark_rq_complete(req))
  		return;
  
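	/*
	 * blk-mq requests don't sit on timeout_list; hand them straight
	 * to the mq timeout handler.  The second argument is the
	 * "reserved tag" flag passed through to the driver's timeout
	 * handler, not a timer-reset request.
	 */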
  	if (req->q->mq_ops) {
  		blk_mq_rq_timed_out(req, false);
  	} else {
  		blk_delete_timer(req);
  		blk_rq_timed_out(req);
  	}
  }
  EXPORT_SYMBOL_GPL(blk_abort_request);
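
/*
 * Clamp a prospective expiry so it lies no more than BLK_MAX_TIMEOUT
 * (defined in blk.h; 5 * HZ at the time of writing) past the current
 * jiffies.  This bounds how far into the future the per-queue timer can
 * be pushed by a single long request timeout.
 */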
  unsigned long blk_rq_timeout(unsigned long timeout)
  {
  	unsigned long maxt;
  
  	maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
  	if (time_after(timeout, maxt))
  		timeout = maxt;
  
  	return timeout;
  }
  /**
   * blk_add_timer - Start timeout timer for a single request
   * @req:	request that is about to start running.
   *
   * Notes:
   *    Each request has its own timer, and as it is added to the queue, we
   *    set up the timer. When the request completes, we cancel the timer.
   */
  void blk_add_timer(struct request *req)
  {
  	struct request_queue *q = req->q;
  	unsigned long expiry;
  	if (!q->mq_ops)
  		lockdep_assert_held(q->queue_lock);
  	/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
  	if (!q->mq_ops && !q->rq_timed_out_fn)
  		return;
  
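	/* Arming an already-armed request would corrupt timeout_list. */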
  	BUG_ON(!list_empty(&req->timeout_list));

  	/*
  	 * Some LLDs, like scsi, peek at the timeout to prevent a
  	 * command from being retried forever.
  	 */
  	if (!req->timeout)
  		req->timeout = q->rq_timeout;
  
  	req->deadline = jiffies + req->timeout;
  
  	/*
  	 * Only the non-mq case needs to add the request to a protected list.
  	 * For the mq case we simply scan the tag map.
  	 */
  	if (!q->mq_ops)
  		list_add_tail(&req->timeout_list, &req->q->timeout_list);
  
  	/*
  	 * If the timer isn't already pending or this timeout is earlier
  	 * than an existing one, modify the timer. Round up to next nearest
  	 * second.
  	 */
  	expiry = blk_rq_timeout(round_jiffies_up(req->deadline));
  
  	if (!timer_pending(&q->timeout) ||
  	    time_before(expiry, q->timeout.expires)) {
  		unsigned long diff = q->timeout.expires - expiry;
  
  		/*
  		 * Due to added timer slack to group timers, the timer
  		 * will often be a little in front of what we asked for.
  		 * So apply some tolerance here too, otherwise we keep
  		 * modifying the timer because expires for value X
  		 * will be X + something.
  		 */
  		if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
  			mod_timer(&q->timeout, expiry);
  	}
  
  }