block/blk-timeout.c
/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"
#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);

int blk_should_fake_timeout(struct request_queue *q)
{
	if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return 0;

	return should_fail(&fail_io_timeout, 1);
}

static int __init fail_io_timeout_debugfs(void)
{
	return init_fault_attr_dentries(&fail_io_timeout, "fail_io_timeout");
}

late_initcall(fail_io_timeout_debugfs);

ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

	return sprintf(buf, "%d\n", set != 0);
}

ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;

		val = simple_strtoul(p, &p, 10);
		spin_lock_irq(q->queue_lock);
		if (val)
			queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
		spin_unlock_irq(q->queue_lock);
	}

	return count;
}

#endif /* CONFIG_FAIL_IO_TIMEOUT */
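/*
 * Illustration (not part of the original file): blk_should_fake_timeout()
 * is meant to be consulted on the completion path, so that a completion
 * can occasionally be dropped and the request left to hit its timeout.
 * A minimal sketch of such a caller, modeled on blk_complete_request()
 * in blk-softirq.c:
 *
 *	void blk_complete_request(struct request *req)
 *	{
 *		if (unlikely(blk_should_fake_timeout(req->q)))
 *			return;
 *		if (!blk_mark_rq_complete(req))
 *			__blk_complete_request(req);
 *	}
 *
 * The failure rate itself is configured through the generic
 * fault-injection attributes, seeded by the "fail_io_timeout=" boot
 * parameter handled above.
 */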
/*
 * blk_delete_timer - Delete/cancel timer for a given request.
 * @req:	request that we are canceling timer for
 *
 */
void blk_delete_timer(struct request *req)
{
	list_del_init(&req->timeout_list);
}

static void blk_rq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	enum blk_eh_timer_return ret;

	ret = q->rq_timed_out_fn(req);
	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_clear_rq_complete(req);
		blk_add_timer(req);
		break;
	case BLK_EH_NOT_HANDLED:
		/*
		 * The LLD handles this for now, but in the future we
		 * could send a request message to abort the command and
		 * move more of the generic scsi eh code into the blk
		 * layer.
		 */
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}
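/*
 * Illustration (not part of the original file): the rq_timed_out_fn
 * dispatched above is registered by the driver with
 * blk_queue_rq_timed_out(). A hypothetical handler (my_rq_timed_out,
 * my_cmd and MY_MAX_RETRIES are made-up names) that re-arms the timer a
 * few times before handing the request back to the driver's own error
 * handling could look like:
 *
 *	static enum blk_eh_timer_return my_rq_timed_out(struct request *rq)
 *	{
 *		struct my_cmd *cmd = rq->special;
 *
 *		if (cmd->retries++ < MY_MAX_RETRIES)
 *			return BLK_EH_RESET_TIMER;
 *
 *		return BLK_EH_NOT_HANDLED;
 *	}
 *
 * and would be wired up at queue init time with:
 *
 *	blk_queue_rq_timed_out(q, my_rq_timed_out);
 */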
void blk_rq_timed_out_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *) data;
	unsigned long flags, next = 0;
	struct request *rq, *tmp;
	int next_set = 0;
	spin_lock_irqsave(q->queue_lock, flags);

	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
		if (time_after_eq(jiffies, rq->deadline)) {
			list_del_init(&rq->timeout_list);

			/*
			 * Check if we raced with end io completion
			 */
			if (blk_mark_rq_complete(rq))
				continue;
			blk_rq_timed_out(rq);
		} else if (!next_set || time_after(next, rq->deadline)) {
			next = rq->deadline;
			next_set = 1;
		}
	}
	if (next_set)
		mod_timer(&q->timeout, round_jiffies_up(next));
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * blk_abort_request -- Request recovery for the specified request
 * @req:	pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the queue's timeout function.
 * LLDDs that implement their own error recovery MAY ignore the timeout
 * event if they initiated blk_abort_request() themselves. Must hold the
 * queue lock.
 */
void blk_abort_request(struct request *req)
{
	if (blk_mark_rq_complete(req))
		return;
	blk_delete_timer(req);
	blk_rq_timed_out(req);
}
EXPORT_SYMBOL_GPL(blk_abort_request);

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:	request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	if (!q->rq_timed_out_fn)
		return;

	BUG_ON(!list_empty(&req->timeout_list));
	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
	/*
	 * Some LLDs, like scsi, peek at the timeout to prevent a
	 * command from being retried forever.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;
	req->deadline = jiffies + req->timeout;
	list_add_tail(&req->timeout_list, &q->timeout_list);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round up to next nearest
	 * second.
	 */
	expiry = round_jiffies_up(req->deadline);

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires))
		mod_timer(&q->timeout, expiry);
}
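/*
 * Illustration (not part of the original file): the timeout consumed
 * above comes either from the queue-wide default that a driver sets at
 * init time:
 *
 *	blk_queue_rq_timeout(q, 30 * HZ);
 *
 * or from a per-request override assigned before the request is
 * started, as SCSI does for passthrough commands:
 *
 *	req->timeout = 60 * HZ;
 *
 * The values shown here are only examples.
 */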
/**
 * blk_abort_queue -- Abort all requests on the given queue
 * @q:	pointer to the queue
 *
 */
void blk_abort_queue(struct request_queue *q)
{
	unsigned long flags;
	struct request *rq, *tmp;
	LIST_HEAD(list);
	/*
	 * Not a request based block device, nothing to abort
	 */
	if (!q->request_fn)
		return;

	spin_lock_irqsave(q->queue_lock, flags);

	elv_abort_queue(q);
	/*
	 * Splice entries to local list, to avoid deadlocking if entries
	 * get readded to the timeout list by error handling
	 */
	list_splice_init(&q->timeout_list, &list);

	list_for_each_entry_safe(rq, tmp, &list, timeout_list)
		blk_abort_request(rq);
	/*
	 * Occasionally, blk_abort_request() will return without
	 * deleting the element from the list. Make sure we add those
	 * back instead of leaving them on the local stack list.
	 */
	list_splice(&list, &q->timeout_list);

	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_abort_queue);
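/*
 * Illustration (not part of the original file): blk_abort_queue() is
 * aimed at drivers that detect a dead device and want every outstanding
 * request forced into timeout handling at once, e.g. (hypothetical
 * helper and field names):
 *
 *	if (my_device_is_gone(dev))
 *		blk_abort_queue(dev->queue);
 *
 * Each spliced request then passes through blk_abort_request() and the
 * same rq_timed_out_fn path as a genuine timeout.
 */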