block/blk-timeout.c
/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"

#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}

__setup("fail_io_timeout=", setup_fail_io_timeout);

int blk_should_fake_timeout(struct request_queue *q)
{
	if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return 0;

	return should_fail(&fail_io_timeout, 1);
}

static int __init fail_io_timeout_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
						NULL, &fail_io_timeout);

	return IS_ERR(dir) ? PTR_ERR(dir) : 0;
}

late_initcall(fail_io_timeout_debugfs);

ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

	return sprintf(buf, "%d\n", set != 0);
}

ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;

		val = simple_strtoul(p, &p, 10);
		spin_lock_irq(q->queue_lock);
		if (val)
			queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
		spin_unlock_irq(q->queue_lock);
	}

	return count;
}

#endif /* CONFIG_FAIL_IO_TIMEOUT */
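
/*
 * Usage sketch (not from this file): exercising the fault injection
 * above. The boot parameter comes from the __setup() hook; the debugfs
 * knobs are the generic fault-injection attributes created by
 * fault_create_debugfs_attr(); the sysfs attribute name
 * "io-timeout-fail" is an assumption about how part_timeout_show/store
 * are wired up elsewhere (genhd).
 *
 *	# at boot: fail_io_timeout=<interval>,<probability>,<space>,<times>
 *	echo 10 > /sys/kernel/debug/fail_io_timeout/probability
 *	echo -1 > /sys/kernel/debug/fail_io_timeout/times
 *	echo 1  > /sys/block/sdX/io-timeout-fail	# sets QUEUE_FLAG_FAIL_IO
 */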

/*
 * blk_delete_timer - Delete/cancel timer for a given request.
 * @req:	request that we are canceling the timer for
 *
 */
void blk_delete_timer(struct request *req)
{
	list_del_init(&req->timeout_list);
}

static void blk_rq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	enum blk_eh_timer_return ret;

	ret = q->rq_timed_out_fn(req);
	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_clear_rq_complete(req);
		blk_add_timer(req);
		break;
	case BLK_EH_NOT_HANDLED:
		/*
		 * The LLD handles this for now, but in the future
		 * we could send a request message to abort the command
		 * and move more of the generic SCSI EH code into
		 * the block layer.
		 */
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

void blk_rq_timed_out_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *) data;
	unsigned long flags, next = 0;
	struct request *rq, *tmp;
	int next_set = 0;

	spin_lock_irqsave(q->queue_lock, flags);

	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
		if (time_after_eq(jiffies, rq->deadline)) {
			list_del_init(&rq->timeout_list);

			/*
			 * Check if we raced with end io completion
			 */
			if (blk_mark_rq_complete(rq))
				continue;
			blk_rq_timed_out(rq);
		} else if (!next_set || time_after(next, rq->deadline)) {
			next = rq->deadline;
			next_set = 1;
		}
	}

	if (next_set)
		mod_timer(&q->timeout, round_jiffies_up(next));

	spin_unlock_irqrestore(q->queue_lock, flags);
}
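
/*
 * Illustrative sketch (not part of this file): the shape of an LLD's
 * rq_timed_out_fn, whose return value drives the switch in
 * blk_rq_timed_out() above. "struct my_dev" and its helpers are
 * hypothetical; only the enum blk_eh_timer_return contract is real.
 */
#if 0
static enum blk_eh_timer_return my_rq_timed_out(struct request *rq)
{
	struct my_dev *dev = rq->q->queuedata;	/* hypothetical driver data */

	if (my_dev_request_finished(dev, rq))
		return BLK_EH_HANDLED;		/* block layer completes rq */
	if (my_dev_making_progress(dev))
		return BLK_EH_RESET_TIMER;	/* re-arm rq's timeout */
	return BLK_EH_NOT_HANDLED;		/* driver's own EH recovers */
}
#endif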

/**
 * blk_abort_request -- Request recovery for the specified command
 * @req:	pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the q's timeout function.
 * LLDDs who implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request. Must hold queue lock.
 */
void blk_abort_request(struct request *req)
{
	if (blk_mark_rq_complete(req))
		return;
	blk_delete_timer(req);
	blk_rq_timed_out(req);
}
EXPORT_SYMBOL_GPL(blk_abort_request);

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:	request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	if (!q->rq_timed_out_fn)
		return;

	BUG_ON(!list_empty(&req->timeout_list));
	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));

	/*
	 * Some LLDs, like SCSI, peek at the timeout to prevent a
	 * command from being retried forever.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;

	req->deadline = jiffies + req->timeout;
	list_add_tail(&req->timeout_list, &q->timeout_list);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round up to the next
	 * nearest second.
	 */
	expiry = round_jiffies_up(req->deadline);

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires))
		mod_timer(&q->timeout, expiry);
}
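
/*
 * Illustrative sketch (not part of this file): how a request-based
 * driver opts into this machinery at init time. blk_queue_rq_timed_out()
 * and blk_queue_rq_timeout() are the block-layer setters for the
 * q->rq_timed_out_fn and q->rq_timeout fields used above; my_request_fn,
 * struct my_dev, and the 30-second value are made up for the example.
 */
#if 0
static int my_driver_init_queue(struct my_dev *dev)
{
	dev->queue = blk_init_queue(my_request_fn, &dev->lock);
	if (!dev->queue)
		return -ENOMEM;

	blk_queue_rq_timed_out(dev->queue, my_rq_timed_out);	/* handler sketched above */
	blk_queue_rq_timeout(dev->queue, 30 * HZ);		/* default per-request timeout */
	return 0;
}
#endif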

/**
 * blk_abort_queue -- Abort all requests on a given queue
 * @q:	pointer to the queue of interest
 *
 */
void blk_abort_queue(struct request_queue *q)
{
	unsigned long flags;
	struct request *rq, *tmp;
	LIST_HEAD(list);

	/*
	 * Not a request-based block device, nothing to abort
	 */
	if (!q->request_fn)
		return;

	spin_lock_irqsave(q->queue_lock, flags);

	elv_abort_queue(q);

	/*
	 * Splice entries to a local list, to avoid deadlocking if entries
	 * get re-added to the timeout list by error handling
	 */
	list_splice_init(&q->timeout_list, &list);

	list_for_each_entry_safe(rq, tmp, &list, timeout_list)
		blk_abort_request(rq);

	/*
	 * Occasionally, blk_abort_request() will return without
	 * deleting the element from the list. Make sure we add those back
	 * instead of leaving them on the local stack list.
	 */
	list_splice(&list, &q->timeout_list);

	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_abort_queue);
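
/*
 * Illustrative sketch (not part of this file): an LLD that decides its
 * device is unrecoverable can use blk_abort_queue() to push every
 * pending request through the timeout/recovery path. "struct my_dev"
 * is hypothetical; note that blk_abort_queue() takes the queue lock
 * itself, so the caller must NOT hold it.
 */
#if 0
static void my_device_gone(struct my_dev *dev)
{
	blk_abort_queue(dev->queue);	/* fails queued and timed-out requests */
}
#endif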