Commit d50235b7bc3ee0a0427984d763ea7534149531b4
Committed by Jens Axboe
1 parent: a6b3f7614c
Exists in smarc-imx_3.14.28_1.0.0_ga and in 1 other branch
elevator: Fix a race in elevator switching
There is a race between elevator switching and normal I/O operation, because the allocation of struct elevator_queue and of the elevator's private data (->elevator_data) is not one atomic operation, so another thread can observe a NULL ->elevator_data. For example:

    Thread A:                          Thread B:
    blk_queue_bio                      elevator_switch
      spin_lock_irq(q->queue_lock)       elevator_alloc
      elv_merge                          elevator_init_fn

Because elevator_alloc() is called without holding queue_lock, ->elevator_data is still NULL at that point. If thread A calls elv_merge() in that window and needs information from elevator_data, the kernel crashes.

Move elevator_alloc() into each scheduler's elevator_init_fn, so that allocating the elevator queue and initializing its private data become one atomic operation with respect to the queue.

The bug is easy to reproduce with the following method:
1: dd if=/dev/sdb of=/dev/null
2: while true; do echo noop > scheduler; echo deadline > scheduler; done

The fix was verified with the same method.

Signed-off-by: Jianpeng Ma <majianpeng@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Showing 5 changed files with 51 additions and 30 deletions
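Before the per-file hunks, here is a condensed sketch of the initialization pattern that each scheduler adopts in this patch. It is distilled from the noop changes in the diff below; struct foo_data and foo_init_queue are hypothetical stand-ins for a scheduler's private data and init hook, not names from the patch.

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/slab.h>

struct foo_data {
        struct list_head queue;         /* stand-in for scheduler-private state */
};

static int foo_init_queue(struct request_queue *q, struct elevator_type *e)
{
        struct foo_data *fd;
        struct elevator_queue *eq;

        /* Allocate the elevator_queue first; elevator_alloc() is exported
         * by this patch so schedulers can call it directly. */
        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        fd = kmalloc_node(sizeof(*fd), GFP_KERNEL, q->node);
        if (!fd) {
                /* The block core no longer cleans up a failed init, so the
                 * scheduler drops the half-built elevator itself. */
                kobject_put(&eq->kobj);
                return -ENOMEM;
        }
        eq->elevator_data = fd;         /* private data set before publishing */

        INIT_LIST_HEAD(&fd->queue);

        /* Publish q->elevator only now, under queue_lock, so a concurrent
         * blk_queue_bio()/elv_merge() never sees a NULL ->elevator_data. */
        spin_lock_irq(q->queue_lock);
        q->elevator = eq;
        spin_unlock_irq(q->queue_lock);
        return 0;
}

The cfq and deadline versions below differ mainly in how they set up their private data.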
block/cfq-iosched.c
@@ -4347,18 +4347,28 @@
         kfree(cfqd);
 }
 
-static int cfq_init_queue(struct request_queue *q)
+static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
 {
         struct cfq_data *cfqd;
         struct blkcg_gq *blkg __maybe_unused;
         int i, ret;
+        struct elevator_queue *eq;
 
+        eq = elevator_alloc(q, e);
+        if (!eq)
+                return -ENOMEM;
+
         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
-        if (!cfqd)
+        if (!cfqd) {
+                kobject_put(&eq->kobj);
                 return -ENOMEM;
+        }
+        eq->elevator_data = cfqd;
 
         cfqd->queue = q;
-        q->elevator->elevator_data = cfqd;
+        spin_lock_irq(q->queue_lock);
+        q->elevator = eq;
+        spin_unlock_irq(q->queue_lock);
 
         /* Init root service tree */
         cfqd->grp_service_tree = CFQ_RB_ROOT;
@@ -4433,6 +4443,7 @@
 
 out_free:
         kfree(cfqd);
+        kobject_put(&eq->kobj);
         return ret;
 }
 
block/deadline-iosched.c
@@ -337,13 +337,21 @@
 /*
  * initialize elevator private data (deadline_data).
  */
-static int deadline_init_queue(struct request_queue *q)
+static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
 {
         struct deadline_data *dd;
+        struct elevator_queue *eq;
 
+        eq = elevator_alloc(q, e);
+        if (!eq)
+                return -ENOMEM;
+
         dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
-        if (!dd)
+        if (!dd) {
+                kobject_put(&eq->kobj);
                 return -ENOMEM;
+        }
+        eq->elevator_data = dd;
 
         INIT_LIST_HEAD(&dd->fifo_list[READ]);
         INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -355,7 +363,9 @@
         dd->front_merges = 1;
         dd->fifo_batch = fifo_batch;
 
-        q->elevator->elevator_data = dd;
+        spin_lock_irq(q->queue_lock);
+        q->elevator = eq;
+        spin_unlock_irq(q->queue_lock);
         return 0;
 }
 
block/elevator.c
@@ -150,7 +150,7 @@
 
 static struct kobj_type elv_ktype;
 
-static struct elevator_queue *elevator_alloc(struct request_queue *q,
+struct elevator_queue *elevator_alloc(struct request_queue *q,
                                   struct elevator_type *e)
 {
         struct elevator_queue *eq;
@@ -170,6 +170,7 @@
         elevator_put(e);
         return NULL;
 }
+EXPORT_SYMBOL(elevator_alloc);
 
 static void elevator_release(struct kobject *kobj)
 {
@@ -221,16 +222,7 @@
                 }
         }
 
-        q->elevator = elevator_alloc(q, e);
-        if (!q->elevator)
-                return -ENOMEM;
-
-        err = e->ops.elevator_init_fn(q);
-        if (err) {
-                kobject_put(&q->elevator->kobj);
-                return err;
-        }
-
+        err = e->ops.elevator_init_fn(q, e);
         return 0;
 }
 EXPORT_SYMBOL(elevator_init);
 
@@ -935,16 +927,9 @@
         spin_unlock_irq(q->queue_lock);
 
         /* allocate, init and register new elevator */
-        err = -ENOMEM;
-        q->elevator = elevator_alloc(q, new_e);
-        if (!q->elevator)
+        err = new_e->ops.elevator_init_fn(q, new_e);
+        if (err)
                 goto fail_init;
-
-        err = new_e->ops.elevator_init_fn(q);
-        if (err) {
-                kobject_put(&q->elevator->kobj);
-                goto fail_init;
-        }
 
         if (registered) {
                 err = elv_register_queue(q);
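Read together, the two hunks above change elevator_init() and elevator_switch() from a two-step allocate-then-initialize sequence into a single call into the scheduler. The annotated excerpt below (drawn from the elevator_switch() hunk above, not standalone code) restates the before/after ordering:

/* Before (removed lines): q->elevator becomes visible while its
 * ->elevator_data is still NULL, because elevator_alloc() runs outside
 * queue_lock and the init_fn fills in elevator_data only afterwards. */
q->elevator = elevator_alloc(q, new_e);         /* published too early */
err = new_e->ops.elevator_init_fn(q);           /* elevator_data set here */

/* After (added lines): the scheduler's init_fn allocates the elevator_queue,
 * sets ->elevator_data, and only then assigns q->elevator under
 * q->queue_lock, so a racing blk_queue_bio(), which holds queue_lock,
 * never observes a half-initialized elevator. */
err = new_e->ops.elevator_init_fn(q, new_e);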
block/noop-iosched.c
@@ -59,16 +59,27 @@
         return list_entry(rq->queuelist.next, struct request, queuelist);
 }
 
-static int noop_init_queue(struct request_queue *q)
+static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
 {
         struct noop_data *nd;
+        struct elevator_queue *eq;
 
+        eq = elevator_alloc(q, e);
+        if (!eq)
+                return -ENOMEM;
+
         nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
-        if (!nd)
+        if (!nd) {
+                kobject_put(&eq->kobj);
                 return -ENOMEM;
+        }
+        eq->elevator_data = nd;
 
         INIT_LIST_HEAD(&nd->queue);
-        q->elevator->elevator_data = nd;
+
+        spin_lock_irq(q->queue_lock);
+        q->elevator = eq;
+        spin_unlock_irq(q->queue_lock);
         return 0;
 }
 
include/linux/elevator.h
@@ -7,6 +7,7 @@
 #ifdef CONFIG_BLOCK
 
 struct io_cq;
+struct elevator_type;
 
 typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
                                  struct bio *);
@@ -35,7 +36,8 @@
 typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
 typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
 
-typedef int (elevator_init_fn) (struct request_queue *);
+typedef int (elevator_init_fn) (struct request_queue *,
+                                struct elevator_type *e);
 typedef void (elevator_exit_fn) (struct elevator_queue *);
 
 struct elevator_ops
@@ -155,6 +157,8 @@
 extern void elevator_exit(struct elevator_queue *);
 extern int elevator_change(struct request_queue *, const char *);
 extern bool elv_rq_merge_ok(struct request *, struct bio *);
+extern struct elevator_queue *elevator_alloc(struct request_queue *,
+                                        struct elevator_type *);
 
 /*
  * Helper functions.
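Any I/O scheduler that implements elevator_init_fn must now use the two-argument prototype declared above and allocate its own elevator_queue. As a rough illustration of the wiring, under the assumption of the 3.10-era struct elevator_type fields: the foo_* names are hypothetical, and a real scheduler would also fill in merge, dispatch, and exit hooks.

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/module.h>

/* Init hook with the new prototype: it receives the elevator_type so it
 * can hand it straight to elevator_alloc(). */
static int foo_init_queue(struct request_queue *q, struct elevator_type *e)
{
        struct elevator_queue *eq = elevator_alloc(q, e);

        if (!eq)
                return -ENOMEM;
        /* set eq->elevator_data to the scheduler's private state here,
         * following the cfq/deadline/noop hunks above */
        spin_lock_irq(q->queue_lock);
        q->elevator = eq;
        spin_unlock_irq(q->queue_lock);
        return 0;
}

static struct elevator_type elevator_foo = {
        .ops = {
                .elevator_init_fn       = foo_init_queue,       /* new two-argument form */
        },
        .elevator_name  = "foo",
        .elevator_owner = THIS_MODULE,
};

static int __init foo_iosched_init(void)
{
        return elv_register(&elevator_foo);
}

static void __exit foo_iosched_exit(void)
{
        elv_unregister(&elevator_foo);
}

module_init(foo_iosched_init);
module_exit(foo_iosched_exit);
MODULE_LICENSE("GPL");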