block/blk-softirq.c
/*
 * Functions related to softirq rq completions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include "blk.h"

static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

/*
 * Softirq action handler - move entries to local list and loop over them
 * while passing them to the queue registered handler.
 */
static void blk_done_softirq(struct softirq_action *h)
{
	struct list_head *cpu_list, local_list;

	local_irq_disable();
	cpu_list = this_cpu_ptr(&blk_cpu_done);
	list_replace_init(cpu_list, &local_list);
	local_irq_enable();

	while (!list_empty(&local_list)) {
		struct request *rq;

		rq = list_entry(local_list.next, struct request, ipi_list);
		list_del_init(&rq->ipi_list);
		rq->q->softirq_done_fn(rq);
	}
}

#ifdef CONFIG_SMP
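/*
 * Runs in hard irq context on the request's completion CPU, as the
 * smp_call_function_single_async() callback set up by raise_blk_irq()
 * below: add the request to this CPU's completion list and raise
 * BLOCK_SOFTIRQ if the list was empty. A non-empty list means the
 * softirq was already raised and simply hasn't run yet.
 */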
static void trigger_softirq(void *data)
{
	struct request *rq = data;
	unsigned long flags;
	struct list_head *list;

	local_irq_save(flags);
	list = this_cpu_ptr(&blk_cpu_done);
	list_add_tail(&rq->ipi_list, list);

	if (list->next == &rq->ipi_list)
		raise_softirq_irqoff(BLOCK_SOFTIRQ);

	local_irq_restore(flags);
}

/*
 * Setup and invoke a run of 'trigger_softirq' on the given cpu.
 */
static int raise_blk_irq(int cpu, struct request *rq)
{
	if (cpu_online(cpu)) {
		struct call_single_data *data = &rq->csd;

		data->func = trigger_softirq;
		data->info = rq;
		data->flags = 0;
		smp_call_function_single_async(cpu, data);
		return 0;
	}

	return 1;
}
#else /* CONFIG_SMP */
static int raise_blk_irq(int cpu, struct request *rq)
{
	return 1;
}
#endif
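
/*
 * CPU hotplug: a dead CPU can no longer run its softirq, so any
 * completions still parked on its blk_cpu_done list would be lost.
 * The notifier below splices them onto the current CPU's list and
 * re-raises the softirq there.
 */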
static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_done, cpu),
				 this_cpu_ptr(&blk_cpu_done));
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block blk_cpu_notifier = {
	.notifier_call	= blk_cpu_notify,
};
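
/*
 * Pick a completion CPU for @req (honouring rq_affinity via req->cpu
 * and QUEUE_FLAG_SAME_FORCE), queue the request on that CPU's
 * blk_cpu_done list, and make sure BLOCK_SOFTIRQ will run there -
 * either directly, or via an IPI that invokes trigger_softirq().
 * Falls back to local completion if the requested CPU is offline.
 */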
void __blk_complete_request(struct request *req)
{
	int ccpu, cpu;
	struct request_queue *q = req->q;
	unsigned long flags;
	bool shared = false;

	BUG_ON(!q->softirq_done_fn);

	local_irq_save(flags);
	cpu = smp_processor_id();

	/*
	 * Select completion CPU
	 */
	if (req->cpu != -1) {
		ccpu = req->cpu;
		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
			shared = cpus_share_cache(cpu, ccpu);
	} else
		ccpu = cpu;

	/*
	 * If the current CPU and the requested CPU share a cache, run the
	 * softirq on the current CPU. One might think this is just like
	 * QUEUE_FLAG_SAME_FORCE, but it is not: blk_complete_request() is
	 * running in an interrupt handler, and the I/O controller doesn't
	 * support multiple interrupts, so in practice the current CPU is
	 * unique. This avoids sending an IPI from the current CPU to the
	 * first CPU of a cache-sharing group.
	 */
	if (ccpu == cpu || shared) {
		struct list_head *list;
do_local:
		list = this_cpu_ptr(&blk_cpu_done);
		list_add_tail(&req->ipi_list, list);

		/*
		 * If the list only contains our just-added request, raise
		 * the softirq; if there are already entries, someone has
		 * already raised it but it hasn't run yet.
		 */
		if (list->next == &req->ipi_list)
			raise_softirq_irqoff(BLOCK_SOFTIRQ);
	} else if (raise_blk_irq(ccpu, req))
		goto do_local;

	local_irq_restore(flags);
}

/**
 * blk_complete_request - end I/O on a request
 * @req:      the request being processed
 *
 * Description:
 *     Ends all I/O on a request. It does not handle partial completions,
 *     unless the driver actually implements this in its completion callback
 *     through requeueing. The actual completion happens out-of-order,
 *     through a softirq handler. The user must have registered a completion
 *     callback through blk_queue_softirq_done().
 **/
void blk_complete_request(struct request *req)
{
	if (unlikely(blk_should_fake_timeout(req->q)))
		return;
	if (!blk_mark_rq_complete(req))
		__blk_complete_request(req);
}
EXPORT_SYMBOL(blk_complete_request);
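
/*
 * Example usage (a sketch, not part of this file): a driver registers
 * its completion callback once at queue-setup time and then completes
 * requests from its hard irq handler. The callback name my_softirq_done
 * is hypothetical; q and rq stand for the driver's request_queue and
 * the request being completed.
 *
 *	static void my_softirq_done(struct request *rq)
 *	{
 *		// invoked later, in BLOCK_SOFTIRQ context, on the CPU
 *		// chosen by __blk_complete_request()
 *		blk_end_request_all(rq, 0);
 *	}
 *
 *	blk_queue_softirq_done(q, my_softirq_done);	// at init time
 *
 *	blk_complete_request(rq);	// from the hard irq handler
 */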

static __init int blk_softirq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
	register_hotcpu_notifier(&blk_cpu_notifier);
	return 0;
}
subsys_initcall(blk_softirq_init);