block/blk-softirq.c

  /*
   * Functions related to softirq rq completions
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/interrupt.h>
  #include <linux/cpu.h>
  
  #include "blk.h"
  
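/*
 * Per-CPU list of requests waiting to be completed from BLOCK_SOFTIRQ
 * context.
 */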
  static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

/*
 * Softirq action handler: move entries from this CPU's completion list to a
 * local list and pass each request to the queue's registered completion
 * handler.
 */
  static void blk_done_softirq(struct softirq_action *h)
  {
  	struct list_head *cpu_list, local_list;
  
  	local_irq_disable();
  	cpu_list = &__get_cpu_var(blk_cpu_done);
  	list_replace_init(cpu_list, &local_list);
  	local_irq_enable();
  
  	while (!list_empty(&local_list)) {
  		struct request *rq;
  
  		rq = list_entry(local_list.next, struct request, csd.list);
  		list_del_init(&rq->csd.list);
  		rq->q->softirq_done_fn(rq);
  	}
  }
  
  #if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
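/*
 * Runs on the CPU chosen by raise_blk_irq(), via the generic IPI machinery:
 * queue the request on that CPU's completion list and raise BLOCK_SOFTIRQ
 * if the list was previously empty.
 */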
  static void trigger_softirq(void *data)
  {
  	struct request *rq = data;
  	unsigned long flags;
  	struct list_head *list;
  
  	local_irq_save(flags);
  	list = &__get_cpu_var(blk_cpu_done);
  	list_add_tail(&rq->csd.list, list);
  
  	if (list->next == &rq->csd.list)
  		raise_softirq_irqoff(BLOCK_SOFTIRQ);
  
  	local_irq_restore(flags);
  }
  
/*
 * Set up and invoke a run of 'trigger_softirq' on the given CPU, if it is
 * online. Returns 0 if the IPI was queued, 1 if the caller should complete
 * the request locally instead.
 */
  static int raise_blk_irq(int cpu, struct request *rq)
  {
  	if (cpu_online(cpu)) {
  		struct call_single_data *data = &rq->csd;
  
  		data->func = trigger_softirq;
  		data->info = rq;
  		data->flags = 0;

		__smp_call_function_single(cpu, data, 0);
  		return 0;
  	}
  
  	return 1;
  }
  #else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
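/*
 * Without SMP (or the generic IPI helpers) there is no remote CPU to raise
 * the softirq on; always tell the caller to complete locally.
 */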
  static int raise_blk_irq(int cpu, struct request *rq)
  {
  	return 1;
  }
  #endif

static int __cpuinit blk_cpu_notify(struct notifier_block *self,
  				    unsigned long action, void *hcpu)
  {
  	/*
  	 * If a CPU goes away, splice its entries to the current CPU
  	 * and trigger a run of the softirq
  	 */
  	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
  		int cpu = (unsigned long) hcpu;
  
  		local_irq_disable();
  		list_splice_init(&per_cpu(blk_cpu_done, cpu),
  				 &__get_cpu_var(blk_cpu_done));
  		raise_softirq_irqoff(BLOCK_SOFTIRQ);
  		local_irq_enable();
  	}
  
  	return NOTIFY_OK;
  }

static struct notifier_block __cpuinitdata blk_cpu_notifier = {
  	.notifier_call	= blk_cpu_notify,
  };
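
/*
 * Queue @req for softirq completion, either on the local CPU or, when
 * rq_affinity directs completion elsewhere, by sending an IPI through
 * raise_blk_irq() so the softirq is raised on the chosen CPU.
 */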
  void __blk_complete_request(struct request *req)
  {
  	int ccpu, cpu, group_cpu = NR_CPUS;
  	struct request_queue *q = req->q;
  	unsigned long flags;

	BUG_ON(!q->softirq_done_fn);
  
  	local_irq_save(flags);
  	cpu = smp_processor_id();

	/*
	 * Select the completion CPU: req->cpu records the CPU the submitter
	 * wants the completion to run on, or -1 for no preference.
	 */
  	if (req->cpu != -1) {
  		ccpu = req->cpu;
  		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
  			ccpu = blk_cpu_to_group(ccpu);
  			group_cpu = blk_cpu_to_group(cpu);
  		}
  	} else
  		ccpu = cpu;

	/*
	 * If the current CPU and the requested CPU are in the same group,
	 * run the softirq on the current CPU. This may look just like
	 * QUEUE_FLAG_SAME_FORCE, but it is not: blk_complete_request() runs
	 * from the interrupt handler, and since current I/O controllers don't
	 * support multiple interrupts, the current CPU is effectively unique.
	 * Completing here avoids sending an IPI from the current CPU to the
	 * first CPU of the group.
	 */
  	if (ccpu == cpu || ccpu == group_cpu) {
  		struct list_head *list;
  do_local:
  		list = &__get_cpu_var(blk_cpu_done);
  		list_add_tail(&req->csd.list, list);
  
		/*
		 * If the list contains only the request we just added,
		 * raise the softirq; if there are already entries, someone
		 * has raised it but it hasn't run yet.
		 */
  		if (list->next == &req->csd.list)
  			raise_softirq_irqoff(BLOCK_SOFTIRQ);
  	} else if (raise_blk_irq(ccpu, req))
  		goto do_local;
  
  	local_irq_restore(flags);
  }
  
  /**
   * blk_complete_request - end I/O on a request
   * @req:      the request being processed
   *
   * Description:
   *     Ends all I/O on a request. It does not handle partial completions,
   *     unless the driver actually implements this in its completion callback
   *     through requeueing. The actual completion happens out-of-order,
   *     through a softirq handler. The user must have registered a completion
   *     callback through blk_queue_softirq_done().
   **/
  void blk_complete_request(struct request *req)
  {
  	if (unlikely(blk_should_fake_timeout(req->q)))
  		return;

	if (!blk_mark_rq_complete(req))
  		__blk_complete_request(req);
  }
  EXPORT_SYMBOL(blk_complete_request);
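
/*
 * Typical usage (illustrative sketch only; my_softirq_done, my_irq_handler
 * and my_dev_pop_completed are hypothetical driver functions): register the
 * completion callback once at queue setup, then call blk_complete_request()
 * from the hardware interrupt handler and do the real end-of-request work
 * in the callback, which runs in BLOCK_SOFTIRQ context.
 *
 *	static void my_softirq_done(struct request *rq)
 *	{
 *		blk_end_request_all(rq, 0);
 *	}
 *
 *	blk_queue_softirq_done(q, my_softirq_done);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		struct request *rq = my_dev_pop_completed(dev_id);
 *
 *		blk_complete_request(rq);
 *		return IRQ_HANDLED;
 *	}
 */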
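
/*
 * Boot-time setup: initialise every possible CPU's completion list, then
 * register the BLOCK_SOFTIRQ handler and the CPU hotplug notifier.
 */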
  static __init int blk_softirq_init(void)
  {
  	int i;
  
  	for_each_possible_cpu(i)
  		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
  
  	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
  	register_hotcpu_notifier(&blk_cpu_notifier);
  	return 0;
  }
  subsys_initcall(blk_softirq_init);