block/blk-softirq.c
  /*
   * Functions related to softirq rq completions
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/interrupt.h>
  #include <linux/cpu.h>
  #include <linux/sched.h>
  
  #include "blk.h"
  
  static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

  /*
   * Softirq action handler - move entries to a local list and loop over
   * them, passing each to the queue's registered completion handler.
   */
  static void blk_done_softirq(struct softirq_action *h)
  {
  	struct list_head *cpu_list, local_list;
  
  	local_irq_disable();
  	cpu_list = &__get_cpu_var(blk_cpu_done);
  	list_replace_init(cpu_list, &local_list);
  	local_irq_enable();
  
  	while (!list_empty(&local_list)) {
  		struct request *rq;
  
  		rq = list_entry(local_list.next, struct request, csd.list);
  		list_del_init(&rq->csd.list);
  		rq->q->softirq_done_fn(rq);
  	}
  }
  
  #if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
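  /*
   * IPI callback, run on the target CPU: add the request to that CPU's
   * completion list and raise BLOCK_SOFTIRQ if it is the first entry.
   */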
  static void trigger_softirq(void *data)
  {
  	struct request *rq = data;
  	unsigned long flags;
  	struct list_head *list;
  
  	local_irq_save(flags);
  	list = &__get_cpu_var(blk_cpu_done);
  	list_add_tail(&rq->csd.list, list);
  
  	if (list->next == &rq->csd.list)
  		raise_softirq_irqoff(BLOCK_SOFTIRQ);
  
  	local_irq_restore(flags);
  }
  
  /*
   * Set up and invoke a run of 'trigger_softirq' on the given cpu.
   * Returns 0 on success, 1 if the CPU is offline and the caller must
   * complete the request locally instead.
   */
  static int raise_blk_irq(int cpu, struct request *rq)
  {
  	if (cpu_online(cpu)) {
  		struct call_single_data *data = &rq->csd;
  
  		data->func = trigger_softirq;
  		data->info = rq;
  		data->flags = 0;
  		__smp_call_function_single(cpu, data, 0);
  		return 0;
  	}
  
  	return 1;
  }
  #else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
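  /*
   * Without SMP (or the generic SMP helpers) a request can never be
   * completed on a remote CPU, so always tell the caller to complete
   * it locally.
   */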
  static int raise_blk_irq(int cpu, struct request *rq)
  {
  	return 1;
  }
  #endif

  static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
  			  void *hcpu)
  {
  	/*
  	 * If a CPU goes away, splice its entries to the current CPU
  	 * and trigger a run of the softirq
  	 */
  	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
  		int cpu = (unsigned long) hcpu;
  
  		local_irq_disable();
  		list_splice_init(&per_cpu(blk_cpu_done, cpu),
  				 &__get_cpu_var(blk_cpu_done));
  		raise_softirq_irqoff(BLOCK_SOFTIRQ);
  		local_irq_enable();
  	}
  
  	return NOTIFY_OK;
  }

  static struct notifier_block blk_cpu_notifier = {
  	.notifier_call	= blk_cpu_notify,
  };
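  /*
   * Queue @req for completion on the softirq path: pick a completion CPU
   * (honoring req->cpu, QUEUE_FLAG_SAME_FORCE and cache sharing) and
   * either add the request to the local per-CPU done list or IPI the
   * chosen CPU via raise_blk_irq().
   */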
  void __blk_complete_request(struct request *req)
  {
  	int ccpu, cpu;
  	struct request_queue *q = req->q;
  	unsigned long flags;
  	bool shared = false;

  	BUG_ON(!q->softirq_done_fn);
  
  	local_irq_save(flags);
  	cpu = smp_processor_id();

  	/*
  	 * Select completion CPU
  	 */
  	if (req->cpu != -1) {
  		ccpu = req->cpu;
  		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
  			shared = cpus_share_cache(cpu, ccpu);
  	} else
  		ccpu = cpu;
  	/*
  	 * If the current CPU and the requested completion CPU share a cache,
  	 * run the softirq on the current CPU. One might worry that this is
  	 * effectively the same as QUEUE_FLAG_SAME_FORCE, but it is not:
  	 * blk_complete_request() runs from the interrupt handler, and since
  	 * the I/O controller does not support multiple interrupts, the
  	 * current CPU is effectively unique here. This avoids sending an IPI
  	 * from the current CPU to the first CPU of the group.
  	 */
  	if (ccpu == cpu || shared) {
  		struct list_head *list;
  do_local:
  		list = &__get_cpu_var(blk_cpu_done);
  		list_add_tail(&req->csd.list, list);
  
  		/*
  		 * If the list contains only our just-added request, raise
  		 * the softirq. If there are already entries, someone has
  		 * already raised it and it simply hasn't run yet.
  		 */
  		if (list->next == &req->csd.list)
  			raise_softirq_irqoff(BLOCK_SOFTIRQ);
  	} else if (raise_blk_irq(ccpu, req))
  		goto do_local;
  
  	local_irq_restore(flags);
  }
  
  /**
   * blk_complete_request - end I/O on a request
   * @req:      the request being processed
   *
   * Description:
   *     Ends all I/O on a request. It does not handle partial completions,
   *     unless the driver actually implements this in its completion callback
   *     through requeueing. The actual completion happens out-of-order,
   *     through a softirq handler. The user must have registered a completion
   *     callback through blk_queue_softirq_done().
   **/
  void blk_complete_request(struct request *req)
  {
  	if (unlikely(blk_should_fake_timeout(req->q)))
  		return;
  	if (!blk_mark_rq_complete(req))
  		__blk_complete_request(req);
  }
  EXPORT_SYMBOL(blk_complete_request);
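  /*
   * Boot-time setup: initialize each possible CPU's completion list,
   * register the BLOCK_SOFTIRQ handler and the CPU hotplug notifier.
   */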
  static __init int blk_softirq_init(void)
  {
  	int i;
  
  	for_each_possible_cpu(i)
  		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
  
  	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
  	register_hotcpu_notifier(&blk_cpu_notifier);
  	return 0;
  }
  subsys_initcall(blk_softirq_init);
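For reference, a minimal usage sketch of the interface this file implements. It assumes a hypothetical driver: the mydrv_* names, struct mydrv_dev and its rq_in_flight field are illustrative only, while blk_queue_softirq_done(), blk_complete_request() and blk_end_request_all() are the real block-layer entry points a driver of this era would use.

/* Hypothetical driver code -- not part of blk-softirq.c. */
#include <linux/blkdev.h>
#include <linux/interrupt.h>

struct mydrv_dev {
	struct request *rq_in_flight;	/* request the hardware is working on */
};

/* Runs later in BLOCK_SOFTIRQ context, invoked by blk_done_softirq(). */
static void mydrv_softirq_done(struct request *rq)
{
	blk_end_request_all(rq, 0);	/* finish the whole request, no error */
}

static void mydrv_init_queue(struct request_queue *q)
{
	/* Register the callback that BUG_ON(!q->softirq_done_fn) expects. */
	blk_queue_softirq_done(q, mydrv_softirq_done);
}

/* Hard-IRQ handler: defer the heavier completion work to the softirq. */
static irqreturn_t mydrv_irq(int irq, void *dev_id)
{
	struct mydrv_dev *dev = dev_id;

	blk_complete_request(dev->rq_in_flight);
	return IRQ_HANDLED;
}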