block/blk-stat.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"
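
/*
 * Per-queue stats state: the list of registered callbacks, the lock that
 * protects it, and a flag that keeps accounting on even with no callbacks.
 */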
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};
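
/* Reset a stat bucket so a new batch of samples can be accumulated. */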
void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}
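
/*
 * Fold one per-cpu bucket into an aggregate: min/max merge directly, and the
 * mean is recomputed from the batched per-cpu total.
 */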
/* src is a per-cpu stat, mean isn't initialized */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}
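
/* Record a single sample: update min/max and add the value to the batch. */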
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}
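
/*
 * Record one request's I/O time (now - io_start_time_ns): pass it to
 * blk-throttle and add it to the matching per-cpu bucket of every active
 * callback.
 */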
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	blk_throtl_stat_add(rq, value);
	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}
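
/*
 * Window timer: fold every per-cpu bucket into cb->stat, reset the per-cpu
 * copies, and hand the aggregated buckets to the owner's timer_fn.
 */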
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}
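
/*
 * Allocate a callback with 'buckets' stat slots: an aggregate array plus a
 * per-cpu array. The timer is set up here but not armed.
 */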
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}
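
/*
 * Register a callback on a queue: zero its per-cpu buckets, add it to the
 * RCU-protected callback list, and turn on QUEUE_FLAG_STATS.
 */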
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	unsigned long flags;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock_irqsave(&q->stats->lock, flags);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
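
/*
 * Unregister a callback: take it off the list, clear QUEUE_FLAG_STATS if
 * nothing needs stats any more, and make sure its timer is not running.
 */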
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	del_timer_sync(&cb->timer);
}
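
/* RCU callback that does the actual freeing once readers are done. */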
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}
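
/*
 * Free a callback after an RCU grace period, so a concurrent blk_stat_add()
 * walking the callback list cannot touch freed memory.
 */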
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
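
/*
 * Keep statistics accounting enabled for this queue even when no callback
 * is registered, by setting enable_accounting and QUEUE_FLAG_STATS.
 */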
void blk_stat_enable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	q->stats->enable_accounting = true;
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
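
/* Allocate and initialize the per-queue stats container. */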
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}
  
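/* Free the container; every callback should already have been removed. */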
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}