Blame view
block/blk-stat.c
4.71 KB
cf43e6be8
|
1 2 3 4 5 6 |
/* * Block stat tracking code * * Copyright (C) 2016 Jens Axboe */ #include <linux/kernel.h> |
34dbad5d2
|
7 |
#include <linux/rculist.h> |
cf43e6be8
|
8 9 10 11 |
#include <linux/blk-mq.h> #include "blk-stat.h" #include "blk-mq.h" |
b9147dd1b
|
12 |
#include "blk.h" |
cf43e6be8
|
13 |
|
34dbad5d2
|
14 15 16 |
/*
 * Per-queue container for stat callbacks.  Mutations of @callbacks are
 * serialized by @lock; readers traverse the list under RCU (see
 * blk_stat_add()).
 */
struct blk_queue_stats {
	struct list_head callbacks;	/* registered blk_stat_callbacks (RCU list) */
	spinlock_t lock;		/* protects @callbacks and QUEUE_FLAG_STATS updates */
	bool enable_accounting;		/* keep stats on even with no callbacks registered */
};
2ecbf4563
|
19 |
void blk_rq_stat_init(struct blk_rq_stat *stat) |
34dbad5d2
|
20 21 22 |
{ stat->min = -1ULL; stat->max = stat->nr_samples = stat->mean = 0; |
eca8b53a6
|
23 |
stat->batch = 0; |
cf43e6be8
|
24 |
} |
eca8b53a6
|
25 |
/* src is a per-cpu stat, mean isn't initialized */ |
2ecbf4563
|
26 |
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src) |
cf43e6be8
|
27 28 29 |
{ if (!src->nr_samples) return; |
cf43e6be8
|
30 31 |
dst->min = min(dst->min, src->min); dst->max = max(dst->max, src->max); |
eca8b53a6
|
32 33 |
dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples, dst->nr_samples + src->nr_samples); |
cf43e6be8
|
34 35 |
dst->nr_samples += src->nr_samples; } |
2ecbf4563
|
36 |
/* Fold a single sample @value into @stat: min, max, batch sum, count. */
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	if (value < stat->min)
		stat->min = value;
	if (value > stat->max)
		stat->max = value;
	stat->batch += value;
	stat->nr_samples++;
}
522a77756
|
43 |
/*
 * Record a completion sample for @rq, completed at time @now (ns), into
 * every active stat callback registered on the request's queue.
 */
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	u64 value;

	/* Clamp to 0 rather than reporting a wrapped "negative" latency. */
	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		/* A negative bucket means "don't account this request". */
		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		/*
		 * get_cpu_ptr() disables preemption, so this CPU's
		 * bucket is exclusively ours until put_cpu_ptr().
		 */
		stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
		blk_rq_stat_add(stat, value);
		put_cpu_ptr(cb->cpu_stat);
	}
	rcu_read_unlock();
}
e99e88a9d
|
69 |
static void blk_stat_timer_fn(struct timer_list *t) |
cf43e6be8
|
70 |
{ |
e99e88a9d
|
71 |
struct blk_stat_callback *cb = from_timer(cb, t, timer); |
34dbad5d2
|
72 73 |
unsigned int bucket; int cpu; |
cf43e6be8
|
74 |
|
34dbad5d2
|
75 |
for (bucket = 0; bucket < cb->buckets; bucket++) |
2ecbf4563
|
76 |
blk_rq_stat_init(&cb->stat[bucket]); |
cf43e6be8
|
77 |
|
34dbad5d2
|
78 79 |
for_each_online_cpu(cpu) { struct blk_rq_stat *cpu_stat; |
7cd54aa84
|
80 |
|
34dbad5d2
|
81 82 |
cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu); for (bucket = 0; bucket < cb->buckets; bucket++) { |
2ecbf4563
|
83 84 |
blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]); blk_rq_stat_init(&cpu_stat[bucket]); |
cf43e6be8
|
85 |
} |
34dbad5d2
|
86 |
} |
cf43e6be8
|
87 |
|
34dbad5d2
|
88 |
cb->timer_fn(cb); |
cf43e6be8
|
89 |
} |
34dbad5d2
|
90 91 |
struct blk_stat_callback * blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *), |
a37244e4c
|
92 |
int (*bucket_fn)(const struct request *), |
34dbad5d2
|
93 |
unsigned int buckets, void *data) |
cf43e6be8
|
94 |
{ |
34dbad5d2
|
95 |
struct blk_stat_callback *cb; |
cf43e6be8
|
96 |
|
34dbad5d2
|
97 98 99 |
cb = kmalloc(sizeof(*cb), GFP_KERNEL); if (!cb) return NULL; |
cf43e6be8
|
100 |
|
34dbad5d2
|
101 102 103 104 105 106 107 108 109 110 111 112 113 |
cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat), GFP_KERNEL); if (!cb->stat) { kfree(cb); return NULL; } cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat), __alignof__(struct blk_rq_stat)); if (!cb->cpu_stat) { kfree(cb->stat); kfree(cb); return NULL; } |
cf43e6be8
|
114 |
|
34dbad5d2
|
115 116 117 118 |
cb->timer_fn = timer_fn; cb->bucket_fn = bucket_fn; cb->data = data; cb->buckets = buckets; |
e99e88a9d
|
119 |
timer_setup(&cb->timer, blk_stat_timer_fn, 0); |
34dbad5d2
|
120 121 |
return cb; |
cf43e6be8
|
122 |
} |
34dbad5d2
|
123 |
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback); |
cf43e6be8
|
124 |
|
34dbad5d2
|
125 126 |
void blk_stat_add_callback(struct request_queue *q, struct blk_stat_callback *cb) |
cf43e6be8
|
127 |
{ |
34dbad5d2
|
128 129 |
unsigned int bucket; int cpu; |
cf43e6be8
|
130 |
|
34dbad5d2
|
131 132 |
for_each_possible_cpu(cpu) { struct blk_rq_stat *cpu_stat; |
cf43e6be8
|
133 |
|
34dbad5d2
|
134 135 |
cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu); for (bucket = 0; bucket < cb->buckets; bucket++) |
2ecbf4563
|
136 |
blk_rq_stat_init(&cpu_stat[bucket]); |
34dbad5d2
|
137 |
} |
cf43e6be8
|
138 |
|
34dbad5d2
|
139 140 |
spin_lock(&q->stats->lock); list_add_tail_rcu(&cb->list, &q->stats->callbacks); |
7dfdbc736
|
141 |
blk_queue_flag_set(QUEUE_FLAG_STATS, q); |
34dbad5d2
|
142 143 144 |
spin_unlock(&q->stats->lock); } EXPORT_SYMBOL_GPL(blk_stat_add_callback); |
cf43e6be8
|
145 |
|
34dbad5d2
|
146 147 148 149 150 |
/*
 * Unregister @cb from @q.  QUEUE_FLAG_STATS is cleared only when no
 * callback remains and accounting wasn't force-enabled via
 * blk_stat_enable_accounting().  Waits for a concurrently running
 * timer so the caller may free @cb afterwards (blk_stat_free_callback()
 * additionally defers the actual free past an RCU grace period).
 */
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);

	/* Ensure a firing blk_stat_timer_fn() has completed. */
	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);
cf43e6be8
|
158 |
|
34dbad5d2
|
159 |
static void blk_stat_free_callback_rcu(struct rcu_head *head) |
cf43e6be8
|
160 |
{ |
34dbad5d2
|
161 162 163 164 165 166 |
struct blk_stat_callback *cb; cb = container_of(head, struct blk_stat_callback, rcu); free_percpu(cb->cpu_stat); kfree(cb->stat); kfree(cb); |
cf43e6be8
|
167 |
} |
34dbad5d2
|
168 |
void blk_stat_free_callback(struct blk_stat_callback *cb) |
cf43e6be8
|
169 |
{ |
a83b576c9
|
170 171 |
if (cb) call_rcu(&cb->rcu, blk_stat_free_callback_rcu); |
cf43e6be8
|
172 |
} |
34dbad5d2
|
173 |
EXPORT_SYMBOL_GPL(blk_stat_free_callback); |
cf43e6be8
|
174 |
|
b9147dd1b
|
175 176 177 178 |
/*
 * Force stat accounting on for @q regardless of registered callbacks.
 * Once set, blk_stat_remove_callback() no longer clears
 * QUEUE_FLAG_STATS when the callback list empties.
 */
void blk_stat_enable_accounting(struct request_queue *q)
{
	spin_lock(&q->stats->lock);
	q->stats->enable_accounting = true;
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}
34dbad5d2
|
182 |
struct blk_queue_stats *blk_alloc_queue_stats(void) |
cf43e6be8
|
183 |
{ |
34dbad5d2
|
184 185 186 187 188 189 190 191 |
struct blk_queue_stats *stats; stats = kmalloc(sizeof(*stats), GFP_KERNEL); if (!stats) return NULL; INIT_LIST_HEAD(&stats->callbacks); spin_lock_init(&stats->lock); |
b9147dd1b
|
192 |
stats->enable_accounting = false; |
34dbad5d2
|
193 194 195 196 197 198 199 200 201 202 |
return stats; } void blk_free_queue_stats(struct blk_queue_stats *stats) { if (!stats) return; WARN_ON(!list_empty(&stats->callbacks)); |
cf43e6be8
|
203 |
|
34dbad5d2
|
204 |
kfree(stats); |
cf43e6be8
|
205 |
} |