Blame view
block/blk-stat.c
4.8 KB
3dcf60bcb
|
1 |
// SPDX-License-Identifier: GPL-2.0 |
cf43e6be8
|
2 3 4 5 6 7 |
/* * Block stat tracking code * * Copyright (C) 2016 Jens Axboe */ #include <linux/kernel.h> |
34dbad5d2
|
8 |
#include <linux/rculist.h> |
cf43e6be8
|
9 10 11 12 |
#include <linux/blk-mq.h> #include "blk-stat.h" #include "blk-mq.h" |
b9147dd1b
|
13 |
#include "blk.h" |
cf43e6be8
|
14 |
|
34dbad5d2
|
15 16 17 |
/*
 * Per-queue stats state: the registered stat callbacks plus the sticky
 * accounting flag checked by blk_stat_remove_callback().
 */
struct blk_queue_stats {
	struct list_head callbacks;	/* blk_stat_callback list; RCU readers,
					 * writers serialized by @lock */
	spinlock_t lock;		/* protects @callbacks and the
					 * QUEUE_FLAG_STATS transitions */
	bool enable_accounting;		/* keep stats on even with no
					 * callbacks registered */
};
2ecbf4563
|
20 |
void blk_rq_stat_init(struct blk_rq_stat *stat) |
34dbad5d2
|
21 22 23 |
{ stat->min = -1ULL; stat->max = stat->nr_samples = stat->mean = 0; |
eca8b53a6
|
24 |
stat->batch = 0; |
cf43e6be8
|
25 |
} |
eca8b53a6
|
26 |
/* src is a per-cpu stat, mean isn't initialized */ |
2ecbf4563
|
27 |
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src) |
cf43e6be8
|
28 29 30 |
{ if (!src->nr_samples) return; |
cf43e6be8
|
31 32 |
dst->min = min(dst->min, src->min); dst->max = max(dst->max, src->max); |
eca8b53a6
|
33 34 |
dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples, dst->nr_samples + src->nr_samples); |
cf43e6be8
|
35 36 |
dst->nr_samples += src->nr_samples; } |
2ecbf4563
|
37 |
/*
 * Record one sample: track extremes and accumulate the raw sum in
 * @stat->batch (the mean is only derived later, in blk_rq_stat_sum()).
 */
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	if (value < stat->min)
		stat->min = value;
	if (value > stat->max)
		stat->max = value;
	stat->batch += value;
	stat->nr_samples++;
}
522a77756
|
44 |
/*
 * blk_stat_add - feed one completed request into every active stat callback
 * @rq:  the completed request
 * @now: completion timestamp in nanoseconds
 *
 * Derives the I/O service time from rq->io_start_time_ns, hands it to the
 * throttling code, then files it into the per-cpu bucket chosen by each
 * enabled callback's bucket_fn().  Callbacks are walked under RCU; get_cpu()
 * pins us to a CPU so the per-cpu stat pointer stays valid while we update.
 */
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	/* Clamp to 0 rather than report a negative/underflowed duration. */
	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		/* A negative bucket means the callback ignores this request. */
		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}
e99e88a9d
|
70 |
/*
 * Timer callback: collapse the per-cpu sample buckets into cb->stat and
 * hand the aggregated window to the owner's timer_fn().
 */
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	/* Start every aggregate bucket from a clean slate. */
	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	/*
	 * Fold each online CPU's buckets into the aggregate, resetting the
	 * per-cpu copy as we go so the next window starts empty.
	 */
	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}
34dbad5d2
|
91 92 |
struct blk_stat_callback * blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *), |
a37244e4c
|
93 |
int (*bucket_fn)(const struct request *), |
34dbad5d2
|
94 |
unsigned int buckets, void *data) |
cf43e6be8
|
95 |
{ |
34dbad5d2
|
96 |
struct blk_stat_callback *cb; |
cf43e6be8
|
97 |
|
34dbad5d2
|
98 99 100 |
cb = kmalloc(sizeof(*cb), GFP_KERNEL); if (!cb) return NULL; |
cf43e6be8
|
101 |
|
34dbad5d2
|
102 103 104 105 106 107 108 109 110 111 112 113 114 |
cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat), GFP_KERNEL); if (!cb->stat) { kfree(cb); return NULL; } cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat), __alignof__(struct blk_rq_stat)); if (!cb->cpu_stat) { kfree(cb->stat); kfree(cb); return NULL; } |
cf43e6be8
|
115 |
|
34dbad5d2
|
116 117 118 119 |
cb->timer_fn = timer_fn; cb->bucket_fn = bucket_fn; cb->data = data; cb->buckets = buckets; |
e99e88a9d
|
120 |
timer_setup(&cb->timer, blk_stat_timer_fn, 0); |
34dbad5d2
|
121 122 |
return cb; |
cf43e6be8
|
123 |
} |
34dbad5d2
|
124 125 |
/*
 * blk_stat_add_callback - attach @cb to @q and start accounting
 *
 * Zeroes the callback's buckets on every possible CPU, then publishes it on
 * the queue's RCU-protected callback list and raises QUEUE_FLAG_STATS so
 * the completion path starts calling blk_stat_add().
 */
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	unsigned long flags;
	int cpu;

	/* Buckets must be clean before the callback becomes visible. */
	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock_irqsave(&q->stats->lock, flags);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
cf43e6be8
|
144 |
|
34dbad5d2
|
145 146 147 |
/*
 * blk_stat_remove_callback - detach @cb from @q
 *
 * Unlinks the callback from the queue's list and clears QUEUE_FLAG_STATS
 * unless other callbacks remain or enable_accounting keeps stats on, then
 * waits for any in-flight timer run to finish.  Freeing must still go
 * through blk_stat_free_callback() so RCU readers drain first.
 */
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	/* Stats stay enabled if anyone else still needs them. */
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	del_timer_sync(&cb->timer);
}
34dbad5d2
|
158 |
static void blk_stat_free_callback_rcu(struct rcu_head *head) |
cf43e6be8
|
159 |
{ |
34dbad5d2
|
160 161 162 163 164 165 |
struct blk_stat_callback *cb; cb = container_of(head, struct blk_stat_callback, rcu); free_percpu(cb->cpu_stat); kfree(cb->stat); kfree(cb); |
cf43e6be8
|
166 |
} |
34dbad5d2
|
167 |
void blk_stat_free_callback(struct blk_stat_callback *cb) |
cf43e6be8
|
168 |
{ |
a83b576c9
|
169 170 |
if (cb) call_rcu(&cb->rcu, blk_stat_free_callback_rcu); |
cf43e6be8
|
171 |
} |
b9147dd1b
|
172 173 |
/*
 * blk_stat_enable_accounting - keep request stats on for @q permanently
 *
 * Sets the sticky enable_accounting flag so blk_stat_remove_callback()
 * will not clear QUEUE_FLAG_STATS when the callback list empties, and
 * raises the flag immediately.
 */
void blk_stat_enable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	q->stats->enable_accounting = true;
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
b9147dd1b
|
182 |
|
34dbad5d2
|
183 |
struct blk_queue_stats *blk_alloc_queue_stats(void) |
cf43e6be8
|
184 |
{ |
34dbad5d2
|
185 186 187 188 189 190 191 192 |
struct blk_queue_stats *stats; stats = kmalloc(sizeof(*stats), GFP_KERNEL); if (!stats) return NULL; INIT_LIST_HEAD(&stats->callbacks); spin_lock_init(&stats->lock); |
b9147dd1b
|
193 |
stats->enable_accounting = false; |
34dbad5d2
|
194 195 196 197 198 199 200 201 202 203 |
return stats; } void blk_free_queue_stats(struct blk_queue_stats *stats) { if (!stats) return; WARN_ON(!list_empty(&stats->callbacks)); |
cf43e6be8
|
204 |
|
34dbad5d2
|
205 |
kfree(stats); |
cf43e6be8
|
206 |
} |