Blame view
block/blk-stat.c
5.45 KB
cf43e6be8 block: add scalab... |
1 2 3 4 5 6 |
/* * Block stat tracking code * * Copyright (C) 2016 Jens Axboe */ #include <linux/kernel.h> |
34dbad5d2 blk-stat: convert... |
7 |
#include <linux/rculist.h> |
cf43e6be8 block: add scalab... |
8 9 10 11 |
#include <linux/blk-mq.h> #include "blk-stat.h" #include "blk-mq.h" |
b9147dd1b blk-throttle: add... |
12 |
#include "blk.h" |
cf43e6be8 block: add scalab... |
13 |
|
4875253fd blk-stat: move BL... |
14 |
#define BLK_RQ_STAT_BATCH 64 |
34dbad5d2 blk-stat: convert... |
15 16 17 |
/*
 * Per-queue stats state: the registered callbacks plus a lock that
 * serializes list changes and QUEUE_FLAG_STATS transitions.
 */
struct blk_queue_stats {
	struct list_head callbacks;	/* blk_stat_callback list, walked under RCU */
	spinlock_t lock;		/* protects callbacks and enable_accounting */
	bool enable_accounting;		/* keep QUEUE_FLAG_STATS set with no callbacks */
};
34dbad5d2 blk-stat: convert... |
20 21 22 23 24 25 |
static void blk_stat_init(struct blk_rq_stat *stat) { stat->min = -1ULL; stat->max = stat->nr_samples = stat->mean = 0; stat->batch = stat->nr_batch = 0; } |
cf43e6be8 block: add scalab... |
26 27 28 |
/*
 * Fold the pending batch into the running mean and sample count.
 * Batching keeps the 64-bit divisions rare: samples are only summed in
 * __blk_stat_add() and merged here.
 */
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples)
		stat->mean = div64_s64(stat->batch, nr_batch);
	else {
		/* Weighted average of the existing mean and the new batch. */
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
					nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}

/*
 * Merge @src into @dst as a weighted average.  @src's batch is flushed
 * first so its mean/nr_samples are current.
 */
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	blk_stat_flush_batch(src);

	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	if (!dst->nr_samples)
		dst->mean = src->mean;
	else {
		dst->mean = div64_s64((src->mean * src->nr_samples) +
					(dst->mean * dst->nr_samples),
					dst->nr_samples + src->nr_samples);
	}
	dst->nr_samples += src->nr_samples;
}
34dbad5d2 blk-stat: convert... |
62 |
/*
 * Record one sample in @stat.  The value is added to the batch sum;
 * the batch is flushed into the mean first if adding @value would wrap
 * the sum or the batch would reach BLK_RQ_STAT_BATCH entries.
 */
static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);

	/* batch + value < batch detects unsigned wraparound of the sum */
	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}
34dbad5d2 blk-stat: convert... |
74 |
/*
 * Account a completed request's latency to every active stats callback
 * on its queue.  The callback list is walked under RCU; each sample
 * lands in the callback's per-CPU bucket chosen by its bucket_fn.
 */
void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	/* Clock went backwards relative to issue time; skip the sample. */
	if (now < blk_stat_time(&rq->issue_stat))
		return;
	value = now - blk_stat_time(&rq->issue_stat);

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		/* A negative bucket means the callback ignores this request. */
		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
		__blk_stat_add(stat, value);
		put_cpu_ptr(cb->cpu_stat);
	}
	rcu_read_unlock();
}
34dbad5d2 blk-stat: convert... |
103 |
/*
 * Timer handler: fold the per-CPU buckets into the callback's summary
 * buckets, reset the per-CPU state, then invoke the owner's timer_fn.
 */
static void blk_stat_timer_fn(unsigned long data)
{
	struct blk_stat_callback *cb = (void *)data;
	unsigned int bucket;
	int cpu;

	/* Start each summary bucket from scratch for this window. */
	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}
34dbad5d2 blk-stat: convert... |
124 125 |
struct blk_stat_callback * blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *), |
a37244e4c blk-stat: convert... |
126 |
int (*bucket_fn)(const struct request *), |
34dbad5d2 blk-stat: convert... |
127 |
unsigned int buckets, void *data) |
cf43e6be8 block: add scalab... |
128 |
{ |
34dbad5d2 blk-stat: convert... |
129 |
struct blk_stat_callback *cb; |
cf43e6be8 block: add scalab... |
130 |
|
34dbad5d2 blk-stat: convert... |
131 132 133 |
cb = kmalloc(sizeof(*cb), GFP_KERNEL); if (!cb) return NULL; |
cf43e6be8 block: add scalab... |
134 |
|
34dbad5d2 blk-stat: convert... |
135 136 137 138 139 140 141 142 143 144 145 146 147 |
cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat), GFP_KERNEL); if (!cb->stat) { kfree(cb); return NULL; } cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat), __alignof__(struct blk_rq_stat)); if (!cb->cpu_stat) { kfree(cb->stat); kfree(cb); return NULL; } |
cf43e6be8 block: add scalab... |
148 |
|
34dbad5d2 blk-stat: convert... |
149 150 151 152 153 154 155 |
cb->timer_fn = timer_fn; cb->bucket_fn = bucket_fn; cb->data = data; cb->buckets = buckets; setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb); return cb; |
cf43e6be8 block: add scalab... |
156 |
} |
34dbad5d2 blk-stat: convert... |
157 |
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback); |
cf43e6be8 block: add scalab... |
158 |
|
34dbad5d2 blk-stat: convert... |
159 160 |
/*
 * Register @cb on @q.  The per-CPU buckets are initialized before the
 * callback is published on the RCU list, so blk_stat_add() never sees
 * stale state; QUEUE_FLAG_STATS turns accounting on.
 */
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);
cf43e6be8 block: add scalab... |
179 |
|
34dbad5d2 blk-stat: convert... |
180 181 182 183 184 |
/*
 * Unregister @cb from @q.  QUEUE_FLAG_STATS is cleared only when no
 * callbacks remain and accounting wasn't force-enabled.  The timer is
 * cancelled outside the spinlock since del_timer_sync() may wait.
 */
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);
cf43e6be8 block: add scalab... |
192 |
|
34dbad5d2 blk-stat: convert... |
193 |
static void blk_stat_free_callback_rcu(struct rcu_head *head) |
cf43e6be8 block: add scalab... |
194 |
{ |
34dbad5d2 blk-stat: convert... |
195 196 197 198 199 200 |
struct blk_stat_callback *cb; cb = container_of(head, struct blk_stat_callback, rcu); free_percpu(cb->cpu_stat); kfree(cb->stat); kfree(cb); |
cf43e6be8 block: add scalab... |
201 |
} |
34dbad5d2 blk-stat: convert... |
202 |
void blk_stat_free_callback(struct blk_stat_callback *cb) |
cf43e6be8 block: add scalab... |
203 |
{ |
a83b576c9 block: fix stacke... |
204 205 |
if (cb) call_rcu(&cb->rcu, blk_stat_free_callback_rcu); |
cf43e6be8 block: add scalab... |
206 |
} |
34dbad5d2 blk-stat: convert... |
207 |
EXPORT_SYMBOL_GPL(blk_stat_free_callback); |
cf43e6be8 block: add scalab... |
208 |
|
b9147dd1b blk-throttle: add... |
209 210 211 212 213 214 215 |
void blk_stat_enable_accounting(struct request_queue *q) { spin_lock(&q->stats->lock); q->stats->enable_accounting = true; set_bit(QUEUE_FLAG_STATS, &q->queue_flags); spin_unlock(&q->stats->lock); } |
34dbad5d2 blk-stat: convert... |
216 |
/*
 * Allocate and initialize a queue's stats state.  Returns NULL on
 * allocation failure.
 */
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

/*
 * Free @stats (NULL is a no-op).  All callbacks must have been removed
 * beforehand; a non-empty list here is a caller bug.
 */
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}