Blame view
include/linux/blk-cgroup.h
19.7 KB
b24413180 License cleanup: ... |
1 |
/* SPDX-License-Identifier: GPL-2.0 */ |
31e4c28d9 blkio: Introduce ... |
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 |
#ifndef _BLK_CGROUP_H #define _BLK_CGROUP_H /* * Common Block IO controller cgroup interface * * Based on ideas and code from CFQ, CFS and BFQ: * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> * * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> * Paolo Valente <paolo.valente@unimore.it> * * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com> * Nauman Rafique <nauman@google.com> */ #include <linux/cgroup.h> |
f73316482 blk-cgroup: reimp... |
18 |
#include <linux/percpu.h> |
24bdb8ef0 blkcg: make blkcg... |
19 |
#include <linux/percpu_counter.h> |
f73316482 blk-cgroup: reimp... |
20 |
#include <linux/u64_stats_sync.h> |
829fdb500 blkcg: export con... |
21 |
#include <linux/seq_file.h> |
a637120e4 blkcg: use radix ... |
22 |
#include <linux/radix-tree.h> |
a051661ca blkcg: implement ... |
23 |
#include <linux/blkdev.h> |
a5049a8ae blkcg: fix use-af... |
24 |
#include <linux/atomic.h> |
902ec5b6d block: make blkcg... |
25 |
#include <linux/kthread.h> |
5cdf2e3fe blkcg: associate ... |
26 |
#include <linux/fs.h> |
31e4c28d9 blkio: Introduce ... |
27 |
|
24bdb8ef0 blkcg: make blkcg... |
28 29 |
/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */ #define BLKG_STAT_CPU_BATCH (INT_MAX / 2) |
9355aede5 blkio-throttle: l... |
30 31 |
/* Max limits for throttle policy */ #define THROTL_IOPS_MAX UINT_MAX |
f48ec1d78 cfq: fix build br... |
32 |
#ifdef CONFIG_BLK_CGROUP |
f73316482 blk-cgroup: reimp... |
33 34 35 36 37 38 39 |
enum blkg_iostat_type { BLKG_IOSTAT_READ, BLKG_IOSTAT_WRITE, BLKG_IOSTAT_DISCARD, BLKG_IOSTAT_NR, }; |
a637120e4 blkcg: use radix ... |
40 |
struct blkcg_gq; |
3c798398e blkcg: mass renam... |
41 |
struct blkcg { |
36558c8a3 blkcg: style clea... |
42 43 |
struct cgroup_subsys_state css; spinlock_t lock; |
d866dbf61 blkcg: rename blk... |
44 |
refcount_t online_pin; |
a637120e4 blkcg: use radix ... |
45 46 |
struct radix_tree_root blkg_tree; |
55679c8d2 blkcg: Annotate b... |
47 |
struct blkcg_gq __rcu *blkg_hint; |
36558c8a3 blkcg: style clea... |
48 |
struct hlist_head blkg_list; |
9a9e8a26d blkcg: add blkcg->id |
49 |
|
814376483 blkcg: minor upda... |
50 |
struct blkcg_policy_data *cpd[BLKCG_MAX_POLS]; |
52ebea749 writeback: make b... |
51 |
|
7876f930d blkcg: implement ... |
52 |
struct list_head all_blkcgs_node; |
52ebea749 writeback: make b... |
53 54 55 |
#ifdef CONFIG_CGROUP_WRITEBACK struct list_head cgwb_list; #endif |
31e4c28d9 blkio: Introduce ... |
56 |
}; |
f73316482 blk-cgroup: reimp... |
57 58 59 60 61 62 63 64 65 66 |
struct blkg_iostat { u64 bytes[BLKG_IOSTAT_NR]; u64 ios[BLKG_IOSTAT_NR]; }; struct blkg_iostat_set { struct u64_stats_sync sync; struct blkg_iostat cur; struct blkg_iostat last; }; |
e6269c445 blkcg: add blkg_[... |
67 |
/* |
f95a04afa blkcg: embed stru... |
68 69 70 71 |
* A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a * request_queue (q). This is used by blkcg policies which need to track * information per blkcg - q pair. * |
001bea73e blkcg: replace bl... |
72 73 74 75 76 |
* There can be multiple active blkcg policies and each blkg:policy pair is * represented by a blkg_policy_data which is allocated and freed by each * policy's pd_alloc/free_fn() methods. A policy can allocate private data * area by allocating larger data structure which embeds blkg_policy_data * at the beginning. |
f95a04afa blkcg: embed stru... |
77 |
*/ |
0381411e4 blkcg: let blkcg ... |
78 |
struct blkg_policy_data { |
b276a876a blkcg: add blkg_p... |
79 |
/* the blkg and policy id this per-policy data belongs to */ |
3c798398e blkcg: mass renam... |
80 |
struct blkcg_gq *blkg; |
b276a876a blkcg: add blkg_p... |
81 |
int plid; |
0381411e4 blkcg: let blkcg ... |
82 |
}; |
e48453c38 block, cgroup: im... |
83 |
/* |
e4a9bde95 blkcg: replace bl... |
84 85 86 87 88 |
* Policies that need to keep per-blkcg data which is independent from any * request_queue associated to it should implement cpd_alloc/free_fn() * methods. A policy can allocate private data area by allocating larger * data structure which embeds blkcg_policy_data at the beginning. * cpd_init() is invoked to let each policy handle per-blkcg data. |
e48453c38 block, cgroup: im... |
89 90 |
*/ struct blkcg_policy_data { |
814376483 blkcg: minor upda... |
91 92 |
/* the blkcg and policy id this per-policy data belongs to */ struct blkcg *blkcg; |
e48453c38 block, cgroup: im... |
93 |
int plid; |
e48453c38 block, cgroup: im... |
94 |
}; |
3c798398e blkcg: mass renam... |
95 96 |
/* association between a blk cgroup and a request queue */ struct blkcg_gq { |
c875f4d02 blkcg: drop unnec... |
97 |
/* Pointer to the associated request_queue */ |
36558c8a3 blkcg: style clea... |
98 99 100 |
struct request_queue *q; struct list_head q_node; struct hlist_node blkcg_node; |
3c798398e blkcg: mass renam... |
101 |
struct blkcg *blkcg; |
3c5478659 blkcg: make blkcg... |
102 103 104 |
/* all non-root blkcg_gq's are guaranteed to have access to parent */ struct blkcg_gq *parent; |
1adaf3dde blkcg: move refcn... |
105 |
/* reference count */ |
7fcf2b033 blkcg: change blk... |
106 |
struct percpu_ref refcnt; |
220841906 blkio: Export dis... |
107 |
|
f427d9096 blkcg: implement ... |
108 109 |
/* is this blkg online? protected by both blkcg and q locks */ bool online; |
f73316482 blk-cgroup: reimp... |
110 111 |
struct blkg_iostat_set __percpu *iostat_cpu; struct blkg_iostat_set iostat; |
77ea73388 blkcg: move io_se... |
112 |
|
36558c8a3 blkcg: style clea... |
113 |
struct blkg_policy_data *pd[BLKCG_MAX_POLS]; |
1adaf3dde blkcg: move refcn... |
114 |
|
d3f77dfdc blkcg: implement ... |
115 116 117 |
spinlock_t async_bio_lock; struct bio_list async_bios; struct work_struct async_bio_work; |
d09d8df3a blkcg: add generi... |
118 119 120 121 122 123 |
atomic_t use_delay; atomic64_t delay_nsec; atomic64_t delay_start; u64 last_delay; int last_use; |
d3f77dfdc blkcg: implement ... |
124 125 |
struct rcu_head rcu_head; |
31e4c28d9 blkio: Introduce ... |
126 |
}; |
e4a9bde95 blkcg: replace bl... |
127 |
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp); |
814376483 blkcg: minor upda... |
128 |
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd); |
e4a9bde95 blkcg: replace bl... |
129 |
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd); |
69d7fde59 blkcg: use CGROUP... |
130 |
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd); |
cf09a8ee1 blkcg: pass @q an... |
131 132 |
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, struct request_queue *q, struct blkcg *blkcg); |
a9520cd6f blkcg: make blkcg... |
133 134 135 |
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd); typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd); typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd); |
001bea73e blkcg: replace bl... |
136 |
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd); |
a9520cd6f blkcg: make blkcg... |
137 |
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd); |
903d23f0a blk-cgroup: allow... |
138 139 |
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf, size_t size); |
3e2520668 blkio: Implement ... |
140 |
|
3c798398e blkcg: mass renam... |
141 |
struct blkcg_policy { |
36558c8a3 blkcg: style clea... |
142 |
int plid; |
36558c8a3 blkcg: style clea... |
143 |
/* cgroup files for the policy */ |
2ee867dcf blkcg: implement ... |
144 |
struct cftype *dfl_cftypes; |
880f50e22 blkcg: mark exist... |
145 |
struct cftype *legacy_cftypes; |
f9fcc2d39 blkcg: collapse b... |
146 147 |
/* operations */ |
e4a9bde95 blkcg: replace bl... |
148 |
blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; |
e48453c38 block, cgroup: im... |
149 |
blkcg_pol_init_cpd_fn *cpd_init_fn; |
e4a9bde95 blkcg: replace bl... |
150 |
blkcg_pol_free_cpd_fn *cpd_free_fn; |
69d7fde59 blkcg: use CGROUP... |
151 |
blkcg_pol_bind_cpd_fn *cpd_bind_fn; |
e4a9bde95 blkcg: replace bl... |
152 |
|
001bea73e blkcg: replace bl... |
153 |
blkcg_pol_alloc_pd_fn *pd_alloc_fn; |
f9fcc2d39 blkcg: collapse b... |
154 |
blkcg_pol_init_pd_fn *pd_init_fn; |
f427d9096 blkcg: implement ... |
155 156 |
blkcg_pol_online_pd_fn *pd_online_fn; blkcg_pol_offline_pd_fn *pd_offline_fn; |
001bea73e blkcg: replace bl... |
157 |
blkcg_pol_free_pd_fn *pd_free_fn; |
f9fcc2d39 blkcg: collapse b... |
158 |
blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; |
903d23f0a blk-cgroup: allow... |
159 |
blkcg_pol_stat_pd_fn *pd_stat_fn; |
3e2520668 blkio: Implement ... |
160 |
}; |
3c798398e blkcg: mass renam... |
161 |
extern struct blkcg blkcg_root; |
496d5e756 blkcg: add blkcg_... |
162 |
extern struct cgroup_subsys_state * const blkcg_root_css; |
07b0fdecb blkcg: allow blkc... |
163 |
extern bool blkcg_debug_stats; |
36558c8a3 blkcg: style clea... |
164 |
|
24f290466 blkcg: inline [__... |
165 166 |
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint); |
36558c8a3 blkcg: style clea... |
167 |
int blkcg_init_queue(struct request_queue *q); |
36558c8a3 blkcg: style clea... |
168 |
void blkcg_exit_queue(struct request_queue *q); |
5efd61135 blkcg: add blkcg_... |
169 |
|
3e2520668 blkio: Implement ... |
170 |
/* Blkio controller policy registration */ |
d5bf02914 Revert "block: ad... |
171 |
int blkcg_policy_register(struct blkcg_policy *pol); |
3c798398e blkcg: mass renam... |
172 |
void blkcg_policy_unregister(struct blkcg_policy *pol); |
36558c8a3 blkcg: style clea... |
173 |
int blkcg_activate_policy(struct request_queue *q, |
3c798398e blkcg: mass renam... |
174 |
const struct blkcg_policy *pol); |
36558c8a3 blkcg: style clea... |
175 |
void blkcg_deactivate_policy(struct request_queue *q, |
3c798398e blkcg: mass renam... |
176 |
const struct blkcg_policy *pol); |
3e2520668 blkio: Implement ... |
177 |
|
dd165eb3b blkcg: misc prepa... |
178 |
const char *blkg_dev_name(struct blkcg_gq *blkg); |
3c798398e blkcg: mass renam... |
179 |
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, |
f95a04afa blkcg: embed stru... |
180 181 |
u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int), |
3c798398e blkcg: mass renam... |
182 |
const struct blkcg_policy *pol, int data, |
ec399347d blkcg: use @pol i... |
183 |
bool show_total); |
f95a04afa blkcg: embed stru... |
184 |
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v); |
16b3de665 blkcg: implement ... |
185 |
|
829fdb500 blkcg: export con... |
186 |
struct blkg_conf_ctx { |
36558c8a3 blkcg: style clea... |
187 |
struct gendisk *disk; |
3c798398e blkcg: mass renam... |
188 |
struct blkcg_gq *blkg; |
36aa9e5f5 blkcg: move body ... |
189 |
char *body; |
829fdb500 blkcg: export con... |
190 |
}; |
015d254cb blkcg: separate b... |
191 |
struct gendisk *blkcg_conf_get_disk(char **inputp); |
3c798398e blkcg: mass renam... |
192 |
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, |
36aa9e5f5 blkcg: move body ... |
193 |
char *input, struct blkg_conf_ctx *ctx); |
829fdb500 blkcg: export con... |
194 |
void blkg_conf_finish(struct blkg_conf_ctx *ctx); |
0fe061b9f blkcg: fix ref co... |
195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 |
/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *kcss = kthread_blkcg();

	/* a kthread's explicit association wins over the task's cgroup */
	return kcss ? kcss : task_css(current, io_cgrp_id);
}
a7c6d554a cgroup: add/updat... |
211 212 213 214 |
/* Convert a generic css to the blkcg that embeds it; NULL passes through. */
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	if (!css)
		return NULL;
	return container_of(css, struct blkcg, css);
}
0fe061b9f blkcg: fix ref co... |
215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 |
/** * __bio_blkcg - internal, inconsistent version to get blkcg * * DO NOT USE. * This function is inconsistent and consequently is dangerous to use. The * first part of the function returns a blkcg where a reference is owned by the * bio. This means it does not need to be rcu protected as it cannot go away * with the bio owning a reference to it. However, the latter potentially gets * it from task_css(). This can race against task migration and the cgroup * dying. It is also semantically different as it must be called rcu protected * and is susceptible to failure when trying to get a reference to it. * Therefore, it is not ok to assume that *_get() will always succeed on the * blkcg returned here. */ static inline struct blkcg *__bio_blkcg(struct bio *bio) |
27e6fa996 blkcg: fix ref co... |
230 |
{ |
db6638d7d blkcg: remove bio... |
231 232 |
if (bio && bio->bi_blkg) return bio->bi_blkg->blkcg; |
0fe061b9f blkcg: fix ref co... |
233 234 |
return css_to_blkcg(blkcg_css()); } |
b5f2954d3 blkcg: revert blk... |
235 |
|
0fe061b9f blkcg: fix ref co... |
236 237 238 239 240 241 242 243 244 245 |
/** * bio_blkcg - grab the blkcg associated with a bio * @bio: target bio * * This returns the blkcg associated with a bio, %NULL if not associated. * Callers are expected to either handle %NULL or know association has been * done prior to calling this. */ static inline struct blkcg *bio_blkcg(struct bio *bio) { |
db6638d7d blkcg: remove bio... |
246 247 |
if (bio && bio->bi_blkg) return bio->bi_blkg->blkcg; |
0fe061b9f blkcg: fix ref co... |
248 |
return NULL; |
fd383c2d3 blkcg: implement ... |
249 |
} |
d09d8df3a blkcg: add generi... |
250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 |
/*
 * Return true if any cgroup in the current task's (or kthread's) blkcg
 * ancestry currently has a non-zero congestion count.
 */
static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool congested = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	/* walk up the hierarchy - a congested ancestor throttles us too */
	for (; css; css = css->parent) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			congested = true;
			break;
		}
	}
	rcu_read_unlock();
	return congested;
}
0381411e4 blkcg: let blkcg ... |
269 |
/** |
c7c98fd37 block: introduce ... |
270 271 272 273 274 275 276 277 278 279 280 281 |
* bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg * @return: true if this bio needs to be submitted with the root blkg context. * * In order to avoid priority inversions we sometimes need to issue a bio as if * it were attached to the root blkg, and then backcharge to the actual owning * blkg. The idea is we do bio_blkcg() to look up the actual context for the * bio and attach the appropriate blkg to the bio. Then we call this helper and * if it is true run with the root blkg for that queue and then do any * backcharging to the originating cgroup once the io is complete. */ static inline bool bio_issue_as_root_blkg(struct bio *bio) { |
0d1e0c7cd blk: introduce RE... |
282 |
return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0; |
c7c98fd37 block: introduce ... |
283 284 285 |
} /** |
3c5478659 blkcg: make blkcg... |
286 287 288 289 290 291 292 |
* blkcg_parent - get the parent of a blkcg * @blkcg: blkcg of interest * * Return the parent blkcg of @blkcg. Can be called anytime. */ static inline struct blkcg *blkcg_parent(struct blkcg *blkcg) { |
5c9d535b8 cgroup: remove cs... |
293 |
return css_to_blkcg(blkcg->css.parent); |
3c5478659 blkcg: make blkcg... |
294 295 296 |
} /** |
24f290466 blkcg: inline [__... |
297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 |
* __blkg_lookup - internal version of blkg_lookup() * @blkcg: blkcg of interest * @q: request_queue of interest * @update_hint: whether to update lookup hint with the result or not * * This is internal version and shouldn't be used by policy * implementations. Looks up blkgs for the @blkcg - @q pair regardless of * @q's bypass state. If @update_hint is %true, the caller should be * holding @q->queue_lock and lookup hint is updated on success. */ static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q, bool update_hint) { struct blkcg_gq *blkg; |
85b6bc9db blkcg: move root ... |
312 313 |
if (blkcg == &blkcg_root) return q->root_blkg; |
24f290466 blkcg: inline [__... |
314 315 316 317 318 319 320 321 322 323 324 325 326 |
blkg = rcu_dereference(blkcg->blkg_hint); if (blkg && blkg->q == q) return blkg; return blkg_lookup_slowpath(blkcg, q, update_hint); } /** * blkg_lookup - lookup blkg for the specified blkcg - q pair * @blkcg: blkcg of interest * @q: request_queue of interest * * Lookup blkg for the @blkcg - @q pair. This function should be called |
012d4a652 block: Fix spelli... |
327 |
* under RCU read lock. |
24f290466 blkcg: inline [__... |
328 329 330 331 332 |
*/ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q) { WARN_ON_ONCE(!rcu_read_lock_held()); |
24f290466 blkcg: inline [__... |
333 334 335 336 |
return __blkg_lookup(blkcg, q, false); } /** |
b86d865cb blkcg: Make blkg_... |
337 |
* blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair |
6bad9b210 blkcg: Introduce ... |
338 339 340 341 |
* @q: request_queue of interest * * Lookup blkg for @q at the root level. See also blkg_lookup(). */ |
b86d865cb blkcg: Make blkg_... |
342 |
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q) |
6bad9b210 blkcg: Introduce ... |
343 |
{ |
b86d865cb blkcg: Make blkg_... |
344 |
return q->root_blkg; |
6bad9b210 blkcg: Introduce ... |
345 346 347 |
} /** |
0381411e4 blkcg: let blkcg ... |
348 349 350 351 352 353 |
* blkg_to_pdata - get policy private data * @blkg: blkg of interest * @pol: policy of interest * * Return pointer to private data associated with the @blkg-@pol pair. */ |
f95a04afa blkcg: embed stru... |
354 355 |
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, struct blkcg_policy *pol) |
0381411e4 blkcg: let blkcg ... |
356 |
{ |
f95a04afa blkcg: embed stru... |
357 |
return blkg ? blkg->pd[pol->plid] : NULL; |
0381411e4 blkcg: let blkcg ... |
358 |
} |
e48453c38 block, cgroup: im... |
359 360 361 |
static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg, struct blkcg_policy *pol) { |
814376483 blkcg: minor upda... |
362 |
return blkcg ? blkcg->cpd[pol->plid] : NULL; |
e48453c38 block, cgroup: im... |
363 |
} |
0381411e4 blkcg: let blkcg ... |
364 365 |
/** * pdata_to_blkg - get blkg associated with policy private data |
f95a04afa blkcg: embed stru... |
366 |
* @pd: policy private data of interest |
0381411e4 blkcg: let blkcg ... |
367 |
* |
f95a04afa blkcg: embed stru... |
368 |
* @pd is policy private data. Determine the blkg it's associated with. |
0381411e4 blkcg: let blkcg ... |
369 |
*/ |
f95a04afa blkcg: embed stru... |
370 |
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) |
0381411e4 blkcg: let blkcg ... |
371 |
{ |
f95a04afa blkcg: embed stru... |
372 |
return pd ? pd->blkg : NULL; |
0381411e4 blkcg: let blkcg ... |
373 |
} |
814376483 blkcg: minor upda... |
374 375 376 377 |
/* Map per-policy blkcg data back to its owning blkcg (NULL-safe). */
static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	if (!cpd)
		return NULL;
	return cpd->blkcg;
}
59b57717f blkcg: delay blkg... |
378 |
extern void blkcg_destroy_blkgs(struct blkcg *blkcg); |
59b57717f blkcg: delay blkg... |
379 |
/** |
d866dbf61 blkcg: rename blk... |
380 |
* blkcg_pin_online - pin online state |
59b57717f blkcg: delay blkg... |
381 382 |
* @blkcg: blkcg of interest * |
d866dbf61 blkcg: rename blk... |
383 384 385 |
* While pinned, a blkcg is kept online. This is primarily used to * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline * while an associated cgwb is still active. |
59b57717f blkcg: delay blkg... |
386 |
*/ |
d866dbf61 blkcg: rename blk... |
387 |
static inline void blkcg_pin_online(struct blkcg *blkcg) |
59b57717f blkcg: delay blkg... |
388 |
{ |
d866dbf61 blkcg: rename blk... |
389 |
refcount_inc(&blkcg->online_pin); |
59b57717f blkcg: delay blkg... |
390 391 392 |
} /** |
d866dbf61 blkcg: rename blk... |
393 |
* blkcg_unpin_online - unpin online state |
59b57717f blkcg: delay blkg... |
394 395 |
* @blkcg: blkcg of interest * |
d866dbf61 blkcg: rename blk... |
396 397 398 |
* This is primarily used to impedance-match blkg and cgwb lifetimes so * that blkg doesn't go offline while an associated cgwb is still active. * When this count goes to zero, all active cgwbs have finished so the |
59b57717f blkcg: delay blkg... |
399 |
* blkcg can continue destruction by calling blkcg_destroy_blkgs(). |
59b57717f blkcg: delay blkg... |
400 |
*/ |
d866dbf61 blkcg: rename blk... |
401 |
static inline void blkcg_unpin_online(struct blkcg *blkcg) |
59b57717f blkcg: delay blkg... |
402 |
{ |
4308a434e blkcg: don't offl... |
403 404 405 |
do { if (!refcount_dec_and_test(&blkcg->online_pin)) break; |
59b57717f blkcg: delay blkg... |
406 |
blkcg_destroy_blkgs(blkcg); |
4308a434e blkcg: don't offl... |
407 408 |
blkcg = blkcg_parent(blkcg); } while (blkcg); |
59b57717f blkcg: delay blkg... |
409 |
} |
54e7ed12b blkcg: remove blk... |
410 411 412 413 414 415 416 417 |
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.  Thin wrapper around
 * cgroup_path(); its return value is passed through unchanged.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}
1adaf3dde blkcg: move refcn... |
422 423 424 425 |
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.  The reference is
 * taken on @blkg's percpu refcount, so this must not be used to resurrect
 * a blkg whose count may already have dropped to zero - see blkg_tryget()
 * for that case.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}
d09d8df3a blkcg: add generi... |
432 |
/** |
7754f669f blkcg: rename blk... |
433 |
* blkg_tryget - try and get a blkg reference |
d09d8df3a blkcg: add generi... |
434 435 436 437 438 |
* @blkg: blkg to get * * This is for use when doing an RCU lookup of the blkg. We may be in the midst * of freeing this blkg, so we can only use it if the refcnt is not zero. */ |
7754f669f blkcg: rename blk... |
439 |
static inline bool blkg_tryget(struct blkcg_gq *blkg) |
d09d8df3a blkcg: add generi... |
440 |
{ |
6ab218799 blkcg: clean up b... |
441 |
return blkg && percpu_ref_tryget(&blkg->refcnt); |
d09d8df3a blkcg: add generi... |
442 |
} |
beea9da07 blkcg: convert bl... |
443 |
/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * Drops a reference on @blkg's percpu refcount; the ref's release callback
 * runs once the last reference is gone.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}
dd4a4ffc0 blkcg: move blkg_... |
451 452 453 |
/** * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants * @d_blkg: loop cursor pointing to the current descendant |
492eb21b9 cgroup: make hier... |
454 |
* @pos_css: used for iteration |
dd4a4ffc0 blkcg: move blkg_... |
455 456 457 458 459 |
* @p_blkg: target blkg to walk descendants of * * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU * read locked. If called under either blkcg or queue lock, the iteration * is guaranteed to include all and only online blkgs. The caller may |
492eb21b9 cgroup: make hier... |
460 |
* update @pos_css by calling css_rightmost_descendant() to skip subtree. |
bd8815a6d cgroup: make css_... |
461 |
* @p_blkg is included in the iteration and the first node to be visited. |
dd4a4ffc0 blkcg: move blkg_... |
462 |
*/ |
492eb21b9 cgroup: make hier... |
463 464 465 |
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \ css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \ if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ |
dd4a4ffc0 blkcg: move blkg_... |
466 |
(p_blkg)->q, false))) |
edcb0722c blkcg: introduce ... |
467 |
/** |
aa539cb38 blkcg: implement ... |
468 469 |
* blkg_for_each_descendant_post - post-order walk of a blkg's descendants * @d_blkg: loop cursor pointing to the current descendant |
492eb21b9 cgroup: make hier... |
470 |
* @pos_css: used for iteration |
aa539cb38 blkcg: implement ... |
471 472 473 |
* @p_blkg: target blkg to walk descendants of * * Similar to blkg_for_each_descendant_pre() but performs post-order |
bd8815a6d cgroup: make css_... |
474 475 |
* traversal instead. Synchronization rules are the same. @p_blkg is * included in the iteration and the last node to be visited. |
aa539cb38 blkcg: implement ... |
476 |
*/ |
492eb21b9 cgroup: make hier... |
477 478 479 |
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \ css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \ if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ |
aa539cb38 blkcg: implement ... |
480 |
(p_blkg)->q, false))) |
d3f77dfdc blkcg: implement ... |
481 482 483 484 485 486 487 488 489 |
bool __blkcg_punt_bio_submit(struct bio *bio);

/*
 * Punt @bio to the blkcg's async submission path when REQ_CGROUP_PUNT is
 * set; returns true if the bio was taken over and false if the caller
 * should submit it normally.
 */
static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (!(bio->bi_opf & REQ_CGROUP_PUNT))
		return false;
	return __blkcg_punt_bio_submit(bio);
}
e439bedf6 blkcg: consolidat... |
490 491 492 493 494 |
/* Record issue time and size of @bio in bi_issue for later accounting. */
static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}
d09d8df3a blkcg: add generi... |
495 496 |
/*
 * Take a use_delay reference on @blkg; the 0 -> 1 transition also bumps
 * the cgroup's congestion count.  Negative use_delay means the static
 * blkcg_set_delay() mode is active and must not be mixed with this.
 */
static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

/*
 * Drop one use_delay reference from @blkg.  Returns 1 if a reference was
 * actually dropped (the 1 -> 0 transition also drops the cgroup congestion
 * count), 0 if there was nothing to drop.
 */
static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}
54c52e10d blk-iocost: switc... |
531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 |
/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	/* use_delay == -1 marks the "static delay" mode for this blkg */
	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}
db18a53e5 blk-cgroup: remov... |
565 |
void blk_cgroup_bio_start(struct bio *bio); |
d09d8df3a blkcg: add generi... |
566 567 568 |
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta); void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay); void blkcg_maybe_throttle_current(void); |
36558c8a3 blkcg: style clea... |
569 |
#else /* CONFIG_BLK_CGROUP */ |
efa7d1c73 update !CONFIG_BL... |
570 571 |
struct blkcg { }; |
2f5ea4771 cfq-iosched: fix ... |
572 |
|
f95a04afa blkcg: embed stru... |
573 574 |
struct blkg_policy_data { }; |
e48453c38 block, cgroup: im... |
575 576 |
struct blkcg_policy_data { }; |
3c798398e blkcg: mass renam... |
577 |
struct blkcg_gq { |
2f5ea4771 cfq-iosched: fix ... |
578 |
}; |
3c798398e blkcg: mass renam... |
579 |
struct blkcg_policy { |
3e2520668 blkio: Implement ... |
580 |
}; |
496d5e756 blkcg: add blkcg_... |
581 |
#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL)) |
d09d8df3a blkcg: add generi... |
582 583 |
static inline void blkcg_maybe_throttle_current(void) { } static inline bool blk_cgroup_congested(void) { return false; } |
efa7d1c73 update !CONFIG_BL... |
584 |
#ifdef CONFIG_BLOCK |
d09d8df3a blkcg: add generi... |
585 |
static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { } |
3c798398e blkcg: mass renam... |
586 |
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; } |
b86d865cb blkcg: Make blkg_... |
587 588 |
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q) { return NULL; } |
5efd61135 blkcg: add blkcg_... |
589 |
static inline int blkcg_init_queue(struct request_queue *q) { return 0; } |
5efd61135 blkcg: add blkcg_... |
590 |
static inline void blkcg_exit_queue(struct request_queue *q) { } |
d5bf02914 Revert "block: ad... |
591 |
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; } |
3c798398e blkcg: mass renam... |
592 |
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { } |
a2b1693ba blkcg: implement ... |
593 |
static inline int blkcg_activate_policy(struct request_queue *q, |
3c798398e blkcg: mass renam... |
594 |
const struct blkcg_policy *pol) { return 0; } |
a2b1693ba blkcg: implement ... |
595 |
static inline void blkcg_deactivate_policy(struct request_queue *q, |
3c798398e blkcg: mass renam... |
596 |
const struct blkcg_policy *pol) { } |
0fe061b9f blkcg: fix ref co... |
597 |
static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; } |
b1208b56f blkcg: inline bio... |
598 |
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } |
a051661ca blkcg: implement ... |
599 |
|
f95a04afa blkcg: embed stru... |
600 601 602 |
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, struct blkcg_policy *pol) { return NULL; } static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; } |
3c798398e blkcg: mass renam... |
603 604 605 |
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } static inline void blkg_get(struct blkcg_gq *blkg) { } static inline void blkg_put(struct blkcg_gq *blkg) { } |
afc24d49c blk-cgroup: confi... |
606 |
|
d3f77dfdc blkcg: implement ... |
607 |
static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; } |
e439bedf6 blkcg: consolidat... |
608 |
static inline void blkcg_bio_issue_init(struct bio *bio) { } |
db18a53e5 blk-cgroup: remov... |
609 |
static inline void blk_cgroup_bio_start(struct bio *bio) { } |
ae1188963 blkcg: consolidat... |
610 |
|
a051661ca blkcg: implement ... |
611 612 |
#define blk_queue_for_each_rl(rl, q) \ for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) |
efa7d1c73 update !CONFIG_BL... |
613 |
#endif /* CONFIG_BLOCK */ |
36558c8a3 blkcg: style clea... |
614 615 |
#endif /* CONFIG_BLK_CGROUP */ #endif /* _BLK_CGROUP_H */ |