block/blk-cgroup.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * Common Block IO controller cgroup interface
   *
   * Based on ideas and code from CFQ, CFS and BFQ:
   * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   *
   * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
   *		      Paolo Valente <paolo.valente@unimore.it>
   *
   * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
   * 	              Nauman Rafique <nauman@google.com>
   *
   * For policy-specific per-blkcg data:
   * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
   *                    Arianna Avanzini <avanzini.arianna@gmail.com>
   */
  #include <linux/ioprio.h>
  #include <linux/kdev_t.h>
  #include <linux/module.h>
  #include <linux/sched/signal.h>
  #include <linux/err.h>
  #include <linux/blkdev.h>
  #include <linux/backing-dev.h>
  #include <linux/slab.h>
  #include <linux/genhd.h>
  #include <linux/delay.h>
  #include <linux/atomic.h>
  #include <linux/ctype.h>
  #include <linux/blk-cgroup.h>
  #include <linux/tracehook.h>
  #include <linux/psi.h>
  #include "blk.h"

  #define MAX_KEY_LEN 100

  /*
   * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
   * blkcg_pol_register_mutex nests outside of it and synchronizes entire
   * policy [un]register operations including cgroup file additions /
   * removals.  Putting cgroup file registration outside blkcg_pol_mutex
   * allows grabbing it from cgroup callbacks.
   */
  static DEFINE_MUTEX(blkcg_pol_register_mutex);
  static DEFINE_MUTEX(blkcg_pol_mutex);

  struct blkcg blkcg_root;
  EXPORT_SYMBOL_GPL(blkcg_root);

  struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
  EXPORT_SYMBOL_GPL(blkcg_root_css);

  static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

  static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */
  bool blkcg_debug_stats = false;
  static struct workqueue_struct *blkcg_punt_bio_wq;

  static bool blkcg_policy_enabled(struct request_queue *q,
  				 const struct blkcg_policy *pol)
  {
  	return pol && test_bit(pol->plid, q->blkcg_pols);
  }

  /**
   * blkg_free - free a blkg
   * @blkg: blkg to free
   *
   * Free @blkg which may be partially allocated.
   */
  static void blkg_free(struct blkcg_gq *blkg)
  {
  	int i;

  	if (!blkg)
  		return;

  	for (i = 0; i < BLKCG_MAX_POLS; i++)
  		if (blkg->pd[i])
  			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

  	blkg_rwstat_exit(&blkg->stat_ios);
  	blkg_rwstat_exit(&blkg->stat_bytes);
  	percpu_ref_exit(&blkg->refcnt);
  	kfree(blkg);
  }

  static void __blkg_release(struct rcu_head *rcu)
  {
  	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);

  	WARN_ON(!bio_list_empty(&blkg->async_bios));

  	/* release the blkcg and parent blkg refs this blkg has been holding */
  	css_put(&blkg->blkcg->css);
  	if (blkg->parent)
  		blkg_put(blkg->parent);
  
  	wb_congested_put(blkg->wb_congested);
  
  	blkg_free(blkg);
  }
  
  /*
   * A group is RCU protected, but having an rcu lock does not mean that one
   * can access all the fields of blkg and assume these are valid.  For
   * example, don't try to follow throtl_data and request queue links.
   *
   * Having a reference to blkg under an rcu allows accesses to only values
   * local to groups like group stats and group rate limits.
   */
  static void blkg_release(struct percpu_ref *ref)
  {
  	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
  
  	call_rcu(&blkg->rcu_head, __blkg_release);
  }
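
  /*
   * Hypothetical usage sketch (an illustration, not part of the original
   * file): the access pattern the comment above permits, i.e. reading
   * group-local state under RCU without taking a full blkg reference.
   * The function name is made up.
   */
  static u64 example_read_delay_under_rcu(struct blkcg *blkcg,
  					struct request_queue *q)
  {
  	struct blkcg_gq *blkg;
  	u64 nsec = 0;

  	rcu_read_lock();
  	blkg = blkg_lookup(blkcg, q);	/* may return NULL */
  	if (blkg)
  		nsec = atomic64_read(&blkg->delay_nsec);
  	rcu_read_unlock();
  	return nsec;
  }
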
  static void blkg_async_bio_workfn(struct work_struct *work)
  {
  	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
  					     async_bio_work);
  	struct bio_list bios = BIO_EMPTY_LIST;
  	struct bio *bio;
  
  	/* as long as there are pending bios, @blkg can't go away */
  	spin_lock_bh(&blkg->async_bio_lock);
  	bio_list_merge(&bios, &blkg->async_bios);
  	bio_list_init(&blkg->async_bios);
  	spin_unlock_bh(&blkg->async_bio_lock);
  
  	while ((bio = bio_list_pop(&bios)))
  		submit_bio(bio);
  }

  /**
   * blkg_alloc - allocate a blkg
   * @blkcg: block cgroup the new blkg is associated with
   * @q: request_queue the new blkg is associated with
   * @gfp_mask: allocation mask to use
   *
   * Allocate a new blkg associating @blkcg and @q.
   */
  static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
  				   gfp_t gfp_mask)
  {
  	struct blkcg_gq *blkg;
  	int i;

  	/* alloc and init base part */
  	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
  	if (!blkg)
  		return NULL;

  	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
  		goto err_free;

  	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
  	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
  		goto err_free;

  	blkg->q = q;
  	INIT_LIST_HEAD(&blkg->q_node);
  	spin_lock_init(&blkg->async_bio_lock);
  	bio_list_init(&blkg->async_bios);
  	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
  	blkg->blkcg = blkcg;

  	for (i = 0; i < BLKCG_MAX_POLS; i++) {
  		struct blkcg_policy *pol = blkcg_policy[i];
  		struct blkg_policy_data *pd;

  		if (!blkcg_policy_enabled(q, pol))
  			continue;

  		/* alloc per-policy data and attach it to blkg */
  		pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
  		if (!pd)
  			goto err_free;

  		blkg->pd[i] = pd;
  		pd->blkg = blkg;
  		pd->plid = i;
  	}

  	return blkg;

  err_free:
  	blkg_free(blkg);
  	return NULL;
  }

  struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
  				      struct request_queue *q, bool update_hint)
  {
  	struct blkcg_gq *blkg;

  	/*
  	 * Hint didn't match.  Look up from the radix tree.  Note that the
  	 * hint can only be updated under queue_lock as otherwise @blkg
  	 * could have already been removed from blkg_tree.  The caller is
  	 * responsible for grabbing queue_lock if @update_hint.
  	 */
  	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
  	if (blkg && blkg->q == q) {
  		if (update_hint) {
  			lockdep_assert_held(&q->queue_lock);
  			rcu_assign_pointer(blkcg->blkg_hint, blkg);
  		}
  		return blkg;
  	}

  	return NULL;
  }
  EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

  /*
   * If @new_blkg is %NULL, this function tries to allocate a new one as
   * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
   */
  static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
  				    struct request_queue *q,
  				    struct blkcg_gq *new_blkg)
  {
  	struct blkcg_gq *blkg;
  	struct bdi_writeback_congested *wb_congested;
  	int i, ret;

  	WARN_ON_ONCE(!rcu_read_lock_held());
  	lockdep_assert_held(&q->queue_lock);

  	/* request_queue is dying, do not create/recreate a blkg */
  	if (blk_queue_dying(q)) {
  		ret = -ENODEV;
  		goto err_free_blkg;
  	}

  	/* blkg holds a reference to blkcg */
  	if (!css_tryget_online(&blkcg->css)) {
  		ret = -ENODEV;
  		goto err_free_blkg;
  	}

  	wb_congested = wb_congested_get_create(q->backing_dev_info,
  					       blkcg->css.id,
  					       GFP_NOWAIT | __GFP_NOWARN);
  	if (!wb_congested) {
  		ret = -ENOMEM;
  		goto err_put_css;
  	}

  	/* allocate */
  	if (!new_blkg) {
  		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
  		if (unlikely(!new_blkg)) {
  			ret = -ENOMEM;
  			goto err_put_congested;
  		}
  	}

  	blkg = new_blkg;
  	blkg->wb_congested = wb_congested;

  	/* link parent */
  	if (blkcg_parent(blkcg)) {
  		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
  		if (WARN_ON_ONCE(!blkg->parent)) {
  			ret = -ENODEV;
  			goto err_put_congested;
  		}
  		blkg_get(blkg->parent);
  	}

  	/* invoke per-policy init */
  	for (i = 0; i < BLKCG_MAX_POLS; i++) {
  		struct blkcg_policy *pol = blkcg_policy[i];

  		if (blkg->pd[i] && pol->pd_init_fn)
  			pol->pd_init_fn(blkg->pd[i]);
  	}

  	/* insert */
  	spin_lock(&blkcg->lock);
  	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
  	if (likely(!ret)) {
  		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
  		list_add(&blkg->q_node, &q->blkg_list);

  		for (i = 0; i < BLKCG_MAX_POLS; i++) {
  			struct blkcg_policy *pol = blkcg_policy[i];

  			if (blkg->pd[i] && pol->pd_online_fn)
  				pol->pd_online_fn(blkg->pd[i]);
  		}
  	}
  	blkg->online = true;
  	spin_unlock(&blkcg->lock);

  	if (!ret)
  		return blkg;

  	/* @blkg failed to initialize fully; use the usual release path */
  	blkg_put(blkg);
  	return ERR_PTR(ret);

  err_put_congested:
  	wb_congested_put(wb_congested);
  err_put_css:
  	css_put(&blkcg->css);
  err_free_blkg:
  	blkg_free(new_blkg);
  	return ERR_PTR(ret);
  }

  /**
   * __blkg_lookup_create - lookup blkg, try to create one if not there
   * @blkcg: blkcg of interest
   * @q: request_queue of interest
   *
   * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
   * create one.  blkg creation is performed recursively from blkcg_root such
   * that all non-root blkg's have access to the parent blkg.  This function
   * should be called under RCU read lock and @q->queue_lock.
   *
   * Returns the blkg or the closest blkg if blkg_create() fails as it walks
   * down from root.
   */
  struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
  				      struct request_queue *q)
  {
  	struct blkcg_gq *blkg;

  	WARN_ON_ONCE(!rcu_read_lock_held());
  	lockdep_assert_held(&q->queue_lock);

  	blkg = __blkg_lookup(blkcg, q, true);
  	if (blkg)
  		return blkg;

  	/*
  	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
  	 * non-root blkgs have access to their parents.  Returns the closest
  	 * blkg to the intended blkg should blkg_create() fail.
  	 */
  	while (true) {
  		struct blkcg *pos = blkcg;
  		struct blkcg *parent = blkcg_parent(blkcg);
  		struct blkcg_gq *ret_blkg = q->root_blkg;

  		while (parent) {
  			blkg = __blkg_lookup(parent, q, false);
  			if (blkg) {
  				/* remember closest blkg */
  				ret_blkg = blkg;
  				break;
  			}
  			pos = parent;
  			parent = blkcg_parent(parent);
  		}

  		blkg = blkg_create(pos, q, NULL);
  		if (IS_ERR(blkg))
  			return ret_blkg;
  		if (pos == blkcg)
  			return blkg;
  	}
  }

  /**
   * blkg_lookup_create - find or create a blkg
   * @blkcg: target block cgroup
   * @q: target request_queue
   *
   * This looks up or creates the blkg representing the unique pair
   * of the blkcg and the request_queue.
   */
  struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
  				    struct request_queue *q)
  {
  	struct blkcg_gq *blkg = blkg_lookup(blkcg, q);

  	if (unlikely(!blkg)) {
  		unsigned long flags;

  		spin_lock_irqsave(&q->queue_lock, flags);
  		blkg = __blkg_lookup_create(blkcg, q);
  		spin_unlock_irqrestore(&q->queue_lock, flags);
  	}

  	return blkg;
  }
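
  /*
   * Hypothetical usage sketch (an illustration, not part of the original
   * file): how a submission path might pin a blkg via blkg_lookup_create().
   * Assumes the caller holds rcu_read_lock(), as bio submission paths do;
   * the function name is made up.
   */
  static struct blkcg_gq *example_pin_blkg(struct blkcg *blkcg,
  					 struct request_queue *q)
  {
  	/* never NULL once q->root_blkg exists; may be a closest ancestor */
  	struct blkcg_gq *blkg = blkg_lookup_create(blkcg, q);

  	blkg_get(blkg);		/* keep it alive past the RCU section */
  	return blkg;
  }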

  static void blkg_destroy(struct blkcg_gq *blkg)
  {
  	struct blkcg *blkcg = blkg->blkcg;
  	struct blkcg_gq *parent = blkg->parent;
  	int i;

  	lockdep_assert_held(&blkg->q->queue_lock);
  	lockdep_assert_held(&blkcg->lock);

  	/* Something wrong if we are trying to remove same group twice */
  	WARN_ON_ONCE(list_empty(&blkg->q_node));
  	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

  	for (i = 0; i < BLKCG_MAX_POLS; i++) {
  		struct blkcg_policy *pol = blkcg_policy[i];

  		if (blkg->pd[i] && pol->pd_offline_fn)
  			pol->pd_offline_fn(blkg->pd[i]);
  	}

  	if (parent) {
  		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
  		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
  	}

  	blkg->online = false;

  	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
  	list_del_init(&blkg->q_node);
  	hlist_del_init_rcu(&blkg->blkcg_node);

  	/*
  	 * Both setting lookup hint to and clearing it from @blkg are done
  	 * under queue_lock.  If it's not pointing to @blkg now, it never
  	 * will.  Hint assignment itself can race safely.
  	 */
  	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
  		rcu_assign_pointer(blkcg->blkg_hint, NULL);

  	/*
  	 * Put the reference taken at the time of creation so that when all
  	 * queues are gone, group can be destroyed.
  	 */
  	percpu_ref_kill(&blkg->refcnt);
  }

  /**
   * blkg_destroy_all - destroy all blkgs associated with a request_queue
   * @q: request_queue of interest
   *
   * Destroy all blkgs associated with @q.
   */
  static void blkg_destroy_all(struct request_queue *q)
  {
  	struct blkcg_gq *blkg, *n;

  	spin_lock_irq(&q->queue_lock);
  	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
  		struct blkcg *blkcg = blkg->blkcg;

  		spin_lock(&blkcg->lock);
  		blkg_destroy(blkg);
  		spin_unlock(&blkcg->lock);
  	}

  	q->root_blkg = NULL;
  	spin_unlock_irq(&q->queue_lock);
  }

  static int blkcg_reset_stats(struct cgroup_subsys_state *css,
  			     struct cftype *cftype, u64 val)
  {
  	struct blkcg *blkcg = css_to_blkcg(css);
  	struct blkcg_gq *blkg;
  	int i;

  	mutex_lock(&blkcg_pol_mutex);
  	spin_lock_irq(&blkcg->lock);

  	/*
  	 * Note that stat reset is racy - it doesn't synchronize against
  	 * stat updates.  This is a debug feature which shouldn't exist
  	 * anyway.  If you get hit by a race, retry.
  	 */
  	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
  		blkg_rwstat_reset(&blkg->stat_bytes);
  		blkg_rwstat_reset(&blkg->stat_ios);

  		for (i = 0; i < BLKCG_MAX_POLS; i++) {
  			struct blkcg_policy *pol = blkcg_policy[i];

  			if (blkg->pd[i] && pol->pd_reset_stats_fn)
  				pol->pd_reset_stats_fn(blkg->pd[i]);
  		}
  	}

  	spin_unlock_irq(&blkcg->lock);
  	mutex_unlock(&blkcg_pol_mutex);
  	return 0;
  }

  const char *blkg_dev_name(struct blkcg_gq *blkg)
  {
  	/* some drivers (floppy) instantiate a queue w/o disk registered */
  	if (blkg->q->backing_dev_info->dev)
  		return dev_name(blkg->q->backing_dev_info->dev);
  	return NULL;
  }

  /**
   * blkcg_print_blkgs - helper for printing per-blkg data
   * @sf: seq_file to print to
   * @blkcg: blkcg of interest
   * @prfill: fill function to print out a blkg
   * @pol: policy in question
   * @data: data to be passed to @prfill
   * @show_total: to print out sum of prfill return values or not
   *
   * This function invokes @prfill on each blkg of @blkcg if pd for the
   * policy specified by @pol exists.  @prfill is invoked with @sf, the
   * policy data and @data and the matching queue lock held.  If @show_total
   * is %true, the sum of the return values from @prfill is printed with
   * "Total" label at the end.
   *
   * This is to be used to construct print functions for
   * cftype->read_seq_string method.
   */
  void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
  		       u64 (*prfill)(struct seq_file *,
  				     struct blkg_policy_data *, int),
  		       const struct blkcg_policy *pol, int data,
  		       bool show_total)
  {
  	struct blkcg_gq *blkg;
  	u64 total = 0;

  	rcu_read_lock();
  	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
  		spin_lock_irq(&blkg->q->queue_lock);
  		if (blkcg_policy_enabled(blkg->q, pol))
  			total += prfill(sf, blkg->pd[pol->plid], data);
  		spin_unlock_irq(&blkg->q->queue_lock);
  	}
  	rcu_read_unlock();

  	if (show_total)
  		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
  }
  EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

  /**
   * __blkg_prfill_u64 - prfill helper for a single u64 value
   * @sf: seq_file to print to
   * @pd: policy private data of interest
   * @v: value to print
   *
   * Print @v to @sf for the device associated with @pd.
   */
  u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
  {
  	const char *dname = blkg_dev_name(pd->blkg);

  	if (!dname)
  		return 0;

  	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
  	return v;
  }
  EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
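
  /*
   * Hypothetical example (an illustration, not part of the original file):
   * pairing a prfill callback with blkcg_print_blkgs() in a cftype
   * seq_show.  "example_policy", "example_pd" and the "weight" field are
   * assumptions, not real kernel symbols.
   */
  struct example_pd {
  	struct blkg_policy_data pd;
  	u64 weight;
  };

  static struct blkcg_policy example_policy;

  static u64 example_prfill_weight(struct seq_file *sf,
  				 struct blkg_policy_data *pd, int off)
  {
  	struct example_pd *epd = container_of(pd, struct example_pd, pd);

  	return __blkg_prfill_u64(sf, pd, epd->weight);
  }

  static int example_print_weight(struct seq_file *sf, void *v)
  {
  	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
  			  example_prfill_weight, &example_policy, 0, false);
  	return 0;
  }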

  /**
   * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
   * @sf: seq_file to print to
   * @pd: policy private data of interest
   * @rwstat: rwstat to print
   *
   * Print @rwstat to @sf for the device associated with @pd.
   */
  u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
  			 const struct blkg_rwstat_sample *rwstat)
  {
  	static const char *rwstr[] = {
  		[BLKG_RWSTAT_READ]	= "Read",
  		[BLKG_RWSTAT_WRITE]	= "Write",
  		[BLKG_RWSTAT_SYNC]	= "Sync",
  		[BLKG_RWSTAT_ASYNC]	= "Async",
  		[BLKG_RWSTAT_DISCARD]	= "Discard",
  	};
  	const char *dname = blkg_dev_name(pd->blkg);
  	u64 v;
  	int i;

  	if (!dname)
  		return 0;

  	for (i = 0; i < BLKG_RWSTAT_NR; i++)
  		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
  			   rwstat->cnt[i]);

  	v = rwstat->cnt[BLKG_RWSTAT_READ] +
  		rwstat->cnt[BLKG_RWSTAT_WRITE] +
  		rwstat->cnt[BLKG_RWSTAT_DISCARD];
  	seq_printf(sf, "%s Total %llu\n", dname, v);
  	return v;
  }
  EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

  /**
   * blkg_prfill_rwstat - prfill callback for blkg_rwstat
   * @sf: seq_file to print to
   * @pd: policy private data of interest
   * @off: offset to the blkg_rwstat in @pd
   *
   * prfill callback for printing a blkg_rwstat.
   */
  u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
  		       int off)
  {
  	struct blkg_rwstat_sample rwstat = { };

  	blkg_rwstat_read((void *)pd + off, &rwstat);
  	return __blkg_prfill_rwstat(sf, pd, &rwstat);
  }
  EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

  static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
  				    struct blkg_policy_data *pd, int off)
  {
  	struct blkg_rwstat_sample rwstat = { };

  	blkg_rwstat_read((void *)pd->blkg + off, &rwstat);
  	return __blkg_prfill_rwstat(sf, pd, &rwstat);
  }
  
  /**
   * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
   * @sf: seq_file to print to
   * @v: unused
   *
   * To be used as cftype->seq_show to print blkg->stat_bytes.
   * cftype->private must be set to the blkcg_policy.
   */
  int blkg_print_stat_bytes(struct seq_file *sf, void *v)
  {
  	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
  			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
  			  offsetof(struct blkcg_gq, stat_bytes), true);
  	return 0;
  }
  EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
  
  /**
   * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
   * @sf: seq_file to print to
   * @v: unused
   *
   * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
   * must be set to the blkcg_policy.
   */
  int blkg_print_stat_ios(struct seq_file *sf, void *v)
  {
  	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
  			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
  			  offsetof(struct blkcg_gq, stat_ios), true);
  	return 0;
  }
  EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
  
  static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
  					      struct blkg_policy_data *pd,
  					      int off)
  {
  	struct blkg_rwstat_sample rwstat;

  	blkg_rwstat_recursive_sum(pd->blkg, NULL, off, &rwstat);
  	return __blkg_prfill_rwstat(sf, pd, &rwstat);
  }
  
  /**
   * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
   * @sf: seq_file to print to
   * @v: unused
   */
  int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
  {
  	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
  			  blkg_prfill_rwstat_field_recursive,
  			  (void *)seq_cft(sf)->private,
  			  offsetof(struct blkcg_gq, stat_bytes), true);
  	return 0;
  }
  EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
  
  /**
   * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
   * @sf: seq_file to print to
   * @v: unused
   */
  int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
  {
  	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
  			  blkg_prfill_rwstat_field_recursive,
  			  (void *)seq_cft(sf)->private,
  			  offsetof(struct blkcg_gq, stat_ios), true);
  	return 0;
  }
  EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);

  /**
   * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
   * @blkg: blkg of interest
   * @pol: blkcg_policy which contains the blkg_rwstat
   * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
   * @sum: blkg_rwstat_sample structure containing the results
   *
   * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
   * online descendants and their aux counts.  The caller must be holding the
   * queue lock for online tests.
   *
   * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
   * is at @off bytes into @blkg's blkg_policy_data of the policy.
   */
  void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
  		int off, struct blkg_rwstat_sample *sum)
  {
  	struct blkcg_gq *pos_blkg;
  	struct cgroup_subsys_state *pos_css;
  	unsigned int i;

  	lockdep_assert_held(&blkg->q->queue_lock);

  	rcu_read_lock();
  	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
  		struct blkg_rwstat *rwstat;

  		if (!pos_blkg->online)
  			continue;

  		if (pol)
  			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
  		else
  			rwstat = (void *)pos_blkg + off;

  		for (i = 0; i < BLKG_RWSTAT_NR; i++)
  			sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
  	}
  	rcu_read_unlock();
  }
  EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

  /* Performs queue bypass and policy enabled checks then looks up blkg. */
  static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
  					  const struct blkcg_policy *pol,
  					  struct request_queue *q)
  {
  	WARN_ON_ONCE(!rcu_read_lock_held());
  	lockdep_assert_held(&q->queue_lock);

  	if (!blkcg_policy_enabled(q, pol))
  		return ERR_PTR(-EOPNOTSUPP);
  	return __blkg_lookup(blkcg, q, true /* update_hint */);
  }

  /**
   * blkcg_conf_get_disk - parse MAJ:MIN and return the matching gendisk
   * @inputp: input string pointer
   *
   * Parse the device node prefix part, MAJ:MIN, of per-blkg config update
   * from @input and get and return the matching gendisk.  *@inputp is
   * updated to point past the device node prefix.  Returns an ERR_PTR()
   * value on error.
   *
   * Use this function iff blkg_conf_prep() can't be used for some reason.
   */
  struct gendisk *blkcg_conf_get_disk(char **inputp)
  {
  	char *input = *inputp;
  	unsigned int major, minor;
  	struct gendisk *disk;
  	int key_len, part;
  
  	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
  		return ERR_PTR(-EINVAL);
  
  	input += key_len;
  	if (!isspace(*input))
  		return ERR_PTR(-EINVAL);
  	input = skip_spaces(input);
  
  	disk = get_gendisk(MKDEV(major, minor), &part);
  	if (!disk)
  		return ERR_PTR(-ENODEV);
  	if (part) {
  		put_disk_and_module(disk);
  		return ERR_PTR(-ENODEV);
  	}
  
  	*inputp = input;
  	return disk;
  }
  
  /**
   * blkg_conf_prep - parse and prepare for per-blkg config update
   * @blkcg: target block cgroup
   * @pol: target policy
   * @input: input string
   * @ctx: blkg_conf_ctx to be filled
   *
   * Parse per-blkg config update from @input and initialize @ctx with the
   * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
   * part of @input following MAJ:MIN.  This function returns with RCU read
   * lock and queue lock held and must be paired with blkg_conf_finish().
   */
  int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
  		   char *input, struct blkg_conf_ctx *ctx)
  	__acquires(rcu) __acquires(&disk->queue->queue_lock)
  {
  	struct gendisk *disk;
  	struct request_queue *q;
  	struct blkcg_gq *blkg;
  	int ret;

  	disk = blkcg_conf_get_disk(&input);
  	if (IS_ERR(disk))
  		return PTR_ERR(disk);

  	q = disk->queue;

  	rcu_read_lock();
  	spin_lock_irq(&q->queue_lock);

  	blkg = blkg_lookup_check(blkcg, pol, q);
  	if (IS_ERR(blkg)) {
  		ret = PTR_ERR(blkg);
  		goto fail_unlock;
  	}

  	if (blkg)
  		goto success;

  	/*
  	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
  	 * non-root blkgs have access to their parents.
  	 */
  	while (true) {
  		struct blkcg *pos = blkcg;
  		struct blkcg *parent;
  		struct blkcg_gq *new_blkg;

  		parent = blkcg_parent(blkcg);
  		while (parent && !__blkg_lookup(parent, q, false)) {
  			pos = parent;
  			parent = blkcg_parent(parent);
  		}

  		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
  		spin_unlock_irq(&q->queue_lock);
  		rcu_read_unlock();

  		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
  		if (unlikely(!new_blkg)) {
  			ret = -ENOMEM;
  			goto fail;
  		}

  		rcu_read_lock();
  		spin_lock_irq(&q->queue_lock);

  		blkg = blkg_lookup_check(pos, pol, q);
  		if (IS_ERR(blkg)) {
  			ret = PTR_ERR(blkg);
  			goto fail_unlock;
  		}

  		if (blkg) {
  			blkg_free(new_blkg);
  		} else {
  			blkg = blkg_create(pos, q, new_blkg);
  			if (IS_ERR(blkg)) {
  				ret = PTR_ERR(blkg);
  				goto fail_unlock;
  			}
  		}

  		if (pos == blkcg)
  			goto success;
  	}
  success:
  	ctx->disk = disk;
  	ctx->blkg = blkg;
  	ctx->body = input;
  	return 0;

  fail_unlock:
  	spin_unlock_irq(&q->queue_lock);
  	rcu_read_unlock();
  fail:
  	put_disk_and_module(disk);
  	/*
  	 * If queue was bypassing, we should retry.  Do so after a
  	 * short msleep().  It isn't strictly necessary but queue
  	 * can be bypassing for some time and it's always nice to
  	 * avoid busy looping.
  	 */
  	if (ret == -EBUSY) {
  		msleep(10);
  		ret = restart_syscall();
  	}
  	return ret;
  }
  EXPORT_SYMBOL_GPL(blkg_conf_prep);

  /**
   * blkg_conf_finish - finish up per-blkg config update
   * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
   *
   * Finish up after per-blkg config update.  This function must be paired
   * with blkg_conf_prep().
   */
  void blkg_conf_finish(struct blkg_conf_ctx *ctx)
  	__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
  {
  	spin_unlock_irq(&ctx->disk->queue->queue_lock);
  	rcu_read_unlock();
  	put_disk_and_module(ctx->disk);
  }
  EXPORT_SYMBOL_GPL(blkg_conf_finish);
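
  /*
   * Hypothetical example (an illustration, not part of the original file):
   * the canonical blkg_conf_prep()/blkg_conf_finish() pairing a policy's
   * cftype ->write callback would use.  "example_policy" is the assumed
   * policy from the sketch above; the limit handling is made up.
   */
  static ssize_t example_write_limit(struct kernfs_open_file *of, char *buf,
  				   size_t nbytes, loff_t off)
  {
  	struct blkcg *blkcg = css_to_blkcg(of_css(of));
  	struct blkg_conf_ctx ctx;
  	u64 limit;
  	int ret;

  	/* returns with RCU read lock and queue_lock held on success */
  	ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
  	if (ret)
  		return ret;

  	/* ctx.body points just past the MAJ:MIN prefix */
  	if (sscanf(ctx.body, "%llu", &limit) != 1)
  		ret = -EINVAL;
  	/* a real policy would apply @limit to ctx.blkg's policy data here */

  	blkg_conf_finish(&ctx);
  	return ret ?: nbytes;
  }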

  static int blkcg_print_stat(struct seq_file *sf, void *v)
  {
  	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
  	struct blkcg_gq *blkg;

  	rcu_read_lock();

  	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
  		const char *dname;
  		char *buf;
  		struct blkg_rwstat_sample rwstat;
  		u64 rbytes, wbytes, rios, wios, dbytes, dios;
  		size_t size = seq_get_buf(sf, &buf), off = 0;
  		int i;
  		bool has_stats = false;

  		spin_lock_irq(&blkg->q->queue_lock);

  		if (!blkg->online)
  			goto skip;

  		dname = blkg_dev_name(blkg);
  		if (!dname)
  			goto skip;

  		/*
  		 * Hooray string manipulation, count is the size written NOT
  		 * INCLUDING THE \0, so size is now count+1 less than what we
  		 * had before, but we want to start writing the next bit from
  		 * the \0 so we only add count to buf.
  		 */
  		off += scnprintf(buf+off, size-off, "%s ", dname);

  		blkg_rwstat_recursive_sum(blkg, NULL,
  				offsetof(struct blkcg_gq, stat_bytes), &rwstat);
  		rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
  		wbytes = rwstat.cnt[BLKG_RWSTAT_WRITE];
  		dbytes = rwstat.cnt[BLKG_RWSTAT_DISCARD];

  		blkg_rwstat_recursive_sum(blkg, NULL,
  					offsetof(struct blkcg_gq, stat_ios), &rwstat);
  		rios = rwstat.cnt[BLKG_RWSTAT_READ];
  		wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
  		dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];

  		if (rbytes || wbytes || rios || wios) {
  			has_stats = true;
  			off += scnprintf(buf+off, size-off,
  					 "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
  					 rbytes, wbytes, rios, wios,
  					 dbytes, dios);
  		}

  		if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
  			has_stats = true;
  			off += scnprintf(buf+off, size-off,
  					 " use_delay=%d delay_nsec=%llu",
  					 atomic_read(&blkg->use_delay),
  					(unsigned long long)atomic64_read(&blkg->delay_nsec));
  		}

  		for (i = 0; i < BLKCG_MAX_POLS; i++) {
  			struct blkcg_policy *pol = blkcg_policy[i];
  			size_t written;

  			if (!blkg->pd[i] || !pol->pd_stat_fn)
  				continue;

  			written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
  			if (written)
  				has_stats = true;
  			off += written;
  		}

  		if (has_stats) {
  			if (off < size - 1) {
  				off += scnprintf(buf+off, size-off, "\n");
  				seq_commit(sf, off);
  			} else {
  				seq_commit(sf, -1);
  			}
  		}
  	skip:
  		spin_unlock_irq(&blkg->q->queue_lock);
  	}

  	rcu_read_unlock();
  	return 0;
  }

  static struct cftype blkcg_files[] = {
  	{
  		.name = "stat",
  		.flags = CFTYPE_NOT_ON_ROOT,
  		.seq_show = blkcg_print_stat,
  	},
  	{ }	/* terminate */
  };

  static struct cftype blkcg_legacy_files[] = {
  	{
  		.name = "reset_stats",
  		.write_u64 = blkcg_reset_stats,
  	},
  	{ }	/* terminate */
  };
  /*
   * blkcg destruction is a three-stage process.
   *
   * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
   *    which offlines writeback.  Here we tie the next stage of blkg destruction
   *    to the completion of writeback associated with the blkcg.  This lets us
   *    avoid punting potentially large amounts of outstanding writeback to root
   *    while maintaining any ongoing policies.  The next stage is triggered when
   *    the nr_cgwbs count goes to zero.
   *
   * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
   *    and handles the destruction of blkgs.  Here the css reference held by
   *    the blkg is put back eventually allowing blkcg_css_free() to be called.
   *    This work may occur in cgwb_release_workfn() on the cgwb_release
   *    workqueue.  Any submitted ios that fail to get the blkg ref will be
   *    punted to the root_blkg.
   *
   * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
   *    This finally frees the blkcg.
   */
9f13ef678   Tejun Heo   blkcg: use double...
1012
  /**
92fb97487   Tejun Heo   cgroup: rename ->...
1013
   * blkcg_css_offline - cgroup css_offline callback
eb95419b0   Tejun Heo   cgroup: pass arou...
1014
   * @css: css of interest
9f13ef678   Tejun Heo   blkcg: use double...
1015
   *
59b57717f   Dennis Zhou (Facebook)   blkcg: delay blkg...
1016
1017
1018
   * This function is called when @css is about to go away.  Here the cgwbs are
   * offlined first and only once writeback associated with the blkcg has
   * finished do we start step 2 (see above).
9f13ef678   Tejun Heo   blkcg: use double...
1019
   */
eb95419b0   Tejun Heo   cgroup: pass arou...
1020
  static void blkcg_css_offline(struct cgroup_subsys_state *css)
31e4c28d9   Vivek Goyal   blkio: Introduce ...
1021
  {
eb95419b0   Tejun Heo   cgroup: pass arou...
1022
  	struct blkcg *blkcg = css_to_blkcg(css);
b1c357696   Vivek Goyal   blkio: Take care ...
1023

59b57717f   Dennis Zhou (Facebook)   blkcg: delay blkg...
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
  	/* this prevents anyone from attaching or migrating to this blkcg */
  	wb_blkcg_offline(blkcg);
  
  	/* put the base cgwb reference allowing step 2 to be triggered */
  	blkcg_cgwb_put(blkcg);
  }
  
/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free() to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(&q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(&q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}
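
/*
 * The trylock/backoff loop above is a general pattern for taking two locks
 * whose documented nesting order is inverted at the call site.  A minimal
 * userspace analogue (hypothetical names, pthreads standing in for the kernel
 * primitives), kept out of the build:
 */
#if 0
#include <pthread.h>
#include <sched.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* blkcg->lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;	/* q->queue_lock */

static void destroy_with_backoff(void)
{
	pthread_mutex_lock(&outer);
	for (;;) {
		/* inner normally nests outside outer, so only trylock it */
		if (pthread_mutex_trylock(&inner) == 0) {
			/* ... tear down an object protected by both locks ... */
			pthread_mutex_unlock(&inner);
			break;
		}
		/* contended: drop outer entirely and let the other side run */
		pthread_mutex_unlock(&outer);
		sched_yield();		/* stand-in for cpu_relax() */
		pthread_mutex_lock(&outer);
	}
	pthread_mutex_unlock(&outer);
}
#endif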

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto unlock;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
	refcount_set(&blkcg->cgwb_refcnt, 1);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
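
/*
 * A policy that wants per-blkcg data supplies the cpd_* callbacks consumed by
 * the loop above.  A minimal sketch (hypothetical "demo" policy, not part of
 * this file), kept out of the build:
 */
#if 0
struct demo_cpd {
	struct blkcg_policy_data cpd;	/* embedded, so container_of() works */
	u64 weight;
};

static struct blkcg_policy_data *demo_cpd_alloc(gfp_t gfp)
{
	struct demo_cpd *d = kzalloc(sizeof(*d), gfp);

	return d ? &d->cpd : NULL;
}

static void demo_cpd_init(struct blkcg_policy_data *cpd)
{
	container_of(cpd, struct demo_cpd, cpd)->weight = 100;
}

static void demo_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(container_of(cpd, struct demo_cpd, cpd));
}
#endif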

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	ret = blk_throtl_init(q);
	if (ret)
		goto err_destroy_all;

	ret = blk_iolatency_init(q);
	if (ret) {
		blk_throtl_exit(q);
		goto err_destroy_all;
	}
	return 0;

err_destroy_all:
	blkg_destroy_all(q);
	return ret;
err_unlock:
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(&q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg.  If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}
  
/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_exit_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	blkg_destroy_all(q);
	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, dst_css, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}
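
/*
 * In practice two tasks share an io_context only via clone(CLONE_IO).  A
 * userspace sketch of the restriction enforced above (hypothetical cgroup
 * path; moving either task fails with -EINVAL while both are alive), kept
 * out of the build:
 */
#if 0
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static char stack[64 * 1024];

static int child(void *arg)
{
	pause();		/* keep sharing the parent's io_context */
	return 0;
}

int main(void)
{
	pid_t pid = clone(child, stack + sizeof(stack), CLONE_IO | SIGCHLD, NULL);
	FILE *f = fopen("/sys/fs/cgroup/blkio/grp/cgroup.procs", "w");

	if (pid < 0 || !f)
		return 1;
	fprintf(f, "%d\n", (int)pid);
	if (fclose(f))		/* the flushed write is rejected: EINVAL */
		perror("attach");
	return 0;
}
#endif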

static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
	int i;

	mutex_lock(&blkcg_pol_mutex);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg *blkcg;

		if (!pol || !pol->cpd_bind_fn)
			continue;

		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
			if (blkcg->cpd[pol->plid])
				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
	}
	mutex_unlock(&blkcg_pol_mutex);
}

static void blkcg_exit(struct task_struct *tsk)
{
	if (tsk->throttle_queue)
		blk_put_queue(tsk->throttle_queue);
	tsk->throttle_queue = NULL;
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q is frozen (for
 * blk-mq queues) while its blkgs are populated with policy data for @pol,
 * so nothing is accessing the blkgs from the IO path in the meantime.
 * Update of each blkg is protected by both queue and blkcg locks so that
 * holding either lock and testing blkcg_policy_enabled() is always enough
 * for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg, *pinned_blkg = NULL;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);
retry:
	spin_lock_irq(&q->queue_lock);

	/* blkg_list is pushed at the head, reverse walk to allocate parents first */
	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		/* If prealloc matches, use it; otherwise try GFP_NOWAIT */
		if (blkg == pinned_blkg) {
			pd = pd_prealloc;
			pd_prealloc = NULL;
		} else {
			pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
					      blkg->blkcg);
		}

		if (!pd) {
			/*
			 * GFP_NOWAIT failed.  Free the existing one and
			 * prealloc for @blkg w/ GFP_KERNEL.
			 */
			if (pinned_blkg)
				blkg_put(pinned_blkg);
			blkg_get(blkg);
			pinned_blkg = blkg;

			spin_unlock_irq(&q->queue_lock);

			if (pd_prealloc)
				pol->pd_free_fn(pd_prealloc);
			pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
						       blkg->blkcg);
			if (pd_prealloc)
				goto retry;
			else
				goto enomem;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
	}

	/* all allocated, init in the same order */
	if (pol->pd_init_fn)
		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
			pol->pd_init_fn(blkg->pd[pol->plid]);

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(&q->queue_lock);
out:
	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
	if (pinned_blkg)
		blkg_put(pinned_blkg);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;

enomem:
	/* alloc failed, nothing's initialized yet, free everything */
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (blkg->pd[pol->plid]) {
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
	}
	spin_unlock_irq(&q->queue_lock);
	ret = -ENOMEM;
	goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
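
/*
 * Policies typically activate themselves while initializing their per-queue
 * state, the way blk_throtl_init() does.  A simplified sketch (hypothetical
 * "demo" policy and fields), kept out of the build:
 */
#if 0
int demo_init_queue(struct request_queue *q)
{
	struct demo_data *dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	int ret;

	if (!dd)
		return -ENOMEM;
	q->demo_data = dd;			/* hypothetical field */

	ret = blkcg_activate_policy(q, &blkcg_policy_demo);
	if (ret) {
		q->demo_data = NULL;
		kfree(dd);
	}
	return ret;
}
#endif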
  
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (blkg->pd[pol->plid]) {
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
	}

	spin_unlock_irq(&q->queue_lock);

	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
  
/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS) {
		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
		goto err_unlock;
	}

	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
		(!pol->pd_alloc_fn ^ !pol->pd_free_fn))
		goto err_unlock;

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd)
				goto err_free_cpds;

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
			if (pol->cpd_init_fn)
				pol->cpd_init_fn(cpd);
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes)
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
	if (pol->legacy_cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
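
/*
 * A module-side sketch of registering a policy (hypothetical "demo" policy;
 * in-tree users are blk-throttle, blk-iolatency and bfq), kept out of the
 * build.  Note the alloc/free pairing checked above:
 */
#if 0
static struct blkcg_policy blkcg_policy_demo = {
	.dfl_cftypes	= demo_files,		/* cgroup v2 interface files */
	.legacy_cftypes	= demo_legacy_files,	/* v1 "blkio" interface files */
	.cpd_alloc_fn	= demo_cpd_alloc,	/* must pair with cpd_free_fn */
	.cpd_free_fn	= demo_cpd_free,
	.pd_alloc_fn	= demo_pd_alloc,	/* must pair with pd_free_fn */
	.pd_free_fn	= demo_pd_free,
};

static int __init demo_init(void)
{
	return blkcg_policy_register(&blkcg_policy_demo);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_demo);
}
module_exit(demo_exit);
#endif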

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);

bool __blkcg_punt_bio_submit(struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;

	/* consume the flag first */
	bio->bi_opf &= ~REQ_CGROUP_PUNT;

	/* never bounce for the root cgroup */
	if (!blkg->parent)
		return false;

	spin_lock_bh(&blkg->async_bio_lock);
	bio_list_add(&blkg->async_bios, bio);
	spin_unlock_bh(&blkg->async_bio_lock);

	queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
	return true;
}
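
/*
 * Issuers opt in per bio.  Setting REQ_CGROUP_PUNT (as cgroup writeback in
 * btrfs does) makes the next submission bounce through the blkg's worker
 * above instead of blocking the dirtying task.  Sketch (hypothetical helper),
 * kept out of the build:
 */
#if 0
static void demo_submit_punted(struct bio *bio)
{
	bio->bi_opf |= REQ_CGROUP_PUNT;	/* consumed by __blkcg_punt_bio_submit() */
	submit_bio(bio);
}
#endif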

/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay.  We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/*
	 * We only want to scale down every second.  The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window.  We only want to throttle tasks for recent delay that
	 * has occurred, in 1 second time windows, since that's the maximum
	 * length of time things can be throttled for.  We save the current
	 * delay window in blkg->last_delay so we know what amount is still
	 * left to be charged to the blkg from this point onward.
	 * blkg->last_use keeps track of the use_delay counter.  The idea is if
	 * we're unthrottling the blkg we are ok with whatever is happening
	 * now, and we can take away more of the accumulated delay as we've
	 * already throttled enough that everybody is happy with their IO
	 * latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
		 * should only ever be growing except here where we subtract out
		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
		 * rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}
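
/*
 * Worked example with illustrative numbers: suppose delay_nsec has grown to
 * 3 * NSEC_PER_SEC, last_delay is 1 * NSEC_PER_SEC, and this runs one second
 * after delay_start.  Then sub = min(1s, 1s) = 1s; if use_delay also dropped
 * since last_use, sub stays max(1s, 1s / 2) = 1s, leaving delay_nsec = 2s and
 * last_delay = 2s for the next window.
 */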
  
/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay.  This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	unsigned long pflags;
	u64 now = ktime_to_ns(ktime_get());
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	while (blkg->parent) {
		if (atomic_read(&blkg->use_delay)) {
			blkcg_scale_delay(blkg, now);
			delay_nsec = max_t(u64, delay_nsec,
					   atomic64_read(&blkg->delay_nsec));
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate 10's of seconds worth of
	 * delay, and we want userspace to be able to do _something_ so cap
	 * the delays at 0.25s.  If there's 10's of seconds worth of delay
	 * then the tasks will be delayed for 0.25 second for every syscall.
	 */
	delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

	if (use_memdelay)
		psi_memstall_enter(&pflags);

	exp = ktime_add_ns(now, delay_nsec);
	tok = io_schedule_prepare();
	do {
		__set_current_state(TASK_KILLABLE);
		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
			break;
	} while (!fatal_signal_pending(current));
	io_schedule_finish(tok);

	if (use_memdelay)
		psi_memstall_leave(&pflags);
}
  
/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume().  Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_queue is set and if not this doesn't do
 * anything.  This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is set up for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
	struct request_queue *q = current->throttle_queue;
	struct cgroup_subsys_state *css;
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool use_memdelay = current->use_memdelay;

	if (!q)
		return;

	current->throttle_queue = NULL;
	current->use_memdelay = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (css)
		blkcg = css_to_blkcg(css);
	else
		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));

	if (!blkcg)
		goto out;
	blkg = blkg_lookup(blkcg, q);
	if (!blkg)
		goto out;
	if (!blkg_tryget(blkg))
		goto out;
	rcu_read_unlock();

	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
	blkg_put(blkg);
	blk_put_queue(q);
	return;
out:
	rcu_read_unlock();
	blk_put_queue(q);
}
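
/*
 * The intended caller is the return-to-user path: tracehook_notify_resume()
 * in <linux/tracehook.h> invokes this after running task works, roughly
 * (abridged sketch, kept out of the build):
 */
#if 0
static inline void tracehook_notify_resume(struct pt_regs *regs)
{
	if (unlikely(current->task_works))
		task_work_run();

	mem_cgroup_handle_over_high();
	blkcg_maybe_throttle_current();
}
#endif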

/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @q: the request queue IO was submitted on
 * @use_memdelay: do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay accumulated
 * for the blkg for this task.  We do not pass the blkg because there are places
 * we call this that may not have that information; the swapping code, for
 * instance, only has a request_queue at that point.  This sets notify_resume
 * on the task so that it checks whether it requires throttling before
 * returning to user space.
 *
 * We will only schedule once per syscall.  You can call this over and over
 * again and it will only do the check once upon return to user space, and only
 * throttle once.  If the task needs to be throttled again it'll need to be
 * re-set at the next time we see the task.
 */
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
{
	if (unlikely(current->flags & PF_KTHREAD))
		return;

	if (!blk_get_queue(q))
		return;

	if (current->throttle_queue)
		blk_put_queue(current->throttle_queue);
	current->throttle_queue = q;
	if (use_memdelay)
		current->use_memdelay = use_memdelay;
	set_notify_resume(current);
}

/**
 * blkcg_add_delay - add delay to this blkg
 * @blkg: blkg of interest
 * @now: the current time in nanoseconds
 * @delta: how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation.  This is used to
 * throttle tasks if an IO controller thinks we need more throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}
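
/*
 * A controller pairs blkcg_add_delay() with blkcg_schedule_throttle(): charge
 * the penalty to the blkg, then flag the issuing task so it pays the debt on
 * its way back to userspace.  Sketch (hypothetical callback), kept out of the
 * build:
 */
#if 0
static void demo_penalize(struct blkcg_gq *blkg, u64 penalty_ns)
{
	u64 now = ktime_to_ns(ktime_get());

	blkcg_add_delay(blkg, now, penalty_ns);
	blkcg_schedule_throttle(blkg->q, false);
}
#endif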

static int __init blkcg_init(void)
{
	blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
					    WQ_MEM_RECLAIM | WQ_FREEZABLE |
					    WQ_UNBOUND | WQ_SYSFS, 0);
	if (!blkcg_punt_bio_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(blkcg_init);

module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
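
/*
 * Since blk-cgroup is built in, the parameter above should land under
 * /sys/module/blk_cgroup/parameters/ and be toggleable at runtime, e.g.:
 *
 *   echo 1 > /sys/module/blk_cgroup/parameters/blkcg_debug_stats
 */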