Blame view

include/linux/blk-cgroup.h 22 KB
b24413180   Greg Kroah-Hartman   License cleanup: ...
1
  /* SPDX-License-Identifier: GPL-2.0 */
31e4c28d9   Vivek Goyal   blkio: Introduce ...
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
  #ifndef _BLK_CGROUP_H
  #define _BLK_CGROUP_H
  /*
   * Common Block IO controller cgroup interface
   *
   * Based on ideas and code from CFQ, CFS and BFQ:
   * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   *
   * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
   *		      Paolo Valente <paolo.valente@unimore.it>
   *
   * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
   * 	              Nauman Rafique <nauman@google.com>
   */
  
  #include <linux/cgroup.h>
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
18
  #include <linux/percpu_counter.h>
829fdb500   Tejun Heo   blkcg: export con...
19
  #include <linux/seq_file.h>
a637120e4   Tejun Heo   blkcg: use radix ...
20
  #include <linux/radix-tree.h>
a051661ca   Tejun Heo   blkcg: implement ...
21
  #include <linux/blkdev.h>
a5049a8ae   Tejun Heo   blkcg: fix use-af...
22
  #include <linux/atomic.h>
31e4c28d9   Vivek Goyal   blkio: Introduce ...
23

24bdb8ef0   Tejun Heo   blkcg: make blkcg...
24
25
  /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
  #define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)
9355aede5   Vivek Goyal   blkio-throttle: l...
26
27
  /* Max limits for throttle policy */
  #define THROTL_IOPS_MAX		UINT_MAX
f48ec1d78   Tejun Heo   cfq: fix build br...
28
  #ifdef CONFIG_BLK_CGROUP
edcb0722c   Tejun Heo   blkcg: introduce ...
29
30
31
32
33
34
35
36
  enum blkg_rwstat_type {
  	BLKG_RWSTAT_READ,
  	BLKG_RWSTAT_WRITE,
  	BLKG_RWSTAT_SYNC,
  	BLKG_RWSTAT_ASYNC,
  
  	BLKG_RWSTAT_NR,
  	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
303a3acb2   Divyesh Shah   blkio: Add io con...
37
  };
a637120e4   Tejun Heo   blkcg: use radix ...
38
  struct blkcg_gq;
3c798398e   Tejun Heo   blkcg: mass renam...
39
  struct blkcg {
36558c8a3   Tejun Heo   blkcg: style clea...
40
41
  	struct cgroup_subsys_state	css;
  	spinlock_t			lock;
a637120e4   Tejun Heo   blkcg: use radix ...
42
43
  
  	struct radix_tree_root		blkg_tree;
55679c8d2   Bart Van Assche   blkcg: Annotate b...
44
  	struct blkcg_gq	__rcu		*blkg_hint;
36558c8a3   Tejun Heo   blkcg: style clea...
45
  	struct hlist_head		blkg_list;
9a9e8a26d   Tejun Heo   blkcg: add blkcg->id
46

814376483   Tejun Heo   blkcg: minor upda...
47
  	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];
52ebea749   Tejun Heo   writeback: make b...
48

7876f930d   Tejun Heo   blkcg: implement ...
49
  	struct list_head		all_blkcgs_node;
52ebea749   Tejun Heo   writeback: make b...
50
51
52
  #ifdef CONFIG_CGROUP_WRITEBACK
  	struct list_head		cgwb_list;
  #endif
31e4c28d9   Vivek Goyal   blkio: Introduce ...
53
  };
e6269c445   Tejun Heo   blkcg: add blkg_[...
54
55
  /*
   * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
56
57
   * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
   * to carry result values from read and sum operations.
e6269c445   Tejun Heo   blkcg: add blkg_[...
58
   */
edcb0722c   Tejun Heo   blkcg: introduce ...
59
  struct blkg_stat {
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
60
  	struct percpu_counter		cpu_cnt;
e6269c445   Tejun Heo   blkcg: add blkg_[...
61
  	atomic64_t			aux_cnt;
edcb0722c   Tejun Heo   blkcg: introduce ...
62
63
64
  };
  
  struct blkg_rwstat {
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
65
  	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
e6269c445   Tejun Heo   blkcg: add blkg_[...
66
  	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
edcb0722c   Tejun Heo   blkcg: introduce ...
67
  };
f95a04afa   Tejun Heo   blkcg: embed stru...
68
69
70
71
72
  /*
   * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
   * request_queue (q).  This is used by blkcg policies which need to track
   * information per blkcg - q pair.
   *
001bea73e   Tejun Heo   blkcg: replace bl...
73
74
75
76
77
   * There can be multiple active blkcg policies and each blkg:policy pair is
   * represented by a blkg_policy_data which is allocated and freed by each
   * policy's pd_alloc/free_fn() methods.  A policy can allocate private data
   * area by allocating larger data structure which embeds blkg_policy_data
   * at the beginning.
f95a04afa   Tejun Heo   blkcg: embed stru...
78
   */
0381411e4   Tejun Heo   blkcg: let blkcg ...
79
  struct blkg_policy_data {
b276a876a   Tejun Heo   blkcg: add blkg_p...
80
  	/* the blkg and policy id this per-policy data belongs to */
3c798398e   Tejun Heo   blkcg: mass renam...
81
  	struct blkcg_gq			*blkg;
b276a876a   Tejun Heo   blkcg: add blkg_p...
82
  	int				plid;
0381411e4   Tejun Heo   blkcg: let blkcg ...
83
  };
e48453c38   Arianna Avanzini   block, cgroup: im...
84
  /*
e4a9bde95   Tejun Heo   blkcg: replace bl...
85
86
87
88
89
   * Policies that need to keep per-blkcg data which is independent from any
   * request_queue associated to it should implement cpd_alloc/free_fn()
   * methods.  A policy can allocate private data area by allocating larger
   * data structure which embeds blkcg_policy_data at the beginning.
   * cpd_init() is invoked to let each policy handle per-blkcg data.
e48453c38   Arianna Avanzini   block, cgroup: im...
90
91
   */
  struct blkcg_policy_data {
814376483   Tejun Heo   blkcg: minor upda...
92
93
  	/* the blkcg and policy id this per-policy data belongs to */
  	struct blkcg			*blkcg;
e48453c38   Arianna Avanzini   block, cgroup: im...
94
  	int				plid;
e48453c38   Arianna Avanzini   block, cgroup: im...
95
  };
3c798398e   Tejun Heo   blkcg: mass renam...
96
97
  /* association between a blk cgroup and a request queue */
  struct blkcg_gq {
c875f4d02   Tejun Heo   blkcg: drop unnec...
98
  	/* Pointer to the associated request_queue */
36558c8a3   Tejun Heo   blkcg: style clea...
99
100
101
  	struct request_queue		*q;
  	struct list_head		q_node;
  	struct hlist_node		blkcg_node;
3c798398e   Tejun Heo   blkcg: mass renam...
102
  	struct blkcg			*blkcg;
3c5478659   Tejun Heo   blkcg: make blkcg...
103

ce7acfeaf   Tejun Heo   writeback, blkcg:...
104
105
106
107
108
  	/*
  	 * Each blkg gets congested separately and the congestion state is
  	 * propagated to the matching bdi_writeback_congested.
  	 */
  	struct bdi_writeback_congested	*wb_congested;
3c5478659   Tejun Heo   blkcg: make blkcg...
109
110
  	/* all non-root blkcg_gq's are guaranteed to have access to parent */
  	struct blkcg_gq			*parent;
a051661ca   Tejun Heo   blkcg: implement ...
111
112
  	/* request allocation list for this blkcg-q pair */
  	struct request_list		rl;
3c5478659   Tejun Heo   blkcg: make blkcg...
113

1adaf3dde   Tejun Heo   blkcg: move refcn...
114
  	/* reference count */
a5049a8ae   Tejun Heo   blkcg: fix use-af...
115
  	atomic_t			refcnt;
220841906   Vivek Goyal   blkio: Export dis...
116

f427d9096   Tejun Heo   blkcg: implement ...
117
118
  	/* is this blkg online? protected by both blkcg and q locks */
  	bool				online;
77ea73388   Tejun Heo   blkcg: move io_se...
119
120
  	struct blkg_rwstat		stat_bytes;
  	struct blkg_rwstat		stat_ios;
36558c8a3   Tejun Heo   blkcg: style clea...
121
  	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];
1adaf3dde   Tejun Heo   blkcg: move refcn...
122

36558c8a3   Tejun Heo   blkcg: style clea...
123
  	struct rcu_head			rcu_head;
31e4c28d9   Vivek Goyal   blkio: Introduce ...
124
  };
e4a9bde95   Tejun Heo   blkcg: replace bl...
125
  typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
814376483   Tejun Heo   blkcg: minor upda...
126
  typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
e4a9bde95   Tejun Heo   blkcg: replace bl...
127
  typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
69d7fde59   Tejun Heo   blkcg: use CGROUP...
128
  typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
001bea73e   Tejun Heo   blkcg: replace bl...
129
  typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
a9520cd6f   Tejun Heo   blkcg: make blkcg...
130
131
132
  typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
  typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
  typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
001bea73e   Tejun Heo   blkcg: replace bl...
133
  typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
a9520cd6f   Tejun Heo   blkcg: make blkcg...
134
  typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
3e2520668   Vivek Goyal   blkio: Implement ...
135

3c798398e   Tejun Heo   blkcg: mass renam...
136
  struct blkcg_policy {
36558c8a3   Tejun Heo   blkcg: style clea...
137
  	int				plid;
36558c8a3   Tejun Heo   blkcg: style clea...
138
  	/* cgroup files for the policy */
2ee867dcf   Tejun Heo   blkcg: implement ...
139
  	struct cftype			*dfl_cftypes;
880f50e22   Tejun Heo   blkcg: mark exist...
140
  	struct cftype			*legacy_cftypes;
f9fcc2d39   Tejun Heo   blkcg: collapse b...
141
142
  
  	/* operations */
e4a9bde95   Tejun Heo   blkcg: replace bl...
143
  	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
e48453c38   Arianna Avanzini   block, cgroup: im...
144
  	blkcg_pol_init_cpd_fn		*cpd_init_fn;
e4a9bde95   Tejun Heo   blkcg: replace bl...
145
  	blkcg_pol_free_cpd_fn		*cpd_free_fn;
69d7fde59   Tejun Heo   blkcg: use CGROUP...
146
  	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;
e4a9bde95   Tejun Heo   blkcg: replace bl...
147

001bea73e   Tejun Heo   blkcg: replace bl...
148
  	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
f9fcc2d39   Tejun Heo   blkcg: collapse b...
149
  	blkcg_pol_init_pd_fn		*pd_init_fn;
f427d9096   Tejun Heo   blkcg: implement ...
150
151
  	blkcg_pol_online_pd_fn		*pd_online_fn;
  	blkcg_pol_offline_pd_fn		*pd_offline_fn;
001bea73e   Tejun Heo   blkcg: replace bl...
152
  	blkcg_pol_free_pd_fn		*pd_free_fn;
f9fcc2d39   Tejun Heo   blkcg: collapse b...
153
  	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
3e2520668   Vivek Goyal   blkio: Implement ...
154
  };
3c798398e   Tejun Heo   blkcg: mass renam...
155
  extern struct blkcg blkcg_root;
496d5e756   Tejun Heo   blkcg: add blkcg_...
156
  extern struct cgroup_subsys_state * const blkcg_root_css;
36558c8a3   Tejun Heo   blkcg: style clea...
157

24f290466   Tejun Heo   blkcg: inline [__...
158
159
  struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
  				      struct request_queue *q, bool update_hint);
3c798398e   Tejun Heo   blkcg: mass renam...
160
161
  struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
  				    struct request_queue *q);
36558c8a3   Tejun Heo   blkcg: style clea...
162
163
164
  int blkcg_init_queue(struct request_queue *q);
  void blkcg_drain_queue(struct request_queue *q);
  void blkcg_exit_queue(struct request_queue *q);
5efd61135   Tejun Heo   blkcg: add blkcg_...
165

3e2520668   Vivek Goyal   blkio: Implement ...
166
  /* Blkio controller policy registration */
d5bf02914   Jens Axboe   Revert "block: ad...
167
  int blkcg_policy_register(struct blkcg_policy *pol);
3c798398e   Tejun Heo   blkcg: mass renam...
168
  void blkcg_policy_unregister(struct blkcg_policy *pol);
36558c8a3   Tejun Heo   blkcg: style clea...
169
  int blkcg_activate_policy(struct request_queue *q,
3c798398e   Tejun Heo   blkcg: mass renam...
170
  			  const struct blkcg_policy *pol);
36558c8a3   Tejun Heo   blkcg: style clea...
171
  void blkcg_deactivate_policy(struct request_queue *q,
3c798398e   Tejun Heo   blkcg: mass renam...
172
  			     const struct blkcg_policy *pol);
3e2520668   Vivek Goyal   blkio: Implement ...
173

dd165eb3b   Tejun Heo   blkcg: misc prepa...
174
  const char *blkg_dev_name(struct blkcg_gq *blkg);
3c798398e   Tejun Heo   blkcg: mass renam...
175
  void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
f95a04afa   Tejun Heo   blkcg: embed stru...
176
177
  		       u64 (*prfill)(struct seq_file *,
  				     struct blkg_policy_data *, int),
3c798398e   Tejun Heo   blkcg: mass renam...
178
  		       const struct blkcg_policy *pol, int data,
ec399347d   Tejun Heo   blkcg: use @pol i...
179
  		       bool show_total);
f95a04afa   Tejun Heo   blkcg: embed stru...
180
181
  u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
  u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
829fdb500   Tejun Heo   blkcg: export con...
182
  			 const struct blkg_rwstat *rwstat);
f95a04afa   Tejun Heo   blkcg: embed stru...
183
184
185
  u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
  u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
  		       int off);
77ea73388   Tejun Heo   blkcg: move io_se...
186
187
188
189
  int blkg_print_stat_bytes(struct seq_file *sf, void *v);
  int blkg_print_stat_ios(struct seq_file *sf, void *v);
  int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
  int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
829fdb500   Tejun Heo   blkcg: export con...
190

f12c74cab   Tejun Heo   blkcg: make blkg_...
191
192
193
194
  u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
  			    struct blkcg_policy *pol, int off);
  struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
  					     struct blkcg_policy *pol, int off);
16b3de665   Tejun Heo   blkcg: implement ...
195

829fdb500   Tejun Heo   blkcg: export con...
196
  struct blkg_conf_ctx {
36558c8a3   Tejun Heo   blkcg: style clea...
197
  	struct gendisk			*disk;
3c798398e   Tejun Heo   blkcg: mass renam...
198
  	struct blkcg_gq			*blkg;
36aa9e5f5   Tejun Heo   blkcg: move body ...
199
  	char				*body;
829fdb500   Tejun Heo   blkcg: export con...
200
  };
3c798398e   Tejun Heo   blkcg: mass renam...
201
  int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
36aa9e5f5   Tejun Heo   blkcg: move body ...
202
  		   char *input, struct blkg_conf_ctx *ctx);
829fdb500   Tejun Heo   blkcg: export con...
203
  void blkg_conf_finish(struct blkg_conf_ctx *ctx);
a7c6d554a   Tejun Heo   cgroup: add/updat...
204
205
206
207
  static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
  {
  	return css ? container_of(css, struct blkcg, css) : NULL;
  }
b1208b56f   Tejun Heo   blkcg: inline bio...
208
209
  static inline struct blkcg *task_blkcg(struct task_struct *tsk)
  {
c165b3e3c   Tejun Heo   blkcg: rename sub...
210
  	return css_to_blkcg(task_css(tsk, io_cgrp_id));
b1208b56f   Tejun Heo   blkcg: inline bio...
211
212
213
214
215
  }
  
  static inline struct blkcg *bio_blkcg(struct bio *bio)
  {
  	if (bio && bio->bi_css)
a7c6d554a   Tejun Heo   cgroup: add/updat...
216
  		return css_to_blkcg(bio->bi_css);
b1208b56f   Tejun Heo   blkcg: inline bio...
217
218
  	return task_blkcg(current);
  }
fd383c2d3   Tejun Heo   blkcg: implement ...
219
220
221
  static inline struct cgroup_subsys_state *
  task_get_blkcg_css(struct task_struct *task)
  {
c165b3e3c   Tejun Heo   blkcg: rename sub...
222
  	return task_get_css(task, io_cgrp_id);
fd383c2d3   Tejun Heo   blkcg: implement ...
223
  }
0381411e4   Tejun Heo   blkcg: let blkcg ...
224
  /**
3c5478659   Tejun Heo   blkcg: make blkcg...
225
226
227
228
229
230
231
   * blkcg_parent - get the parent of a blkcg
   * @blkcg: blkcg of interest
   *
   * Return the parent blkcg of @blkcg.  Can be called anytime.
   */
  static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
  {
5c9d535b8   Tejun Heo   cgroup: remove cs...
232
  	return css_to_blkcg(blkcg->css.parent);
3c5478659   Tejun Heo   blkcg: make blkcg...
233
234
235
  }
  
  /**
24f290466   Tejun Heo   blkcg: inline [__...
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
   * __blkg_lookup - internal version of blkg_lookup()
   * @blkcg: blkcg of interest
   * @q: request_queue of interest
   * @update_hint: whether to update lookup hint with the result or not
   *
   * This is internal version and shouldn't be used by policy
   * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
   * @q's bypass state.  If @update_hint is %true, the caller should be
   * holding @q->queue_lock and lookup hint is updated on success.
   */
  static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
  					     struct request_queue *q,
  					     bool update_hint)
  {
  	struct blkcg_gq *blkg;
85b6bc9db   Tejun Heo   blkcg: move root ...
251
252
  	if (blkcg == &blkcg_root)
  		return q->root_blkg;
24f290466   Tejun Heo   blkcg: inline [__...
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
  	blkg = rcu_dereference(blkcg->blkg_hint);
  	if (blkg && blkg->q == q)
  		return blkg;
  
  	return blkg_lookup_slowpath(blkcg, q, update_hint);
  }
  
  /**
   * blkg_lookup - lookup blkg for the specified blkcg - q pair
   * @blkcg: blkcg of interest
   * @q: request_queue of interest
   *
   * Lookup blkg for the @blkcg - @q pair.  This function should be called
   * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
   * - see blk_queue_bypass_start() for details.
   */
  static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
  					   struct request_queue *q)
  {
  	WARN_ON_ONCE(!rcu_read_lock_held());
  
  	if (unlikely(blk_queue_bypass(q)))
  		return NULL;
  	return __blkg_lookup(blkcg, q, false);
  }
  
  /**
0381411e4   Tejun Heo   blkcg: let blkcg ...
280
281
282
283
284
285
   * blkg_to_pdata - get policy private data
   * @blkg: blkg of interest
   * @pol: policy of interest
   *
   * Return pointer to private data associated with the @blkg-@pol pair.
   */
f95a04afa   Tejun Heo   blkcg: embed stru...
286
287
  static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
  						  struct blkcg_policy *pol)
0381411e4   Tejun Heo   blkcg: let blkcg ...
288
  {
f95a04afa   Tejun Heo   blkcg: embed stru...
289
  	return blkg ? blkg->pd[pol->plid] : NULL;
0381411e4   Tejun Heo   blkcg: let blkcg ...
290
  }
e48453c38   Arianna Avanzini   block, cgroup: im...
291
292
293
  static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
  						     struct blkcg_policy *pol)
  {
814376483   Tejun Heo   blkcg: minor upda...
294
  	return blkcg ? blkcg->cpd[pol->plid] : NULL;
e48453c38   Arianna Avanzini   block, cgroup: im...
295
  }
0381411e4   Tejun Heo   blkcg: let blkcg ...
296
297
  /**
   * pdata_to_blkg - get blkg associated with policy private data
f95a04afa   Tejun Heo   blkcg: embed stru...
298
   * @pd: policy private data of interest
0381411e4   Tejun Heo   blkcg: let blkcg ...
299
   *
f95a04afa   Tejun Heo   blkcg: embed stru...
300
   * @pd is policy private data.  Determine the blkg it's associated with.
0381411e4   Tejun Heo   blkcg: let blkcg ...
301
   */
f95a04afa   Tejun Heo   blkcg: embed stru...
302
  static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
0381411e4   Tejun Heo   blkcg: let blkcg ...
303
  {
f95a04afa   Tejun Heo   blkcg: embed stru...
304
  	return pd ? pd->blkg : NULL;
0381411e4   Tejun Heo   blkcg: let blkcg ...
305
  }
814376483   Tejun Heo   blkcg: minor upda...
306
307
308
309
  static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
  {
  	return cpd ? cpd->blkcg : NULL;
  }
54e7ed12b   Tejun Heo   blkcg: remove blk...
310
311
312
313
314
315
316
317
  /**
   * blkg_path - format cgroup path of blkg
   * @blkg: blkg of interest
   * @buf: target buffer
   * @buflen: target buffer length
   *
   * Format the path of the cgroup of @blkg into @buf.
   */
3c798398e   Tejun Heo   blkcg: mass renam...
318
  static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
afc24d49c   Vivek Goyal   blk-cgroup: confi...
319
  {
4c737b41d   Tejun Heo   cgroup: make cgro...
320
  	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
afc24d49c   Vivek Goyal   blk-cgroup: confi...
321
  }
1adaf3dde   Tejun Heo   blkcg: move refcn...
322
323
324
325
  /**
   * blkg_get - get a blkg reference
   * @blkg: blkg to get
   *
a5049a8ae   Tejun Heo   blkcg: fix use-af...
326
   * The caller should be holding an existing reference.
1adaf3dde   Tejun Heo   blkcg: move refcn...
327
   */
3c798398e   Tejun Heo   blkcg: mass renam...
328
  static inline void blkg_get(struct blkcg_gq *blkg)
1adaf3dde   Tejun Heo   blkcg: move refcn...
329
  {
a5049a8ae   Tejun Heo   blkcg: fix use-af...
330
331
  	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
  	atomic_inc(&blkg->refcnt);
1adaf3dde   Tejun Heo   blkcg: move refcn...
332
  }
2a4fd070e   Tejun Heo   blkcg: move bulk ...
333
  void __blkg_release_rcu(struct rcu_head *rcu);
1adaf3dde   Tejun Heo   blkcg: move refcn...
334
335
336
337
  
  /**
   * blkg_put - put a blkg reference
   * @blkg: blkg to put
1adaf3dde   Tejun Heo   blkcg: move refcn...
338
   */
3c798398e   Tejun Heo   blkcg: mass renam...
339
  static inline void blkg_put(struct blkcg_gq *blkg)
1adaf3dde   Tejun Heo   blkcg: move refcn...
340
  {
a5049a8ae   Tejun Heo   blkcg: fix use-af...
341
342
  	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
  	if (atomic_dec_and_test(&blkg->refcnt))
2a4fd070e   Tejun Heo   blkcg: move bulk ...
343
  		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
1adaf3dde   Tejun Heo   blkcg: move refcn...
344
  }
dd4a4ffc0   Tejun Heo   blkcg: move blkg_...
345
346
347
  /**
   * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
   * @d_blkg: loop cursor pointing to the current descendant
492eb21b9   Tejun Heo   cgroup: make hier...
348
   * @pos_css: used for iteration
dd4a4ffc0   Tejun Heo   blkcg: move blkg_...
349
350
351
352
353
   * @p_blkg: target blkg to walk descendants of
   *
   * Walk @c_blkg through the descendants of @p_blkg.  Must be used with RCU
   * read locked.  If called under either blkcg or queue lock, the iteration
   * is guaranteed to include all and only online blkgs.  The caller may
492eb21b9   Tejun Heo   cgroup: make hier...
354
   * update @pos_css by calling css_rightmost_descendant() to skip subtree.
bd8815a6d   Tejun Heo   cgroup: make css_...
355
   * @p_blkg is included in the iteration and the first node to be visited.
dd4a4ffc0   Tejun Heo   blkcg: move blkg_...
356
   */
492eb21b9   Tejun Heo   cgroup: make hier...
357
358
359
  #define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
  	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
  		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
dd4a4ffc0   Tejun Heo   blkcg: move blkg_...
360
  					      (p_blkg)->q, false)))
edcb0722c   Tejun Heo   blkcg: introduce ...
361
  /**
aa539cb38   Tejun Heo   blkcg: implement ...
362
363
   * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
   * @d_blkg: loop cursor pointing to the current descendant
492eb21b9   Tejun Heo   cgroup: make hier...
364
   * @pos_css: used for iteration
aa539cb38   Tejun Heo   blkcg: implement ...
365
366
367
   * @p_blkg: target blkg to walk descendants of
   *
   * Similar to blkg_for_each_descendant_pre() but performs post-order
bd8815a6d   Tejun Heo   cgroup: make css_...
368
369
   * traversal instead.  Synchronization rules are the same.  @p_blkg is
   * included in the iteration and the last node to be visited.
aa539cb38   Tejun Heo   blkcg: implement ...
370
   */
492eb21b9   Tejun Heo   cgroup: make hier...
371
372
373
  #define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
  	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
  		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
aa539cb38   Tejun Heo   blkcg: implement ...
374
375
376
  					      (p_blkg)->q, false)))
  
  /**
a051661ca   Tejun Heo   blkcg: implement ...
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
   * blk_get_rl - get request_list to use
   * @q: request_queue of interest
   * @bio: bio which will be attached to the allocated request (may be %NULL)
   *
   * The caller wants to allocate a request from @q to use for @bio.  Find
   * the request_list to use and obtain a reference on it.  Should be called
   * under queue_lock.  This function is guaranteed to return non-%NULL
   * request_list.
   */
  static inline struct request_list *blk_get_rl(struct request_queue *q,
  					      struct bio *bio)
  {
  	struct blkcg *blkcg;
  	struct blkcg_gq *blkg;
  
  	rcu_read_lock();
  
  	blkcg = bio_blkcg(bio);
  
  	/* bypass blkg lookup and use @q->root_rl directly for root */
  	if (blkcg == &blkcg_root)
  		goto root_rl;
  
  	/*
  	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
  	 * or if either the blkcg or queue is going away.  Fall back to
  	 * root_rl in such cases.
  	 */
ae1188963   Tejun Heo   blkcg: consolidat...
405
406
  	blkg = blkg_lookup(blkcg, q);
  	if (unlikely(!blkg))
a051661ca   Tejun Heo   blkcg: implement ...
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
  		goto root_rl;
  
  	blkg_get(blkg);
  	rcu_read_unlock();
  	return &blkg->rl;
  root_rl:
  	rcu_read_unlock();
  	return &q->root_rl;
  }
  
  /**
   * blk_put_rl - put request_list
   * @rl: request_list to put
   *
   * Put the reference acquired by blk_get_rl().  Should be called under
   * queue_lock.
   */
  static inline void blk_put_rl(struct request_list *rl)
  {
401efbf83   Tejun Heo   blkcg: remove unn...
426
  	if (rl->blkg->blkcg != &blkcg_root)
a051661ca   Tejun Heo   blkcg: implement ...
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
  		blkg_put(rl->blkg);
  }
  
  /**
   * blk_rq_set_rl - associate a request with a request_list
   * @rq: request of interest
   * @rl: target request_list
   *
   * Associate @rq with @rl so that accounting and freeing can know the
   * request_list @rq came from.
   */
  static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
  {
  	rq->rl = rl;
  }
  
  /**
   * blk_rq_rl - return the request_list a request came from
   * @rq: request of interest
   *
   * Return the request_list @rq is allocated from.
   */
  static inline struct request_list *blk_rq_rl(struct request *rq)
  {
  	return rq->rl;
  }
  
  struct request_list *__blk_queue_next_rl(struct request_list *rl,
  					 struct request_queue *q);
  /**
   * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
   *
   * Should be used under queue_lock.
   */
  #define blk_queue_for_each_rl(rl, q)	\
  	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
463
  static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
90d3839b9   Peter Zijlstra   block: Use u64_st...
464
  {
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
465
466
467
468
469
  	int ret;
  
  	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
  	if (ret)
  		return ret;
e6269c445   Tejun Heo   blkcg: add blkg_[...
470
  	atomic64_set(&stat->aux_cnt, 0);
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
471
472
473
474
475
476
  	return 0;
  }
  
  static inline void blkg_stat_exit(struct blkg_stat *stat)
  {
  	percpu_counter_destroy(&stat->cpu_cnt);
90d3839b9   Peter Zijlstra   block: Use u64_st...
477
  }
a051661ca   Tejun Heo   blkcg: implement ...
478
  /**
edcb0722c   Tejun Heo   blkcg: introduce ...
479
480
481
482
   * blkg_stat_add - add a value to a blkg_stat
   * @stat: target blkg_stat
   * @val: value to add
   *
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
483
484
   * Add @val to @stat.  The caller must ensure that IRQ on the same CPU
   * don't re-enter this function for the same counter.
edcb0722c   Tejun Heo   blkcg: introduce ...
485
486
487
   */
  static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
  {
104b4e513   Nikolay Borisov   percpu_counter: R...
488
  	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
edcb0722c   Tejun Heo   blkcg: introduce ...
489
490
491
492
493
  }
  
  /**
   * blkg_stat_read - read the current value of a blkg_stat
   * @stat: blkg_stat to read
edcb0722c   Tejun Heo   blkcg: introduce ...
494
495
496
   */
  static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
  {
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
497
  	return percpu_counter_sum_positive(&stat->cpu_cnt);
edcb0722c   Tejun Heo   blkcg: introduce ...
498
499
500
501
502
503
504
505
  }
  
  /**
   * blkg_stat_reset - reset a blkg_stat
   * @stat: blkg_stat to reset
   */
  static inline void blkg_stat_reset(struct blkg_stat *stat)
  {
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
506
  	percpu_counter_set(&stat->cpu_cnt, 0);
e6269c445   Tejun Heo   blkcg: add blkg_[...
507
  	atomic64_set(&stat->aux_cnt, 0);
edcb0722c   Tejun Heo   blkcg: introduce ...
508
509
510
  }
  
  /**
e6269c445   Tejun Heo   blkcg: add blkg_[...
511
   * blkg_stat_add_aux - add a blkg_stat into another's aux count
16b3de665   Tejun Heo   blkcg: implement ...
512
513
514
   * @to: the destination blkg_stat
   * @from: the source
   *
e6269c445   Tejun Heo   blkcg: add blkg_[...
515
   * Add @from's count including the aux one to @to's aux count.
16b3de665   Tejun Heo   blkcg: implement ...
516
   */
e6269c445   Tejun Heo   blkcg: add blkg_[...
517
518
  static inline void blkg_stat_add_aux(struct blkg_stat *to,
  				     struct blkg_stat *from)
16b3de665   Tejun Heo   blkcg: implement ...
519
  {
e6269c445   Tejun Heo   blkcg: add blkg_[...
520
521
  	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
  		     &to->aux_cnt);
16b3de665   Tejun Heo   blkcg: implement ...
522
  }
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
523
  static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
90d3839b9   Peter Zijlstra   block: Use u64_st...
524
  {
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
525
526
527
528
529
530
531
532
533
534
535
536
537
  	int i, ret;
  
  	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
  		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
  		if (ret) {
  			while (--i >= 0)
  				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
  			return ret;
  		}
  		atomic64_set(&rwstat->aux_cnt[i], 0);
  	}
  	return 0;
  }
e6269c445   Tejun Heo   blkcg: add blkg_[...
538

24bdb8ef0   Tejun Heo   blkcg: make blkcg...
539
540
541
  static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
  {
  	int i;
e6269c445   Tejun Heo   blkcg: add blkg_[...
542
543
  
  	for (i = 0; i < BLKG_RWSTAT_NR; i++)
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
544
  		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
90d3839b9   Peter Zijlstra   block: Use u64_st...
545
  }
16b3de665   Tejun Heo   blkcg: implement ...
546
  /**
edcb0722c   Tejun Heo   blkcg: introduce ...
547
548
   * blkg_rwstat_add - add a value to a blkg_rwstat
   * @rwstat: target blkg_rwstat
ef295ecf0   Christoph Hellwig   block: better op ...
549
   * @op: REQ_OP and flags
edcb0722c   Tejun Heo   blkcg: introduce ...
550
551
552
553
554
555
   * @val: value to add
   *
   * Add @val to @rwstat.  The counters are chosen according to @rw.  The
   * caller is responsible for synchronizing calls to this function.
   */
  static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
ef295ecf0   Christoph Hellwig   block: better op ...
556
  				   unsigned int op, uint64_t val)
edcb0722c   Tejun Heo   blkcg: introduce ...
557
  {
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
558
  	struct percpu_counter *cnt;
edcb0722c   Tejun Heo   blkcg: introduce ...
559

63a4cc248   Mike Christie   blkg_rwstat: sepa...
560
  	if (op_is_write(op))
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
561
  		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
edcb0722c   Tejun Heo   blkcg: introduce ...
562
  	else
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
563
  		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
104b4e513   Nikolay Borisov   percpu_counter: R...
564
  	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
565

d71d9ae14   Christoph Hellwig   blk-cgroup: use o...
566
  	if (op_is_sync(op))
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
567
  		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
edcb0722c   Tejun Heo   blkcg: introduce ...
568
  	else
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
569
  		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
edcb0722c   Tejun Heo   blkcg: introduce ...
570

104b4e513   Nikolay Borisov   percpu_counter: R...
571
  	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
edcb0722c   Tejun Heo   blkcg: introduce ...
572
573
574
575
576
577
  }
  
  /**
   * blkg_rwstat_read - read the current values of a blkg_rwstat
   * @rwstat: blkg_rwstat to read
   *
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
578
   * Read the current snapshot of @rwstat and return it in the aux counts.
edcb0722c   Tejun Heo   blkcg: introduce ...
579
   */
c94bed899   Tejun Heo   blkcg: blkg_rwsta...
580
  static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
edcb0722c   Tejun Heo   blkcg: introduce ...
581
  {
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
582
583
  	struct blkg_rwstat result;
  	int i;
edcb0722c   Tejun Heo   blkcg: introduce ...
584

24bdb8ef0   Tejun Heo   blkcg: make blkcg...
585
586
587
588
  	for (i = 0; i < BLKG_RWSTAT_NR; i++)
  		atomic64_set(&result.aux_cnt[i],
  			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
  	return result;
edcb0722c   Tejun Heo   blkcg: introduce ...
589
590
591
  }
  
  /**
4d5e80a76   Tejun Heo   blkcg: s/blkg_rws...
592
   * blkg_rwstat_total - read the total count of a blkg_rwstat
edcb0722c   Tejun Heo   blkcg: introduce ...
593
594
595
596
597
598
   * @rwstat: blkg_rwstat to read
   *
   * Return the total count of @rwstat regardless of the IO direction.  This
   * function can be called without synchronization and takes care of u64
   * atomicity.
   */
4d5e80a76   Tejun Heo   blkcg: s/blkg_rws...
599
  static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
edcb0722c   Tejun Heo   blkcg: introduce ...
600
601
  {
  	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
602
603
  	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
  		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
edcb0722c   Tejun Heo   blkcg: introduce ...
604
605
606
607
608
609
610
611
  }
  
  /**
   * blkg_rwstat_reset - reset a blkg_rwstat
   * @rwstat: blkg_rwstat to reset
   */
  static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
  {
e6269c445   Tejun Heo   blkcg: add blkg_[...
612
  	int i;
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
613
614
  	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
  		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
e6269c445   Tejun Heo   blkcg: add blkg_[...
615
  		atomic64_set(&rwstat->aux_cnt[i], 0);
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
616
  	}
edcb0722c   Tejun Heo   blkcg: introduce ...
617
  }
16b3de665   Tejun Heo   blkcg: implement ...
618
  /**
e6269c445   Tejun Heo   blkcg: add blkg_[...
619
   * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
16b3de665   Tejun Heo   blkcg: implement ...
620
621
622
   * @to: the destination blkg_rwstat
   * @from: the source
   *
e6269c445   Tejun Heo   blkcg: add blkg_[...
623
   * Add @from's count including the aux one to @to's aux count.
16b3de665   Tejun Heo   blkcg: implement ...
624
   */
e6269c445   Tejun Heo   blkcg: add blkg_[...
625
626
  static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
  				       struct blkg_rwstat *from)
16b3de665   Tejun Heo   blkcg: implement ...
627
628
629
  {
  	struct blkg_rwstat v = blkg_rwstat_read(from);
  	int i;
16b3de665   Tejun Heo   blkcg: implement ...
630
  	for (i = 0; i < BLKG_RWSTAT_NR; i++)
24bdb8ef0   Tejun Heo   blkcg: make blkcg...
631
632
  		atomic64_add(atomic64_read(&v.aux_cnt[i]) +
  			     atomic64_read(&from->aux_cnt[i]),
e6269c445   Tejun Heo   blkcg: add blkg_[...
633
  			     &to->aux_cnt[i]);
16b3de665   Tejun Heo   blkcg: implement ...
634
  }
ae1188963   Tejun Heo   blkcg: consolidat...
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
/*
 * blk-throttle hook.  Returns true when the bio was consumed (queued for
 * throttling) and must not be dispatched by the caller.  Stubbed out to
 * "never throttled" when throttling support is compiled out.
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif
  
  static inline bool blkcg_bio_issue_check(struct request_queue *q,
  					 struct bio *bio)
  {
  	struct blkcg *blkcg;
  	struct blkcg_gq *blkg;
  	bool throtl = false;
  
  	rcu_read_lock();
  	blkcg = bio_blkcg(bio);
007cc56b7   Shaohua Li   block: always att...
652
653
  	/* associate blkcg if bio hasn't attached one */
  	bio_associate_blkcg(bio, &blkcg->css);
ae1188963   Tejun Heo   blkcg: consolidat...
654
655
656
657
658
659
660
661
662
663
  	blkg = blkg_lookup(blkcg, q);
  	if (unlikely(!blkg)) {
  		spin_lock_irq(q->queue_lock);
  		blkg = blkg_lookup_create(blkcg, q);
  		if (IS_ERR(blkg))
  			blkg = NULL;
  		spin_unlock_irq(q->queue_lock);
  	}
  
  	throtl = blk_throtl_bio(q, blkg, bio);
77ea73388   Tejun Heo   blkcg: move io_se...
664
665
  	if (!throtl) {
  		blkg = blkg ?: q->root_blkg;
ef295ecf0   Christoph Hellwig   block: better op ...
666
  		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
77ea73388   Tejun Heo   blkcg: move io_se...
667
  				bio->bi_iter.bi_size);
ef295ecf0   Christoph Hellwig   block: better op ...
668
  		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
77ea73388   Tejun Heo   blkcg: move io_se...
669
  	}
ae1188963   Tejun Heo   blkcg: consolidat...
670
671
672
  	rcu_read_unlock();
  	return !throtl;
  }
36558c8a3   Tejun Heo   blkcg: style clea...
673
  #else	/* CONFIG_BLK_CGROUP */
efa7d1c73   Tejun Heo   update !CONFIG_BL...
674
675
  struct blkcg {
  };
2f5ea4771   Jens Axboe   cfq-iosched: fix ...
676

f95a04afa   Tejun Heo   blkcg: embed stru...
677
678
  struct blkg_policy_data {
  };
e48453c38   Arianna Avanzini   block, cgroup: im...
679
680
  struct blkcg_policy_data {
  };
3c798398e   Tejun Heo   blkcg: mass renam...
681
  struct blkcg_gq {
2f5ea4771   Jens Axboe   cfq-iosched: fix ...
682
  };
3c798398e   Tejun Heo   blkcg: mass renam...
683
  struct blkcg_policy {
3e2520668   Vivek Goyal   blkio: Implement ...
684
  };
496d5e756   Tejun Heo   blkcg: add blkcg_...
685
  #define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
fd383c2d3   Tejun Heo   blkcg: implement ...
686
687
688
689
690
  static inline struct cgroup_subsys_state *
  task_get_blkcg_css(struct task_struct *task)
  {
  	return NULL;
  }
efa7d1c73   Tejun Heo   update !CONFIG_BL...
691
  #ifdef CONFIG_BLOCK
3c798398e   Tejun Heo   blkcg: mass renam...
692
  static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
5efd61135   Tejun Heo   blkcg: add blkcg_...
693
694
695
  static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
  static inline void blkcg_drain_queue(struct request_queue *q) { }
  static inline void blkcg_exit_queue(struct request_queue *q) { }
d5bf02914   Jens Axboe   Revert "block: ad...
696
  static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
3c798398e   Tejun Heo   blkcg: mass renam...
697
  static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
a2b1693ba   Tejun Heo   blkcg: implement ...
698
  static inline int blkcg_activate_policy(struct request_queue *q,
3c798398e   Tejun Heo   blkcg: mass renam...
699
  					const struct blkcg_policy *pol) { return 0; }
a2b1693ba   Tejun Heo   blkcg: implement ...
700
  static inline void blkcg_deactivate_policy(struct request_queue *q,
3c798398e   Tejun Heo   blkcg: mass renam...
701
  					   const struct blkcg_policy *pol) { }
b1208b56f   Tejun Heo   blkcg: inline bio...
702
  static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
a051661ca   Tejun Heo   blkcg: implement ...
703

f95a04afa   Tejun Heo   blkcg: embed stru...
704
705
706
  static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
  						  struct blkcg_policy *pol) { return NULL; }
  static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
3c798398e   Tejun Heo   blkcg: mass renam...
707
708
709
  static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
  static inline void blkg_get(struct blkcg_gq *blkg) { }
  static inline void blkg_put(struct blkcg_gq *blkg) { }
afc24d49c   Vivek Goyal   blk-cgroup: confi...
710

a051661ca   Tejun Heo   blkcg: implement ...
711
712
713
714
715
  static inline struct request_list *blk_get_rl(struct request_queue *q,
  					      struct bio *bio) { return &q->root_rl; }
  static inline void blk_put_rl(struct request_list *rl) { }
  static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
  static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
ae1188963   Tejun Heo   blkcg: consolidat...
716
717
  static inline bool blkcg_bio_issue_check(struct request_queue *q,
  					 struct bio *bio) { return true; }
a051661ca   Tejun Heo   blkcg: implement ...
718
719
  #define blk_queue_for_each_rl(rl, q)	\
  	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
efa7d1c73   Tejun Heo   update !CONFIG_BL...
720
  #endif	/* CONFIG_BLOCK */
36558c8a3   Tejun Heo   blkcg: style clea...
721
722
  #endif	/* CONFIG_BLK_CGROUP */
  #endif	/* _BLK_CGROUP_H */