  #ifndef _BLK_CGROUP_H
  #define _BLK_CGROUP_H
  /*
   * Common Block IO controller cgroup interface
   *
   * Based on ideas and code from CFQ, CFS and BFQ:
   * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   *
   * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
   *		      Paolo Valente <paolo.valente@unimore.it>
   *
   * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
   * 	              Nauman Rafique <nauman@google.com>
   */
  
  #include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

  #ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
	unsigned int			cfq_leaf_weight;
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

  /*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
   * request_queue (q).  This is used by blkcg policies which need to track
   * information per blkcg - q pair.
   *
   * There can be multiple active blkcg policies and each has its private
   * data on each blkg, the size of which is determined by
   * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
   * together with blkg and invokes pd_init/exit_fn() methods.
   *
   * Such private data must embed struct blkg_policy_data (pd) at the
   * beginning and pd_size can't be smaller than pd.
   */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	int				refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

  extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);

int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

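/*
 * Typical usage (an illustrative sketch, not part of this header): a policy
 * embeds struct blkg_policy_data as the first member of its per-blkg data,
 * describes itself with a struct blkcg_policy, then registers and activates
 * it.  Every "foo" name below is hypothetical.
 *
 *	struct foo_group {
 *		struct blkg_policy_data	pd;
 *		struct blkg_stat	time;
 *		struct blkg_rwstat	serviced;
 *	};
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_group),
 *		.cftypes	= foo_files,
 *		.pd_init_fn	= foo_pd_init,
 *	};
 *
 *	ret = blkcg_policy_register(&blkcg_policy_foo);
 *	...
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *
 * blkcg_policy_register() picks a free policy id and stores it in ->plid;
 * blkcg_activate_policy() then allocates pd_size bytes of policy data for
 * every blkg on @q and calls pd_init_fn() on each.
 */
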
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);
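
/*
 * Sketch (assuming the hypothetical "foo" policy above): a cgroup file
 * read handler usually hands one of the prfill helpers to
 * blkcg_print_blkgs(), which invokes it once per blkg of the blkcg:
 *
 *	blkcg_print_blkgs(sf, css_to_blkcg(css), blkg_prfill_stat,
 *			  &blkcg_policy_foo, offsetof(struct foo_group, time),
 *			  false);
 *
 * blkg_prfill_stat() prints the blkg_stat found at the given byte offset
 * inside each blkg's policy data; pass @show_total as true when a summary
 * "Total" line should follow the per-device lines.
 */
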
struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
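
/*
 * Sketch of a configuration write path for the hypothetical "foo" policy:
 * blkg_conf_prep() parses "MAJ:MIN VAL" style input, looks up (creating if
 * necessary) the matching blkg and returns with queue_lock held, so the
 * per-blkg data can be updated safely before blkg_conf_finish() unwinds:
 *
 *	struct blkg_conf_ctx ctx;
 *	struct foo_group *fg;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	fg = (struct foo_group *)blkg_to_pd(ctx.blkg, &blkcg_policy_foo);
 *	fg->some_limit = ctx.v;
 *	blkg_conf_finish(&ctx);
 */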

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_subsys_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(css_parent(&blkcg->css));
}
  
  /**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}
  
/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	int ret;

	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (ret)
		strncpy(buf, "<unavailable>", buflen);
	return ret;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

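/*
 * Sketch: hierarchical policies use these walkers to aggregate or propagate
 * per-blkg state.  For instance, summing a counter of the hypothetical
 * "foo" policy over @blkg and all of its descendants under RCU:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *pos;
 *	u64 sum = 0;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(pos, pos_css, blkg) {
 *		struct foo_group *fg;
 *
 *		fg = (struct foo_group *)blkg_to_pd(pos, &blkcg_policy_foo);
 *		if (fg)
 *			sum += blkg_stat_read(&fg->time);
 *	}
 *	rcu_read_unlock();
 *
 * This is essentially what blkg_stat_recursive_sum() and
 * blkg_rwstat_recursive_sum() do for the blkg_stat and blkg_rwstat types.
 */
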
  /**
   * blk_get_rl - get request_list to use
   * @q: request_queue of interest
   * @bio: bio which will be attached to the allocated request (may be %NULL)
   *
   * The caller wants to allocate a request from @q to use for @bio.  Find
   * the request_list to use and obtain a reference on it.  Should be called
   * under queue_lock.  This function is guaranteed to return non-%NULL
   * request_list.
   */
  static inline struct request_list *blk_get_rl(struct request_queue *q,
  					      struct bio *bio)
  {
  	struct blkcg *blkcg;
  	struct blkcg_gq *blkg;
  
  	rcu_read_lock();
  
  	blkcg = bio_blkcg(bio);
  
  	/* bypass blkg lookup and use @q->root_rl directly for root */
  	if (blkcg == &blkcg_root)
  		goto root_rl;
  
  	/*
  	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
  	 * or if either the blkcg or queue is going away.  Fall back to
  	 * root_rl in such cases.
  	 */
  	blkg = blkg_lookup_create(blkcg, q);
  	if (unlikely(IS_ERR(blkg)))
  		goto root_rl;
  
  	blkg_get(blkg);
  	rcu_read_unlock();
  	return &blkg->rl;
  root_rl:
  	rcu_read_unlock();
  	return &q->root_rl;
  }
  
  /**
   * blk_put_rl - put request_list
   * @rl: request_list to put
   *
   * Put the reference acquired by blk_get_rl().  Should be called under
   * queue_lock.
   */
  static inline void blk_put_rl(struct request_list *rl)
  {
  	/* root_rl may not have blkg set */
  	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
  		blkg_put(rl->blkg);
  }
  
  /**
   * blk_rq_set_rl - associate a request with a request_list
   * @rq: request of interest
   * @rl: target request_list
   *
   * Associate @rq with @rl so that accounting and freeing can know the
   * request_list @rq came from.
   */
  static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
  {
  	rq->rl = rl;
  }
  
  /**
   * blk_rq_rl - return the request_list a request came from
   * @rq: request of interest
   *
   * Return the request_list @rq is allocated from.
   */
  static inline struct request_list *blk_rq_rl(struct request *rq)
  {
  	return rq->rl;
  }
  
  struct request_list *__blk_queue_next_rl(struct request_list *rl,
  					 struct request_queue *q);
  /**
   * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
   *
   * Should be used under queue_lock.
   */
  #define blk_queue_for_each_rl(rl, q)	\
  	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
  
  /**
   * blkg_stat_add - add a value to a blkg_stat
   * @stat: target blkg_stat
   * @val: value to add
   *
   * Add @val to @stat.  The caller is responsible for synchronizing calls to
   * this function.
   */
  static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
  {
  	u64_stats_update_begin(&stat->syncp);
  	stat->cnt += val;
  	u64_stats_update_end(&stat->syncp);
  }
  
  /**
   * blkg_stat_read - read the current value of a blkg_stat
   * @stat: blkg_stat to read
   *
   * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
   */
  static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
  {
  	unsigned int start;
  	uint64_t v;
  
  	do {
  		start = u64_stats_fetch_begin(&stat->syncp);
  		v = stat->cnt;
  	} while (u64_stats_fetch_retry(&stat->syncp, start));
  
  	return v;
  }
  
  /**
   * blkg_stat_reset - reset a blkg_stat
   * @stat: blkg_stat to reset
   */
  static inline void blkg_stat_reset(struct blkg_stat *stat)
  {
  	stat->cnt = 0;
  }
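
/*
 * Sketch: a policy's pd_reset_stats_fn usually just clears the counters
 * embedded in its per-blkg data, e.g. for the hypothetical "foo" policy:
 *
 *	static void foo_pd_reset_stats(struct blkcg_gq *blkg)
 *	{
 *		struct foo_group *fg;
 *
 *		fg = (struct foo_group *)blkg_to_pd(blkg, &blkcg_policy_foo);
 *		blkg_stat_reset(&fg->time);
 *		blkg_rwstat_reset(&fg->serviced);
 *	}
 */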
  
  /**
   * blkg_stat_merge - merge a blkg_stat into another
   * @to: the destination blkg_stat
   * @from: the source
   *
   * Add @from's count to @to.
   */
  static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
  {
  	blkg_stat_add(to, blkg_stat_read(from));
  }
  
  /**
   * blkg_rwstat_add - add a value to a blkg_rwstat
   * @rwstat: target blkg_rwstat
   * @rw: mask of REQ_{WRITE|SYNC}
   * @val: value to add
   *
   * Add @val to @rwstat.  The counters are chosen according to @rw.  The
   * caller is responsible for synchronizing calls to this function.
   */
  static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
  				   int rw, uint64_t val)
  {
  	u64_stats_update_begin(&rwstat->syncp);
  
  	if (rw & REQ_WRITE)
  		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
  	else
  		rwstat->cnt[BLKG_RWSTAT_READ] += val;
  	if (rw & REQ_SYNC)
  		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
  	else
  		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
  
  	u64_stats_update_end(&rwstat->syncp);
  }
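
/*
 * Sketch: in the IO path a policy typically accounts each bio against the
 * blkg_rwstat members of its per-blkg data, keyed by the bio's rw flags
 * (the "foo" fields are hypothetical and the bio member names may differ
 * between kernel versions):
 *
 *	blkg_rwstat_add(&fg->serviced, bio->bi_rw, 1);
 *	blkg_rwstat_add(&fg->service_bytes, bio->bi_rw, bio->bi_size);
 *
 * The first call bumps the READ or WRITE slot and the SYNC or ASYNC slot
 * by one; the second does the same with the byte count.
 */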
  
  /**
   * blkg_rwstat_read - read the current values of a blkg_rwstat
   * @rwstat: blkg_rwstat to read
   *
   * Read the current snapshot of @rwstat and return it as the return value.
   * This function can be called without synchronization and takes care of
   * u64 atomicity.
   */
  static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
  {
  	unsigned int start;
  	struct blkg_rwstat tmp;
  
  	do {
  		start = u64_stats_fetch_begin(&rwstat->syncp);
  		tmp = *rwstat;
  	} while (u64_stats_fetch_retry(&rwstat->syncp, start));
  
  	return tmp;
  }
  
  /**
   * blkg_rwstat_total - read the total count of a blkg_rwstat
   * @rwstat: blkg_rwstat to read
   *
   * Return the total count of @rwstat regardless of the IO direction.  This
   * function can be called without synchronization and takes care of u64
   * atomicity.
   */
  static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
  {
  	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
  
  	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
  }
  
  /**
   * blkg_rwstat_reset - reset a blkg_rwstat
   * @rwstat: blkg_rwstat to reset
   */
  static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
  {
  	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
  }
  /**
   * blkg_rwstat_merge - merge a blkg_rwstat into another
   * @to: the destination blkg_rwstat
   * @from: the source
   *
   * Add @from's counts to @to.
   */
  static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
  				     struct blkg_rwstat *from)
  {
  	struct blkg_rwstat v = blkg_rwstat_read(from);
  	int i;
  
  	u64_stats_update_begin(&to->syncp);
  	for (i = 0; i < BLKG_RWSTAT_NR; i++)
  		to->cnt[i] += v.cnt[i];
  	u64_stats_update_end(&to->syncp);
  }
  #else	/* CONFIG_BLK_CGROUP */
  
  struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }

static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

  #endif	/* CONFIG_BLK_CGROUP */
  #endif	/* _BLK_CGROUP_H */