Commit 5efd611351d1a847c72d74fb12ff4bd187c0cb2c

Authored by Tejun Heo
Committed by Jens Axboe
1 parent 7ee9c56205

blkcg: add blkcg_{init|drain|exit}_queue()

Currently block core calls directly into blk-throttle for init, drain
and exit.  This patch adds blkcg_{init|drain|exit}_queue() which wraps
the blk-throttle functions.  This gives the blkcg core layer more
control and visibility for proper layering.  Further patches will add
logic common to blkcg policies to these functions.
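
In other words, the block core entry points now go through the new
wrappers rather than calling blk-throttle directly.  A condensed view
of the three call sites, taken from the diff below:

    /* blk_alloc_queue_node() */
    if (blkcg_init_queue(q))
            goto fail_id;

    /* blk_drain_queue() */
    blkcg_drain_queue(q);

    /* blk_release_queue() */
    blkcg_exit_queue(q);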

While at it, collapse blk_throtl_release() into blk_throtl_exit().
There's no reason to keep them separate.
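
With the release step folded in, blk_throtl_exit() both shuts down the
throttle workqueue and frees the per-queue throttle data.  A condensed
sketch of the resulting function, per the blk-throttle.c hunk below:

    void blk_throtl_exit(struct request_queue *q)
    {
            /* ... existing teardown unchanged ... */
            throtl_shutdown_wq(q);
            kfree(q->td);   /* formerly done in blk_throtl_release() */
    }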

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Showing 6 changed files with 55 additions and 10 deletions

block/blk-cgroup.c
... ... @@ -20,6 +20,7 @@
20 20 #include <linux/genhd.h>
21 21 #include <linux/delay.h>
22 22 #include "blk-cgroup.h"
  23 +#include "blk.h"
23 24  
24 25 #define MAX_KEY_LEN 100
25 26  
... ... @@ -1457,6 +1458,47 @@
1457 1458 INIT_HLIST_HEAD(&blkcg->blkg_list);
1458 1459  
1459 1460 return &blkcg->css;
  1461 +}
  1462 +
  1463 +/**
  1464 + * blkcg_init_queue - initialize blkcg part of request queue
  1465 + * @q: request_queue to initialize
  1466 + *
  1467 + * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
  1468 + * part of new request_queue @q.
  1469 + *
  1470 + * RETURNS:
  1471 + * 0 on success, -errno on failure.
  1472 + */
  1473 +int blkcg_init_queue(struct request_queue *q)
  1474 +{
  1475 + might_sleep();
  1476 +
  1477 + return blk_throtl_init(q);
  1478 +}
  1479 +
  1480 +/**
  1481 + * blkcg_drain_queue - drain blkcg part of request_queue
  1482 + * @q: request_queue to drain
  1483 + *
  1484 + * Called from blk_drain_queue(). Responsible for draining blkcg part.
  1485 + */
  1486 +void blkcg_drain_queue(struct request_queue *q)
  1487 +{
  1488 + lockdep_assert_held(q->queue_lock);
  1489 +
  1490 + blk_throtl_drain(q);
  1491 +}
  1492 +
  1493 +/**
  1494 + * blkcg_exit_queue - exit and release blkcg part of request_queue
  1495 + * @q: request_queue being released
  1496 + *
  1497 + * Called from blk_release_queue(). Responsible for exiting blkcg part.
  1498 + */
  1499 +void blkcg_exit_queue(struct request_queue *q)
  1500 +{
  1501 + blk_throtl_exit(q);
1460 1502 }
1461 1503  
1462 1504 /*
block/blk-cgroup.h
... ... @@ -215,6 +215,10 @@
215 215 enum blkio_policy_id plid;
216 216 };
217 217  
  218 +extern int blkcg_init_queue(struct request_queue *q);
  219 +extern void blkcg_drain_queue(struct request_queue *q);
  220 +extern void blkcg_exit_queue(struct request_queue *q);
  221 +
218 222 /* Blkio controller policy registration */
219 223 extern void blkio_policy_register(struct blkio_policy_type *);
220 224 extern void blkio_policy_unregister(struct blkio_policy_type *);
... ... @@ -233,6 +237,9 @@
233 237 struct blkio_policy_type {
234 238 };
235 239  
  240 +static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
  241 +static inline void blkcg_drain_queue(struct request_queue *q) { }
  242 +static inline void blkcg_exit_queue(struct request_queue *q) { }
236 243 static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
237 244 static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
238 245 static inline void blkg_destroy_all(struct request_queue *q) { }
block/blk-core.c
... ... @@ -34,6 +34,7 @@
34 34 #include <trace/events/block.h>
35 35  
36 36 #include "blk.h"
  37 +#include "blk-cgroup.h"
37 38  
38 39 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
39 40 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
... ... @@ -280,7 +281,7 @@
280 281 *
281 282 * This function does not cancel any asynchronous activity arising
282 283 * out of elevator or throttling code. That would require elevaotor_exit()
283   - * and blk_throtl_exit() to be called with queue lock initialized.
  284 + * and blkcg_exit_queue() to be called with queue lock initialized.
284 285 *
285 286 */
286 287 void blk_sync_queue(struct request_queue *q)
... ... @@ -372,7 +373,7 @@
372 373 if (q->elevator)
373 374 elv_drain_elevator(q);
374 375  
375   - blk_throtl_drain(q);
  376 + blkcg_drain_queue(q);
376 377  
377 378 /*
378 379 * This function might be called on a queue which failed
... ... @@ -562,7 +563,7 @@
562 563 */
563 564 q->queue_lock = &q->__queue_lock;
564 565  
565   - if (blk_throtl_init(q))
  566 + if (blkcg_init_queue(q))
566 567 goto fail_id;
567 568  
568 569 return q;
block/blk-sysfs.c
... ... @@ -9,6 +9,7 @@
9 9 #include <linux/blktrace_api.h>
10 10  
11 11 #include "blk.h"
  12 +#include "blk-cgroup.h"
12 13  
13 14 struct queue_sysfs_entry {
14 15 struct attribute attr;
... ... @@ -486,7 +487,7 @@
486 487 elevator_exit(q->elevator);
487 488 }
488 489  
489   - blk_throtl_exit(q);
  490 + blkcg_exit_queue(q);
490 491  
491 492 if (rl->rq_pool)
492 493 mempool_destroy(rl->rq_pool);
... ... @@ -494,7 +495,6 @@
494 495 if (q->queue_tags)
495 496 __blk_queue_free_tags(q);
496 497  
497   - blk_throtl_release(q);
498 498 blk_trace_shutdown(q);
499 499  
500 500 bdi_destroy(&q->backing_dev_info);
block/blk-throttle.c
... ... @@ -1226,10 +1226,7 @@
1226 1226 * it.
1227 1227 */
1228 1228 throtl_shutdown_wq(q);
1229   -}
1230 1229  
1231   -void blk_throtl_release(struct request_queue *q)
1232   -{
1233 1230 kfree(q->td);
1234 1231 }
1235 1232  
block/blk.h
... ... @@ -236,7 +236,6 @@
236 236 extern void blk_throtl_drain(struct request_queue *q);
237 237 extern int blk_throtl_init(struct request_queue *q);
238 238 extern void blk_throtl_exit(struct request_queue *q);
239   -extern void blk_throtl_release(struct request_queue *q);
240 239 #else /* CONFIG_BLK_DEV_THROTTLING */
241 240 static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
242 241 {
... ... @@ -245,7 +244,6 @@
245 244 static inline void blk_throtl_drain(struct request_queue *q) { }
246 245 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
247 246 static inline void blk_throtl_exit(struct request_queue *q) { }
248   -static inline void blk_throtl_release(struct request_queue *q) { }
249 247 #endif /* CONFIG_BLK_DEV_THROTTLING */
250 248  
251 249 #endif /* BLK_INTERNAL_H */