block/blk-mq.h

#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

struct blk_mq_tag_set;
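
/*
 * Per-CPU software queue context: requests are staged on rq_list under the
 * per-ctx lock before being handed to a hardware queue (blk_mq_hw_ctx) for
 * dispatch.
 */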
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
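
/*
 * Queue management helpers, implemented in blk-mq.c: blk_mq_run_hw_queue()
 * kicks dispatch on one hardware queue (from a workqueue when @async is
 * true), blk_mq_freeze_queue() blocks new requests and waits for in-flight
 * ones to drain, blk_mq_free_queue() tears down the multiqueue parts of a
 * request queue, blk_mq_update_nr_requests() resizes the tag depth, and
 * blk_mq_wake_waiters() wakes tasks waiting for tags.
 */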
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
int blk_mq_map_queues(struct blk_mq_tag_set *set);
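/* returns the NUMA node for the hw queue given by the (unnamed) index argument */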
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
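
/*
 * Map a CPU number to the hardware queue context that services it, using the
 * per-CPU q->mq_map table.
 */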
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
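
/* request timeout handling: called once a request's deadline has expired */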
extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
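
/* releases the hardware contexts and per-cpu software queues once the queue itself is torn down */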
void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
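
/*
 * Typical caller pattern (a sketch, not taken from this header): the two
 * helpers are paired so that preemption stays disabled while the per-cpu
 * ctx is in use:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	... stage the request on ctx->rq_list under ctx->lock ...
 *
 *	blk_mq_put_ctx(ctx);
 */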

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}
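
/*
 * Example (a sketch, not taken from this header): a caller typically
 * resolves the current software and hardware context and packs them up
 * for the tag allocator; BLK_MQ_REQ_NOWAIT is one of the allocation flags:
 *
 *	struct blk_mq_alloc_data data;
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *
 *	blk_mq_set_alloc_data(&data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
 */

/* a hardware queue is only usable once software queues and tags are attached */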
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

#endif