block/blk.h

  #ifndef BLK_INTERNAL_H
  #define BLK_INTERNAL_H
  /* Amount of time in which a process may batch requests */
  #define BLK_BATCH_TIME	(HZ/50UL)
  
  /* Number of requests a "batching" process may submit */
  #define BLK_BATCH_REQ	32
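
  /*
   * Hedged sketch of how the two limits above are used (cf. ioc_batching()
   * in blk-core.c; the shape below is an assumption for illustration, not
   * a copy): a process keeps its batching privilege while it has batch
   * slots left and the time window hasn't expired:
   *
   *	if (ioc->nr_batch_requests > 0 &&
   *	    time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME))
   *		... let the request through the batching path ...
   */
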
  extern struct kmem_cache *blk_requestq_cachep;
  extern struct kobj_type blk_queue_ktype;
  void init_request_from_bio(struct request *req, struct bio *bio);
  void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
  			struct bio *bio);
  int blk_rq_append_bio(struct request_queue *q, struct request *rq,
  		      struct bio *bio);
  void blk_drain_queue(struct request_queue *q, bool drain_all);
  void blk_dequeue_request(struct request *rq);
  void __blk_queue_free_tags(struct request_queue *q);
  bool __blk_end_bidi_request(struct request *rq, int error,
  			    unsigned int nr_bytes, unsigned int bidi_bytes);

  void blk_rq_timed_out_timer(unsigned long data);
  void blk_delete_timer(struct request *);
  void blk_add_timer(struct request *);
  void __generic_unplug_device(struct request_queue *);
  
  /*
   * Internal atomic flags for request handling
   */
  enum rq_atomic_flags {
  	REQ_ATOM_COMPLETE = 0,
  };
  
  /*
   * EH timer and IO completion will both attempt to 'grab' the request;
   * make sure that only one of them succeeds.
   */
  static inline int blk_mark_rq_complete(struct request *rq)
  {
  	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
  }
  
  static inline void blk_clear_rq_complete(struct request *rq)
  {
  	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
  }
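
  /*
   * Hedged usage sketch (illustrative, not lifted from blk-core.c): both
   * the timeout handler and the normal completion path try to mark the
   * request complete, and whichever loses the test_and_set_bit() backs
   * off:
   *
   *	if (blk_mark_rq_complete(rq))
   *		return;
   *
   * The winner completes the request; blk_clear_rq_complete() is called
   * once the request may be completed again (e.g. on requeue).
   */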

  /*
   * Internal elevator interface
   */
  #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
  void blk_insert_flush(struct request *rq);
  void blk_abort_flushes(struct request_queue *q);

  static inline struct request *__elv_next_request(struct request_queue *q)
  {
  	struct request *rq;
  
  	while (1) {
  		if (!list_empty(&q->queue_head)) {
  			rq = list_entry_rq(q->queue_head.next);
  			return rq;
  		}
  		/*
  		 * If a flush request is running and the drive can't queue
  		 * further requests behind a flush, hold the queue until the
  		 * flush finishes.  Even if we didn't hold it, the driver
  		 * couldn't dispatch the next requests and would just requeue
  		 * them.  Holding can also improve throughput: given requests
  		 * flush1, write1, flush2, flush1 is dispatched and the queue
  		 * is held, so write1 isn't inserted.  Once flush1 finishes,
  		 * flush2 is dispatched; since the disk cache is already
  		 * clean, flush2 completes almost immediately, so flush2 is
  		 * effectively folded into flush1.
  		 * While the queue is held, a flag is set to indicate that
  		 * the queue should be restarted later.  See flush_end_io()
  		 * for details.
  		 */
  		if (q->flush_pending_idx != q->flush_running_idx &&
  				!queue_flush_queueable(q)) {
  			q->flush_queue_delayed = 1;
  			return NULL;
  		}
  		if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
  		    !q->elevator->ops->elevator_dispatch_fn(q, 0))
  			return NULL;
  	}
  }
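
  /*
   * Hedged sketch of the caller side (cf. blk_peek_request() in
   * blk-core.c; illustrative only, names and flow are simplified):
   * drivers obtain work through a loop that peeks at the queue head and
   * starts anything it hasn't seen before:
   *
   *	while ((rq = __elv_next_request(q)) != NULL) {
   *		if (!(rq->cmd_flags & REQ_STARTED)) {
   *			... run the prep function, set REQ_STARTED ...
   *		}
   *		...
   *	}
   */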
  
  static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
  {
  	struct elevator_queue *e = q->elevator;
  
  	if (e->ops->elevator_activate_req_fn)
  		e->ops->elevator_activate_req_fn(q, rq);
  }
  
  static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
  {
  	struct elevator_queue *e = q->elevator;
  
  	if (e->ops->elevator_deactivate_req_fn)
  		e->ops->elevator_deactivate_req_fn(q, rq);
  }
  #ifdef CONFIG_FAIL_IO_TIMEOUT
  int blk_should_fake_timeout(struct request_queue *);
  ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
  ssize_t part_timeout_store(struct device *, struct device_attribute *,
  				const char *, size_t);
  #else
  static inline int blk_should_fake_timeout(struct request_queue *q)
  {
  	return 0;
  }
  #endif
  struct io_context *current_io_context(gfp_t gfp_flags, int node);
  int ll_back_merge_fn(struct request_queue *q, struct request *req,
  		     struct bio *bio);
  int ll_front_merge_fn(struct request_queue *q, struct request *req,
  		      struct bio *bio);
  int attempt_back_merge(struct request_queue *q, struct request *rq);
  int attempt_front_merge(struct request_queue *q, struct request *rq);
  int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
  				struct request *next);
  void blk_recalc_rq_segments(struct request *rq);
  void blk_rq_set_mixed_merge(struct request *rq);

  void blk_queue_congestion_threshold(struct request_queue *q);
  int blk_dev_init(void);
  void elv_quiesce_start(struct request_queue *q);
  void elv_quiesce_end(struct request_queue *q);
  /*
   * Return the threshold (number of used requests) at which the queue is
   * considered to be congested.  It includes a little hysteresis to keep the
   * context switch rate down.
   */
  static inline int queue_congestion_on_threshold(struct request_queue *q)
  {
  	return q->nr_congestion_on;
  }
  
  /*
   * The threshold at which a queue is considered to be uncongested
   */
  static inline int queue_congestion_off_threshold(struct request_queue *q)
  {
  	return q->nr_congestion_off;
  }
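
  /*
   * Hedged sketch of the hysteresis in action (cf. get_request() and
   * freed_request() in blk-core.c; an illustration under those
   * assumptions, not the exact code):
   *
   *	if (rl->count[is_sync] >= queue_congestion_on_threshold(q))
   *		blk_set_queue_congested(q, is_sync);
   *	else if (rl->count[is_sync] < queue_congestion_off_threshold(q))
   *		blk_clear_queue_congested(q, is_sync);
   *
   * Because nr_congestion_off sits below nr_congestion_on, the flag
   * doesn't flap when usage hovers around a single threshold.
   */
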
  static inline int blk_cpu_to_group(int cpu)
  {
  	int group = NR_CPUS;
  #ifdef CONFIG_SCHED_MC
  	const struct cpumask *mask = cpu_coregroup_mask(cpu);
  	group = cpumask_first(mask);
  #elif defined(CONFIG_SCHED_SMT)
  	group = cpumask_first(topology_thread_cpumask(cpu));
  #else
  	return cpu;
  #endif
  	if (likely(group < NR_CPUS))
  		return group;
  	return cpu;
  }
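
  /*
   * Hedged usage sketch (cf. __blk_complete_request() in blk-softirq.c;
   * illustrative): completion affinity treats CPUs in the same group as
   * equivalent, so a completion is only redirected when the submitting
   * and completing CPUs land in different groups:
   *
   *	if (blk_cpu_to_group(req->cpu) == blk_cpu_to_group(cpu))
   *		... complete on the local CPU ...
   *	else
   *		... trigger the completion softirq on req->cpu ...
   */
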
  /*
   * Contribute to IO statistics IFF:
   *
   *	a) it's attached to a gendisk, and
   *	b) the queue had IO stats enabled when this request was started, and
   *	c) it's a file system request or a discard request
   */
  static inline int blk_do_io_stat(struct request *rq)
  {
  	return rq->rq_disk &&
  	       (rq->cmd_flags & REQ_IO_STAT) &&
  	       (rq->cmd_type == REQ_TYPE_FS ||
  	        (rq->cmd_flags & REQ_DISCARD));
  }
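
  /*
   * Hedged sketch of a typical caller (cf. the blk_account_io_*()
   * helpers in blk-core.c; the body is an illustration, not a copy):
   *
   *	if (blk_do_io_stat(req)) {
   *		struct hd_struct *part = req->part;
   *		... bump the part_stat counters for this request ...
   *	}
   */
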
  #ifdef CONFIG_BLK_DEV_THROTTLING
  extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
  extern void blk_throtl_drain(struct request_queue *q);
  extern int blk_throtl_init(struct request_queue *q);
  extern void blk_throtl_exit(struct request_queue *q);
  extern void blk_throtl_release(struct request_queue *q);
  #else /* CONFIG_BLK_DEV_THROTTLING */
  static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
  {
  	return false;
  }
  static inline void blk_throtl_drain(struct request_queue *q) { }
  static inline int blk_throtl_init(struct request_queue *q) { return 0; }
  static inline void blk_throtl_exit(struct request_queue *q) { }
  static inline void blk_throtl_release(struct request_queue *q) { }
  #endif /* CONFIG_BLK_DEV_THROTTLING */
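
  /*
   * Hedged sketch of the throttling hook's call site (cf.
   * generic_make_request_checks() in blk-core.c; illustrative): a true
   * return means the bio was consumed by the throttle code and
   * submission must stop:
   *
   *	if (blk_throtl_bio(q, bio))
   *		return;
   *
   * The CONFIG_BLK_DEV_THROTTLING=n stub above always returns false, so
   * bios pass straight through when throttling is compiled out.
   */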
  
  #endif /* BLK_INTERNAL_H */