block/blk-settings.c

  /*
   * Functions related to setting various queue properties from drivers
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
  
  #include "blk.h"
  unsigned long blk_max_low_pfn;
  EXPORT_SYMBOL(blk_max_low_pfn);
  
  unsigned long blk_max_pfn;
  
  /**
   * blk_queue_prep_rq - set a prepare_request function for queue
   * @q:		queue
   * @pfn:	prepare_request function
   *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used, for
 * instance, to build a cdb from the request data.
   *
   */
  void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
  {
  	q->prep_rq_fn = pfn;
  }
  EXPORT_SYMBOL(blk_queue_prep_rq);
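
/*
 * Example (illustrative sketch, not part of this file): a driver that needs
 * to build a hardware command before each request is started could register
 * a hypothetical callback like the one below from its queue setup code. The
 * callback returns BLKPREP_OK once the request is ready (or BLKPREP_DEFER /
 * BLKPREP_KILL to postpone or fail it):
 *
 *	static int example_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		...build the cdb from rq here...
 *		return BLKPREP_OK;
 *	}
 *
 *	blk_queue_prep_rq(q, example_prep_rq);
 */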
  
  /**
   * blk_queue_set_discard - set a discard_sectors function for queue
   * @q:		queue
   * @dfn:	prepare_discard function
   *
   * It's possible for a queue to register a discard callback which is used
   * to transform a discard request into the appropriate type for the
   * hardware. If none is registered, then discard requests are failed
   * with %EOPNOTSUPP.
   *
   */
  void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
  {
  	q->prepare_discard_fn = dfn;
  }
  EXPORT_SYMBOL(blk_queue_set_discard);
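
/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * hardware has a native discard/trim command could register a hypothetical
 * callback such as the one below; the signature is assumed to follow the
 * prepare_discard_fn typedef of this kernel generation:
 *
 *	static int example_prepare_discard(struct request_queue *q,
 *					   struct request *rq)
 *	{
 *		...rewrite rq into the hardware's discard command...
 *		return 0;
 *	}
 *
 *	blk_queue_set_discard(q, example_prepare_discard);
 */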
  
  /**
   * blk_queue_merge_bvec - set a merge_bvec function for queue
   * @q:		queue
   * @mbfn:	merge_bvec_fn
   *
   * Usually queues have static limitations on the max sectors or segments that
   * we can put in a request. Stacking drivers may have some settings that
   * are dynamic, and thus we have to query the queue whether it is ok to
   * add a new bio_vec to a bio at a given offset or not. If the block device
   * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
   * no merge_bvec_fn is defined for a queue, and only the fixed limits are
   * honored.
   */
  void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
  {
  	q->merge_bvec_fn = mbfn;
  }
  EXPORT_SYMBOL(blk_queue_merge_bvec);
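
/*
 * Example (illustrative sketch, not part of this file): a stacking driver
 * that cannot accept bios larger than a fixed chunk could register a
 * hypothetical callback along these lines, returning how many bytes of the
 * proposed bio_vec it will take (field names assumed from the
 * bvec_merge_data interface of this kernel generation):
 *
 *	static int example_merge_bvec(struct request_queue *q,
 *				      struct bvec_merge_data *bvm,
 *				      struct bio_vec *biovec)
 *	{
 *		unsigned int chunk = 64 * 1024;
 *
 *		if (bvm->bi_size + biovec->bv_len > chunk)
 *			return 0;
 *		return biovec->bv_len;
 *	}
 *
 *	blk_queue_merge_bvec(q, example_merge_bvec);
 */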
  
  void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
  {
  	q->softirq_done_fn = fn;
  }
  EXPORT_SYMBOL(blk_queue_softirq_done);
  void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
  {
  	q->rq_timeout = timeout;
  }
  EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
  
  void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
  {
  	q->rq_timed_out_fn = fn;
  }
  EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
  void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
  {
  	q->lld_busy_fn = fn;
  }
  EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
  /**
   * blk_queue_make_request - define an alternate make_request function for a device
   * @q:  the request queue for the device to be affected
   * @mfn: the alternate make_request function
   *
   * Description:
   *    The normal way for &struct bios to be passed to a device
   *    driver is for them to be collected into requests on a request
   *    queue, and then to allow the device driver to select requests
   *    off that queue when it is ready.  This works well for many block
   *    devices. However some block devices (typically virtual devices
   *    such as md or lvm) do not benefit from the processing on the
   *    request queue, and are served best by having the requests passed
   *    directly to them.  This can be achieved by providing a function
   *    to blk_queue_make_request().
   *
   * Caveat:
   *    The driver that does this *must* be able to deal appropriately
   *    with buffers in "highmemory". This can be accomplished by either calling
   *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
   *    blk_queue_bounce() to create a buffer in normal memory.
   **/
  void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
  {
  	/*
  	 * set defaults
  	 */
  	q->nr_requests = BLKDEV_MAX_RQ;
  	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
  	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
  	blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
  	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
  	q->make_request_fn = mfn;
  	q->backing_dev_info.ra_pages =
  			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
  	q->backing_dev_info.state = 0;
  	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
  	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
  	blk_queue_hardsect_size(q, 512);
  	blk_queue_dma_alignment(q, 511);
  	blk_queue_congestion_threshold(q);
  	q->nr_batching = BLK_BATCH_REQ;
  
  	q->unplug_thresh = 4;		/* hmm */
  	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
  	if (q->unplug_delay == 0)
  		q->unplug_delay = 1;
  	q->unplug_timer.function = blk_unplug_timeout;
  	q->unplug_timer.data = (unsigned long)q;
  
  	/*
  	 * by default assume old behaviour and bounce for any highmem page
  	 */
  	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
  }
  EXPORT_SYMBOL(blk_queue_make_request);
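
/*
 * Example (illustrative sketch, not part of this file): an md/lvm style
 * virtual device would typically remap the bio and pass it straight down to
 * the underlying device instead of queueing it:
 *
 *	static int example_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		...remap bio->bi_bdev and bio->bi_sector to the real device...
 *		generic_make_request(bio);
 *		return 0;
 *	}
 *
 *	blk_queue_make_request(q, example_make_request);
 */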
  
  /**
   * blk_queue_bounce_limit - set bounce buffer limit for queue
   * @q:  the request queue for the device
   * @dma_addr:   bus address limit
   *
   * Description:
   *    Different hardware can have different requirements as to what pages
   *    it can do I/O directly to. A low level driver can call
   *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
   *    buffers for doing I/O to pages residing above @dma_addr.
   **/
  void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
  {
  	unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
  	int dma = 0;
  
  	q->bounce_gfp = GFP_NOIO;
  #if BITS_PER_LONG == 64
  	/* Assume anything <= 4GB can be handled by IOMMU.
  	   Actually some IOMMUs can handle everything, but I don't
  	   know of a way to test this here. */
  	if (b_pfn < (min_t(u64, 0x100000000UL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
  		dma = 1;
  	q->bounce_pfn = max_low_pfn;
  #else
  	if (b_pfn < blk_max_low_pfn)
  		dma = 1;
  	q->bounce_pfn = b_pfn;
  #endif
  	if (dma) {
  		init_emergency_isa_pool();
  		q->bounce_gfp = GFP_NOIO | GFP_DMA;
  		q->bounce_pfn = b_pfn;
  	}
  }
  EXPORT_SYMBOL(blk_queue_bounce_limit);
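
/*
 * Example (illustrative, not part of this file): a controller that can only
 * DMA into the ISA region would call
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 *
 * from its queue setup code, while fully 64-bit capable hardware would pass
 * BLK_BOUNCE_ANY to avoid bouncing altogether.
 */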
  
  /**
   * blk_queue_max_sectors - set max sectors for a request for this queue
   * @q:  the request queue for the device
   * @max_sectors:  max sectors in the usual 512b unit
   *
   * Description:
   *    Enables a low level driver to set an upper limit on the size of
   *    received requests.
   **/
  void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
  {
  	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
  		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_sectors);
  	}
  
  	if (BLK_DEF_MAX_SECTORS > max_sectors)
  		q->max_hw_sectors = q->max_sectors = max_sectors;
  	else {
  		q->max_sectors = BLK_DEF_MAX_SECTORS;
  		q->max_hw_sectors = max_sectors;
  	}
  }
  EXPORT_SYMBOL(blk_queue_max_sectors);
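
/*
 * Example (illustrative, not part of this file): a driver whose hardware can
 * transfer at most 128KB per command would call
 *
 *	blk_queue_max_sectors(q, 256);
 *
 * (256 sectors * 512 bytes = 128KB) after allocating its queue.
 */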
  
  /**
   * blk_queue_max_phys_segments - set max phys segments for a request for this queue
   * @q:  the request queue for the device
   * @max_segments:  max number of segments
   *
   * Description:
   *    Enables a low level driver to set an upper limit on the number of
   *    physical data segments in a request.  This would be the largest sized
   *    scatter list the driver could handle.
   **/
  void blk_queue_max_phys_segments(struct request_queue *q,
  				 unsigned short max_segments)
  {
  	if (!max_segments) {
  		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
  	}
  
  	q->max_phys_segments = max_segments;
  }
  EXPORT_SYMBOL(blk_queue_max_phys_segments);
  
  /**
   * blk_queue_max_hw_segments - set max hw segments for a request for this queue
   * @q:  the request queue for the device
   * @max_segments:  max number of segments
   *
   * Description:
   *    Enables a low level driver to set an upper limit on the number of
   *    hw data segments in a request.  This would be the largest number of
   *    address/length pairs the host adapter can actually give at once
   *    to the device.
   **/
  void blk_queue_max_hw_segments(struct request_queue *q,
  			       unsigned short max_segments)
  {
  	if (!max_segments) {
  		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
  	}
  
  	q->max_hw_segments = max_segments;
  }
  EXPORT_SYMBOL(blk_queue_max_hw_segments);
  
  /**
   * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
   * @q:  the request queue for the device
   * @max_size:  max size of segment in bytes
   *
   * Description:
   *    Enables a low level driver to set an upper limit on the size of a
   *    coalesced segment
   **/
  void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
  {
  	if (max_size < PAGE_CACHE_SIZE) {
  		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
  	}
  
  	q->max_segment_size = max_size;
  }
  EXPORT_SYMBOL(blk_queue_max_segment_size);
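
/*
 * Example (illustrative, not part of this file): a driver whose
 * scatter/gather table has 32 entries of at most 64KB each would typically
 * pair these calls in its queue setup:
 *
 *	blk_queue_max_phys_segments(q, 32);
 *	blk_queue_max_hw_segments(q, 32);
 *	blk_queue_max_segment_size(q, 64 * 1024);
 */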
  
  /**
   * blk_queue_hardsect_size - set hardware sector size for the queue
   * @q:  the request queue for the device
   * @size:  the hardware sector size, in bytes
   *
   * Description:
 *   This should typically be set to the lowest possible sector size
 *   that the hardware can operate on (possibly even without resorting
 *   to internal read-modify-write operations). Usually the default
 *   of 512 covers most hardware.
   **/
  void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
  {
  	q->hardsect_size = size;
  }
  EXPORT_SYMBOL(blk_queue_hardsect_size);
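
/*
 * Example (illustrative, not part of this file): a device with native
 * 4096-byte sectors would call
 *
 *	blk_queue_hardsect_size(q, 4096);
 *
 * so that upper layers know the smallest unit the hardware can address.
 */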
  
  /*
   * Returns the minimum that is _not_ zero, unless both are zero.
   */
  #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
  
  /**
   * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
   * @t:	the stacking driver (top)
   * @b:  the underlying device (bottom)
   **/
  void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
  {
  	/* zero is "infinity" */
  	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
  	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
  	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);

  	t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
  	t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
  	t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
  	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
  	if (!t->queue_lock)
  		WARN_ON_ONCE(1);
  	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
  		unsigned long flags;
  		spin_lock_irqsave(t->queue_lock, flags);
  		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
  		spin_unlock_irqrestore(t->queue_lock, flags);
  	}
  }
  EXPORT_SYMBOL(blk_queue_stack_limits);
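
/*
 * Example (illustrative, not part of this file): a stacking driver such as a
 * device-mapper target would call this once per underlying device while
 * building its map, e.g.
 *
 *	blk_queue_stack_limits(t, bdev_get_queue(component_bdev));
 *
 * so that the top-level queue never exceeds what the weakest component
 * device supports (component_bdev is a hypothetical name here).
 */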
  
  /**
   * blk_queue_dma_pad - set pad mask
   * @q:     the request queue for the device
   * @mask:  pad mask
   *
   * Set dma pad mask.
   *
   * Appending pad buffer to a request modifies the last entry of a
   * scatter list such that it includes the pad buffer.
   **/
  void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
  {
  	q->dma_pad_mask = mask;
  }
  EXPORT_SYMBOL(blk_queue_dma_pad);
  
  /**
   * blk_queue_update_dma_pad - update pad mask
   * @q:     the request queue for the device
   * @mask:  pad mask
   *
   * Update dma pad mask.
   *
   * Appending pad buffer to a request modifies the last entry of a
   * scatter list such that it includes the pad buffer.
   **/
  void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
  {
  	if (mask > q->dma_pad_mask)
  		q->dma_pad_mask = mask;
  }
  EXPORT_SYMBOL(blk_queue_update_dma_pad);
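
/*
 * Example (illustrative, not part of this file): hardware that needs every
 * transfer length rounded up to a multiple of 4 bytes would call
 *
 *	blk_queue_update_dma_pad(q, 3);
 *
 * and the last scatterlist entry of a request is then extended so the total
 * length honours the mask.
 */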
  
  /**
   * blk_queue_dma_drain - Set up a drain buffer for excess dma.
   * @q:  the request queue for the device
   * @dma_drain_needed: fn which returns non-zero if drain is necessary
   * @buf:	physically contiguous buffer
   * @size:	size of the buffer in bytes
   *
   * Some devices have excess DMA problems and can't simply discard (or
   * zero fill) the unwanted piece of the transfer.  They have to have a
   * real area of memory to transfer it into.  The use case for this is
   * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size, some HBAs will lock up if there
   * aren't DMA elements to contain the excess transfer.  What this API
   * does is adjust the queue so that the buf is always appended
   * silently to the scatterlist.
   *
   * Note: This routine adjusts max_hw_segments to make room for
   * appending the drain buffer.  If you call
   * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
   * calling this routine, you must set the limit to one fewer than your
   * device can support otherwise there won't be room for the drain
   * buffer.
   */
  int blk_queue_dma_drain(struct request_queue *q,
  			       dma_drain_needed_fn *dma_drain_needed,
  			       void *buf, unsigned int size)
  {
  	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
  		return -EINVAL;
  	/* make room for appending the drain */
  	--q->max_hw_segments;
  	--q->max_phys_segments;
  	q->dma_drain_needed = dma_drain_needed;
  	q->dma_drain_buffer = buf;
  	q->dma_drain_size = size;
  
  	return 0;
  }
  EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
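
/*
 * Example (illustrative sketch, not part of this file): an ATAPI-style
 * driver would allocate a small driver-owned buffer and register it together
 * with a callback that says when draining is needed, typically only for
 * packet commands:
 *
 *	static int example_drain_needed(struct request *rq)
 *	{
 *		return blk_pc_request(rq);
 *	}
 *
 *	blk_queue_dma_drain(q, example_drain_needed, drain_buf, drain_size);
 *
 * where drain_buf and drain_size are hypothetical names for the driver's
 * physically contiguous drain buffer and its length.
 */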
  
  /**
   * blk_queue_segment_boundary - set boundary rules for segment merging
   * @q:  the request queue for the device
   * @mask:  the memory boundary mask
   **/
  void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
  {
  	if (mask < PAGE_CACHE_SIZE - 1) {
  		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
  	}
  
  	q->seg_boundary_mask = mask;
  }
  EXPORT_SYMBOL(blk_queue_segment_boundary);
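
/*
 * Example (illustrative, not part of this file): hardware whose DMA engine
 * cannot handle a segment crossing a 64KB boundary would call
 *
 *	blk_queue_segment_boundary(q, 0xffff);
 *
 * so segments are never merged across such a boundary.
 */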
  
  /**
   * blk_queue_dma_alignment - set dma length and memory alignment
   * @q:     the request queue for the device
   * @mask:  alignment mask
   *
   * description:
   *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
   *
   **/
  void blk_queue_dma_alignment(struct request_queue *q, int mask)
  {
  	q->dma_alignment = mask;
  }
  EXPORT_SYMBOL(blk_queue_dma_alignment);
  
  /**
   * blk_queue_update_dma_alignment - update dma length and memory alignment
   * @q:     the request queue for the device
   * @mask:  alignment mask
   *
   * description:
   *    update required memory and length alignment for direct dma transactions.
   *    If the requested alignment is larger than the current alignment, then
   *    the current queue alignment is updated to the new value, otherwise it
   *    is left alone.  The design of this is to allow multiple objects
   *    (driver, device, transport etc) to set their respective
   *    alignments without having them interfere.
   *
   **/
  void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
  {
  	BUG_ON(mask > PAGE_SIZE);
  
  	if (mask > q->dma_alignment)
  		q->dma_alignment = mask;
  }
  EXPORT_SYMBOL(blk_queue_update_dma_alignment);
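
/*
 * Example (illustrative, not part of this file): a transport that requires
 * buffers aligned to at least 4 bytes, on top of whatever the driver already
 * set, would call
 *
 *	blk_queue_update_dma_alignment(q, 3);
 *
 * which only ever raises the queue's alignment mask, never lowers it.
 */
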
  static int __init blk_settings_init(void)
  {
  	blk_max_low_pfn = max_low_pfn - 1;
  	blk_max_pfn = max_pfn - 1;
  	return 0;
  }
  subsys_initcall(blk_settings_init);