block/blk-settings.c

  /*
   * Functions related to setting various queue properties from drivers
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
  #include <linux/gcd.h>
  
  #include "blk.h"
  unsigned long blk_max_low_pfn;
  EXPORT_SYMBOL(blk_max_low_pfn);
  
  unsigned long blk_max_pfn;
  
  /**
   * blk_queue_prep_rq - set a prepare_request function for queue
   * @q:		queue
   * @pfn:	prepare_request function
   *
   * It's possible for a queue to register a prepare_request callback which
   * is invoked before the request is handed to the request_fn. The goal of
   * the function is to prepare a request for I/O, it can be used to build a
   * cdb from the request data for instance.
   *
   */
  void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
  {
  	q->prep_rq_fn = pfn;
  }
  EXPORT_SYMBOL(blk_queue_prep_rq);
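
  /*
   * Example (sketch; the mydrv_* names are hypothetical): a driver
   * builds its cdb in the hook and returns BLKPREP_OK once the request
   * may be handed to the request_fn:
   *
   *	static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
   *	{
   *		if (!mydrv_build_cdb(rq))
   *			return BLKPREP_KILL;
   *		return BLKPREP_OK;
   *	}
   *
   *	blk_queue_prep_rq(q, mydrv_prep_rq);
   */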
  
  /**
   * blk_queue_set_discard - set a prepare_discard function for queue
   * @q:		queue
   * @dfn:	prepare_discard function
   *
   * It's possible for a queue to register a discard callback which is used
   * to transform a discard request into the appropriate type for the
   * hardware. If none is registered, then discard requests are failed
   * with %EOPNOTSUPP.
   *
   */
  void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
  {
  	q->prepare_discard_fn = dfn;
  }
  EXPORT_SYMBOL(blk_queue_set_discard);
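
  /*
   * Example (sketch; mydrv_setup_trim() is hypothetical): a discard
   * callback rewrites the request into whatever the hardware expects,
   * returning 0 on success:
   *
   *	static int mydrv_prepare_discard(struct request_queue *q,
   *					 struct request *rq)
   *	{
   *		return mydrv_setup_trim(rq);
   *	}
   *
   *	blk_queue_set_discard(q, mydrv_prepare_discard);
   */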
  
  /**
   * blk_queue_merge_bvec - set a merge_bvec function for queue
   * @q:		queue
   * @mbfn:	merge_bvec_fn
   *
   * Usually queues have static limitations on the max sectors or segments that
   * we can put in a request. Stacking drivers may have some settings that
   * are dynamic, and thus we have to query the queue whether it is ok to
   * add a new bio_vec to a bio at a given offset or not. If the block device
   * has such limitations, it needs to register a merge_bvec_fn to control
   * the size of bio's sent to it. Note that a block device *must* allow a
   * single page to be added to an empty bio. The block device driver may want
   * to use the bio_split() function to deal with these bio's. By default
   * no merge_bvec_fn is defined for a queue, and only the fixed limits are
   * honored.
   */
  void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
  {
  	q->merge_bvec_fn = mbfn;
  }
  EXPORT_SYMBOL(blk_queue_merge_bvec);
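
  /*
   * Example (sketch; mydrv_free_bytes() is hypothetical): report how
   * many bytes of @bvec fit at the proposed offset, while never
   * refusing the first page of an empty bio:
   *
   *	static int mydrv_merge_bvec(struct request_queue *q,
   *				    struct bvec_merge_data *bvm,
   *				    struct bio_vec *bvec)
   *	{
   *		int max = mydrv_free_bytes(bvm->bi_sector) - bvm->bi_size;
   *
   *		if (!bvm->bi_size)
   *			return bvec->bv_len;
   *		return max < 0 ? 0 : min_t(int, max, bvec->bv_len);
   *	}
   */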
  
  void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
  {
  	q->softirq_done_fn = fn;
  }
  EXPORT_SYMBOL(blk_queue_softirq_done);
  void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
  {
  	q->rq_timeout = timeout;
  }
  EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
  
  void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
  {
  	q->rq_timed_out_fn = fn;
  }
  EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
  void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
  {
  	q->lld_busy_fn = fn;
  }
  EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
  /**
   * blk_set_default_limits - reset limits to default values
   * @lim:  the queue_limits structure to reset
   *
   * Description:
   *   Returns a queue_limit struct to its default state.  Can be used by
   *   stacking drivers like DM that stage table swaps and reuse an
   *   existing device queue.
   */
  void blk_set_default_limits(struct queue_limits *lim)
  {
  	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
  	lim->max_hw_segments = MAX_HW_SEGMENTS;
  	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
  	lim->max_segment_size = MAX_SEGMENT_SIZE;
  	lim->max_sectors = BLK_DEF_MAX_SECTORS;
  	lim->max_hw_sectors = INT_MAX;
  	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
  	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
  	lim->alignment_offset = 0;
  	lim->io_opt = 0;
  	lim->misaligned = 0;
  	lim->no_cluster = 0;
  }
  EXPORT_SYMBOL(blk_set_default_limits);
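
  /*
   * Example (sketch in the spirit of a DM table swap; the table/dd
   * names are illustrative): reset the limits, then re-stack those of
   * every underlying device:
   *
   *	blk_set_default_limits(&q->limits);
   *	list_for_each_entry(dd, &table->devices, list)
   *		blk_stack_limits(&q->limits,
   *				 &bdev_get_queue(dd->bdev)->limits, 0);
   */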
  
  /**
   * blk_queue_make_request - define an alternate make_request function for a device
   * @q:  the request queue for the device to be affected
   * @mfn: the alternate make_request function
   *
   * Description:
   *    The normal way for &struct bios to be passed to a device
   *    driver is for them to be collected into requests on a request
   *    queue, and then to allow the device driver to select requests
   *    off that queue when it is ready.  This works well for many block
   *    devices. However some block devices (typically virtual devices
   *    such as md or lvm) do not benefit from the processing on the
   *    request queue, and are served best by having the requests passed
   *    directly to them.  This can be achieved by providing a function
   *    to blk_queue_make_request().
   *
   * Caveat:
   *    The driver that does this *must* be able to deal appropriately
   *    with buffers in "highmemory". This can be accomplished by either calling
   *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
   *    blk_queue_bounce() to create a buffer in normal memory.
   **/
  void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
  {
  	/*
  	 * set defaults
  	 */
  	q->nr_requests = BLKDEV_MAX_RQ;

  	q->make_request_fn = mfn;
  	blk_queue_dma_alignment(q, 511);
  	blk_queue_congestion_threshold(q);
  	q->nr_batching = BLK_BATCH_REQ;
  
  	q->unplug_thresh = 4;		/* hmm */
  	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
  	if (q->unplug_delay == 0)
  		q->unplug_delay = 1;
  	q->unplug_timer.function = blk_unplug_timeout;
  	q->unplug_timer.data = (unsigned long)q;
  	blk_set_default_limits(&q->limits);
  	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);

  	/*
  	 * If the caller didn't supply a lock, fall back to our embedded
  	 * per-queue locks
  	 */
  	if (!q->queue_lock)
  		q->queue_lock = &q->__queue_lock;
  
  	/*
  	 * by default assume old behaviour and bounce for any highmem page
  	 */
  	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
  }
  EXPORT_SYMBOL(blk_queue_make_request);
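
  /*
   * Example (sketch; the mydrv_* names are hypothetical): a bio-based
   * virtual driver bypasses the request queue entirely:
   *
   *	static int mydrv_make_request(struct request_queue *q, struct bio *bio)
   *	{
   *		mydrv_handle_bio(q->queuedata, bio);
   *		return 0;
   *	}
   *
   *	q = blk_alloc_queue(GFP_KERNEL);
   *	blk_queue_make_request(q, mydrv_make_request);
   */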
  
  /**
   * blk_queue_bounce_limit - set bounce buffer limit for queue
   * @q: the request queue for the device
   * @dma_mask: the maximum address the device can handle
   *
   * Description:
   *    Different hardware can have different requirements as to what pages
   *    it can do I/O directly to. A low level driver can call
   *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
   *    buffers for doing I/O to pages residing above @dma_mask.
   **/
  void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
  {
  	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
  	int dma = 0;
  
  	q->bounce_gfp = GFP_NOIO;
  #if BITS_PER_LONG == 64
  	/*
  	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
  	 * some IOMMUs can handle everything, but I don't know of a
  	 * way to test this here.
  	 */
  	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
  		dma = 1;
  	q->limits.bounce_pfn = max_low_pfn;
  #else
  	if (b_pfn < blk_max_low_pfn)
  		dma = 1;
  	q->limits.bounce_pfn = b_pfn;
  #endif
  	if (dma) {
  		init_emergency_isa_pool();
  		q->bounce_gfp = GFP_NOIO | GFP_DMA;
  		q->limits.bounce_pfn = b_pfn;
  	}
  }
  EXPORT_SYMBOL(blk_queue_bounce_limit);
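
  /*
   * Example (sketch): a device that can only DMA into the ISA range
   * asks for bouncing of anything above 16MB:
   *
   *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
   */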
  
  /**
   * blk_queue_max_sectors - set max sectors for a request for this queue
   * @q:  the request queue for the device
   * @max_sectors:  max sectors in the usual 512b unit
   *
   * Description:
   *    Enables a low level driver to set an upper limit on the size of
   *    received requests.
   **/
  void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
  {
  	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
  		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
  		printk(KERN_INFO "%s: set to minimum %d\n",
  		       __func__, max_sectors);
  	}
  
  	if (BLK_DEF_MAX_SECTORS > max_sectors)
  		q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
  	else {
  		q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
  		q->limits.max_hw_sectors = max_sectors;
  	}
  }
  EXPORT_SYMBOL(blk_queue_max_sectors);
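
  /*
   * Example (sketch): a controller limited to 128KB per request would
   * pass 256 sectors of 512 bytes:
   *
   *	blk_queue_max_sectors(q, 256);
   */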

  void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
  {
  	if (BLK_DEF_MAX_SECTORS > max_sectors)
  		q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
  	else
  		q->limits.max_hw_sectors = max_sectors;
  }
  EXPORT_SYMBOL(blk_queue_max_hw_sectors);
  /**
   * blk_queue_max_phys_segments - set max phys segments for a request for this queue
   * @q:  the request queue for the device
   * @max_segments:  max number of segments
   *
   * Description:
   *    Enables a low level driver to set an upper limit on the number of
   *    physical data segments in a request.  This would be the largest sized
   *    scatter list the driver could handle.
   **/
  void blk_queue_max_phys_segments(struct request_queue *q,
  				 unsigned short max_segments)
  {
  	if (!max_segments) {
  		max_segments = 1;
  		printk(KERN_INFO "%s: set to minimum %d\n",
  		       __func__, max_segments);
  	}
  	q->limits.max_phys_segments = max_segments;
  }
  EXPORT_SYMBOL(blk_queue_max_phys_segments);
  
  /**
   * blk_queue_max_hw_segments - set max hw segments for a request for this queue
   * @q:  the request queue for the device
   * @max_segments:  max number of segments
   *
   * Description:
   *    Enables a low level driver to set an upper limit on the number of
   *    hw data segments in a request.  This would be the largest number of
   *    address/length pairs the host adapter can actually give at once
   *    to the device.
   **/
  void blk_queue_max_hw_segments(struct request_queue *q,
  			       unsigned short max_segments)
  {
  	if (!max_segments) {
  		max_segments = 1;
  		printk(KERN_INFO "%s: set to minimum %d\n",
  		       __func__, max_segments);
  	}
  	q->limits.max_hw_segments = max_segments;
  }
  EXPORT_SYMBOL(blk_queue_max_hw_segments);
  
  /**
   * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
   * @q:  the request queue for the device
   * @max_size:  max size of segment in bytes
   *
   * Description:
   *    Enables a low level driver to set an upper limit on the size of a
   *    coalesced segment
   **/
  void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
  {
  	if (max_size < PAGE_CACHE_SIZE) {
  		max_size = PAGE_CACHE_SIZE;
  		printk(KERN_INFO "%s: set to minimum %d\n",
  		       __func__, max_size);
  	}
  	q->limits.max_segment_size = max_size;
  }
  EXPORT_SYMBOL(blk_queue_max_segment_size);
  
  /**
   * blk_queue_logical_block_size - set logical block size for the queue
   * @q:  the request queue for the device
   * @size:  the logical block size, in bytes
   *
   * Description:
   *   This should be set to the lowest possible block size that the
   *   storage device can address.  The default of 512 covers most
   *   hardware.
   **/
  void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
  {
  	q->limits.logical_block_size = size;

  	if (q->limits.physical_block_size < size)
  		q->limits.physical_block_size = size;

  	if (q->limits.io_min < q->limits.physical_block_size)
  		q->limits.io_min = q->limits.physical_block_size;
  }
  EXPORT_SYMBOL(blk_queue_logical_block_size);
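
  /*
   * Example (sketch): a native 4KB-sector device:
   *
   *	blk_queue_logical_block_size(q, 4096);
   *
   * As a side effect of the checks above, physical_block_size and
   * io_min are pulled up to 4096 as well.
   */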

  /**
   * blk_queue_physical_block_size - set physical block size for the queue
   * @q:  the request queue for the device
   * @size:  the physical block size, in bytes
   *
   * Description:
   *   This should be set to the lowest possible sector size that the
   *   hardware can operate on without reverting to read-modify-write
   *   operations.
   */
  void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
  {
  	q->limits.physical_block_size = size;
  
  	if (q->limits.physical_block_size < q->limits.logical_block_size)
  		q->limits.physical_block_size = q->limits.logical_block_size;
  
  	if (q->limits.io_min < q->limits.physical_block_size)
  		q->limits.io_min = q->limits.physical_block_size;
  }
  EXPORT_SYMBOL(blk_queue_physical_block_size);
  
  /**
   * blk_queue_alignment_offset - set physical block alignment offset
   * @q:	the request queue for the device
   * @offset: alignment offset in bytes
   *
   * Description:
   *   Some devices are naturally misaligned to compensate for things like
   *   the legacy DOS partition table 63-sector offset.  Low-level drivers
   *   should call this function for devices whose first sector is not
   *   naturally aligned.
   */
  void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
  {
  	q->limits.alignment_offset =
  		offset & (q->limits.physical_block_size - 1);
  	q->limits.misaligned = 0;
  }
  EXPORT_SYMBOL(blk_queue_alignment_offset);
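
  /*
   * Example (sketch; the 3584-byte value is illustrative): a 4KB
   * drive formatted for legacy 63-sector partitioning might report
   * its first logical block sitting 3584 bytes into a physical block:
   *
   *	blk_queue_alignment_offset(q, 3584);
   */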
  
  /**
   * blk_limits_io_min - set minimum request size for a device
   * @limits: the queue limits
   * @min:  smallest I/O size in bytes
   *
   * Description:
   *   Some devices have an internal block size bigger than the reported
   *   hardware sector size.  This function can be used to signal the
   *   smallest I/O the device can perform without incurring a performance
   *   penalty.
   */
  void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
  {
  	limits->io_min = min;
  
  	if (limits->io_min < limits->logical_block_size)
  		limits->io_min = limits->logical_block_size;
  
  	if (limits->io_min < limits->physical_block_size)
  		limits->io_min = limits->physical_block_size;
  }
  EXPORT_SYMBOL(blk_limits_io_min);
  
  /**
   * blk_queue_io_min - set minimum request size for the queue
   * @q:	the request queue for the device
   * @min:  smallest I/O size in bytes
   *
   * Description:
   *   Storage devices may report a granularity or preferred minimum I/O
   *   size which is the smallest request the device can perform without
   *   incurring a performance penalty.  For disk drives this is often the
   *   physical block size.  For RAID arrays it is often the stripe chunk
   *   size.  A properly aligned multiple of minimum_io_size is the
   *   preferred request size for workloads where a high number of I/O
   *   operations is desired.
   */
  void blk_queue_io_min(struct request_queue *q, unsigned int min)
  {
  	blk_limits_io_min(&q->limits, min);
  }
  EXPORT_SYMBOL(blk_queue_io_min);
  
  /**
   * blk_limits_io_opt - set optimal request size for a device
   * @limits: the queue limits
   * @opt:  optimal request size in bytes
   *
   * Description:
   *   Storage devices may report an optimal I/O size, which is the
   *   device's preferred unit for sustained I/O.  This is rarely reported
   *   for disk drives.  For RAID arrays it is usually the stripe width or
   *   the internal track size.  A properly aligned multiple of
   *   optimal_io_size is the preferred request size for workloads where
   *   sustained throughput is desired.
   */
  void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
  {
  	limits->io_opt = opt;
  }
  EXPORT_SYMBOL(blk_limits_io_opt);
  
  /**
   * blk_queue_io_opt - set optimal request size for the queue
   * @q:	the request queue for the device
   * @opt:  optimal request size in bytes
   *
   * Description:
   *   Storage devices may report an optimal I/O size, which is the
   *   device's preferred unit for sustained I/O.  This is rarely reported
   *   for disk drives.  For RAID arrays it is usually the stripe width or
   *   the internal track size.  A properly aligned multiple of
   *   optimal_io_size is the preferred request size for workloads where
   *   sustained throughput is desired.
   */
  void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
  {
  	blk_limits_io_opt(&q->limits, opt);
  }
  EXPORT_SYMBOL(blk_queue_io_opt);
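
  /*
   * Example (sketch): a RAID5 array with a 64KB chunk across four data
   * disks could advertise the chunk as io_min and the full stripe as
   * io_opt:
   *
   *	blk_queue_io_min(q, 64 * 1024);
   *	blk_queue_io_opt(q, 4 * 64 * 1024);
   */
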
  /*
   * Returns the minimum that is _not_ zero, unless both are zero.
   */
  #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
  
  /**
   * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
   * @t:	the stacking driver (top)
   * @b:  the underlying device (bottom)
   **/
  void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
  {
  	blk_stack_limits(&t->limits, &b->limits, 0);

  	if (!t->queue_lock)
  		WARN_ON_ONCE(1);
  	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
  		unsigned long flags;
  		spin_lock_irqsave(t->queue_lock, flags);
  		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
  		spin_unlock_irqrestore(t->queue_lock, flags);
  	}
  }
  EXPORT_SYMBOL(blk_queue_stack_limits);
  
  /**
   * blk_stack_limits - adjust queue_limits for stacked devices
   * @t:	the stacking driver limits (top)
   * @b:  the underlying queue limits (bottom)
   * @offset:  offset to beginning of data within component device
   *
   * Description:
   *    Merges two queue_limit structs.  Returns 0 if alignment didn't
   *    change.  Returns -1 if adding the bottom device caused
   *    misalignment.
   */
  int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
  		     sector_t offset)
  {
  	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
  	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
  	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
  
  	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
  					    b->seg_boundary_mask);
  
  	t->max_phys_segments = min_not_zero(t->max_phys_segments,
  					    b->max_phys_segments);
  
  	t->max_hw_segments = min_not_zero(t->max_hw_segments,
  					  b->max_hw_segments);
  
  	t->max_segment_size = min_not_zero(t->max_segment_size,
  					   b->max_segment_size);
  
  	t->logical_block_size = max(t->logical_block_size,
  				    b->logical_block_size);
  
  	t->physical_block_size = max(t->physical_block_size,
  				     b->physical_block_size);
  
  	t->io_min = max(t->io_min, b->io_min);
  	t->no_cluster |= b->no_cluster;
  
  	/* Bottom device offset aligned? */
  	if (offset &&
  	    (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
  		t->misaligned = 1;
  		return -1;
  	}
  
  	/* If top has no alignment offset, inherit from bottom */
  	if (!t->alignment_offset)
  		t->alignment_offset =
  			b->alignment_offset & (b->physical_block_size - 1);
  
  	/* Top device aligned on logical block boundary? */
  	if (t->alignment_offset & (t->logical_block_size - 1)) {
  		t->misaligned = 1;
  		return -1;
  	}
  	/* Find lcm() of optimal I/O size */
  	if (t->io_opt && b->io_opt)
  		t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
  	else if (b->io_opt)
  		t->io_opt = b->io_opt;
  
  	/* Verify that optimal I/O size is a multiple of io_min */
  	if (t->io_min && t->io_opt % t->io_min)
  		return -1;
  	return 0;
  }
  EXPORT_SYMBOL(blk_stack_limits);
  
  /**
   * disk_stack_limits - adjust queue limits for stacked drivers
   * @disk:  MD/DM gendisk (top)
   * @bdev:  the underlying block device (bottom)
   * @offset:  offset to beginning of data within component device
   *
   * Description:
   *    Merges the limits for two queues.  Unlike blk_stack_limits(),
   *    this function returns void; if adding the bottom device causes
   *    misalignment, a warning is printed instead.
   */
  void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
  		       sector_t offset)
  {
  	struct request_queue *t = disk->queue;
  	struct request_queue *b = bdev_get_queue(bdev);
  
  	offset += get_start_sect(bdev) << 9;
  
  	if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
  		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
  
  		disk_name(disk, 0, top);
  		bdevname(bdev, bottom);
  
  		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
  		       top, bottom);
  	}
  
  	if (!t->queue_lock)
  		WARN_ON_ONCE(1);
  	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
  		unsigned long flags;
  
  		spin_lock_irqsave(t->queue_lock, flags);
  		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
  			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
  		spin_unlock_irqrestore(t->queue_lock, flags);
  	}
  }
  EXPORT_SYMBOL(disk_stack_limits);
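
  /*
   * Example (sketch, md-style; mddev/rdev are illustrative): stack a
   * member device beneath an array, honouring its data offset:
   *
   *	disk_stack_limits(mddev->gendisk, rdev->bdev,
   *			  rdev->data_offset << 9);
   */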
  
  /**
   * blk_queue_dma_pad - set pad mask
   * @q:     the request queue for the device
   * @mask:  pad mask
   *
   * Set dma pad mask.
   *
   * Appending pad buffer to a request modifies the last entry of a
   * scatter list such that it includes the pad buffer.
   **/
  void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
  {
  	q->dma_pad_mask = mask;
  }
  EXPORT_SYMBOL(blk_queue_dma_pad);
  
  /**
   * blk_queue_update_dma_pad - update pad mask
   * @q:     the request queue for the device
   * @mask:  pad mask
   *
   * Update dma pad mask.
   *
   * Appending pad buffer to a request modifies the last entry of a
   * scatter list such that it includes the pad buffer.
   **/
  void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
  {
  	if (mask > q->dma_pad_mask)
  		q->dma_pad_mask = mask;
  }
  EXPORT_SYMBOL(blk_queue_update_dma_pad);
  
  /**
   * blk_queue_dma_drain - Set up a drain buffer for excess dma.
   * @q:  the request queue for the device
   * @dma_drain_needed: fn which returns non-zero if drain is necessary
   * @buf:	physically contiguous buffer
   * @size:	size of the buffer in bytes
   *
   * Some devices have excess DMA problems and can't simply discard (or
   * zero fill) the unwanted piece of the transfer.  They have to have a
   * real area of memory to transfer it into.  The use case for this is
   * ATAPI devices in DMA mode.  If the packet command causes a transfer
   * bigger than the transfer size some HBAs will lock up if there
   * aren't DMA elements to contain the excess transfer.  What this API
   * does is adjust the queue so that the buf is always appended
   * silently to the scatterlist.
   *
   * Note: This routine adjusts max_hw_segments to make room for
   * appending the drain buffer.  If you call
   * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
   * calling this routine, you must set the limit to one fewer than your
   * device can support otherwise there won't be room for the drain
   * buffer.
   */
  int blk_queue_dma_drain(struct request_queue *q,
  			       dma_drain_needed_fn *dma_drain_needed,
  			       void *buf, unsigned int size)
  {
  	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
  		return -EINVAL;
  	/* make room for appending the drain */
  	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
  	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
  	q->dma_drain_needed = dma_drain_needed;
  	q->dma_drain_buffer = buf;
  	q->dma_drain_size = size;
  
  	return 0;
  }
  EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
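
  /*
   * Example (sketch modelled on ATAPI; the names are illustrative):
   * hand the queue a scratch buffer that is appended whenever the
   * callback says the device may transfer more than requested:
   *
   *	buf = kmalloc(ATAPI_MAX_DRAIN, GFP_KERNEL);
   *	blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
   */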
  
  /**
   * blk_queue_segment_boundary - set boundary rules for segment merging
   * @q:  the request queue for the device
   * @mask:  the memory boundary mask
   **/
  void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
  {
  	if (mask < PAGE_CACHE_SIZE - 1) {
  		mask = PAGE_CACHE_SIZE - 1;
  		printk(KERN_INFO "%s: set to minimum %lx\n",
  		       __func__, mask);
  	}
  	q->limits.seg_boundary_mask = mask;
  }
  EXPORT_SYMBOL(blk_queue_segment_boundary);
  
  /**
   * blk_queue_dma_alignment - set dma length and memory alignment
   * @q:     the request queue for the device
   * @mask:  alignment mask
   *
   * Description:
   *    Set required memory and length alignment for direct dma transactions.
   *    This is used when building direct io requests for the queue.
   *
   **/
  void blk_queue_dma_alignment(struct request_queue *q, int mask)
  {
  	q->dma_alignment = mask;
  }
  EXPORT_SYMBOL(blk_queue_dma_alignment);
  
  /**
   * blk_queue_update_dma_alignment - update dma length and memory alignment
   * @q:     the request queue for the device
   * @mask:  alignment mask
   *
   * Description:
   *    Update required memory and length alignment for direct dma transactions.
   *    If the requested alignment is larger than the current alignment, then
   *    the current queue alignment is updated to the new value, otherwise it
   *    is left alone.  The design of this is to allow multiple objects
   *    (driver, device, transport etc) to set their respective
   *    alignments without having them interfere.
   *
   **/
  void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
  {
  	BUG_ON(mask > PAGE_SIZE);
  
  	if (mask > q->dma_alignment)
  		q->dma_alignment = mask;
  }
  EXPORT_SYMBOL(blk_queue_update_dma_alignment);
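
  /*
   * Example (sketch): a transport requiring 4-byte aligned buffers and
   * lengths raises the alignment without clobbering a stricter mask
   * set elsewhere:
   *
   *	blk_queue_update_dma_alignment(q, 0x3);
   */
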
  static int __init blk_settings_init(void)
  {
  	blk_max_low_pfn = max_low_pfn - 1;
  	blk_max_pfn = max_pfn - 1;
  	return 0;
  }
  subsys_initcall(blk_settings_init);