block/blk-settings.c

/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O, it can be used to build a
 * cdb from the request data for instance.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
        q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
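
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver could use a prepare_request callback to build its command
 * block before the request reaches its request_fn.  The callback
 * returns one of the BLKPREP_* codes from <linux/blkdev.h>.
 */
static int example_prep_rq_fn(struct request_queue *q, struct request *rq)
{
        /*
         * A real driver would allocate and fill a command structure here
         * (attaching it e.g. via rq->special) and could return
         * BLKPREP_DEFER if resources are temporarily unavailable.
         */
        return BLKPREP_OK;
}

/* Such a driver would register the callback at queue setup time:
 *
 *      blk_queue_prep_rq(q, example_prep_rq_fn);
 */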

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q: queue
 * @ufn: unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 *
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
        q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q: queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bio's. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
        q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
        q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
        q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
        q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.  Can be used by
 *   stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = BLK_DEF_MAX_SECTORS;
        lim->max_hw_sectors = INT_MAX;
        lim->max_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->discard_zeroes_data = -1;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);
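
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * stacking driver staging a new table could reset a scratch queue_limits
 * to the defaults and then fold each component device into it with
 * blk_stack_limits() before publishing the result.
 */
static void example_stage_limits(struct queue_limits *staged,
                                 struct queue_limits *component)
{
        blk_set_default_limits(staged);

        /* Repeat for every bottom device; start is 0 for whole devices. */
        blk_stack_limits(staged, component, 0);
}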

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
        /*
         * set defaults
         */
        q->nr_requests = BLKDEV_MAX_RQ;

        q->make_request_fn = mfn;
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;

        blk_set_default_limits(&q->limits);
        blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);

        /*
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
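
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * bio-based ("virtual") driver that bypasses the request queue.  It
 * would normally pair this with blk_alloc_queue() rather than
 * blk_init_queue(), since no request_fn is involved.
 */
static int example_make_request(struct request_queue *q, struct bio *bio)
{
        /*
         * A real driver would remap the bio to a backing device or
         * service it from its own resources; here it is simply
         * completed successfully.
         */
        bio_endio(bio, 0);
        return 0;
}

static void example_init_bio_queue(struct request_queue *q)
{
        blk_queue_make_request(q, example_make_request);

        /* This driver can handle highmem pages, so don't bounce them. */
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
}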

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
        unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU.  Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->limits.bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->limits.bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
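
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver whose controller can only DMA below 4GB asks for bounce
 * buffers above that boundary.  Drivers usually derive this value from
 * their DMA mask (e.g. DMA_BIT_MASK(32) from <linux/dma-mapping.h>);
 * the boundary is spelled out literally here to stay self-contained.
 */
static void example_set_bounce_limit(struct request_queue *q)
{
        blk_queue_bounce_limit(q, (u64)0xffffffff);
}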

/**
 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
 * @limits: the queue limits
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of I/O
 *    controller and storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
        if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
                max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }

        limits->max_hw_sectors = max_hw_sectors;
        limits->max_sectors = min_t(unsigned int, max_hw_sectors,
                                    BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    See description for blk_limits_max_hw_sectors().
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
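
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * HBA whose hardware takes at most 1MB per request, split over at most
 * 128 scatter-gather entries of up to 64KB each (all numbers made up).
 */
static void example_set_request_limits(struct request_queue *q)
{
        blk_queue_max_hw_sectors(q, 2048);        /* 2048 * 512b = 1MB */
        blk_queue_max_segments(q, 128);           /* scatter-gather entries */
        blk_queue_max_segment_size(q, 64 * 1024); /* bytes per segment */
}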

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
        q->limits.logical_block_size = size;

        if (q->limits.physical_block_size < size)
                q->limits.physical_block_size = size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
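
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver for a 512-byte-logical / 4096-byte-physical drive that reports
 * an alignment offset of 3584 bytes (the classic "lowest aligned LBA 7"
 * case) could export its I/O topology like this.  The io_min/io_opt
 * values sketch what a RAID-style driver might report for its chunk
 * size and stripe width; all numbers here are made up.
 */
static void example_set_topology(struct request_queue *q)
{
        blk_queue_logical_block_size(q, 512);
        blk_queue_physical_block_size(q, 4096);
        blk_queue_alignment_offset(q, 3584);

        blk_queue_io_min(q, 64 * 1024);         /* e.g. stripe chunk size */
        blk_queue_io_opt(q, 256 * 1024);        /* e.g. full stripe width */
}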

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:  the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) & (min(top, bottom) - 1)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm(t->io_opt, b->io_opt);

        t->cluster &= b->cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm(t->alignment_offset, alignment)
                & (max(t->physical_block_size, t->io_min) - 1);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if (max(top, bottom) & (min(top, bottom) - 1))
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm(t->discard_alignment, alignment) &
                        (t->discard_granularity - 1);
        }

        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:  the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                      sector_t start)
{
        struct request_queue *bq = bdev_get_queue(bdev);

        start += get_start_sect(bdev);

        return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }
}
EXPORT_SYMBOL(disk_stack_limits);
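
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * MD/DM-style driver folding every component device into its gendisk's
 * limits.  @data_offset is the byte offset of the data area on each
 * component; disk_stack_limits() converts it to sectors internally and
 * warns if a component ends up misaligned.
 */
static void example_stack_components(struct gendisk *array_disk,
                                     struct block_device **parts, int nr,
                                     sector_t data_offset)
{
        int i;

        for (i = 0; i < nr; i++)
                disk_stack_limits(array_disk, parts[i], data_offset);
}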

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
        q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf: physically contiguous buffer
 * @size: size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
                        dma_drain_needed_fn *dma_drain_needed,
                        void *buf, unsigned int size)
{
        if (queue_max_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
        blk_queue_max_segments(q, queue_max_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
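
/*
 * Illustrative sketch (not part of the original file): an ATAPI-style
 * driver attaching a page-sized drain buffer so that an over-long
 * transfer always has a scatterlist element to land in.  The drain
 * policy below is hypothetical.
 */
static int example_drain_needed(struct request *rq)
{
        /* Drain everything except regular filesystem requests. */
        return rq->cmd_type != REQ_TYPE_FS;
}

static int example_setup_drain(struct request_queue *q, void *drain_page)
{
        /* drain_page would typically be allocated once at probe time. */
        return blk_queue_dma_drain(q, example_drain_needed, drain_page,
                                   PAGE_SIZE);
}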

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_CACHE_SIZE - 1) {
                mask = PAGE_CACHE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
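
/*
 * Illustrative sketch (not part of the original file): the driver sets
 * its own alignment requirement, and a lower layer (e.g. a transport)
 * later raises it; the stricter mask wins because the update helper
 * never lowers the value.
 */
static void example_set_dma_alignment(struct request_queue *q)
{
        blk_queue_dma_alignment(q, 3);          /* driver: 4-byte alignment */
        blk_queue_update_dma_alignment(q, 7);   /* transport: 8-byte */
        /* q->dma_alignment is now 7, i.e. buffers must be 8-byte aligned. */
}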

/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q:  the request queue for the device
 * @flush:  0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell block layer cache flush capability of @q.  If it supports
 * flushing, REQ_FLUSH should be set.  If it supports bypassing
 * write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
        WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

        if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
                flush &= ~REQ_FUA;

        q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
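
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver for a device with a volatile write cache that also honours
 * FUA writes advertises both; a write-through device would pass 0.
 */
static void example_set_flush(struct request_queue *q)
{
        blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
}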

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);