block/blk-settings.c
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;
/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O, it can be used to build a
 * cdb from the request data for instance.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q:		queue
 * @ufn:	unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 *
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
	q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bio's. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.  Can be used by
 *   stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = BLK_DEF_MAX_SECTORS;
	lim->max_hw_sectors = INT_MAX;
	lim->max_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->discard_zeroes_data = -1;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);
/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = msecs_to_jiffies(3);	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	blk_set_default_limits(&q->limits);
	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);

	/*
	 * If the caller didn't supply a lock, fall back to our embedded
	 * per-queue locks
	 */
	if (!q->queue_lock)
		q->queue_lock = &q->__queue_lock;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
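/*
 * Illustrative usage (not part of the original file): a minimal sketch of a
 * hypothetical bio-based driver wiring up its own make_request function as
 * described above.  example_make_request() and example_setup() are names
 * assumed here for illustration only.
 *
 *	static int example_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		// a real driver would service the bio's data before completing it
 *		bio_endio(bio, 0);
 *		return 0;
 *	}
 *
 *	static struct request_queue *example_setup(void)
 *	{
 *		struct request_queue *q = blk_alloc_queue(GFP_KERNEL);
 *
 *		if (q)
 *			blk_queue_make_request(q, example_make_request);
 *		return q;
 *	}
 */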
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
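/*
 * Illustrative usage (not part of the original file): a hypothetical driver
 * for a controller that can only DMA to the low 4GB could ask the block
 * layer to bounce higher pages (DMA_BIT_MASK() is from <linux/dma-mapping.h>):
 *
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 *
 * A device with no addressing restriction would pass BLK_BOUNCE_ANY instead.
 */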
/**
 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
 * @limits: the queue limits
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of I/O
 *    controller and storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	limits->max_hw_sectors = max_hw_sectors;
	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
				    BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    See description for blk_limits_max_hw_sectors().
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
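/*
 * Illustrative usage (not part of the original file): a hypothetical HBA that
 * can transfer at most 512KB per command, through a 128-entry scatter/gather
 * table with 64KB per element, might describe itself like this:
 *
 *	blk_queue_max_hw_sectors(q, 1024);	// 1024 * 512b = 512KB
 *	blk_queue_max_segments(q, 128);
 *	blk_queue_max_segment_size(q, 64 * 1024);
 */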
/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
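/*
 * Illustrative usage (not part of the original file): a hypothetical driver
 * for a 512-byte-emulation drive with 4KB physical sectors, sitting behind a
 * RAID set with a 64KB chunk and 4 data disks, might report its topology as:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_io_min(q, 4096);		// avoid read-modify-write
 *	blk_queue_io_opt(q, 4 * 65536);		// full-stripe writes preferred
 */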
/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) & (min(top, bottom) - 1)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm(t->io_opt, b->io_opt);

	t->cluster &= b->cluster;
	t->discard_zeroes_data &= b->discard_zeroes_data;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm(t->alignment_offset, alignment)
		& (max(t->physical_block_size, t->io_min) - 1);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if (max(top, bottom) & (min(top, bottom) - 1))
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm(t->discard_alignment, alignment) &
			(t->discard_granularity - 1);
	}

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
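/*
 * Illustrative usage (not part of the original file): a minimal sketch of how
 * a hypothetical stacking driver might combine the limits of its component
 * devices before committing them to its own queue.  example_dev, its list and
 * data_offset members, and example_devices are assumptions for illustration.
 *
 *	struct queue_limits lim;
 *	struct example_dev *ed;
 *
 *	blk_set_default_limits(&lim);
 *	list_for_each_entry(ed, &example_devices, list)
 *		if (bdev_stack_limits(&lim, ed->bdev, ed->data_offset) < 0)
 *			pr_warning("example: %s is misaligned\n", ed->name);
 *
 * The merged limits in lim would then be copied into the stacking driver's
 * request queue.
 */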
/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
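/*
 * Illustrative usage (not part of the original file): roughly how an
 * ATAPI-style driver might reserve a drain buffer as described above.
 * example_drain_needed() and the 256-byte size are assumptions; the check
 * simply drains packet (BLOCK_PC) commands.
 *
 *	static int example_drain_needed(struct request *rq)
 *	{
 *		return rq->cmd_type == REQ_TYPE_BLOCK_PC;
 *	}
 *
 *	buf = kmalloc(256, GFP_KERNEL);
 *	if (buf)
 *		blk_queue_dma_drain(q, example_drain_needed, buf, 256);
 */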
/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
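/*
 * Illustrative usage (not part of the original file): a hypothetical
 * controller that requires buffers and transfer lengths to be 32-bit
 * aligned for direct I/O would pass a mask of alignment - 1:
 *
 *	blk_queue_dma_alignment(q, 3);
 *
 * A transport that additionally needs 512-byte alignment could later raise
 * the requirement without lowering a stricter existing setting:
 *
 *	blk_queue_update_dma_alignment(q, 511);
 */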
/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q:		the request queue for the device
 * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell block layer cache flush capability of @q.  If it supports
 * flushing, REQ_FLUSH should be set.  If it supports bypassing
 * write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
		flush &= ~REQ_FUA;

	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
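/*
 * Illustrative usage (not part of the original file): a hypothetical driver
 * for a device with a volatile write-back cache that also honours FUA writes
 * would advertise both capabilities; a write-through device would pass 0.
 *
 *	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 */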
static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);