block/blk-settings.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_discard_segments = 1;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->virt_boundary_mask = 0;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_dev_sectors = 0;
	lim->chunk_sectors = 0;
	lim->max_write_same_sectors = 0;
	lim->max_write_zeroes_sectors = 0;
	lim->max_zone_append_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->max_hw_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce = BLK_BOUNCE_NONE;
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->zoned = BLK_ZONED_NONE;
	lim->zone_write_granularity = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

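/*
 * Editor's example (illustrative sketch, not part of the original
 * blk-settings.c): how the two reset helpers above are meant to be used.
 * The helper name and the "stacking" flag are assumptions made for this
 * example only.
 */
static void __maybe_unused example_reset_queue_limits(struct request_queue *q,
						       bool stacking)
{
	if (stacking) {
		/*
		 * Stacking drivers (DM/MD) start permissive; component
		 * devices restrict the limits via blk_stack_limits() later.
		 */
		blk_set_stacking_limits(&q->limits);
	} else {
		/*
		 * Regular drivers start from the conservative defaults and
		 * raise only the limits their hardware actually supports.
		 */
		blk_set_default_limits(&q->limits);
	}
}
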
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @bounce: bounce limit to enforce
 *
 * Description:
 *    Force bouncing for ISA DMA ranges or highmem.
 *
 *    DEPRECATED, don't use in new code.
 **/
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
{
	q->limits.bounce = bounce;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests.  It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	max_hw_sectors = round_down(max_hw_sectors,
				    limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_hw_sectors = max_hw_sectors;

	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
	max_sectors = round_down(max_sectors,
				 limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_sectors = max_sectors;

	if (!q->disk)
		return;
	q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

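/*
 * Editor's example (illustrative sketch, not part of the original file):
 * a driver whose controller can transfer at most 1 MiB per request would
 * cap the hardware limit as below; blk_queue_max_hw_sectors() then derives
 * the softer max_sectors limit itself. The function name is made up for
 * this example.
 */
static void __maybe_unused example_cap_transfer_size(struct request_queue *q)
{
	/* 1 MiB expressed in 512-byte sectors */
	blk_queue_max_hw_sectors(q, 2048);
}
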
/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the block layer
 *    must accept a page worth of data at any offset. So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_hw_discard_sectors = max_discard_sectors;
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
				      unsigned int max_write_same_sectors)
{
	q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors;

	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
	max_sectors = min(q->limits.chunk_sectors, max_sectors);

	/*
	 * Signal eventual driver bugs resulting in the max_zone_append sectors limit
	 * being 0 due to a 0 argument, the chunk_sectors limit (zone size) not set,
	 * or the max_hw_sectors limit not set.
	 */
	WARN_ON(!max_sectors);

	q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	/* see blk_queue_virt_boundary() for the explanation */
	WARN_ON_ONCE(q->limits.virt_boundary_mask);

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

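/*
 * Editor's example (illustrative sketch, not part of the original file):
 * a driver whose DMA engine has a 128-entry scatter/gather table, each
 * entry addressing at most 64 KiB, would combine the two segment helpers
 * above. The function name and the numbers are assumptions for this
 * example only.
 */
static void __maybe_unused example_cap_sg_limits(struct request_queue *q)
{
	/* At most 128 scatter/gather entries per request ... */
	blk_queue_max_segments(q, 128);
	/* ... and no single entry longer than 64 KiB */
	blk_queue_max_segment_size(q, 64 * 1024);
}
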
/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	struct queue_limits *limits = &q->limits;

	limits->logical_block_size = size;

	if (limits->physical_block_size < size)
		limits->physical_block_size = size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;

	limits->max_hw_sectors =
		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
	limits->max_sectors =
		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

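/*
 * Editor's example (illustrative sketch, not part of the original file):
 * a 4Kn device reports a 4096-byte logical block size; its physical block
 * size is set with blk_queue_physical_block_size(), defined just below.
 * The function name is made up for this example.
 */
static void __maybe_unused example_set_4kn_block_sizes(struct request_queue *q)
{
	/* Smallest unit the device can address */
	blk_queue_logical_block_size(q, 4096);
	/* Smallest unit it can write without read-modify-write */
	blk_queue_physical_block_size(q, 4096);
}
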
/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q:  the request queue for the zoned device
 * @size:  the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size allowing to write in
 *   sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size)
{
	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return;

	q->limits.zone_write_granularity = size;

	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
		q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

void disk_update_readahead(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 */
	disk->bdi->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL_GPL(disk_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
	if (!q->disk)
		return;
	q->disk->bdi->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);

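/*
 * Editor's example (illustrative sketch, not part of the original file):
 * a RAID5-style array with a 64 KiB chunk striped across 4 data disks might
 * expose its preferred I/O granularities as below. The numbers and the
 * function name are assumptions for this example only.
 */
static void __maybe_unused example_set_raid_io_hints(struct request_queue *q)
{
	/* Smallest I/O that avoids a read-modify-write: one chunk */
	blk_queue_io_min(q, 64 * 1024);
	/* Preferred I/O for sustained throughput: one full stripe */
	blk_queue_io_opt(q, 4 * 64 * 1024);
}
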
static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
					 b->max_zone_append_sectors);
	t->bounce = max(t->bounce, b->bounce);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	t->zoned = max(t->zoned, b->zoned);
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + (offset >> 9)) < 0)
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			disk->disk_name, bdev);

	disk_update_readahead(disk);
}
EXPORT_SYMBOL(disk_stack_limits);

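/*
 * Editor's example (illustrative sketch, not part of the original file):
 * the stacking pattern MD/DM-style drivers follow with the helpers above:
 * reset the top-level limits, then fold in every component device. The
 * parameters and the function name are assumptions for this example only.
 */
static void __maybe_unused example_stack_all_components(struct gendisk *top,
		struct block_device **parts, sector_t *offsets, int nr_parts)
{
	int i;

	/* Start wide open ... */
	blk_set_stacking_limits(&top->queue->limits);
	/* ... then let each component device restrict the limits. */
	for (i = 0; i < nr_parts; i++)
		disk_stack_limits(top, parts[i], offsets[i]);
}
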
/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;

	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE).  Because
	 * of that they are not limited by our notion of "segment size".
	 */
	if (mask)
		q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

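/*
 * Editor's example (illustrative sketch, not part of the original file):
 * an NVMe-style controller whose descriptor list requires data elements to
 * begin and end on 4 KiB boundaries would express that as a virtual
 * boundary mask. The function name is made up for this example.
 */
static void __maybe_unused example_require_page_aligned_sges(struct request_queue *q)
{
	/* Segments may not straddle a 4 KiB virtual boundary */
	blk_queue_virt_boundary(q, 4096 - 1);
}
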
/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);

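/*
 * Editor's example (illustrative sketch, not part of the original file):
 * a driver for hardware with a volatile write-back cache that also honours
 * FUA writes advertises both; cache-less hardware would pass false, false.
 * The function name is made up for this example.
 */
static void __maybe_unused example_enable_volatile_cache(struct request_queue *q)
{
	/* Volatile write-back cache present and FUA writes supported */
	blk_queue_write_cache(q, true, true);
}
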
/**
 * blk_queue_required_elevator_features - Set a queue required elevator features
 * @q:		the request queue for the target device
 * @features:	Required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, the only
 * elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
					  unsigned int features)
{
	q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q:		the request queue for the device
 * @dev:	the device pointer for dma
 *
 * Tell the block layer about merging the segments by dma map of @q.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
				       struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;

	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
	blk_queue_virt_boundary(q, boundary);

	return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

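/*
 * Editor's example (illustrative sketch, not part of the original file):
 * a driver whose device sits behind an IOMMU can ask the DMA layer for a
 * merge boundary via the helper above and, when none exists, fall back to
 * its own segment limit. The function name and the fallback value are
 * assumptions for this example only.
 */
static void __maybe_unused example_setup_dma_merging(struct request_queue *q,
						      struct device *dma_dev)
{
	if (!blk_queue_can_use_dma_map_merging(q, dma_dev)) {
		/* No IOMMU merge boundary: use a fixed scatter/gather cap */
		blk_queue_max_segments(q, 128);
	}
}
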
static bool disk_has_partitions(struct gendisk *disk)
{
	unsigned long idx;
	struct block_device *part;
	bool ret = false;

	rcu_read_lock();
	xa_for_each(&disk->part_tbl, idx, part) {
		if (bdev_is_partition(part)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

/**
 * blk_queue_set_zoned - configure a disk queue zoned model.
 * @disk:	the gendisk of the queue to configure
 * @model:	the zoned model to set
 *
 * Set the zoned model of the request queue of @disk according to @model.
 * When @model is BLK_ZONED_HM (host managed), this should be called only
 * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
 * If @model specifies BLK_ZONED_HA (host aware), the effective model used
 * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
 * on the disk.
 */
void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
	struct request_queue *q = disk->queue;

	switch (model) {
	case BLK_ZONED_HM:
		/*
		 * Host managed devices are supported only if
		 * CONFIG_BLK_DEV_ZONED is enabled.
		 */
		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
		break;
	case BLK_ZONED_HA:
		/*
		 * Host aware devices can be treated either as regular block
		 * devices (similar to drive managed devices) or as zoned block
		 * devices to take advantage of the zone command set, similarly
		 * to host managed devices. We try the latter if there are no
		 * partitions and zoned block device support is enabled, else
		 * we do nothing special as far as the block layer is concerned.
		 */
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
		    disk_has_partitions(disk))
			model = BLK_ZONED_NONE;
		break;
	case BLK_ZONED_NONE:
	default:
		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
			model = BLK_ZONED_NONE;
		break;
	}

	q->limits.zoned = model;
	if (model != BLK_ZONED_NONE) {
		/*
		 * Set the zone write granularity to the device logical block
		 * size by default. The driver can change this value if needed.
		 */
		blk_queue_zone_write_granularity(q,
						queue_logical_block_size(q));
	} else {
		blk_queue_clear_zone_settings(q);
	}
}
EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
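
/*
 * Editor's example (illustrative sketch, not part of the original file):
 * a driver probing an SMR disk would pick the zoned model from what the
 * hardware reports and hand it to blk_queue_set_zoned(). The function and
 * parameter names are assumptions for this example only.
 */
static void __maybe_unused example_set_zoned_model(struct gendisk *disk,
						   bool host_managed)
{
	blk_queue_set_zoned(disk, host_managed ? BLK_ZONED_HM : BLK_ZONED_HA);
}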