block/blk-settings.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * Functions related to setting various queue properties from drivers
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
  #include <linux/gcd.h>
  #include <linux/lcm.h>
  #include <linux/jiffies.h>
  #include <linux/gfp.h>
  #include <linux/dma-mapping.h>
  
  #include "blk.h"
  #include "blk-wbt.h"

  unsigned long blk_max_low_pfn;
  EXPORT_SYMBOL(blk_max_low_pfn);
  
  unsigned long blk_max_pfn;

  void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
  {
  	q->rq_timeout = timeout;
  }
  EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

  /**
   * blk_set_default_limits - reset limits to default values
   * @lim:  the queue_limits structure to reset
   *
   * Description:
   *   Returns a queue_limit struct to its default state.
   */
  void blk_set_default_limits(struct queue_limits *lim)
  {
  	lim->max_segments = BLK_MAX_SEGMENTS;
  	lim->max_discard_segments = 1;
  	lim->max_integrity_segments = 0;
  	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
  	lim->virt_boundary_mask = 0;
  	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
  	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
  	lim->max_dev_sectors = 0;
  	lim->chunk_sectors = 0;
  	lim->max_write_same_sectors = 0;
  	lim->max_write_zeroes_sectors = 0;
  	lim->max_zone_append_sectors = 0;
  	lim->max_discard_sectors = 0;
  	lim->max_hw_discard_sectors = 0;
  	lim->discard_granularity = 0;
  	lim->discard_alignment = 0;
  	lim->discard_misaligned = 0;
  	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
  	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
  	lim->alignment_offset = 0;
  	lim->io_opt = 0;
  	lim->misaligned = 0;
  	lim->zoned = BLK_ZONED_NONE;
  }
  EXPORT_SYMBOL(blk_set_default_limits);
  
  /**
   * blk_set_stacking_limits - set default limits for stacking devices
   * @lim:  the queue_limits structure to reset
   *
   * Description:
   *   Returns a queue_limit struct to its default state. Should be used
   *   by stacking drivers like DM that have no internal limits.
   */
  void blk_set_stacking_limits(struct queue_limits *lim)
  {
  	blk_set_default_limits(lim);
  
  	/* Inherit limits from component devices */
  	lim->max_segments = USHRT_MAX;
  	lim->max_discard_segments = USHRT_MAX;
  	lim->max_hw_sectors = UINT_MAX;
  	lim->max_segment_size = UINT_MAX;
  	lim->max_sectors = UINT_MAX;
  	lim->max_dev_sectors = UINT_MAX;
  	lim->max_write_same_sectors = UINT_MAX;
  	lim->max_write_zeroes_sectors = UINT_MAX;
  	lim->max_zone_append_sectors = UINT_MAX;
  }
  EXPORT_SYMBOL(blk_set_stacking_limits);
  
  /**
   * blk_queue_bounce_limit - set bounce buffer limit for queue
   * @q: the request queue for the device
   * @max_addr: the maximum address the device can handle
   *
   * Description:
   *    Different hardware can have different requirements as to what pages
   *    it can do I/O directly to. A low level driver can call
   *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
   *    buffers for doing I/O to pages residing above @max_addr.
   **/
  void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
  {
  	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
  	int dma = 0;
  
  	q->bounce_gfp = GFP_NOIO;
  #if BITS_PER_LONG == 64
  	/*
  	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
  	 * some IOMMUs can handle everything, but I don't know of a
  	 * way to test this here.
  	 */
  	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
  		dma = 1;
  	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
  #else
  	if (b_pfn < blk_max_low_pfn)
  		dma = 1;
  	q->limits.bounce_pfn = b_pfn;
  #endif
  	if (dma) {
  		init_emergency_isa_pool();
  		q->bounce_gfp = GFP_NOIO | GFP_DMA;
  		q->limits.bounce_pfn = b_pfn;
  	}
  }
  EXPORT_SYMBOL(blk_queue_bounce_limit);
  
  /**
   * blk_queue_max_hw_sectors - set max sectors for a request for this queue
   * @q:  the request queue for the device
   * @max_hw_sectors:  max hardware sectors in the usual 512b unit
   *
   * Description:
   *    Enables a low level driver to set a hard upper limit,
   *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
   *    the device driver based upon the capabilities of the I/O
   *    controller.
   *
   *    max_dev_sectors is a hard limit imposed by the storage device for
   *    READ/WRITE requests. It is set by the disk driver.
   *
   *    max_sectors is a soft limit imposed by the block layer for
   *    filesystem type requests.  This value can be overridden on a
   *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
   *    The soft limit can not exceed max_hw_sectors.
   **/
  void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
  {
  	struct queue_limits *limits = &q->limits;
  	unsigned int max_sectors;
  	if ((max_hw_sectors << 9) < PAGE_SIZE) {
  		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
  		printk(KERN_INFO "%s: set to minimum %d
  ",
  		       __func__, max_hw_sectors);
  	}
  	limits->max_hw_sectors = max_hw_sectors;
  	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
  	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
  	limits->max_sectors = max_sectors;
  	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
  }
  EXPORT_SYMBOL(blk_queue_max_hw_sectors);
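
  /*
   * Example (editor's illustrative sketch, not part of the original file):
   * a hypothetical driver whose controller can transfer at most 1 MiB per
   * request using up to 128 scatter/gather entries might configure its
   * queue as:
   *
   *	blk_queue_max_hw_sectors(q, 2048);	// 2048 * 512 B = 1 MiB
   *	blk_queue_max_segments(q, 128);
   *
   * The block layer then derives max_sectors from max_hw_sectors,
   * max_dev_sectors and BLK_DEF_MAX_SECTORS as shown above.
   */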
  
  /**
   * blk_queue_chunk_sectors - set size of the chunk for this queue
   * @q:  the request queue for the device
   * @chunk_sectors:  chunk sectors in the usual 512b unit
   *
   * Description:
   *    If a driver doesn't want IOs to cross a given chunk size, it can set
   *    this limit and prevent merging across chunks. Note that the block layer
   *    must accept a page worth of data at any offset. So if the crossing of
   *    chunks is a hard limitation in the driver, it must still be prepared
   *    to split single page bios.
   **/
  void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
  {
  	q->limits.chunk_sectors = chunk_sectors;
  }
  EXPORT_SYMBOL(blk_queue_chunk_sectors);
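
  /*
   * Example (editor's illustrative sketch, not part of the original file):
   * a hypothetical device that cannot merge I/O across 256 KiB boundaries
   * could advertise that with:
   *
   *	blk_queue_chunk_sectors(q, 512);	// 512 * 512 B = 256 KiB
   *
   * Bios are then split so that no single request crosses a chunk boundary.
   */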
  
  /**
   * blk_queue_max_discard_sectors - set max sectors for a single discard
   * @q:  the request queue for the device
   * @max_discard_sectors: maximum number of sectors to discard
   **/
  void blk_queue_max_discard_sectors(struct request_queue *q,
  		unsigned int max_discard_sectors)
  {
  	q->limits.max_hw_discard_sectors = max_discard_sectors;
  	q->limits.max_discard_sectors = max_discard_sectors;
  }
  EXPORT_SYMBOL(blk_queue_max_discard_sectors);
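
  /*
   * Example (editor's illustrative sketch, not part of the original file):
   * a hypothetical SSD driver that can discard up to 8 MiB per command with
   * a 4 KiB discard granularity might set:
   *
   *	blk_queue_max_discard_sectors(q, 16384);	// 16384 * 512 B = 8 MiB
   *	q->limits.discard_granularity = 4096;
   */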
  
  /**
   * blk_queue_max_write_same_sectors - set max sectors for a single write same
   * @q:  the request queue for the device
   * @max_write_same_sectors: maximum number of sectors to write per command
   **/
  void blk_queue_max_write_same_sectors(struct request_queue *q,
  				      unsigned int max_write_same_sectors)
  {
  	q->limits.max_write_same_sectors = max_write_same_sectors;
  }
  EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
  
  /**
   * blk_queue_max_write_zeroes_sectors - set max sectors for a single
   *                                      write zeroes
   * @q:  the request queue for the device
   * @max_write_zeroes_sectors: maximum number of sectors to write per command
   **/
  void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
  		unsigned int max_write_zeroes_sectors)
  {
  	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
  }
  EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
  
  /**
   * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
   * @q:  the request queue for the device
   * @max_zone_append_sectors: maximum number of sectors to write per command
   **/
  void blk_queue_max_zone_append_sectors(struct request_queue *q,
  		unsigned int max_zone_append_sectors)
  {
  	unsigned int max_sectors;
  
  	if (WARN_ON(!blk_queue_is_zoned(q)))
  		return;
  
  	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
  	max_sectors = min(q->limits.chunk_sectors, max_sectors);
  
  	/*
  	 * Warn about possible driver bugs that leave the max_zone_append_sectors
  	 * limit at 0: a zero argument, an unset chunk_sectors limit (zone size),
  	 * or an unset max_hw_sectors limit.
  	 */
  	WARN_ON(!max_sectors);
  
  	q->limits.max_zone_append_sectors = max_sectors;
  }
  EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
  
  /**
   * blk_queue_max_segments - set max hw segments for a request for this queue
   * @q:  the request queue for the device
   * @max_segments:  max number of segments
   *
   * Description:
   *    Enables a low level driver to set an upper limit on the number of
   *    hw data segments in a request.
   **/
  void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
  {
  	if (!max_segments) {
  		max_segments = 1;
  		printk(KERN_INFO "%s: set to minimum %d\n",
  		       __func__, max_segments);
  	}
  	q->limits.max_segments = max_segments;
  }
  EXPORT_SYMBOL(blk_queue_max_segments);
  
  /**
   * blk_queue_max_discard_segments - set max segments for discard requests
   * @q:  the request queue for the device
   * @max_segments:  max number of segments
   *
   * Description:
   *    Enables a low level driver to set an upper limit on the number of
   *    segments in a discard request.
   **/
  void blk_queue_max_discard_segments(struct request_queue *q,
  		unsigned short max_segments)
  {
  	q->limits.max_discard_segments = max_segments;
  }
  EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
  
  /**
   * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
   * @q:  the request queue for the device
   * @max_size:  max size of segment in bytes
   *
   * Description:
   *    Enables a low level driver to set an upper limit on the size of a
   *    coalesced segment
   **/
  void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
  {
  	if (max_size < PAGE_SIZE) {
  		max_size = PAGE_SIZE;
  		printk(KERN_INFO "%s: set to minimum %d\n",
  		       __func__, max_size);
  	}
  	/* see blk_queue_virt_boundary() for the explanation */
  	WARN_ON_ONCE(q->limits.virt_boundary_mask);
  	q->limits.max_segment_size = max_size;
  }
  EXPORT_SYMBOL(blk_queue_max_segment_size);
  
  /**
   * blk_queue_logical_block_size - set logical block size for the queue
   * @q:  the request queue for the device
   * @size:  the logical block size, in bytes
   *
   * Description:
   *   This should be set to the lowest possible block size that the
   *   storage device can address.  The default of 512 covers most
   *   hardware.
   **/
  void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
  {
  	q->limits.logical_block_size = size;
  
  	if (q->limits.physical_block_size < size)
  		q->limits.physical_block_size = size;
  
  	if (q->limits.io_min < q->limits.physical_block_size)
  		q->limits.io_min = q->limits.physical_block_size;
  }
  EXPORT_SYMBOL(blk_queue_logical_block_size);
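
  /*
   * Example (editor's illustrative sketch, not part of the original file):
   * a hypothetical 4Kn drive, which can only address 4096-byte sectors,
   * would report that as:
   *
   *	blk_queue_logical_block_size(q, 4096);
   *
   * As shown above, physical_block_size and io_min are raised to at least
   * the logical block size as a side effect.
   */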

  /**
   * blk_queue_physical_block_size - set physical block size for the queue
   * @q:  the request queue for the device
   * @size:  the physical block size, in bytes
   *
   * Description:
   *   This should be set to the lowest possible sector size that the
   *   hardware can operate on without reverting to read-modify-write
   *   operations.
   */
  void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
  {
  	q->limits.physical_block_size = size;
  
  	if (q->limits.physical_block_size < q->limits.logical_block_size)
  		q->limits.physical_block_size = q->limits.logical_block_size;
  
  	if (q->limits.io_min < q->limits.physical_block_size)
  		q->limits.io_min = q->limits.physical_block_size;
  }
  EXPORT_SYMBOL(blk_queue_physical_block_size);
  
  /**
   * blk_queue_alignment_offset - set physical block alignment offset
   * @q:	the request queue for the device
   * @offset: alignment offset in bytes
   *
   * Description:
   *   Some devices are naturally misaligned to compensate for things like
   *   the legacy DOS partition table 63-sector offset.  Low-level drivers
   *   should call this function for devices whose first sector is not
   *   naturally aligned.
   */
  void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
  {
  	q->limits.alignment_offset =
  		offset & (q->limits.physical_block_size - 1);
  	q->limits.misaligned = 0;
  }
  EXPORT_SYMBOL(blk_queue_alignment_offset);

  void blk_queue_update_readahead(struct request_queue *q)
  {
  	/*
  	 * For read-ahead of large files to be effective, we need to read ahead
  	 * at least twice the optimal I/O size.
  	 */
  	q->backing_dev_info->ra_pages =
  		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
  	q->backing_dev_info->io_pages =
  		queue_max_sectors(q) >> (PAGE_SHIFT - 9);
  }
  EXPORT_SYMBOL_GPL(blk_queue_update_readahead);

  /**
   * blk_limits_io_min - set minimum request size for a device
   * @limits: the queue limits
   * @min:  smallest I/O size in bytes
   *
   * Description:
   *   Some devices have an internal block size bigger than the reported
   *   hardware sector size.  This function can be used to signal the
   *   smallest I/O the device can perform without incurring a performance
   *   penalty.
   */
  void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
  {
  	limits->io_min = min;
  
  	if (limits->io_min < limits->logical_block_size)
  		limits->io_min = limits->logical_block_size;
  
  	if (limits->io_min < limits->physical_block_size)
  		limits->io_min = limits->physical_block_size;
  }
  EXPORT_SYMBOL(blk_limits_io_min);
  
  /**
   * blk_queue_io_min - set minimum request size for the queue
   * @q:	the request queue for the device
   * @min:  smallest I/O size in bytes
   *
   * Description:
   *   Storage devices may report a granularity or preferred minimum I/O
   *   size which is the smallest request the device can perform without
   *   incurring a performance penalty.  For disk drives this is often the
   *   physical block size.  For RAID arrays it is often the stripe chunk
   *   size.  A properly aligned multiple of minimum_io_size is the
   *   preferred request size for workloads where a high number of I/O
   *   operations is desired.
   */
  void blk_queue_io_min(struct request_queue *q, unsigned int min)
  {
  	blk_limits_io_min(&q->limits, min);
  }
  EXPORT_SYMBOL(blk_queue_io_min);
  
  /**
   * blk_limits_io_opt - set optimal request size for a device
   * @limits: the queue limits
   * @opt:  optimal request size in bytes
   *
   * Description:
   *   Storage devices may report an optimal I/O size, which is the
   *   device's preferred unit for sustained I/O.  This is rarely reported
   *   for disk drives.  For RAID arrays it is usually the stripe width or
   *   the internal track size.  A properly aligned multiple of
   *   optimal_io_size is the preferred request size for workloads where
   *   sustained throughput is desired.
   */
  void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
  {
  	limits->io_opt = opt;
  }
  EXPORT_SYMBOL(blk_limits_io_opt);
  
  /**
   * blk_queue_io_opt - set optimal request size for the queue
   * @q:	the request queue for the device
   * @opt:  optimal request size in bytes
   *
   * Description:
   *   Storage devices may report an optimal I/O size, which is the
   *   device's preferred unit for sustained I/O.  This is rarely reported
   *   for disk drives.  For RAID arrays it is usually the stripe width or
   *   the internal track size.  A properly aligned multiple of
   *   optimal_io_size is the preferred request size for workloads where
   *   sustained throughput is desired.
   */
  void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
  {
  	blk_limits_io_opt(&q->limits, opt);
  	q->backing_dev_info->ra_pages =
  		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
  }
  EXPORT_SYMBOL(blk_queue_io_opt);
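
  /*
   * Example (editor's illustrative sketch, not part of the original file):
   * a hypothetical RAID5 array with a 64 KiB chunk and four data disks
   * might report its stripe geometry with:
   *
   *	blk_queue_io_min(q, 64 * 1024);		// chunk size
   *	blk_queue_io_opt(q, 4 * 64 * 1024);	// full stripe width
   *
   * Filesystems can then align and size their allocations to these hints.
   */
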
  /**
   * blk_stack_limits - adjust queue_limits for stacked devices
   * @t:	the stacking driver limits (top device)
   * @b:  the underlying queue limits (bottom, component device)
   * @start:  first data sector within component device
   *
   * Description:
   *    This function is used by stacking drivers like MD and DM to ensure
   *    that all component devices have compatible block sizes and
   *    alignments.  The stacking driver must provide a queue_limits
   *    struct (top) and then iteratively call the stacking function for
   *    all component (bottom) devices.  The stacking function will
   *    attempt to combine the values and ensure proper alignment.
   *
   *    Returns 0 if the top and bottom queue_limits are compatible.  The
   *    top device's block sizes and alignment offsets may be adjusted to
   *    ensure alignment with the bottom device. If no compatible sizes
   *    and alignments exist, -1 is returned and the resulting top
   *    queue_limits will have the misaligned flag set to indicate that
   *    the alignment_offset is undefined.
   */
  int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
  		     sector_t start)
  {
  	unsigned int top, bottom, alignment, ret = 0;

  	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
  	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
  	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
  	t->max_write_same_sectors = min(t->max_write_same_sectors,
  					b->max_write_same_sectors);
  	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
  					b->max_write_zeroes_sectors);
  	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
  					b->max_zone_append_sectors);
  	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
  
  	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
  					    b->seg_boundary_mask);
  	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
  					    b->virt_boundary_mask);

  	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
  	t->max_discard_segments = min_not_zero(t->max_discard_segments,
  					       b->max_discard_segments);
  	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
  						 b->max_integrity_segments);
  
  	t->max_segment_size = min_not_zero(t->max_segment_size,
  					   b->max_segment_size);
  	t->misaligned |= b->misaligned;
  	alignment = queue_limit_alignment_offset(b, start);

  	/* Bottom device has different alignment.  Check that it is
  	 * compatible with the current top alignment.
  	 */
  	if (t->alignment_offset != alignment) {
  
  		top = max(t->physical_block_size, t->io_min)
  			+ t->alignment_offset;
  		bottom = max(b->physical_block_size, b->io_min) + alignment;

  		/* Verify that top and bottom intervals line up */
  		if (max(top, bottom) % min(top, bottom)) {
  			t->misaligned = 1;
  			ret = -1;
  		}
  	}
  	t->logical_block_size = max(t->logical_block_size,
  				    b->logical_block_size);
  
  	t->physical_block_size = max(t->physical_block_size,
  				     b->physical_block_size);
  
  	t->io_min = max(t->io_min, b->io_min);
  	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
  
  	/* Set non-power-of-2 compatible chunk_sectors boundary */
  	if (b->chunk_sectors)
  		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

  	/* Physical block size a multiple of the logical block size? */
  	if (t->physical_block_size & (t->logical_block_size - 1)) {
  		t->physical_block_size = t->logical_block_size;
  		t->misaligned = 1;
  		ret = -1;
  	}
  	/* Minimum I/O a multiple of the physical block size? */
  	if (t->io_min & (t->physical_block_size - 1)) {
  		t->io_min = t->physical_block_size;
  		t->misaligned = 1;
  		ret = -1;
  	}
  	/* Optimal I/O a multiple of the physical block size? */
  	if (t->io_opt & (t->physical_block_size - 1)) {
  		t->io_opt = 0;
  		t->misaligned = 1;
  		ret = -1;
  	}

  	/* chunk_sectors a multiple of the physical block size? */
  	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
  		t->chunk_sectors = 0;
  		t->misaligned = 1;
  		ret = -1;
  	}
  	t->raid_partial_stripes_expensive =
  		max(t->raid_partial_stripes_expensive,
  		    b->raid_partial_stripes_expensive);
  	/* Find lowest common alignment_offset */
  	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
  		% max(t->physical_block_size, t->io_min);

  	/* Verify that new alignment_offset is on a logical block boundary */
  	if (t->alignment_offset & (t->logical_block_size - 1)) {
  		t->misaligned = 1;
  		ret = -1;
  	}

  	/* Discard alignment and granularity */
  	if (b->discard_granularity) {
  		alignment = queue_limit_discard_alignment(b, start);
  
  		if (t->discard_granularity != 0 &&
  		    t->discard_alignment != alignment) {
  			top = t->discard_granularity + t->discard_alignment;
  			bottom = b->discard_granularity + alignment;

  			/* Verify that top and bottom intervals line up */
  			if ((max(top, bottom) % min(top, bottom)) != 0)
  				t->discard_misaligned = 1;
  		}
  		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
  						      b->max_discard_sectors);
  		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
  							 b->max_hw_discard_sectors);
  		t->discard_granularity = max(t->discard_granularity,
  					     b->discard_granularity);
  		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
  			t->discard_granularity;
  	}

  	t->zoned = max(t->zoned, b->zoned);
  	return ret;
  }
  EXPORT_SYMBOL(blk_stack_limits);
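
  /*
   * Example (editor's illustrative sketch, not part of the original file):
   * a hypothetical stacking driver starts from permissive defaults and then
   * folds in each component device, e.g.:
   *
   *	struct queue_limits lim;
   *
   *	blk_set_stacking_limits(&lim);
   *	blk_stack_limits(&lim, &bdev_get_queue(bdev1)->limits, start1);
   *	blk_stack_limits(&lim, &bdev_get_queue(bdev2)->limits, start2);
   *
   * where bdev1/bdev2 and start1/start2 stand for made-up component devices
   * and the first data sector on each of them.
   */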
  
  /**
   * disk_stack_limits - adjust queue limits for stacked drivers
   * @disk:  MD/DM gendisk (top)
   * @bdev:  the underlying block device (bottom)
   * @offset:  offset to beginning of data within component device
   *
   * Description:
   *    Merges the limits for a top level gendisk and a bottom level
   *    block_device.
   */
  void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
  		       sector_t offset)
  {
  	struct request_queue *t = disk->queue;

  	if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
  			get_start_sect(bdev) + (offset >> 9)) < 0) {
  		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
  
  		disk_name(disk, 0, top);
  		bdevname(bdev, bottom);
  
  		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
  		       top, bottom);
  	}

  	blk_queue_update_readahead(disk->queue);
  }
  EXPORT_SYMBOL(disk_stack_limits);
  
  /**
   * blk_queue_update_dma_pad - update pad mask
   * @q:     the request queue for the device
   * @mask:  pad mask
   *
   * Update dma pad mask.
   *
   * Appending pad buffer to a request modifies the last entry of a
   * scatter list such that it includes the pad buffer.
   **/
  void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
  {
  	if (mask > q->dma_pad_mask)
  		q->dma_pad_mask = mask;
  }
  EXPORT_SYMBOL(blk_queue_update_dma_pad);
  
  /**
   * blk_queue_segment_boundary - set boundary rules for segment merging
   * @q:  the request queue for the device
   * @mask:  the memory boundary mask
   **/
  void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
  {
  	if (mask < PAGE_SIZE - 1) {
  		mask = PAGE_SIZE - 1;
  		printk(KERN_INFO "%s: set to minimum %lx\n",
  		       __func__, mask);
  	}
  	q->limits.seg_boundary_mask = mask;
  }
  EXPORT_SYMBOL(blk_queue_segment_boundary);
  
  /**
   * blk_queue_virt_boundary - set boundary rules for bio merging
   * @q:  the request queue for the device
   * @mask:  the memory boundary mask
   **/
  void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
  {
  	q->limits.virt_boundary_mask = mask;
  
  	/*
  	 * Devices that require a virtual boundary do not support scatter/gather
  	 * I/O natively, but instead require a descriptor list entry for each
  	 * page (which might not be identical to the Linux PAGE_SIZE).  Because
  	 * of that they are not limited by our notion of "segment size".
  	 */
  	if (mask)
  		q->limits.max_segment_size = UINT_MAX;
  }
  EXPORT_SYMBOL(blk_queue_virt_boundary);
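
  /*
   * Example (editor's illustrative sketch, not part of the original file):
   * an NVMe-like controller whose per-segment descriptors cannot cross a
   * 4 KiB boundary might set:
   *
   *	blk_queue_virt_boundary(q, 4096 - 1);
   *
   * which also lifts max_segment_size to UINT_MAX, as explained in the
   * comment above.
   */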
  
  /**
   * blk_queue_dma_alignment - set dma length and memory alignment
   * @q:     the request queue for the device
   * @mask:  alignment mask
   *
   * Description:
   *    Set required memory and length alignment for direct DMA transactions.
   *    This is used when building direct I/O requests for the queue.
   *
   **/
  void blk_queue_dma_alignment(struct request_queue *q, int mask)
  {
  	q->dma_alignment = mask;
  }
  EXPORT_SYMBOL(blk_queue_dma_alignment);
  
  /**
   * blk_queue_update_dma_alignment - update dma length and memory alignment
   * @q:     the request queue for the device
   * @mask:  alignment mask
   *
   * Description:
   *    Update required memory and length alignment for direct DMA transactions.
   *    If the requested alignment is larger than the current alignment, then
   *    the current queue alignment is updated to the new value, otherwise it
   *    is left alone.  The design of this is to allow multiple objects
   *    (driver, device, transport etc) to set their respective
   *    alignments without having them interfere.
   *
   **/
  void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
  {
  	BUG_ON(mask > PAGE_SIZE);
  
  	if (mask > q->dma_alignment)
  		q->dma_alignment = mask;
  }
  EXPORT_SYMBOL(blk_queue_update_dma_alignment);

  /**
   * blk_set_queue_depth - tell the block layer about the device queue depth
   * @q:		the request queue for the device
   * @depth:		queue depth
   *
   */
  void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
  {
  	q->queue_depth = depth;
  	rq_qos_queue_depth_changed(q);
  }
  EXPORT_SYMBOL(blk_set_queue_depth);
  
  /**
   * blk_queue_write_cache - configure queue's write cache
   * @q:		the request queue for the device
   * @wc:		write back cache on or off
   * @fua:	device supports FUA writes, if true
   *
   * Tell the block layer about the write cache of @q.
   */
  void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
  {
  	if (wc)
  		blk_queue_flag_set(QUEUE_FLAG_WC, q);
  	else
  		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
  	if (fua)
  		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
  	else
  		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

  	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
  }
  EXPORT_SYMBOL_GPL(blk_queue_write_cache);
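
  /*
   * Example (editor's illustrative sketch, not part of the original file):
   * a hypothetical driver for a device with a volatile write cache and FUA
   * support would call:
   *
   *	blk_queue_write_cache(q, true, true);
   *
   * while a write-through device without FUA would pass (q, false, false).
   */
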
  /**
   * blk_queue_required_elevator_features - Set required elevator features for a queue
   * @q:		the request queue for the target device
   * @features:	Required elevator features OR'ed together
   *
   * Tell the block layer that for the device controlled through @q, the only
   * elevators that can be used are those that implement at least the set of
   * features specified by @features.
   */
  void blk_queue_required_elevator_features(struct request_queue *q,
  					  unsigned int features)
  {
  	q->required_elevator_features = features;
  }
  EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
  /**
   * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
   * @q:		the request queue for the device
   * @dev:	the device pointer for dma
   *
   * Tell the block layer that the segments of @q may be merged according to
   * the DMA map merge boundary of @dev.
   */
  bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
  				       struct device *dev)
  {
  	unsigned long boundary = dma_get_merge_boundary(dev);
  
  	if (!boundary)
  		return false;
  
  	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
  	blk_queue_virt_boundary(q, boundary);
  
  	return true;
  }
  EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

  /**
   * blk_queue_set_zoned - configure a disk queue zoned model.
   * @disk:	the gendisk of the queue to configure
   * @model:	the zoned model to set
   *
   * Set the zoned model of the request queue of @disk according to @model.
   * When @model is BLK_ZONED_HM (host managed), this should be called only
   * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
   * If @model specifies BLK_ZONED_HA (host aware), the effective model used
   * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
   * on the disk.
   */
  void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
  {
  	switch (model) {
  	case BLK_ZONED_HM:
  		/*
  		 * Host managed devices are supported only if
  		 * CONFIG_BLK_DEV_ZONED is enabled.
  		 */
  		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
  		break;
  	case BLK_ZONED_HA:
  		/*
  		 * Host aware devices can be treated either as regular block
  		 * devices (similar to drive managed devices) or as zoned block
  		 * devices to take advantage of the zone command set, similarly
  		 * to host managed devices. We try the latter if there are no
  		 * partitions and zoned block device support is enabled, else
  		 * we do nothing special as far as the block layer is concerned.
  		 */
  		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
  		    disk_has_partitions(disk))
  			model = BLK_ZONED_NONE;
  		break;
  	case BLK_ZONED_NONE:
  	default:
  		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
  			model = BLK_ZONED_NONE;
  		break;
  	}
  
  	disk->queue->limits.zoned = model;
  }
  EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
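
  /*
   * Example (editor's illustrative sketch, not part of the original file):
   * a hypothetical host-managed SMR disk driver built with
   * CONFIG_BLK_DEV_ZONED would declare its zoned model on its gendisk with:
   *
   *	blk_queue_set_zoned(disk, BLK_ZONED_HM);
   */
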
  static int __init blk_settings_init(void)
  {
  	blk_max_low_pfn = max_low_pfn - 1;
  	blk_max_pfn = max_pfn - 1;
  	return 0;
  }
  subsys_initcall(blk_settings_init);