Commit 9f7e45d83ef09a481cbc4172849bd1fcf88a39ed
Committed by: Russell King
1 parent: 4dcfa60071
Exists in: smarc-imx_3.14.28_1.0.0_ga and 1 other branch
ARM: 7794/1: block: Rename parameter dma_mask to max_addr for blk_queue_bounce_limit()
The blk_queue_bounce_limit() API parameter 'dma_mask' is actually the maximum address the device can handle, rather than a DMA mask. Rename it accordingly so it is not misinterpreted as a dma_mask. No functional change. The idea is to fix the bad assumptions about dma_mask wherever it could be misinterpreted.

Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
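For context, a minimal sketch of how a low-level driver uses this API after the rename. The setup routine name example_setup_bounce() below is hypothetical; blk_queue_bounce_limit() and BLK_BOUNCE_ISA are existing kernel symbols. The point of the rename is visible in the call: the second argument is the highest address the device can do I/O to directly, not a DMA mask.

#include <linux/blkdev.h>

/*
 * Illustrative driver fragment: the second argument to
 * blk_queue_bounce_limit() is the highest address the device can do I/O
 * to directly (max_addr), not a dma_mask.  Pages above that address are
 * bounced into lower memory before the request is issued.
 */
static void example_setup_bounce(struct request_queue *q)
{
	/* legacy ISA-style device that can only reach the low 16MB */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
}

A device with no addressing restriction would pass BLK_BOUNCE_ANY instead, and drivers that keep the old default get BLK_BOUNCE_HIGH (bounce anything in highmem), as set up in blk_queue_make_request() in the diff below.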
Showing 1 changed file with 4 additions and 4 deletions
block/blk-settings.c
1 | /* | 1 | /* |
2 | * Functions related to setting various queue properties from drivers | 2 | * Functions related to setting various queue properties from drivers |
3 | */ | 3 | */ |
4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | #include <linux/module.h> | 5 | #include <linux/module.h> |
6 | #include <linux/init.h> | 6 | #include <linux/init.h> |
7 | #include <linux/bio.h> | 7 | #include <linux/bio.h> |
8 | #include <linux/blkdev.h> | 8 | #include <linux/blkdev.h> |
9 | #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ | 9 | #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ |
10 | #include <linux/gcd.h> | 10 | #include <linux/gcd.h> |
11 | #include <linux/lcm.h> | 11 | #include <linux/lcm.h> |
12 | #include <linux/jiffies.h> | 12 | #include <linux/jiffies.h> |
13 | #include <linux/gfp.h> | 13 | #include <linux/gfp.h> |
14 | 14 | ||
15 | #include "blk.h" | 15 | #include "blk.h" |
16 | 16 | ||
17 | unsigned long blk_max_low_pfn; | 17 | unsigned long blk_max_low_pfn; |
18 | EXPORT_SYMBOL(blk_max_low_pfn); | 18 | EXPORT_SYMBOL(blk_max_low_pfn); |
19 | 19 | ||
20 | unsigned long blk_max_pfn; | 20 | unsigned long blk_max_pfn; |
21 | 21 | ||
22 | /** | 22 | /** |
23 | * blk_queue_prep_rq - set a prepare_request function for queue | 23 | * blk_queue_prep_rq - set a prepare_request function for queue |
24 | * @q: queue | 24 | * @q: queue |
25 | * @pfn: prepare_request function | 25 | * @pfn: prepare_request function |
26 | * | 26 | * |
27 | * It's possible for a queue to register a prepare_request callback which | 27 | * It's possible for a queue to register a prepare_request callback which |
28 | * is invoked before the request is handed to the request_fn. The goal of | 28 | * is invoked before the request is handed to the request_fn. The goal of |
29 | * the function is to prepare a request for I/O, it can be used to build a | 29 | * the function is to prepare a request for I/O, it can be used to build a |
30 | * cdb from the request data for instance. | 30 | * cdb from the request data for instance. |
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) | 33 | void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) |
34 | { | 34 | { |
35 | q->prep_rq_fn = pfn; | 35 | q->prep_rq_fn = pfn; |
36 | } | 36 | } |
37 | EXPORT_SYMBOL(blk_queue_prep_rq); | 37 | EXPORT_SYMBOL(blk_queue_prep_rq); |
38 | 38 | ||
39 | /** | 39 | /** |
40 | * blk_queue_unprep_rq - set an unprepare_request function for queue | 40 | * blk_queue_unprep_rq - set an unprepare_request function for queue |
41 | * @q: queue | 41 | * @q: queue |
42 | * @ufn: unprepare_request function | 42 | * @ufn: unprepare_request function |
43 | * | 43 | * |
44 | * It's possible for a queue to register an unprepare_request callback | 44 | * It's possible for a queue to register an unprepare_request callback |
45 | * which is invoked before the request is finally completed. The goal | 45 | * which is invoked before the request is finally completed. The goal |
46 | * of the function is to deallocate any data that was allocated in the | 46 | * of the function is to deallocate any data that was allocated in the |
47 | * prepare_request callback. | 47 | * prepare_request callback. |
48 | * | 48 | * |
49 | */ | 49 | */ |
50 | void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn) | 50 | void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn) |
51 | { | 51 | { |
52 | q->unprep_rq_fn = ufn; | 52 | q->unprep_rq_fn = ufn; |
53 | } | 53 | } |
54 | EXPORT_SYMBOL(blk_queue_unprep_rq); | 54 | EXPORT_SYMBOL(blk_queue_unprep_rq); |
55 | 55 | ||
56 | /** | 56 | /** |
57 | * blk_queue_merge_bvec - set a merge_bvec function for queue | 57 | * blk_queue_merge_bvec - set a merge_bvec function for queue |
58 | * @q: queue | 58 | * @q: queue |
59 | * @mbfn: merge_bvec_fn | 59 | * @mbfn: merge_bvec_fn |
60 | * | 60 | * |
61 | * Usually queues have static limitations on the max sectors or segments that | 61 | * Usually queues have static limitations on the max sectors or segments that |
62 | * we can put in a request. Stacking drivers may have some settings that | 62 | * we can put in a request. Stacking drivers may have some settings that |
63 | * are dynamic, and thus we have to query the queue whether it is ok to | 63 | * are dynamic, and thus we have to query the queue whether it is ok to |
64 | * add a new bio_vec to a bio at a given offset or not. If the block device | 64 | * add a new bio_vec to a bio at a given offset or not. If the block device |
65 | * has such limitations, it needs to register a merge_bvec_fn to control | 65 | * has such limitations, it needs to register a merge_bvec_fn to control |
66 | * the size of bio's sent to it. Note that a block device *must* allow a | 66 | * the size of bio's sent to it. Note that a block device *must* allow a |
67 | * single page to be added to an empty bio. The block device driver may want | 67 | * single page to be added to an empty bio. The block device driver may want |
68 | * to use the bio_split() function to deal with these bio's. By default | 68 | * to use the bio_split() function to deal with these bio's. By default |
69 | * no merge_bvec_fn is defined for a queue, and only the fixed limits are | 69 | * no merge_bvec_fn is defined for a queue, and only the fixed limits are |
70 | * honored. | 70 | * honored. |
71 | */ | 71 | */ |
72 | void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) | 72 | void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) |
73 | { | 73 | { |
74 | q->merge_bvec_fn = mbfn; | 74 | q->merge_bvec_fn = mbfn; |
75 | } | 75 | } |
76 | EXPORT_SYMBOL(blk_queue_merge_bvec); | 76 | EXPORT_SYMBOL(blk_queue_merge_bvec); |
77 | 77 | ||
78 | void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) | 78 | void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) |
79 | { | 79 | { |
80 | q->softirq_done_fn = fn; | 80 | q->softirq_done_fn = fn; |
81 | } | 81 | } |
82 | EXPORT_SYMBOL(blk_queue_softirq_done); | 82 | EXPORT_SYMBOL(blk_queue_softirq_done); |
83 | 83 | ||
84 | void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) | 84 | void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) |
85 | { | 85 | { |
86 | q->rq_timeout = timeout; | 86 | q->rq_timeout = timeout; |
87 | } | 87 | } |
88 | EXPORT_SYMBOL_GPL(blk_queue_rq_timeout); | 88 | EXPORT_SYMBOL_GPL(blk_queue_rq_timeout); |
89 | 89 | ||
90 | void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn) | 90 | void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn) |
91 | { | 91 | { |
92 | q->rq_timed_out_fn = fn; | 92 | q->rq_timed_out_fn = fn; |
93 | } | 93 | } |
94 | EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out); | 94 | EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out); |
95 | 95 | ||
96 | void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn) | 96 | void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn) |
97 | { | 97 | { |
98 | q->lld_busy_fn = fn; | 98 | q->lld_busy_fn = fn; |
99 | } | 99 | } |
100 | EXPORT_SYMBOL_GPL(blk_queue_lld_busy); | 100 | EXPORT_SYMBOL_GPL(blk_queue_lld_busy); |
101 | 101 | ||
102 | /** | 102 | /** |
103 | * blk_set_default_limits - reset limits to default values | 103 | * blk_set_default_limits - reset limits to default values |
104 | * @lim: the queue_limits structure to reset | 104 | * @lim: the queue_limits structure to reset |
105 | * | 105 | * |
106 | * Description: | 106 | * Description: |
107 | * Returns a queue_limit struct to its default state. | 107 | * Returns a queue_limit struct to its default state. |
108 | */ | 108 | */ |
109 | void blk_set_default_limits(struct queue_limits *lim) | 109 | void blk_set_default_limits(struct queue_limits *lim) |
110 | { | 110 | { |
111 | lim->max_segments = BLK_MAX_SEGMENTS; | 111 | lim->max_segments = BLK_MAX_SEGMENTS; |
112 | lim->max_integrity_segments = 0; | 112 | lim->max_integrity_segments = 0; |
113 | lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; | 113 | lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; |
114 | lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; | 114 | lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; |
115 | lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; | 115 | lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; |
116 | lim->max_write_same_sectors = 0; | 116 | lim->max_write_same_sectors = 0; |
117 | lim->max_discard_sectors = 0; | 117 | lim->max_discard_sectors = 0; |
118 | lim->discard_granularity = 0; | 118 | lim->discard_granularity = 0; |
119 | lim->discard_alignment = 0; | 119 | lim->discard_alignment = 0; |
120 | lim->discard_misaligned = 0; | 120 | lim->discard_misaligned = 0; |
121 | lim->discard_zeroes_data = 0; | 121 | lim->discard_zeroes_data = 0; |
122 | lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; | 122 | lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; |
123 | lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); | 123 | lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); |
124 | lim->alignment_offset = 0; | 124 | lim->alignment_offset = 0; |
125 | lim->io_opt = 0; | 125 | lim->io_opt = 0; |
126 | lim->misaligned = 0; | 126 | lim->misaligned = 0; |
127 | lim->cluster = 1; | 127 | lim->cluster = 1; |
128 | } | 128 | } |
129 | EXPORT_SYMBOL(blk_set_default_limits); | 129 | EXPORT_SYMBOL(blk_set_default_limits); |
130 | 130 | ||
131 | /** | 131 | /** |
132 | * blk_set_stacking_limits - set default limits for stacking devices | 132 | * blk_set_stacking_limits - set default limits for stacking devices |
133 | * @lim: the queue_limits structure to reset | 133 | * @lim: the queue_limits structure to reset |
134 | * | 134 | * |
135 | * Description: | 135 | * Description: |
136 | * Returns a queue_limit struct to its default state. Should be used | 136 | * Returns a queue_limit struct to its default state. Should be used |
137 | * by stacking drivers like DM that have no internal limits. | 137 | * by stacking drivers like DM that have no internal limits. |
138 | */ | 138 | */ |
139 | void blk_set_stacking_limits(struct queue_limits *lim) | 139 | void blk_set_stacking_limits(struct queue_limits *lim) |
140 | { | 140 | { |
141 | blk_set_default_limits(lim); | 141 | blk_set_default_limits(lim); |
142 | 142 | ||
143 | /* Inherit limits from component devices */ | 143 | /* Inherit limits from component devices */ |
144 | lim->discard_zeroes_data = 1; | 144 | lim->discard_zeroes_data = 1; |
145 | lim->max_segments = USHRT_MAX; | 145 | lim->max_segments = USHRT_MAX; |
146 | lim->max_hw_sectors = UINT_MAX; | 146 | lim->max_hw_sectors = UINT_MAX; |
147 | lim->max_sectors = UINT_MAX; | 147 | lim->max_sectors = UINT_MAX; |
148 | lim->max_write_same_sectors = UINT_MAX; | 148 | lim->max_write_same_sectors = UINT_MAX; |
149 | } | 149 | } |
150 | EXPORT_SYMBOL(blk_set_stacking_limits); | 150 | EXPORT_SYMBOL(blk_set_stacking_limits); |
151 | 151 | ||
152 | /** | 152 | /** |
153 | * blk_queue_make_request - define an alternate make_request function for a device | 153 | * blk_queue_make_request - define an alternate make_request function for a device |
154 | * @q: the request queue for the device to be affected | 154 | * @q: the request queue for the device to be affected |
155 | * @mfn: the alternate make_request function | 155 | * @mfn: the alternate make_request function |
156 | * | 156 | * |
157 | * Description: | 157 | * Description: |
158 | * The normal way for &struct bios to be passed to a device | 158 | * The normal way for &struct bios to be passed to a device |
159 | * driver is for them to be collected into requests on a request | 159 | * driver is for them to be collected into requests on a request |
160 | * queue, and then to allow the device driver to select requests | 160 | * queue, and then to allow the device driver to select requests |
161 | * off that queue when it is ready. This works well for many block | 161 | * off that queue when it is ready. This works well for many block |
162 | * devices. However some block devices (typically virtual devices | 162 | * devices. However some block devices (typically virtual devices |
163 | * such as md or lvm) do not benefit from the processing on the | 163 | * such as md or lvm) do not benefit from the processing on the |
164 | * request queue, and are served best by having the requests passed | 164 | * request queue, and are served best by having the requests passed |
165 | * directly to them. This can be achieved by providing a function | 165 | * directly to them. This can be achieved by providing a function |
166 | * to blk_queue_make_request(). | 166 | * to blk_queue_make_request(). |
167 | * | 167 | * |
168 | * Caveat: | 168 | * Caveat: |
169 | * The driver that does this *must* be able to deal appropriately | 169 | * The driver that does this *must* be able to deal appropriately |
170 | * with buffers in "highmemory". This can be accomplished by either calling | 170 | * with buffers in "highmemory". This can be accomplished by either calling |
171 | * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling | 171 | * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling |
172 | * blk_queue_bounce() to create a buffer in normal memory. | 172 | * blk_queue_bounce() to create a buffer in normal memory. |
173 | **/ | 173 | **/ |
174 | void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) | 174 | void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) |
175 | { | 175 | { |
176 | /* | 176 | /* |
177 | * set defaults | 177 | * set defaults |
178 | */ | 178 | */ |
179 | q->nr_requests = BLKDEV_MAX_RQ; | 179 | q->nr_requests = BLKDEV_MAX_RQ; |
180 | 180 | ||
181 | q->make_request_fn = mfn; | 181 | q->make_request_fn = mfn; |
182 | blk_queue_dma_alignment(q, 511); | 182 | blk_queue_dma_alignment(q, 511); |
183 | blk_queue_congestion_threshold(q); | 183 | blk_queue_congestion_threshold(q); |
184 | q->nr_batching = BLK_BATCH_REQ; | 184 | q->nr_batching = BLK_BATCH_REQ; |
185 | 185 | ||
186 | blk_set_default_limits(&q->limits); | 186 | blk_set_default_limits(&q->limits); |
187 | 187 | ||
188 | /* | 188 | /* |
189 | * by default assume old behaviour and bounce for any highmem page | 189 | * by default assume old behaviour and bounce for any highmem page |
190 | */ | 190 | */ |
191 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); | 191 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); |
192 | } | 192 | } |
193 | EXPORT_SYMBOL(blk_queue_make_request); | 193 | EXPORT_SYMBOL(blk_queue_make_request); |
194 | 194 | ||
195 | /** | 195 | /** |
196 | * blk_queue_bounce_limit - set bounce buffer limit for queue | 196 | * blk_queue_bounce_limit - set bounce buffer limit for queue |
197 | * @q: the request queue for the device | 197 | * @q: the request queue for the device |
198 | * @dma_mask: the maximum address the device can handle | 198 | * @max_addr: the maximum address the device can handle |
199 | * | 199 | * |
200 | * Description: | 200 | * Description: |
201 | * Different hardware can have different requirements as to what pages | 201 | * Different hardware can have different requirements as to what pages |
202 | * it can do I/O directly to. A low level driver can call | 202 | * it can do I/O directly to. A low level driver can call |
203 | * blk_queue_bounce_limit to have lower memory pages allocated as bounce | 203 | * blk_queue_bounce_limit to have lower memory pages allocated as bounce |
204 | * buffers for doing I/O to pages residing above @dma_mask. | 204 | * buffers for doing I/O to pages residing above @max_addr. |
205 | **/ | 205 | **/ |
206 | void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask) | 206 | void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr) |
207 | { | 207 | { |
208 | unsigned long b_pfn = dma_mask >> PAGE_SHIFT; | 208 | unsigned long b_pfn = max_addr >> PAGE_SHIFT; |
209 | int dma = 0; | 209 | int dma = 0; |
210 | 210 | ||
211 | q->bounce_gfp = GFP_NOIO; | 211 | q->bounce_gfp = GFP_NOIO; |
212 | #if BITS_PER_LONG == 64 | 212 | #if BITS_PER_LONG == 64 |
213 | /* | 213 | /* |
214 | * Assume anything <= 4GB can be handled by IOMMU. Actually | 214 | * Assume anything <= 4GB can be handled by IOMMU. Actually |
215 | * some IOMMUs can handle everything, but I don't know of a | 215 | * some IOMMUs can handle everything, but I don't know of a |
216 | * way to test this here. | 216 | * way to test this here. |
217 | */ | 217 | */ |
218 | if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) | 218 | if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) |
219 | dma = 1; | 219 | dma = 1; |
220 | q->limits.bounce_pfn = max(max_low_pfn, b_pfn); | 220 | q->limits.bounce_pfn = max(max_low_pfn, b_pfn); |
221 | #else | 221 | #else |
222 | if (b_pfn < blk_max_low_pfn) | 222 | if (b_pfn < blk_max_low_pfn) |
223 | dma = 1; | 223 | dma = 1; |
224 | q->limits.bounce_pfn = b_pfn; | 224 | q->limits.bounce_pfn = b_pfn; |
225 | #endif | 225 | #endif |
226 | if (dma) { | 226 | if (dma) { |
227 | init_emergency_isa_pool(); | 227 | init_emergency_isa_pool(); |
228 | q->bounce_gfp = GFP_NOIO | GFP_DMA; | 228 | q->bounce_gfp = GFP_NOIO | GFP_DMA; |
229 | q->limits.bounce_pfn = b_pfn; | 229 | q->limits.bounce_pfn = b_pfn; |
230 | } | 230 | } |
231 | } | 231 | } |
232 | EXPORT_SYMBOL(blk_queue_bounce_limit); | 232 | EXPORT_SYMBOL(blk_queue_bounce_limit); |
233 | 233 | ||
234 | /** | 234 | /** |
235 | * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request | 235 | * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request |
236 | * @limits: the queue limits | 236 | * @limits: the queue limits |
237 | * @max_hw_sectors: max hardware sectors in the usual 512b unit | 237 | * @max_hw_sectors: max hardware sectors in the usual 512b unit |
238 | * | 238 | * |
239 | * Description: | 239 | * Description: |
240 | * Enables a low level driver to set a hard upper limit, | 240 | * Enables a low level driver to set a hard upper limit, |
241 | * max_hw_sectors, on the size of requests. max_hw_sectors is set by | 241 | * max_hw_sectors, on the size of requests. max_hw_sectors is set by |
242 | * the device driver based upon the combined capabilities of I/O | 242 | * the device driver based upon the combined capabilities of I/O |
243 | * controller and storage device. | 243 | * controller and storage device. |
244 | * | 244 | * |
245 | * max_sectors is a soft limit imposed by the block layer for | 245 | * max_sectors is a soft limit imposed by the block layer for |
246 | * filesystem type requests. This value can be overridden on a | 246 | * filesystem type requests. This value can be overridden on a |
247 | * per-device basis in /sys/block/<device>/queue/max_sectors_kb. | 247 | * per-device basis in /sys/block/<device>/queue/max_sectors_kb. |
248 | * The soft limit can not exceed max_hw_sectors. | 248 | * The soft limit can not exceed max_hw_sectors. |
249 | **/ | 249 | **/ |
250 | void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors) | 250 | void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors) |
251 | { | 251 | { |
252 | if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { | 252 | if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { |
253 | max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); | 253 | max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); |
254 | printk(KERN_INFO "%s: set to minimum %d\n", | 254 | printk(KERN_INFO "%s: set to minimum %d\n", |
255 | __func__, max_hw_sectors); | 255 | __func__, max_hw_sectors); |
256 | } | 256 | } |
257 | 257 | ||
258 | limits->max_hw_sectors = max_hw_sectors; | 258 | limits->max_hw_sectors = max_hw_sectors; |
259 | limits->max_sectors = min_t(unsigned int, max_hw_sectors, | 259 | limits->max_sectors = min_t(unsigned int, max_hw_sectors, |
260 | BLK_DEF_MAX_SECTORS); | 260 | BLK_DEF_MAX_SECTORS); |
261 | } | 261 | } |
262 | EXPORT_SYMBOL(blk_limits_max_hw_sectors); | 262 | EXPORT_SYMBOL(blk_limits_max_hw_sectors); |
263 | 263 | ||
264 | /** | 264 | /** |
265 | * blk_queue_max_hw_sectors - set max sectors for a request for this queue | 265 | * blk_queue_max_hw_sectors - set max sectors for a request for this queue |
266 | * @q: the request queue for the device | 266 | * @q: the request queue for the device |
267 | * @max_hw_sectors: max hardware sectors in the usual 512b unit | 267 | * @max_hw_sectors: max hardware sectors in the usual 512b unit |
268 | * | 268 | * |
269 | * Description: | 269 | * Description: |
270 | * See description for blk_limits_max_hw_sectors(). | 270 | * See description for blk_limits_max_hw_sectors(). |
271 | **/ | 271 | **/ |
272 | void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) | 272 | void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) |
273 | { | 273 | { |
274 | blk_limits_max_hw_sectors(&q->limits, max_hw_sectors); | 274 | blk_limits_max_hw_sectors(&q->limits, max_hw_sectors); |
275 | } | 275 | } |
276 | EXPORT_SYMBOL(blk_queue_max_hw_sectors); | 276 | EXPORT_SYMBOL(blk_queue_max_hw_sectors); |
277 | 277 | ||
278 | /** | 278 | /** |
279 | * blk_queue_max_discard_sectors - set max sectors for a single discard | 279 | * blk_queue_max_discard_sectors - set max sectors for a single discard |
280 | * @q: the request queue for the device | 280 | * @q: the request queue for the device |
281 | * @max_discard_sectors: maximum number of sectors to discard | 281 | * @max_discard_sectors: maximum number of sectors to discard |
282 | **/ | 282 | **/ |
283 | void blk_queue_max_discard_sectors(struct request_queue *q, | 283 | void blk_queue_max_discard_sectors(struct request_queue *q, |
284 | unsigned int max_discard_sectors) | 284 | unsigned int max_discard_sectors) |
285 | { | 285 | { |
286 | q->limits.max_discard_sectors = max_discard_sectors; | 286 | q->limits.max_discard_sectors = max_discard_sectors; |
287 | } | 287 | } |
288 | EXPORT_SYMBOL(blk_queue_max_discard_sectors); | 288 | EXPORT_SYMBOL(blk_queue_max_discard_sectors); |
289 | 289 | ||
290 | /** | 290 | /** |
291 | * blk_queue_max_write_same_sectors - set max sectors for a single write same | 291 | * blk_queue_max_write_same_sectors - set max sectors for a single write same |
292 | * @q: the request queue for the device | 292 | * @q: the request queue for the device |
293 | * @max_write_same_sectors: maximum number of sectors to write per command | 293 | * @max_write_same_sectors: maximum number of sectors to write per command |
294 | **/ | 294 | **/ |
295 | void blk_queue_max_write_same_sectors(struct request_queue *q, | 295 | void blk_queue_max_write_same_sectors(struct request_queue *q, |
296 | unsigned int max_write_same_sectors) | 296 | unsigned int max_write_same_sectors) |
297 | { | 297 | { |
298 | q->limits.max_write_same_sectors = max_write_same_sectors; | 298 | q->limits.max_write_same_sectors = max_write_same_sectors; |
299 | } | 299 | } |
300 | EXPORT_SYMBOL(blk_queue_max_write_same_sectors); | 300 | EXPORT_SYMBOL(blk_queue_max_write_same_sectors); |
301 | 301 | ||
302 | /** | 302 | /** |
303 | * blk_queue_max_segments - set max hw segments for a request for this queue | 303 | * blk_queue_max_segments - set max hw segments for a request for this queue |
304 | * @q: the request queue for the device | 304 | * @q: the request queue for the device |
305 | * @max_segments: max number of segments | 305 | * @max_segments: max number of segments |
306 | * | 306 | * |
307 | * Description: | 307 | * Description: |
308 | * Enables a low level driver to set an upper limit on the number of | 308 | * Enables a low level driver to set an upper limit on the number of |
309 | * hw data segments in a request. | 309 | * hw data segments in a request. |
310 | **/ | 310 | **/ |
311 | void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments) | 311 | void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments) |
312 | { | 312 | { |
313 | if (!max_segments) { | 313 | if (!max_segments) { |
314 | max_segments = 1; | 314 | max_segments = 1; |
315 | printk(KERN_INFO "%s: set to minimum %d\n", | 315 | printk(KERN_INFO "%s: set to minimum %d\n", |
316 | __func__, max_segments); | 316 | __func__, max_segments); |
317 | } | 317 | } |
318 | 318 | ||
319 | q->limits.max_segments = max_segments; | 319 | q->limits.max_segments = max_segments; |
320 | } | 320 | } |
321 | EXPORT_SYMBOL(blk_queue_max_segments); | 321 | EXPORT_SYMBOL(blk_queue_max_segments); |
322 | 322 | ||
323 | /** | 323 | /** |
324 | * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg | 324 | * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg |
325 | * @q: the request queue for the device | 325 | * @q: the request queue for the device |
326 | * @max_size: max size of segment in bytes | 326 | * @max_size: max size of segment in bytes |
327 | * | 327 | * |
328 | * Description: | 328 | * Description: |
329 | * Enables a low level driver to set an upper limit on the size of a | 329 | * Enables a low level driver to set an upper limit on the size of a |
330 | * coalesced segment | 330 | * coalesced segment |
331 | **/ | 331 | **/ |
332 | void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) | 332 | void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) |
333 | { | 333 | { |
334 | if (max_size < PAGE_CACHE_SIZE) { | 334 | if (max_size < PAGE_CACHE_SIZE) { |
335 | max_size = PAGE_CACHE_SIZE; | 335 | max_size = PAGE_CACHE_SIZE; |
336 | printk(KERN_INFO "%s: set to minimum %d\n", | 336 | printk(KERN_INFO "%s: set to minimum %d\n", |
337 | __func__, max_size); | 337 | __func__, max_size); |
338 | } | 338 | } |
339 | 339 | ||
340 | q->limits.max_segment_size = max_size; | 340 | q->limits.max_segment_size = max_size; |
341 | } | 341 | } |
342 | EXPORT_SYMBOL(blk_queue_max_segment_size); | 342 | EXPORT_SYMBOL(blk_queue_max_segment_size); |
343 | 343 | ||
344 | /** | 344 | /** |
345 | * blk_queue_logical_block_size - set logical block size for the queue | 345 | * blk_queue_logical_block_size - set logical block size for the queue |
346 | * @q: the request queue for the device | 346 | * @q: the request queue for the device |
347 | * @size: the logical block size, in bytes | 347 | * @size: the logical block size, in bytes |
348 | * | 348 | * |
349 | * Description: | 349 | * Description: |
350 | * This should be set to the lowest possible block size that the | 350 | * This should be set to the lowest possible block size that the |
351 | * storage device can address. The default of 512 covers most | 351 | * storage device can address. The default of 512 covers most |
352 | * hardware. | 352 | * hardware. |
353 | **/ | 353 | **/ |
354 | void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) | 354 | void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) |
355 | { | 355 | { |
356 | q->limits.logical_block_size = size; | 356 | q->limits.logical_block_size = size; |
357 | 357 | ||
358 | if (q->limits.physical_block_size < size) | 358 | if (q->limits.physical_block_size < size) |
359 | q->limits.physical_block_size = size; | 359 | q->limits.physical_block_size = size; |
360 | 360 | ||
361 | if (q->limits.io_min < q->limits.physical_block_size) | 361 | if (q->limits.io_min < q->limits.physical_block_size) |
362 | q->limits.io_min = q->limits.physical_block_size; | 362 | q->limits.io_min = q->limits.physical_block_size; |
363 | } | 363 | } |
364 | EXPORT_SYMBOL(blk_queue_logical_block_size); | 364 | EXPORT_SYMBOL(blk_queue_logical_block_size); |
365 | 365 | ||
366 | /** | 366 | /** |
367 | * blk_queue_physical_block_size - set physical block size for the queue | 367 | * blk_queue_physical_block_size - set physical block size for the queue |
368 | * @q: the request queue for the device | 368 | * @q: the request queue for the device |
369 | * @size: the physical block size, in bytes | 369 | * @size: the physical block size, in bytes |
370 | * | 370 | * |
371 | * Description: | 371 | * Description: |
372 | * This should be set to the lowest possible sector size that the | 372 | * This should be set to the lowest possible sector size that the |
373 | * hardware can operate on without reverting to read-modify-write | 373 | * hardware can operate on without reverting to read-modify-write |
374 | * operations. | 374 | * operations. |
375 | */ | 375 | */ |
376 | void blk_queue_physical_block_size(struct request_queue *q, unsigned int size) | 376 | void blk_queue_physical_block_size(struct request_queue *q, unsigned int size) |
377 | { | 377 | { |
378 | q->limits.physical_block_size = size; | 378 | q->limits.physical_block_size = size; |
379 | 379 | ||
380 | if (q->limits.physical_block_size < q->limits.logical_block_size) | 380 | if (q->limits.physical_block_size < q->limits.logical_block_size) |
381 | q->limits.physical_block_size = q->limits.logical_block_size; | 381 | q->limits.physical_block_size = q->limits.logical_block_size; |
382 | 382 | ||
383 | if (q->limits.io_min < q->limits.physical_block_size) | 383 | if (q->limits.io_min < q->limits.physical_block_size) |
384 | q->limits.io_min = q->limits.physical_block_size; | 384 | q->limits.io_min = q->limits.physical_block_size; |
385 | } | 385 | } |
386 | EXPORT_SYMBOL(blk_queue_physical_block_size); | 386 | EXPORT_SYMBOL(blk_queue_physical_block_size); |
387 | 387 | ||
388 | /** | 388 | /** |
389 | * blk_queue_alignment_offset - set physical block alignment offset | 389 | * blk_queue_alignment_offset - set physical block alignment offset |
390 | * @q: the request queue for the device | 390 | * @q: the request queue for the device |
391 | * @offset: alignment offset in bytes | 391 | * @offset: alignment offset in bytes |
392 | * | 392 | * |
393 | * Description: | 393 | * Description: |
394 | * Some devices are naturally misaligned to compensate for things like | 394 | * Some devices are naturally misaligned to compensate for things like |
395 | * the legacy DOS partition table 63-sector offset. Low-level drivers | 395 | * the legacy DOS partition table 63-sector offset. Low-level drivers |
396 | * should call this function for devices whose first sector is not | 396 | * should call this function for devices whose first sector is not |
397 | * naturally aligned. | 397 | * naturally aligned. |
398 | */ | 398 | */ |
399 | void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) | 399 | void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) |
400 | { | 400 | { |
401 | q->limits.alignment_offset = | 401 | q->limits.alignment_offset = |
402 | offset & (q->limits.physical_block_size - 1); | 402 | offset & (q->limits.physical_block_size - 1); |
403 | q->limits.misaligned = 0; | 403 | q->limits.misaligned = 0; |
404 | } | 404 | } |
405 | EXPORT_SYMBOL(blk_queue_alignment_offset); | 405 | EXPORT_SYMBOL(blk_queue_alignment_offset); |
406 | 406 | ||
407 | /** | 407 | /** |
408 | * blk_limits_io_min - set minimum request size for a device | 408 | * blk_limits_io_min - set minimum request size for a device |
409 | * @limits: the queue limits | 409 | * @limits: the queue limits |
410 | * @min: smallest I/O size in bytes | 410 | * @min: smallest I/O size in bytes |
411 | * | 411 | * |
412 | * Description: | 412 | * Description: |
413 | * Some devices have an internal block size bigger than the reported | 413 | * Some devices have an internal block size bigger than the reported |
414 | * hardware sector size. This function can be used to signal the | 414 | * hardware sector size. This function can be used to signal the |
415 | * smallest I/O the device can perform without incurring a performance | 415 | * smallest I/O the device can perform without incurring a performance |
416 | * penalty. | 416 | * penalty. |
417 | */ | 417 | */ |
418 | void blk_limits_io_min(struct queue_limits *limits, unsigned int min) | 418 | void blk_limits_io_min(struct queue_limits *limits, unsigned int min) |
419 | { | 419 | { |
420 | limits->io_min = min; | 420 | limits->io_min = min; |
421 | 421 | ||
422 | if (limits->io_min < limits->logical_block_size) | 422 | if (limits->io_min < limits->logical_block_size) |
423 | limits->io_min = limits->logical_block_size; | 423 | limits->io_min = limits->logical_block_size; |
424 | 424 | ||
425 | if (limits->io_min < limits->physical_block_size) | 425 | if (limits->io_min < limits->physical_block_size) |
426 | limits->io_min = limits->physical_block_size; | 426 | limits->io_min = limits->physical_block_size; |
427 | } | 427 | } |
428 | EXPORT_SYMBOL(blk_limits_io_min); | 428 | EXPORT_SYMBOL(blk_limits_io_min); |
429 | 429 | ||
430 | /** | 430 | /** |
431 | * blk_queue_io_min - set minimum request size for the queue | 431 | * blk_queue_io_min - set minimum request size for the queue |
432 | * @q: the request queue for the device | 432 | * @q: the request queue for the device |
433 | * @min: smallest I/O size in bytes | 433 | * @min: smallest I/O size in bytes |
434 | * | 434 | * |
435 | * Description: | 435 | * Description: |
436 | * Storage devices may report a granularity or preferred minimum I/O | 436 | * Storage devices may report a granularity or preferred minimum I/O |
437 | * size which is the smallest request the device can perform without | 437 | * size which is the smallest request the device can perform without |
438 | * incurring a performance penalty. For disk drives this is often the | 438 | * incurring a performance penalty. For disk drives this is often the |
439 | * physical block size. For RAID arrays it is often the stripe chunk | 439 | * physical block size. For RAID arrays it is often the stripe chunk |
440 | * size. A properly aligned multiple of minimum_io_size is the | 440 | * size. A properly aligned multiple of minimum_io_size is the |
441 | * preferred request size for workloads where a high number of I/O | 441 | * preferred request size for workloads where a high number of I/O |
442 | * operations is desired. | 442 | * operations is desired. |
443 | */ | 443 | */ |
444 | void blk_queue_io_min(struct request_queue *q, unsigned int min) | 444 | void blk_queue_io_min(struct request_queue *q, unsigned int min) |
445 | { | 445 | { |
446 | blk_limits_io_min(&q->limits, min); | 446 | blk_limits_io_min(&q->limits, min); |
447 | } | 447 | } |
448 | EXPORT_SYMBOL(blk_queue_io_min); | 448 | EXPORT_SYMBOL(blk_queue_io_min); |
449 | 449 | ||
450 | /** | 450 | /** |
451 | * blk_limits_io_opt - set optimal request size for a device | 451 | * blk_limits_io_opt - set optimal request size for a device |
452 | * @limits: the queue limits | 452 | * @limits: the queue limits |
453 | * @opt: smallest I/O size in bytes | 453 | * @opt: smallest I/O size in bytes |
454 | * | 454 | * |
455 | * Description: | 455 | * Description: |
456 | * Storage devices may report an optimal I/O size, which is the | 456 | * Storage devices may report an optimal I/O size, which is the |
457 | * device's preferred unit for sustained I/O. This is rarely reported | 457 | * device's preferred unit for sustained I/O. This is rarely reported |
458 | * for disk drives. For RAID arrays it is usually the stripe width or | 458 | * for disk drives. For RAID arrays it is usually the stripe width or |
459 | * the internal track size. A properly aligned multiple of | 459 | * the internal track size. A properly aligned multiple of |
460 | * optimal_io_size is the preferred request size for workloads where | 460 | * optimal_io_size is the preferred request size for workloads where |
461 | * sustained throughput is desired. | 461 | * sustained throughput is desired. |
462 | */ | 462 | */ |
463 | void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt) | 463 | void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt) |
464 | { | 464 | { |
465 | limits->io_opt = opt; | 465 | limits->io_opt = opt; |
466 | } | 466 | } |
467 | EXPORT_SYMBOL(blk_limits_io_opt); | 467 | EXPORT_SYMBOL(blk_limits_io_opt); |
468 | 468 | ||
469 | /** | 469 | /** |
470 | * blk_queue_io_opt - set optimal request size for the queue | 470 | * blk_queue_io_opt - set optimal request size for the queue |
471 | * @q: the request queue for the device | 471 | * @q: the request queue for the device |
472 | * @opt: optimal request size in bytes | 472 | * @opt: optimal request size in bytes |
473 | * | 473 | * |
474 | * Description: | 474 | * Description: |
475 | * Storage devices may report an optimal I/O size, which is the | 475 | * Storage devices may report an optimal I/O size, which is the |
476 | * device's preferred unit for sustained I/O. This is rarely reported | 476 | * device's preferred unit for sustained I/O. This is rarely reported |
477 | * for disk drives. For RAID arrays it is usually the stripe width or | 477 | * for disk drives. For RAID arrays it is usually the stripe width or |
478 | * the internal track size. A properly aligned multiple of | 478 | * the internal track size. A properly aligned multiple of |
479 | * optimal_io_size is the preferred request size for workloads where | 479 | * optimal_io_size is the preferred request size for workloads where |
480 | * sustained throughput is desired. | 480 | * sustained throughput is desired. |
481 | */ | 481 | */ |
482 | void blk_queue_io_opt(struct request_queue *q, unsigned int opt) | 482 | void blk_queue_io_opt(struct request_queue *q, unsigned int opt) |
483 | { | 483 | { |
484 | blk_limits_io_opt(&q->limits, opt); | 484 | blk_limits_io_opt(&q->limits, opt); |
485 | } | 485 | } |
486 | EXPORT_SYMBOL(blk_queue_io_opt); | 486 | EXPORT_SYMBOL(blk_queue_io_opt); |
487 | 487 | ||
488 | /** | 488 | /** |
489 | * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers | 489 | * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers |
490 | * @t: the stacking driver (top) | 490 | * @t: the stacking driver (top) |
491 | * @b: the underlying device (bottom) | 491 | * @b: the underlying device (bottom) |
492 | **/ | 492 | **/ |
493 | void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) | 493 | void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) |
494 | { | 494 | { |
495 | blk_stack_limits(&t->limits, &b->limits, 0); | 495 | blk_stack_limits(&t->limits, &b->limits, 0); |
496 | } | 496 | } |
497 | EXPORT_SYMBOL(blk_queue_stack_limits); | 497 | EXPORT_SYMBOL(blk_queue_stack_limits); |
498 | 498 | ||
499 | /** | 499 | /** |
500 | * blk_stack_limits - adjust queue_limits for stacked devices | 500 | * blk_stack_limits - adjust queue_limits for stacked devices |
501 | * @t: the stacking driver limits (top device) | 501 | * @t: the stacking driver limits (top device) |
502 | * @b: the underlying queue limits (bottom, component device) | 502 | * @b: the underlying queue limits (bottom, component device) |
503 | * @start: first data sector within component device | 503 | * @start: first data sector within component device |
504 | * | 504 | * |
505 | * Description: | 505 | * Description: |
506 | * This function is used by stacking drivers like MD and DM to ensure | 506 | * This function is used by stacking drivers like MD and DM to ensure |
507 | * that all component devices have compatible block sizes and | 507 | * that all component devices have compatible block sizes and |
508 | * alignments. The stacking driver must provide a queue_limits | 508 | * alignments. The stacking driver must provide a queue_limits |
509 | * struct (top) and then iteratively call the stacking function for | 509 | * struct (top) and then iteratively call the stacking function for |
510 | * all component (bottom) devices. The stacking function will | 510 | * all component (bottom) devices. The stacking function will |
511 | * attempt to combine the values and ensure proper alignment. | 511 | * attempt to combine the values and ensure proper alignment. |
512 | * | 512 | * |
513 | * Returns 0 if the top and bottom queue_limits are compatible. The | 513 | * Returns 0 if the top and bottom queue_limits are compatible. The |
514 | * top device's block sizes and alignment offsets may be adjusted to | 514 | * top device's block sizes and alignment offsets may be adjusted to |
515 | * ensure alignment with the bottom device. If no compatible sizes | 515 | * ensure alignment with the bottom device. If no compatible sizes |
516 | * and alignments exist, -1 is returned and the resulting top | 516 | * and alignments exist, -1 is returned and the resulting top |
517 | * queue_limits will have the misaligned flag set to indicate that | 517 | * queue_limits will have the misaligned flag set to indicate that |
518 | * the alignment_offset is undefined. | 518 | * the alignment_offset is undefined. |
519 | */ | 519 | */ |
520 | int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | 520 | int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, |
521 | sector_t start) | 521 | sector_t start) |
522 | { | 522 | { |
523 | unsigned int top, bottom, alignment, ret = 0; | 523 | unsigned int top, bottom, alignment, ret = 0; |
524 | 524 | ||
525 | t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); | 525 | t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); |
526 | t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); | 526 | t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); |
527 | t->max_write_same_sectors = min(t->max_write_same_sectors, | 527 | t->max_write_same_sectors = min(t->max_write_same_sectors, |
528 | b->max_write_same_sectors); | 528 | b->max_write_same_sectors); |
529 | t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); | 529 | t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); |
530 | 530 | ||
531 | t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, | 531 | t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, |
532 | b->seg_boundary_mask); | 532 | b->seg_boundary_mask); |
533 | 533 | ||
534 | t->max_segments = min_not_zero(t->max_segments, b->max_segments); | 534 | t->max_segments = min_not_zero(t->max_segments, b->max_segments); |
535 | t->max_integrity_segments = min_not_zero(t->max_integrity_segments, | 535 | t->max_integrity_segments = min_not_zero(t->max_integrity_segments, |
536 | b->max_integrity_segments); | 536 | b->max_integrity_segments); |
537 | 537 | ||
538 | t->max_segment_size = min_not_zero(t->max_segment_size, | 538 | t->max_segment_size = min_not_zero(t->max_segment_size, |
539 | b->max_segment_size); | 539 | b->max_segment_size); |
540 | 540 | ||
541 | t->misaligned |= b->misaligned; | 541 | t->misaligned |= b->misaligned; |
542 | 542 | ||
543 | alignment = queue_limit_alignment_offset(b, start); | 543 | alignment = queue_limit_alignment_offset(b, start); |
544 | 544 | ||
545 | /* Bottom device has different alignment. Check that it is | 545 | /* Bottom device has different alignment. Check that it is |
546 | * compatible with the current top alignment. | 546 | * compatible with the current top alignment. |
547 | */ | 547 | */ |
548 | if (t->alignment_offset != alignment) { | 548 | if (t->alignment_offset != alignment) { |
549 | 549 | ||
550 | top = max(t->physical_block_size, t->io_min) | 550 | top = max(t->physical_block_size, t->io_min) |
551 | + t->alignment_offset; | 551 | + t->alignment_offset; |
552 | bottom = max(b->physical_block_size, b->io_min) + alignment; | 552 | bottom = max(b->physical_block_size, b->io_min) + alignment; |
553 | 553 | ||
554 | /* Verify that top and bottom intervals line up */ | 554 | /* Verify that top and bottom intervals line up */ |
555 | if (max(top, bottom) & (min(top, bottom) - 1)) { | 555 | if (max(top, bottom) & (min(top, bottom) - 1)) { |
556 | t->misaligned = 1; | 556 | t->misaligned = 1; |
557 | ret = -1; | 557 | ret = -1; |
558 | } | 558 | } |
559 | } | 559 | } |
560 | 560 | ||
561 | t->logical_block_size = max(t->logical_block_size, | 561 | t->logical_block_size = max(t->logical_block_size, |
562 | b->logical_block_size); | 562 | b->logical_block_size); |
563 | 563 | ||
564 | t->physical_block_size = max(t->physical_block_size, | 564 | t->physical_block_size = max(t->physical_block_size, |
565 | b->physical_block_size); | 565 | b->physical_block_size); |
566 | 566 | ||
567 | t->io_min = max(t->io_min, b->io_min); | 567 | t->io_min = max(t->io_min, b->io_min); |
568 | t->io_opt = lcm(t->io_opt, b->io_opt); | 568 | t->io_opt = lcm(t->io_opt, b->io_opt); |
569 | 569 | ||
570 | t->cluster &= b->cluster; | 570 | t->cluster &= b->cluster; |
571 | t->discard_zeroes_data &= b->discard_zeroes_data; | 571 | t->discard_zeroes_data &= b->discard_zeroes_data; |
572 | 572 | ||
573 | /* Physical block size a multiple of the logical block size? */ | 573 | /* Physical block size a multiple of the logical block size? */ |
574 | if (t->physical_block_size & (t->logical_block_size - 1)) { | 574 | if (t->physical_block_size & (t->logical_block_size - 1)) { |
575 | t->physical_block_size = t->logical_block_size; | 575 | t->physical_block_size = t->logical_block_size; |
576 | t->misaligned = 1; | 576 | t->misaligned = 1; |
577 | ret = -1; | 577 | ret = -1; |
578 | } | 578 | } |
579 | 579 | ||
580 | /* Minimum I/O a multiple of the physical block size? */ | 580 | /* Minimum I/O a multiple of the physical block size? */ |
581 | if (t->io_min & (t->physical_block_size - 1)) { | 581 | if (t->io_min & (t->physical_block_size - 1)) { |
582 | t->io_min = t->physical_block_size; | 582 | t->io_min = t->physical_block_size; |
583 | t->misaligned = 1; | 583 | t->misaligned = 1; |
584 | ret = -1; | 584 | ret = -1; |
585 | } | 585 | } |
586 | 586 | ||
587 | /* Optimal I/O a multiple of the physical block size? */ | 587 | /* Optimal I/O a multiple of the physical block size? */ |
588 | if (t->io_opt & (t->physical_block_size - 1)) { | 588 | if (t->io_opt & (t->physical_block_size - 1)) { |
589 | t->io_opt = 0; | 589 | t->io_opt = 0; |
590 | t->misaligned = 1; | 590 | t->misaligned = 1; |
591 | ret = -1; | 591 | ret = -1; |
592 | } | 592 | } |
593 | 593 | ||
594 | /* Find lowest common alignment_offset */ | 594 | /* Find lowest common alignment_offset */ |
595 | t->alignment_offset = lcm(t->alignment_offset, alignment) | 595 | t->alignment_offset = lcm(t->alignment_offset, alignment) |
596 | & (max(t->physical_block_size, t->io_min) - 1); | 596 | & (max(t->physical_block_size, t->io_min) - 1); |
597 | 597 | ||
598 | /* Verify that new alignment_offset is on a logical block boundary */ | 598 | /* Verify that new alignment_offset is on a logical block boundary */ |
599 | if (t->alignment_offset & (t->logical_block_size - 1)) { | 599 | if (t->alignment_offset & (t->logical_block_size - 1)) { |
600 | t->misaligned = 1; | 600 | t->misaligned = 1; |
601 | ret = -1; | 601 | ret = -1; |
602 | } | 602 | } |
603 | 603 | ||
604 | /* Discard alignment and granularity */ | 604 | /* Discard alignment and granularity */ |
605 | if (b->discard_granularity) { | 605 | if (b->discard_granularity) { |
606 | alignment = queue_limit_discard_alignment(b, start); | 606 | alignment = queue_limit_discard_alignment(b, start); |
607 | 607 | ||
608 | if (t->discard_granularity != 0 && | 608 | if (t->discard_granularity != 0 && |
609 | t->discard_alignment != alignment) { | 609 | t->discard_alignment != alignment) { |
610 | top = t->discard_granularity + t->discard_alignment; | 610 | top = t->discard_granularity + t->discard_alignment; |
611 | bottom = b->discard_granularity + alignment; | 611 | bottom = b->discard_granularity + alignment; |
612 | 612 | ||
613 | /* Verify that top and bottom intervals line up */ | 613 | /* Verify that top and bottom intervals line up */ |
614 | if ((max(top, bottom) % min(top, bottom)) != 0) | 614 | if ((max(top, bottom) % min(top, bottom)) != 0) |
615 | t->discard_misaligned = 1; | 615 | t->discard_misaligned = 1; |
616 | } | 616 | } |
617 | 617 | ||
618 | t->max_discard_sectors = min_not_zero(t->max_discard_sectors, | 618 | t->max_discard_sectors = min_not_zero(t->max_discard_sectors, |
619 | b->max_discard_sectors); | 619 | b->max_discard_sectors); |
620 | t->discard_granularity = max(t->discard_granularity, | 620 | t->discard_granularity = max(t->discard_granularity, |
621 | b->discard_granularity); | 621 | b->discard_granularity); |
622 | t->discard_alignment = lcm(t->discard_alignment, alignment) % | 622 | t->discard_alignment = lcm(t->discard_alignment, alignment) % |
623 | t->discard_granularity; | 623 | t->discard_granularity; |
624 | } | 624 | } |
625 | 625 | ||
626 | return ret; | 626 | return ret; |
627 | } | 627 | } |
628 | EXPORT_SYMBOL(blk_stack_limits); | 628 | EXPORT_SYMBOL(blk_stack_limits); |
629 | 629 | ||
630 | /** | 630 | /** |
631 | * bdev_stack_limits - adjust queue limits for stacked drivers | 631 | * bdev_stack_limits - adjust queue limits for stacked drivers |
632 | * @t: the stacking driver limits (top device) | 632 | * @t: the stacking driver limits (top device) |
633 | * @bdev: the component block_device (bottom) | 633 | * @bdev: the component block_device (bottom) |
634 | * @start: first data sector within component device | 634 | * @start: first data sector within component device |
635 | * | 635 | * |
636 | * Description: | 636 | * Description: |
637 | * Merges queue limits for a top device and a block_device. Returns | 637 | * Merges queue limits for a top device and a block_device. Returns |
638 | * 0 if alignment didn't change. Returns -1 if adding the bottom | 638 | * 0 if alignment didn't change. Returns -1 if adding the bottom |
639 | * device caused misalignment. | 639 | * device caused misalignment. |
640 | */ | 640 | */ |
641 | int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, | 641 | int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, |
642 | sector_t start) | 642 | sector_t start) |
643 | { | 643 | { |
644 | struct request_queue *bq = bdev_get_queue(bdev); | 644 | struct request_queue *bq = bdev_get_queue(bdev); |
645 | 645 | ||
646 | start += get_start_sect(bdev); | 646 | start += get_start_sect(bdev); |
647 | 647 | ||
648 | return blk_stack_limits(t, &bq->limits, start); | 648 | return blk_stack_limits(t, &bq->limits, start); |
649 | } | 649 | } |
650 | EXPORT_SYMBOL(bdev_stack_limits); | 650 | EXPORT_SYMBOL(bdev_stack_limits); |
651 | 651 | ||
652 | /** | 652 | /** |
653 | * disk_stack_limits - adjust queue limits for stacked drivers | 653 | * disk_stack_limits - adjust queue limits for stacked drivers |
654 | * @disk: MD/DM gendisk (top) | 654 | * @disk: MD/DM gendisk (top) |
655 | * @bdev: the underlying block device (bottom) | 655 | * @bdev: the underlying block device (bottom) |
656 | * @offset: offset to beginning of data within component device | 656 | * @offset: offset to beginning of data within component device |
657 | * | 657 | * |
658 | * Description: | 658 | * Description: |
659 | * Merges the limits for a top level gendisk and a bottom level | 659 | * Merges the limits for a top level gendisk and a bottom level |
660 | * block_device. | 660 | * block_device. |
661 | */ | 661 | */ |
662 | void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, | 662 | void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, |
663 | sector_t offset) | 663 | sector_t offset) |
664 | { | 664 | { |
665 | struct request_queue *t = disk->queue; | 665 | struct request_queue *t = disk->queue; |
666 | 666 | ||
667 | if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) { | 667 | if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) { |
668 | char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; | 668 | char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; |
669 | 669 | ||
670 | disk_name(disk, 0, top); | 670 | disk_name(disk, 0, top); |
671 | bdevname(bdev, bottom); | 671 | bdevname(bdev, bottom); |
672 | 672 | ||
673 | printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", | 673 | printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", |
674 | top, bottom); | 674 | top, bottom); |
675 | } | 675 | } |
676 | } | 676 | } |
677 | EXPORT_SYMBOL(disk_stack_limits); | 677 | EXPORT_SYMBOL(disk_stack_limits); |
678 | 678 | ||
679 | /** | 679 | /** |
680 | * blk_queue_dma_pad - set pad mask | 680 | * blk_queue_dma_pad - set pad mask |
681 | * @q: the request queue for the device | 681 | * @q: the request queue for the device |
682 | * @mask: pad mask | 682 | * @mask: pad mask |
683 | * | 683 | * |
684 | * Set dma pad mask. | 684 | * Set dma pad mask. |
685 | * | 685 | * |
686 | * Appending pad buffer to a request modifies the last entry of a | 686 | * Appending pad buffer to a request modifies the last entry of a |
687 | * scatter list such that it includes the pad buffer. | 687 | * scatter list such that it includes the pad buffer. |
688 | **/ | 688 | **/ |
689 | void blk_queue_dma_pad(struct request_queue *q, unsigned int mask) | 689 | void blk_queue_dma_pad(struct request_queue *q, unsigned int mask) |
690 | { | 690 | { |
691 | q->dma_pad_mask = mask; | 691 | q->dma_pad_mask = mask; |
692 | } | 692 | } |
693 | EXPORT_SYMBOL(blk_queue_dma_pad); | 693 | EXPORT_SYMBOL(blk_queue_dma_pad); |
694 | 694 | ||
695 | /** | 695 | /** |
696 | * blk_queue_update_dma_pad - update pad mask | 696 | * blk_queue_update_dma_pad - update pad mask |
697 | * @q: the request queue for the device | 697 | * @q: the request queue for the device |
698 | * @mask: pad mask | 698 | * @mask: pad mask |
699 | * | 699 | * |
700 | * Update dma pad mask. | 700 | * Update dma pad mask. |
701 | * | 701 | * |
702 | * Appending pad buffer to a request modifies the last entry of a | 702 | * Appending pad buffer to a request modifies the last entry of a |
703 | * scatter list such that it includes the pad buffer. | 703 | * scatter list such that it includes the pad buffer. |
704 | **/ | 704 | **/ |
705 | void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask) | 705 | void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask) |
706 | { | 706 | { |
707 | if (mask > q->dma_pad_mask) | 707 | if (mask > q->dma_pad_mask) |
708 | q->dma_pad_mask = mask; | 708 | q->dma_pad_mask = mask; |
709 | } | 709 | } |
710 | EXPORT_SYMBOL(blk_queue_update_dma_pad); | 710 | EXPORT_SYMBOL(blk_queue_update_dma_pad); |
711 | 711 | ||
712 | /** | 712 | /** |
713 | * blk_queue_dma_drain - Set up a drain buffer for excess dma. | 713 | * blk_queue_dma_drain - Set up a drain buffer for excess dma. |
714 | * @q: the request queue for the device | 714 | * @q: the request queue for the device |
715 | * @dma_drain_needed: fn which returns non-zero if drain is necessary | 715 | * @dma_drain_needed: fn which returns non-zero if drain is necessary |
716 | * @buf: physically contiguous buffer | 716 | * @buf: physically contiguous buffer |
717 | * @size: size of the buffer in bytes | 717 | * @size: size of the buffer in bytes |
718 | * | 718 | * |
719 | * Some devices have excess DMA problems and can't simply discard (or | 719 | * Some devices have excess DMA problems and can't simply discard (or |
720 | * zero fill) the unwanted piece of the transfer. They have to have a | 720 | * zero fill) the unwanted piece of the transfer. They have to have a |
721 | * real area of memory to transfer it into. The use case for this is | 721 | * real area of memory to transfer it into. The use case for this is |
722 | * ATAPI devices in DMA mode. If the packet command causes a transfer | 722 | * ATAPI devices in DMA mode. If the packet command causes a transfer |
723 | * bigger than the transfer size some HBAs will lock up if there | 723 | * bigger than the transfer size some HBAs will lock up if there |
724 | * aren't DMA elements to contain the excess transfer. What this API | 724 | * aren't DMA elements to contain the excess transfer. What this API |
725 | * does is adjust the queue so that the buf is always appended | 725 | * does is adjust the queue so that the buf is always appended |
726 | * silently to the scatterlist. | 726 | * silently to the scatterlist. |
727 | * | 727 | * |
728 | * Note: This routine adjusts max_hw_segments to make room for appending | 728 | * Note: This routine adjusts max_hw_segments to make room for appending |
729 | * the drain buffer. If you call blk_queue_max_segments() after calling | 729 | * the drain buffer. If you call blk_queue_max_segments() after calling |
730 | * this routine, you must set the limit to one fewer than your device | 730 | * this routine, you must set the limit to one fewer than your device |
731 | * can support, otherwise there won't be room for the drain buffer. | 731 | * can support, otherwise there won't be room for the drain buffer. |
732 | */ | 732 | */ |
733 | int blk_queue_dma_drain(struct request_queue *q, | 733 | int blk_queue_dma_drain(struct request_queue *q, |
734 | dma_drain_needed_fn *dma_drain_needed, | 734 | dma_drain_needed_fn *dma_drain_needed, |
735 | void *buf, unsigned int size) | 735 | void *buf, unsigned int size) |
736 | { | 736 | { |
737 | if (queue_max_segments(q) < 2) | 737 | if (queue_max_segments(q) < 2) |
738 | return -EINVAL; | 738 | return -EINVAL; |
739 | /* make room for appending the drain */ | 739 | /* make room for appending the drain */ |
740 | blk_queue_max_segments(q, queue_max_segments(q) - 1); | 740 | blk_queue_max_segments(q, queue_max_segments(q) - 1); |
741 | q->dma_drain_needed = dma_drain_needed; | 741 | q->dma_drain_needed = dma_drain_needed; |
742 | q->dma_drain_buffer = buf; | 742 | q->dma_drain_buffer = buf; |
743 | q->dma_drain_size = size; | 743 | q->dma_drain_size = size; |
744 | 744 | ||
745 | return 0; | 745 | return 0; |
746 | } | 746 | } |
747 | EXPORT_SYMBOL_GPL(blk_queue_dma_drain); | 747 | EXPORT_SYMBOL_GPL(blk_queue_dma_drain); |
748 | 748 | ||
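A hedged sketch of the ATAPI-style usage described above; the helper names, the 4 KiB drain size and the "drain when the transfer is not a 512-byte multiple" rule are assumptions for illustration, not the actual libata code:

#include <linux/blkdev.h>
#include <linux/slab.h>

/* Decide per request whether the drain buffer should be appended. */
static int example_drain_needed(struct request *rq)
{
	return blk_rq_bytes(rq) & 0x1ff;	/* drain odd-sized transfers */
}

static int example_init_drain(struct request_queue *q)
{
	void *buf = kmalloc(4096, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* Returns -EINVAL if the queue cannot spare a segment for the drain. */
	return blk_queue_dma_drain(q, example_drain_needed, buf, 4096);
}

Per the note in the kernel-doc above, any later blk_queue_max_segments() call would have to account for the segment reserved here.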
749 | /** | 749 | /** |
750 | * blk_queue_segment_boundary - set boundary rules for segment merging | 750 | * blk_queue_segment_boundary - set boundary rules for segment merging |
751 | * @q: the request queue for the device | 751 | * @q: the request queue for the device |
752 | * @mask: the memory boundary mask | 752 | * @mask: the memory boundary mask |
753 | **/ | 753 | **/ |
754 | void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) | 754 | void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) |
755 | { | 755 | { |
756 | if (mask < PAGE_CACHE_SIZE - 1) { | 756 | if (mask < PAGE_CACHE_SIZE - 1) { |
757 | mask = PAGE_CACHE_SIZE - 1; | 757 | mask = PAGE_CACHE_SIZE - 1; |
758 | printk(KERN_INFO "%s: set to minimum %lx\n", | 758 | printk(KERN_INFO "%s: set to minimum %lx\n", |
759 | __func__, mask); | 759 | __func__, mask); |
760 | } | 760 | } |
761 | 761 | ||
762 | q->limits.seg_boundary_mask = mask; | 762 | q->limits.seg_boundary_mask = mask; |
763 | } | 763 | } |
764 | EXPORT_SYMBOL(blk_queue_segment_boundary); | 764 | EXPORT_SYMBOL(blk_queue_segment_boundary); |
765 | 765 | ||
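For example, a hypothetical controller whose DMA engine cannot cross a 64 KiB address boundary would be configured as follows (the 0xffff mask is illustrative; masks below PAGE_CACHE_SIZE - 1 are bumped up as shown above):

/* No merged segment may straddle a 64 KiB address boundary. */
blk_queue_segment_boundary(q, 0xffff);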
766 | /** | 766 | /** |
767 | * blk_queue_dma_alignment - set dma length and memory alignment | 767 | * blk_queue_dma_alignment - set dma length and memory alignment |
768 | * @q: the request queue for the device | 768 | * @q: the request queue for the device |
769 | * @mask: alignment mask | 769 | * @mask: alignment mask |
770 | * | 770 | * |
771 | * description: | 771 | * description: |
772 | * set required memory and length alignment for direct dma transactions. | 772 | * set required memory and length alignment for direct dma transactions. |
773 | * this is used when building direct io requests for the queue. | 773 | * this is used when building direct io requests for the queue. |
774 | * | 774 | * |
775 | **/ | 775 | **/ |
776 | void blk_queue_dma_alignment(struct request_queue *q, int mask) | 776 | void blk_queue_dma_alignment(struct request_queue *q, int mask) |
777 | { | 777 | { |
778 | q->dma_alignment = mask; | 778 | q->dma_alignment = mask; |
779 | } | 779 | } |
780 | EXPORT_SYMBOL(blk_queue_dma_alignment); | 780 | EXPORT_SYMBOL(blk_queue_dma_alignment); |
781 | 781 | ||
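As a sketch, a hypothetical device that can only DMA sector-aligned buffers in sector-sized multiples would set a 511 mask; the value is illustrative, though it matches the block layer's usual sector-size default:

/* Hypothetical device: direct-I/O buffers and lengths must be 512-byte aligned. */
blk_queue_dma_alignment(q, 511);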
782 | /** | 782 | /** |
783 | * blk_queue_update_dma_alignment - update dma length and memory alignment | 783 | * blk_queue_update_dma_alignment - update dma length and memory alignment |
784 | * @q: the request queue for the device | 784 | * @q: the request queue for the device |
785 | * @mask: alignment mask | 785 | * @mask: alignment mask |
786 | * | 786 | * |
787 | * description: | 787 | * description: |
788 | * update required memory and length alignment for direct dma transactions. | 788 | * update required memory and length alignment for direct dma transactions. |
789 | * If the requested alignment is larger than the current alignment, then | 789 | * If the requested alignment is larger than the current alignment, then |
790 | * the current queue alignment is updated to the new value, otherwise it | 790 | * the current queue alignment is updated to the new value, otherwise it |
791 | * is left alone. The design of this is to allow multiple objects | 791 | * is left alone. The design of this is to allow multiple objects |
792 | * (driver, device, transport etc) to set their respective | 792 | * (driver, device, transport etc) to set their respective |
793 | * alignments without having them interfere. | 793 | * alignments without having them interfere. |
794 | * | 794 | * |
795 | **/ | 795 | **/ |
796 | void blk_queue_update_dma_alignment(struct request_queue *q, int mask) | 796 | void blk_queue_update_dma_alignment(struct request_queue *q, int mask) |
797 | { | 797 | { |
798 | BUG_ON(mask > PAGE_SIZE); | 798 | BUG_ON(mask > PAGE_SIZE); |
799 | 799 | ||
800 | if (mask > q->dma_alignment) | 800 | if (mask > q->dma_alignment) |
801 | q->dma_alignment = mask; | 801 | q->dma_alignment = mask; |
802 | } | 802 | } |
803 | EXPORT_SYMBOL(blk_queue_update_dma_alignment); | 803 | EXPORT_SYMBOL(blk_queue_update_dma_alignment); |
804 | 804 | ||
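The update variant lets the driver, device and transport each state an alignment requirement, with only the strictest one taking effect. A sketch with illustrative values:

blk_queue_update_dma_alignment(q, 3);	/* driver needs 4-byte alignment */
blk_queue_update_dma_alignment(q, 511);	/* transport needs 512-byte alignment -> takes effect */
blk_queue_update_dma_alignment(q, 7);	/* weaker than the current 511, left alone */

Note the BUG_ON above: a mask larger than PAGE_SIZE is rejected outright.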
805 | /** | 805 | /** |
806 | * blk_queue_flush - configure queue's cache flush capability | 806 | * blk_queue_flush - configure queue's cache flush capability |
807 | * @q: the request queue for the device | 807 | * @q: the request queue for the device |
808 | * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA | 808 | * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA |
809 | * | 809 | * |
810 | * Tell the block layer the cache flush capability of @q. If it supports | 810 | * Tell the block layer the cache flush capability of @q. If it supports |
811 | * flushing, REQ_FLUSH should be set. If it supports bypassing | 811 | * flushing, REQ_FLUSH should be set. If it supports bypassing |
812 | * write cache for individual writes, REQ_FUA should be set. | 812 | * write cache for individual writes, REQ_FUA should be set. |
813 | */ | 813 | */ |
814 | void blk_queue_flush(struct request_queue *q, unsigned int flush) | 814 | void blk_queue_flush(struct request_queue *q, unsigned int flush) |
815 | { | 815 | { |
816 | WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA)); | 816 | WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA)); |
817 | 817 | ||
818 | if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA))) | 818 | if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA))) |
819 | flush &= ~REQ_FUA; | 819 | flush &= ~REQ_FUA; |
820 | 820 | ||
821 | q->flush_flags = flush & (REQ_FLUSH | REQ_FUA); | 821 | q->flush_flags = flush & (REQ_FLUSH | REQ_FUA); |
822 | } | 822 | } |
823 | EXPORT_SYMBOL_GPL(blk_queue_flush); | 823 | EXPORT_SYMBOL_GPL(blk_queue_flush); |
824 | 824 | ||
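A hypothetical driver for a device with a volatile write cache and FUA-capable writes would advertise both flags; a write-through (cache-less) device would pass 0 instead:

/* Volatile write cache present, and individual writes may bypass it. */
blk_queue_flush(q, REQ_FLUSH | REQ_FUA);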
825 | void blk_queue_flush_queueable(struct request_queue *q, bool queueable) | 825 | void blk_queue_flush_queueable(struct request_queue *q, bool queueable) |
826 | { | 826 | { |
827 | q->flush_not_queueable = !queueable; | 827 | q->flush_not_queueable = !queueable; |
828 | } | 828 | } |
829 | EXPORT_SYMBOL_GPL(blk_queue_flush_queueable); | 829 | EXPORT_SYMBOL_GPL(blk_queue_flush_queueable); |
830 | 830 | ||
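blk_queue_flush_queueable() carries no kernel-doc block; as the code shows, it simply stores the inverse of @queueable in flush_not_queueable. A hypothetical controller that cannot keep other commands queued while a cache flush is in flight would clear the capability:

blk_queue_flush_queueable(q, false);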
831 | static int __init blk_settings_init(void) | 831 | static int __init blk_settings_init(void) |
832 | { | 832 | { |
833 | blk_max_low_pfn = max_low_pfn - 1; | 833 | blk_max_low_pfn = max_low_pfn - 1; |
834 | blk_max_pfn = max_pfn - 1; | 834 | blk_max_pfn = max_pfn - 1; |
835 | return 0; | 835 | return 0; |
836 | } | 836 | } |
837 | subsys_initcall(blk_settings_init); | 837 | subsys_initcall(blk_settings_init); |
838 | 838 |