Commit f3876930952390a31c3a7fd68dd621464a36eb80
Committed by: Jens Axboe
1 parent: 490b94be02
Exists in: master and 7 other branches

block: add a non-queueable flush flag

A flush request isn't queueable on some drives. Add a flag so the driver can
notify the block layer about this; with that knowledge, flush handling can be
optimized.

Stable: 2.6.39 only

Cc: stable@kernel.org
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>

Showing 2 changed files with 13 additions and 0 deletions.

Inline Diff
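As context for the change, here is a minimal sketch of how a low-level driver
would be expected to consume the new flag. The helper name used below,
blk_queue_flush_queueable(), is the one this commit introduces in mainline;
the hunk that adds it lies beyond the portion of the diff reproduced on this
page, so treat the exact interface as an assumption rather than something
shown here.

        /* Illustrative only -- not part of this commit's diff. */
        static void example_configure_flush(struct request_queue *q,
                                            bool can_queue_flush)
        {
                /* Advertise write-back cache flushing and FUA as before ... */
                blk_queue_flush(q, REQ_FLUSH | REQ_FUA);

                /*
                 * ... and, with this change, tell the block layer whether a
                 * flush can stay queued alongside other commands.  SATA NCQ
                 * drives, for example, cannot queue FLUSH CACHE and would
                 * pass false here.
                 */
                blk_queue_flush_queueable(q, can_queue_flush);
        }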
block/blk-settings.c
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O, it can be used to build a
 * cdb from the request data for instance.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
        q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q: queue
 * @ufn: unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 *
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
        q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q: queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bio's. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
        q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
        q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
        q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
        q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim: the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.  Can be used by
 *   stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = BLK_DEF_MAX_SECTORS;
        lim->max_hw_sectors = INT_MAX;
        lim->max_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->discard_zeroes_data = -1;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q: the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *   The normal way for &struct bios to be passed to a device
 *   driver is for them to be collected into requests on a request
 *   queue, and then to allow the device driver to select requests
 *   off that queue when it is ready.  This works well for many block
 *   devices. However some block devices (typically virtual devices
 *   such as md or lvm) do not benefit from the processing on the
 *   request queue, and are served best by having the requests passed
 *   directly to them.  This can be achieved by providing a function
 *   to blk_queue_make_request().
 *
 * Caveat:
 *   The driver that does this *must* be able to deal appropriately
 *   with buffers in "highmemory". This can be accomplished by either calling
 *   __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *   blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
        /*
         * set defaults
         */
        q->nr_requests = BLKDEV_MAX_RQ;

        q->make_request_fn = mfn;
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;

        blk_set_default_limits(&q->limits);
        blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);

        /*
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
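/*
 * [Editor's illustration -- not part of blk-settings.c or of this commit.
 *  A bio-based virtual driver (md/lvm style), as described in the kernel-doc
 *  above, pairs blk_alloc_queue() with blk_queue_make_request(); the
 *  example_* names are hypothetical.]
 */
static int example_make_request(struct request_queue *q, struct bio *bio)
{
        /* Remap or service the bio directly; no request queueing involved. */
        bio_endio(bio, 0);
        return 0;
}

static struct request_queue *example_alloc_queue(void)
{
        struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

        if (q)
                blk_queue_make_request(q, example_make_request);
        return q;
}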

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *   Different hardware can have different requirements as to what pages
 *   it can do I/O directly to. A low level driver can call
 *   blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *   buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
        unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU.  Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->limits.bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->limits.bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
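/*
 * [Editor's illustration -- not part of this file.  A driver whose device
 *  can only DMA below 4GB would derive the bounce limit from its DMA mask,
 *  per the kernel-doc above.  The example_* name is hypothetical;
 *  dma_set_mask() and DMA_BIT_MASK() come from <linux/dma-mapping.h>.]
 */
static void example_set_bounce_limit(struct request_queue *q, struct device *dev)
{
        if (!dma_set_mask(dev, DMA_BIT_MASK(32)))
                blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
        else
                blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}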

/**
 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
 * @limits: the queue limits
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
 *
 * Description:
 *   Enables a low level driver to set a hard upper limit,
 *   max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *   the device driver based upon the combined capabilities of I/O
 *   controller and storage device.
 *
 *   max_sectors is a soft limit imposed by the block layer for
 *   filesystem type requests.  This value can be overridden on a
 *   per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *   The soft limit can not exceed max_hw_sectors.
 **/
void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
        if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
                max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }

        limits->max_hw_sectors = max_hw_sectors;
        limits->max_sectors = min_t(unsigned int, max_hw_sectors,
                                    BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q: the request queue for the device
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
 *
 * Description:
 *   See description for blk_limits_max_hw_sectors().
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q: the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *   Enables a low level driver to set an upper limit on the number of
 *   hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q: the request queue for the device
 * @max_size: max size of segment in bytes
 *
 * Description:
 *   Enables a low level driver to set an upper limit on the size of a
 *   coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q: the request queue for the device
 * @size: the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
        q->limits.logical_block_size = size;

        if (q->limits.physical_block_size < size)
                q->limits.physical_block_size = size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q: the request queue for the device
 * @size: the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
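/*
 * [Editor's illustration -- not part of this file.  A request-based driver
 *  typically applies the setters above right after allocating its queue;
 *  the values and the example_* name are hypothetical.]
 */
static void example_set_limits(struct request_queue *q)
{
        blk_queue_logical_block_size(q, 512);
        blk_queue_physical_block_size(q, 4096);    /* 4K-sector drive */
        blk_queue_max_hw_sectors(q, 1024);         /* 512 KB per request */
        blk_queue_max_segments(q, 128);
        blk_queue_max_segment_size(q, 65536);
}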

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q: the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min: smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q: the request queue for the device
 * @min: smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt: smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q: the request queue for the device
 * @opt: optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t: the stacking driver (top)
 * @b: the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 *   This function is used by stacking drivers like MD and DM to ensure
 *   that all component devices have compatible block sizes and
 *   alignments.  The stacking driver must provide a queue_limits
 *   struct (top) and then iteratively call the stacking function for
 *   all component (bottom) devices.  The stacking function will
 *   attempt to combine the values and ensure proper alignment.
 *
 *   Returns 0 if the top and bottom queue_limits are compatible.  The
 *   top device's block sizes and alignment offsets may be adjusted to
 *   ensure alignment with the bottom device.  If no compatible sizes
 *   and alignments exist, -1 is returned and the resulting top
 *   queue_limits will have the misaligned flag set to indicate that
 *   the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) & (min(top, bottom) - 1)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm(t->io_opt, b->io_opt);

        t->cluster &= b->cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm(t->alignment_offset, alignment)
                & (max(t->physical_block_size, t->io_min) - 1);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if (max(top, bottom) & (min(top, bottom) - 1))
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm(t->discard_alignment, alignment) &
                        (t->discard_granularity - 1);
        }

        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t: the stacking driver limits (top device)
 * @bdev: the component block_device (bottom)
 * @start: first data sector within component device
 *
 * Description:
 *   Merges queue limits for a top device and a block_device.  Returns
 *   0 if alignment didn't change.  Returns -1 if adding the bottom
 *   device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                      sector_t start)
{
        struct request_queue *bq = bdev_get_queue(bdev);

        start += get_start_sect(bdev);

        return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);
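/*
 * [Editor's illustration -- not part of this file.  A stacking driver
 *  (DM/MD style) starts from the defaults and folds in each component
 *  device, as the kernel-doc for blk_stack_limits() describes; the
 *  example_* names are hypothetical.]
 */
static int example_stack_components(struct queue_limits *limits,
                                    struct block_device **bdevs,
                                    sector_t *starts, int nr)
{
        int i, misaligned = 0;

        blk_set_default_limits(limits);
        for (i = 0; i < nr; i++)
                if (bdev_stack_limits(limits, bdevs[i], starts[i]) < 0)
                        misaligned = 1;

        return misaligned ? -1 : 0;
}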

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk: MD/DM gendisk (top)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 *
 * Description:
 *   Merges the limits for a top level gendisk and a bottom level
 *   block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }
}
EXPORT_SYMBOL(disk_stack_limits);

/**
 * blk_queue_dma_pad - set pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
        q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q: the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf: physically contiguous buffer
 * @size: size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
                        dma_drain_needed_fn *dma_drain_needed,
                        void *buf, unsigned int size)
{
        if (queue_max_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
        blk_queue_max_segments(q, queue_max_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
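/*
 * [Editor's illustration -- not part of this file.  The ATAPI case from the
 *  kernel-doc above: reserve one segment for a drain buffer that soaks up
 *  excess transfer bytes of packet commands.  The example_* names and the
 *  buffer size are hypothetical; kmalloc() comes from <linux/slab.h>.]
 */
static int example_drain_needed(struct request *rq)
{
        /* Only packet (BLOCK_PC) commands risk over-length transfers. */
        return rq->cmd_type == REQ_TYPE_BLOCK_PC;
}

static int example_setup_drain(struct request_queue *q)
{
        void *buf = kmalloc(256, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;
        return blk_queue_dma_drain(q, example_drain_needed, buf, 256);
}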
716 | 716 | ||
717 | /** | 717 | /** |
718 | * blk_queue_segment_boundary - set boundary rules for segment merging | 718 | * blk_queue_segment_boundary - set boundary rules for segment merging |
719 | * @q: the request queue for the device | 719 | * @q: the request queue for the device |
720 | * @mask: the memory boundary mask | 720 | * @mask: the memory boundary mask |
721 | **/ | 721 | **/ |
722 | void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) | 722 | void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) |
723 | { | 723 | { |
724 | if (mask < PAGE_CACHE_SIZE - 1) { | 724 | if (mask < PAGE_CACHE_SIZE - 1) { |
725 | mask = PAGE_CACHE_SIZE - 1; | 725 | mask = PAGE_CACHE_SIZE - 1; |
726 | printk(KERN_INFO "%s: set to minimum %lx\n", | 726 | printk(KERN_INFO "%s: set to minimum %lx\n", |
727 | __func__, mask); | 727 | __func__, mask); |
728 | } | 728 | } |
729 | 729 | ||
730 | q->limits.seg_boundary_mask = mask; | 730 | q->limits.seg_boundary_mask = mask; |
731 | } | 731 | } |
732 | EXPORT_SYMBOL(blk_queue_segment_boundary); | 732 | EXPORT_SYMBOL(blk_queue_segment_boundary); |
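For instance, a host controller whose scatter/gather engine must not let a segment cross a 64KB boundary would, hypothetically, be configured as:

	blk_queue_segment_boundary(q, 0xffff);	/* keep segments within 64KB */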
733 | 733 | ||
734 | /** | 734 | /** |
735 | * blk_queue_dma_alignment - set dma length and memory alignment | 735 | * blk_queue_dma_alignment - set dma length and memory alignment |
736 | * @q: the request queue for the device | 736 | * @q: the request queue for the device |
737 | * @mask: alignment mask | 737 | * @mask: alignment mask |
738 | * | 738 | * |
739 | * description: | 739 | * description: |
740 | * set required memory and length alignment for direct dma transactions. | 740 | * set required memory and length alignment for direct dma transactions. |
741 | * this is used when building direct io requests for the queue. | 741 | * this is used when building direct io requests for the queue. |
742 | * | 742 | * |
743 | **/ | 743 | **/ |
744 | void blk_queue_dma_alignment(struct request_queue *q, int mask) | 744 | void blk_queue_dma_alignment(struct request_queue *q, int mask) |
745 | { | 745 | { |
746 | q->dma_alignment = mask; | 746 | q->dma_alignment = mask; |
747 | } | 747 | } |
748 | EXPORT_SYMBOL(blk_queue_dma_alignment); | 748 | EXPORT_SYMBOL(blk_queue_dma_alignment); |
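Illustrative only: a device that needs buffers and transfer lengths aligned to 512 bytes for direct I/O would pass a mask of 511:

	blk_queue_dma_alignment(q, 511);	/* 512-byte alignment for direct I/O */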
749 | 749 | ||
750 | /** | 750 | /** |
751 | * blk_queue_update_dma_alignment - update dma length and memory alignment | 751 | * blk_queue_update_dma_alignment - update dma length and memory alignment |
752 | * @q: the request queue for the device | 752 | * @q: the request queue for the device |
753 | * @mask: alignment mask | 753 | * @mask: alignment mask |
754 | * | 754 | * |
755 | * description: | 755 | * description: |
756 | * update required memory and length alignment for direct dma transactions. | 756 | * update required memory and length alignment for direct dma transactions. |
757 | * If the requested alignment is larger than the current alignment, then | 757 | * If the requested alignment is larger than the current alignment, then |
758 | * the current queue alignment is updated to the new value, otherwise it | 758 | * the current queue alignment is updated to the new value, otherwise it |
759 | * is left alone. The design of this is to allow multiple objects | 759 | * is left alone. The design of this is to allow multiple objects |
760 | * (driver, device, transport etc) to set their respective | 760 | * (driver, device, transport etc) to set their respective |
761 | * alignments without having them interfere. | 761 | * alignments without having them interfere. |
762 | * | 762 | * |
763 | **/ | 763 | **/ |
764 | void blk_queue_update_dma_alignment(struct request_queue *q, int mask) | 764 | void blk_queue_update_dma_alignment(struct request_queue *q, int mask) |
765 | { | 765 | { |
766 | BUG_ON(mask > PAGE_SIZE); | 766 | BUG_ON(mask > PAGE_SIZE); |
767 | 767 | ||
768 | if (mask > q->dma_alignment) | 768 | if (mask > q->dma_alignment) |
769 | q->dma_alignment = mask; | 769 | q->dma_alignment = mask; |
770 | } | 770 | } |
771 | EXPORT_SYMBOL(blk_queue_update_dma_alignment); | 771 | EXPORT_SYMBOL(blk_queue_update_dma_alignment); |
772 | 772 | ||
773 | /** | 773 | /** |
774 | * blk_queue_flush - configure queue's cache flush capability | 774 | * blk_queue_flush - configure queue's cache flush capability |
775 | * @q: the request queue for the device | 775 | * @q: the request queue for the device |
776 | * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA | 776 | * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA |
777 | * | 777 | * |
778 | * Tell block layer cache flush capability of @q. If it supports | 778 | * Tell block layer cache flush capability of @q. If it supports |
779 | * flushing, REQ_FLUSH should be set. If it supports bypassing | 779 | * flushing, REQ_FLUSH should be set. If it supports bypassing |
780 | * write cache for individual writes, REQ_FUA should be set. | 780 | * write cache for individual writes, REQ_FUA should be set. |
781 | */ | 781 | */ |
782 | void blk_queue_flush(struct request_queue *q, unsigned int flush) | 782 | void blk_queue_flush(struct request_queue *q, unsigned int flush) |
783 | { | 783 | { |
784 | WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA)); | 784 | WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA)); |
785 | 785 | ||
786 | if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA))) | 786 | if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA))) |
787 | flush &= ~REQ_FUA; | 787 | flush &= ~REQ_FUA; |
788 | 788 | ||
789 | q->flush_flags = flush & (REQ_FLUSH | REQ_FUA); | 789 | q->flush_flags = flush & (REQ_FLUSH | REQ_FUA); |
790 | } | 790 | } |
791 | EXPORT_SYMBOL_GPL(blk_queue_flush); | 791 | EXPORT_SYMBOL_GPL(blk_queue_flush); |
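As a sketch (dev_has_writeback_cache is an assumed driver-side capability flag), a device with a volatile write cache that also honours FUA on individual writes would advertise both bits, while a write-through device would simply pass 0:

	if (dev_has_writeback_cache)
		blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
	else
		blk_queue_flush(q, 0);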
792 | 792 | ||
793 | void blk_queue_flush_queueable(struct request_queue *q, bool queueable) | ||
794 | { | ||
795 | q->flush_not_queueable = !queueable; | ||
796 | } | ||
797 | EXPORT_SYMBOL_GPL(blk_queue_flush_queueable); | ||
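The helper above is the settings-side half of this commit. A driver that has detected a drive which cannot keep a cache-flush command in its queue might, hypothetically, report that as follows (drive_can_queue_flush is an assumed per-device flag), letting the block layer optimize back-to-back flushes with that knowledge:

	if (!drive_can_queue_flush)
		blk_queue_flush_queueable(q, false);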
798 | |||
793 | static int __init blk_settings_init(void) | 799 | static int __init blk_settings_init(void) |
794 | { | 800 | { |
795 | blk_max_low_pfn = max_low_pfn - 1; | 801 | blk_max_low_pfn = max_low_pfn - 1; |
796 | blk_max_pfn = max_pfn - 1; | 802 | blk_max_pfn = max_pfn - 1; |
797 | return 0; | 803 | return 0; |
798 | } | 804 | } |
799 | subsys_initcall(blk_settings_init); | 805 | subsys_initcall(blk_settings_init); |
800 | 806 |
include/linux/blkdev.h
1 | #ifndef _LINUX_BLKDEV_H | 1 | #ifndef _LINUX_BLKDEV_H |
2 | #define _LINUX_BLKDEV_H | 2 | #define _LINUX_BLKDEV_H |
3 | 3 | ||
4 | #ifdef CONFIG_BLOCK | 4 | #ifdef CONFIG_BLOCK |
5 | 5 | ||
6 | #include <linux/sched.h> | 6 | #include <linux/sched.h> |
7 | #include <linux/major.h> | 7 | #include <linux/major.h> |
8 | #include <linux/genhd.h> | 8 | #include <linux/genhd.h> |
9 | #include <linux/list.h> | 9 | #include <linux/list.h> |
10 | #include <linux/timer.h> | 10 | #include <linux/timer.h> |
11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
12 | #include <linux/pagemap.h> | 12 | #include <linux/pagemap.h> |
13 | #include <linux/backing-dev.h> | 13 | #include <linux/backing-dev.h> |
14 | #include <linux/wait.h> | 14 | #include <linux/wait.h> |
15 | #include <linux/mempool.h> | 15 | #include <linux/mempool.h> |
16 | #include <linux/bio.h> | 16 | #include <linux/bio.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/stringify.h> | 18 | #include <linux/stringify.h> |
19 | #include <linux/gfp.h> | 19 | #include <linux/gfp.h> |
20 | #include <linux/bsg.h> | 20 | #include <linux/bsg.h> |
21 | #include <linux/smp.h> | 21 | #include <linux/smp.h> |
22 | 22 | ||
23 | #include <asm/scatterlist.h> | 23 | #include <asm/scatterlist.h> |
24 | 24 | ||
25 | struct scsi_ioctl_command; | 25 | struct scsi_ioctl_command; |
26 | 26 | ||
27 | struct request_queue; | 27 | struct request_queue; |
28 | struct elevator_queue; | 28 | struct elevator_queue; |
29 | struct request_pm_state; | 29 | struct request_pm_state; |
30 | struct blk_trace; | 30 | struct blk_trace; |
31 | struct request; | 31 | struct request; |
32 | struct sg_io_hdr; | 32 | struct sg_io_hdr; |
33 | 33 | ||
34 | #define BLKDEV_MIN_RQ 4 | 34 | #define BLKDEV_MIN_RQ 4 |
35 | #define BLKDEV_MAX_RQ 128 /* Default maximum */ | 35 | #define BLKDEV_MAX_RQ 128 /* Default maximum */ |
36 | 36 | ||
37 | struct request; | 37 | struct request; |
38 | typedef void (rq_end_io_fn)(struct request *, int); | 38 | typedef void (rq_end_io_fn)(struct request *, int); |
39 | 39 | ||
40 | struct request_list { | 40 | struct request_list { |
41 | /* | 41 | /* |
42 | * count[], starved[], and wait[] are indexed by | 42 | * count[], starved[], and wait[] are indexed by |
43 | * BLK_RW_SYNC/BLK_RW_ASYNC | 43 | * BLK_RW_SYNC/BLK_RW_ASYNC |
44 | */ | 44 | */ |
45 | int count[2]; | 45 | int count[2]; |
46 | int starved[2]; | 46 | int starved[2]; |
47 | int elvpriv; | 47 | int elvpriv; |
48 | mempool_t *rq_pool; | 48 | mempool_t *rq_pool; |
49 | wait_queue_head_t wait[2]; | 49 | wait_queue_head_t wait[2]; |
50 | }; | 50 | }; |
51 | 51 | ||
52 | /* | 52 | /* |
53 | * request command types | 53 | * request command types |
54 | */ | 54 | */ |
55 | enum rq_cmd_type_bits { | 55 | enum rq_cmd_type_bits { |
56 | REQ_TYPE_FS = 1, /* fs request */ | 56 | REQ_TYPE_FS = 1, /* fs request */ |
57 | REQ_TYPE_BLOCK_PC, /* scsi command */ | 57 | REQ_TYPE_BLOCK_PC, /* scsi command */ |
58 | REQ_TYPE_SENSE, /* sense request */ | 58 | REQ_TYPE_SENSE, /* sense request */ |
59 | REQ_TYPE_PM_SUSPEND, /* suspend request */ | 59 | REQ_TYPE_PM_SUSPEND, /* suspend request */ |
60 | REQ_TYPE_PM_RESUME, /* resume request */ | 60 | REQ_TYPE_PM_RESUME, /* resume request */ |
61 | REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ | 61 | REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ |
62 | REQ_TYPE_SPECIAL, /* driver defined type */ | 62 | REQ_TYPE_SPECIAL, /* driver defined type */ |
63 | /* | 63 | /* |
64 | * for ATA/ATAPI devices. this really doesn't belong here, ide should | 64 | * for ATA/ATAPI devices. this really doesn't belong here, ide should |
65 | * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver | 65 | * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver |
66 | * private REQ_LB opcodes to differentiate what type of request this is | 66 | * private REQ_LB opcodes to differentiate what type of request this is |
67 | */ | 67 | */ |
68 | REQ_TYPE_ATA_TASKFILE, | 68 | REQ_TYPE_ATA_TASKFILE, |
69 | REQ_TYPE_ATA_PC, | 69 | REQ_TYPE_ATA_PC, |
70 | }; | 70 | }; |
71 | 71 | ||
72 | #define BLK_MAX_CDB 16 | 72 | #define BLK_MAX_CDB 16 |
73 | 73 | ||
74 | /* | 74 | /* |
75 | * try to put the fields that are referenced together in the same cacheline. | 75 | * try to put the fields that are referenced together in the same cacheline. |
76 | * if you modify this structure, be sure to check block/blk-core.c:rq_init() | 76 | * if you modify this structure, be sure to check block/blk-core.c:rq_init() |
77 | * as well! | 77 | * as well! |
78 | */ | 78 | */ |
79 | struct request { | 79 | struct request { |
80 | struct list_head queuelist; | 80 | struct list_head queuelist; |
81 | struct call_single_data csd; | 81 | struct call_single_data csd; |
82 | 82 | ||
83 | struct request_queue *q; | 83 | struct request_queue *q; |
84 | 84 | ||
85 | unsigned int cmd_flags; | 85 | unsigned int cmd_flags; |
86 | enum rq_cmd_type_bits cmd_type; | 86 | enum rq_cmd_type_bits cmd_type; |
87 | unsigned long atomic_flags; | 87 | unsigned long atomic_flags; |
88 | 88 | ||
89 | int cpu; | 89 | int cpu; |
90 | 90 | ||
91 | /* the following two fields are internal, NEVER access directly */ | 91 | /* the following two fields are internal, NEVER access directly */ |
92 | unsigned int __data_len; /* total data len */ | 92 | unsigned int __data_len; /* total data len */ |
93 | sector_t __sector; /* sector cursor */ | 93 | sector_t __sector; /* sector cursor */ |
94 | 94 | ||
95 | struct bio *bio; | 95 | struct bio *bio; |
96 | struct bio *biotail; | 96 | struct bio *biotail; |
97 | 97 | ||
98 | struct hlist_node hash; /* merge hash */ | 98 | struct hlist_node hash; /* merge hash */ |
99 | /* | 99 | /* |
100 | * The rb_node is only used inside the io scheduler, requests | 100 | * The rb_node is only used inside the io scheduler, requests |
101 | * are pruned when moved to the dispatch queue. So let the | 101 | * are pruned when moved to the dispatch queue. So let the |
102 | * completion_data share space with the rb_node. | 102 | * completion_data share space with the rb_node. |
103 | */ | 103 | */ |
104 | union { | 104 | union { |
105 | struct rb_node rb_node; /* sort/lookup */ | 105 | struct rb_node rb_node; /* sort/lookup */ |
106 | void *completion_data; | 106 | void *completion_data; |
107 | }; | 107 | }; |
108 | 108 | ||
109 | /* | 109 | /* |
110 | * Three pointers are available for the IO schedulers, if they need | 110 | * Three pointers are available for the IO schedulers, if they need |
111 | * more they have to dynamically allocate it. Flush requests are | 111 | * more they have to dynamically allocate it. Flush requests are |
112 | * never put on the IO scheduler. So let the flush fields share | 112 | * never put on the IO scheduler. So let the flush fields share |
113 | * space with the three elevator_private pointers. | 113 | * space with the three elevator_private pointers. |
114 | */ | 114 | */ |
115 | union { | 115 | union { |
116 | void *elevator_private[3]; | 116 | void *elevator_private[3]; |
117 | struct { | 117 | struct { |
118 | unsigned int seq; | 118 | unsigned int seq; |
119 | struct list_head list; | 119 | struct list_head list; |
120 | } flush; | 120 | } flush; |
121 | }; | 121 | }; |
122 | 122 | ||
123 | struct gendisk *rq_disk; | 123 | struct gendisk *rq_disk; |
124 | struct hd_struct *part; | 124 | struct hd_struct *part; |
125 | unsigned long start_time; | 125 | unsigned long start_time; |
126 | #ifdef CONFIG_BLK_CGROUP | 126 | #ifdef CONFIG_BLK_CGROUP |
127 | unsigned long long start_time_ns; | 127 | unsigned long long start_time_ns; |
128 | unsigned long long io_start_time_ns; /* when passed to hardware */ | 128 | unsigned long long io_start_time_ns; /* when passed to hardware */ |
129 | #endif | 129 | #endif |
130 | /* Number of scatter-gather DMA addr+len pairs after | 130 | /* Number of scatter-gather DMA addr+len pairs after |
131 | * physical address coalescing is performed. | 131 | * physical address coalescing is performed. |
132 | */ | 132 | */ |
133 | unsigned short nr_phys_segments; | 133 | unsigned short nr_phys_segments; |
134 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | 134 | #if defined(CONFIG_BLK_DEV_INTEGRITY) |
135 | unsigned short nr_integrity_segments; | 135 | unsigned short nr_integrity_segments; |
136 | #endif | 136 | #endif |
137 | 137 | ||
138 | unsigned short ioprio; | 138 | unsigned short ioprio; |
139 | 139 | ||
140 | int ref_count; | 140 | int ref_count; |
141 | 141 | ||
142 | void *special; /* opaque pointer available for LLD use */ | 142 | void *special; /* opaque pointer available for LLD use */ |
143 | char *buffer; /* kaddr of the current segment if available */ | 143 | char *buffer; /* kaddr of the current segment if available */ |
144 | 144 | ||
145 | int tag; | 145 | int tag; |
146 | int errors; | 146 | int errors; |
147 | 147 | ||
148 | /* | 148 | /* |
149 | * when request is used as a packet command carrier | 149 | * when request is used as a packet command carrier |
150 | */ | 150 | */ |
151 | unsigned char __cmd[BLK_MAX_CDB]; | 151 | unsigned char __cmd[BLK_MAX_CDB]; |
152 | unsigned char *cmd; | 152 | unsigned char *cmd; |
153 | unsigned short cmd_len; | 153 | unsigned short cmd_len; |
154 | 154 | ||
155 | unsigned int extra_len; /* length of alignment and padding */ | 155 | unsigned int extra_len; /* length of alignment and padding */ |
156 | unsigned int sense_len; | 156 | unsigned int sense_len; |
157 | unsigned int resid_len; /* residual count */ | 157 | unsigned int resid_len; /* residual count */ |
158 | void *sense; | 158 | void *sense; |
159 | 159 | ||
160 | unsigned long deadline; | 160 | unsigned long deadline; |
161 | struct list_head timeout_list; | 161 | struct list_head timeout_list; |
162 | unsigned int timeout; | 162 | unsigned int timeout; |
163 | int retries; | 163 | int retries; |
164 | 164 | ||
165 | /* | 165 | /* |
166 | * completion callback. | 166 | * completion callback. |
167 | */ | 167 | */ |
168 | rq_end_io_fn *end_io; | 168 | rq_end_io_fn *end_io; |
169 | void *end_io_data; | 169 | void *end_io_data; |
170 | 170 | ||
171 | /* for bidi */ | 171 | /* for bidi */ |
172 | struct request *next_rq; | 172 | struct request *next_rq; |
173 | }; | 173 | }; |
174 | 174 | ||
175 | static inline unsigned short req_get_ioprio(struct request *req) | 175 | static inline unsigned short req_get_ioprio(struct request *req) |
176 | { | 176 | { |
177 | return req->ioprio; | 177 | return req->ioprio; |
178 | } | 178 | } |
179 | 179 | ||
180 | /* | 180 | /* |
181 | * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME | 181 | * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME |
182 | * requests. Some step values could eventually be made generic. | 182 | * requests. Some step values could eventually be made generic. |
183 | */ | 183 | */ |
184 | struct request_pm_state | 184 | struct request_pm_state |
185 | { | 185 | { |
186 | /* PM state machine step value, currently driver specific */ | 186 | /* PM state machine step value, currently driver specific */ |
187 | int pm_step; | 187 | int pm_step; |
188 | /* requested PM state value (S1, S2, S3, S4, ...) */ | 188 | /* requested PM state value (S1, S2, S3, S4, ...) */ |
189 | u32 pm_state; | 189 | u32 pm_state; |
190 | void* data; /* for driver use */ | 190 | void* data; /* for driver use */ |
191 | }; | 191 | }; |
192 | 192 | ||
193 | #include <linux/elevator.h> | 193 | #include <linux/elevator.h> |
194 | 194 | ||
195 | typedef void (request_fn_proc) (struct request_queue *q); | 195 | typedef void (request_fn_proc) (struct request_queue *q); |
196 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); | 196 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); |
197 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); | 197 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); |
198 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); | 198 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); |
199 | 199 | ||
200 | struct bio_vec; | 200 | struct bio_vec; |
201 | struct bvec_merge_data { | 201 | struct bvec_merge_data { |
202 | struct block_device *bi_bdev; | 202 | struct block_device *bi_bdev; |
203 | sector_t bi_sector; | 203 | sector_t bi_sector; |
204 | unsigned bi_size; | 204 | unsigned bi_size; |
205 | unsigned long bi_rw; | 205 | unsigned long bi_rw; |
206 | }; | 206 | }; |
207 | typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, | 207 | typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, |
208 | struct bio_vec *); | 208 | struct bio_vec *); |
209 | typedef void (softirq_done_fn)(struct request *); | 209 | typedef void (softirq_done_fn)(struct request *); |
210 | typedef int (dma_drain_needed_fn)(struct request *); | 210 | typedef int (dma_drain_needed_fn)(struct request *); |
211 | typedef int (lld_busy_fn) (struct request_queue *q); | 211 | typedef int (lld_busy_fn) (struct request_queue *q); |
212 | 212 | ||
213 | enum blk_eh_timer_return { | 213 | enum blk_eh_timer_return { |
214 | BLK_EH_NOT_HANDLED, | 214 | BLK_EH_NOT_HANDLED, |
215 | BLK_EH_HANDLED, | 215 | BLK_EH_HANDLED, |
216 | BLK_EH_RESET_TIMER, | 216 | BLK_EH_RESET_TIMER, |
217 | }; | 217 | }; |
218 | 218 | ||
219 | typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); | 219 | typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); |
220 | 220 | ||
221 | enum blk_queue_state { | 221 | enum blk_queue_state { |
222 | Queue_down, | 222 | Queue_down, |
223 | Queue_up, | 223 | Queue_up, |
224 | }; | 224 | }; |
225 | 225 | ||
226 | struct blk_queue_tag { | 226 | struct blk_queue_tag { |
227 | struct request **tag_index; /* map of busy tags */ | 227 | struct request **tag_index; /* map of busy tags */ |
228 | unsigned long *tag_map; /* bit map of free/busy tags */ | 228 | unsigned long *tag_map; /* bit map of free/busy tags */ |
229 | int busy; /* current depth */ | 229 | int busy; /* current depth */ |
230 | int max_depth; /* what we will send to device */ | 230 | int max_depth; /* what we will send to device */ |
231 | int real_max_depth; /* what the array can hold */ | 231 | int real_max_depth; /* what the array can hold */ |
232 | atomic_t refcnt; /* map can be shared */ | 232 | atomic_t refcnt; /* map can be shared */ |
233 | }; | 233 | }; |
234 | 234 | ||
235 | #define BLK_SCSI_MAX_CMDS (256) | 235 | #define BLK_SCSI_MAX_CMDS (256) |
236 | #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) | 236 | #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) |
237 | 237 | ||
238 | struct queue_limits { | 238 | struct queue_limits { |
239 | unsigned long bounce_pfn; | 239 | unsigned long bounce_pfn; |
240 | unsigned long seg_boundary_mask; | 240 | unsigned long seg_boundary_mask; |
241 | 241 | ||
242 | unsigned int max_hw_sectors; | 242 | unsigned int max_hw_sectors; |
243 | unsigned int max_sectors; | 243 | unsigned int max_sectors; |
244 | unsigned int max_segment_size; | 244 | unsigned int max_segment_size; |
245 | unsigned int physical_block_size; | 245 | unsigned int physical_block_size; |
246 | unsigned int alignment_offset; | 246 | unsigned int alignment_offset; |
247 | unsigned int io_min; | 247 | unsigned int io_min; |
248 | unsigned int io_opt; | 248 | unsigned int io_opt; |
249 | unsigned int max_discard_sectors; | 249 | unsigned int max_discard_sectors; |
250 | unsigned int discard_granularity; | 250 | unsigned int discard_granularity; |
251 | unsigned int discard_alignment; | 251 | unsigned int discard_alignment; |
252 | 252 | ||
253 | unsigned short logical_block_size; | 253 | unsigned short logical_block_size; |
254 | unsigned short max_segments; | 254 | unsigned short max_segments; |
255 | unsigned short max_integrity_segments; | 255 | unsigned short max_integrity_segments; |
256 | 256 | ||
257 | unsigned char misaligned; | 257 | unsigned char misaligned; |
258 | unsigned char discard_misaligned; | 258 | unsigned char discard_misaligned; |
259 | unsigned char cluster; | 259 | unsigned char cluster; |
260 | signed char discard_zeroes_data; | 260 | signed char discard_zeroes_data; |
261 | }; | 261 | }; |
262 | 262 | ||
263 | struct request_queue | 263 | struct request_queue |
264 | { | 264 | { |
265 | /* | 265 | /* |
266 | * Together with queue_head for cacheline sharing | 266 | * Together with queue_head for cacheline sharing |
267 | */ | 267 | */ |
268 | struct list_head queue_head; | 268 | struct list_head queue_head; |
269 | struct request *last_merge; | 269 | struct request *last_merge; |
270 | struct elevator_queue *elevator; | 270 | struct elevator_queue *elevator; |
271 | 271 | ||
272 | /* | 272 | /* |
273 | * the queue request freelist, one for reads and one for writes | 273 | * the queue request freelist, one for reads and one for writes |
274 | */ | 274 | */ |
275 | struct request_list rq; | 275 | struct request_list rq; |
276 | 276 | ||
277 | request_fn_proc *request_fn; | 277 | request_fn_proc *request_fn; |
278 | make_request_fn *make_request_fn; | 278 | make_request_fn *make_request_fn; |
279 | prep_rq_fn *prep_rq_fn; | 279 | prep_rq_fn *prep_rq_fn; |
280 | unprep_rq_fn *unprep_rq_fn; | 280 | unprep_rq_fn *unprep_rq_fn; |
281 | merge_bvec_fn *merge_bvec_fn; | 281 | merge_bvec_fn *merge_bvec_fn; |
282 | softirq_done_fn *softirq_done_fn; | 282 | softirq_done_fn *softirq_done_fn; |
283 | rq_timed_out_fn *rq_timed_out_fn; | 283 | rq_timed_out_fn *rq_timed_out_fn; |
284 | dma_drain_needed_fn *dma_drain_needed; | 284 | dma_drain_needed_fn *dma_drain_needed; |
285 | lld_busy_fn *lld_busy_fn; | 285 | lld_busy_fn *lld_busy_fn; |
286 | 286 | ||
287 | /* | 287 | /* |
288 | * Dispatch queue sorting | 288 | * Dispatch queue sorting |
289 | */ | 289 | */ |
290 | sector_t end_sector; | 290 | sector_t end_sector; |
291 | struct request *boundary_rq; | 291 | struct request *boundary_rq; |
292 | 292 | ||
293 | /* | 293 | /* |
294 | * Delayed queue handling | 294 | * Delayed queue handling |
295 | */ | 295 | */ |
296 | struct delayed_work delay_work; | 296 | struct delayed_work delay_work; |
297 | 297 | ||
298 | struct backing_dev_info backing_dev_info; | 298 | struct backing_dev_info backing_dev_info; |
299 | 299 | ||
300 | /* | 300 | /* |
301 | * The queue owner gets to use this for whatever they like. | 301 | * The queue owner gets to use this for whatever they like. |
302 | * ll_rw_blk doesn't touch it. | 302 | * ll_rw_blk doesn't touch it. |
303 | */ | 303 | */ |
304 | void *queuedata; | 304 | void *queuedata; |
305 | 305 | ||
306 | /* | 306 | /* |
307 | * queue needs bounce pages for pages above this limit | 307 | * queue needs bounce pages for pages above this limit |
308 | */ | 308 | */ |
309 | gfp_t bounce_gfp; | 309 | gfp_t bounce_gfp; |
310 | 310 | ||
311 | /* | 311 | /* |
312 | * various queue flags, see QUEUE_* below | 312 | * various queue flags, see QUEUE_* below |
313 | */ | 313 | */ |
314 | unsigned long queue_flags; | 314 | unsigned long queue_flags; |
315 | 315 | ||
316 | /* | 316 | /* |
317 | * protects queue structures from reentrancy. ->__queue_lock should | 317 | * protects queue structures from reentrancy. ->__queue_lock should |
318 | * _never_ be used directly, it is queue private. always use | 318 | * _never_ be used directly, it is queue private. always use |
319 | * ->queue_lock. | 319 | * ->queue_lock. |
320 | */ | 320 | */ |
321 | spinlock_t __queue_lock; | 321 | spinlock_t __queue_lock; |
322 | spinlock_t *queue_lock; | 322 | spinlock_t *queue_lock; |
323 | 323 | ||
324 | /* | 324 | /* |
325 | * queue kobject | 325 | * queue kobject |
326 | */ | 326 | */ |
327 | struct kobject kobj; | 327 | struct kobject kobj; |
328 | 328 | ||
329 | /* | 329 | /* |
330 | * queue settings | 330 | * queue settings |
331 | */ | 331 | */ |
332 | unsigned long nr_requests; /* Max # of requests */ | 332 | unsigned long nr_requests; /* Max # of requests */ |
333 | unsigned int nr_congestion_on; | 333 | unsigned int nr_congestion_on; |
334 | unsigned int nr_congestion_off; | 334 | unsigned int nr_congestion_off; |
335 | unsigned int nr_batching; | 335 | unsigned int nr_batching; |
336 | 336 | ||
337 | void *dma_drain_buffer; | 337 | void *dma_drain_buffer; |
338 | unsigned int dma_drain_size; | 338 | unsigned int dma_drain_size; |
339 | unsigned int dma_pad_mask; | 339 | unsigned int dma_pad_mask; |
340 | unsigned int dma_alignment; | 340 | unsigned int dma_alignment; |
341 | 341 | ||
342 | struct blk_queue_tag *queue_tags; | 342 | struct blk_queue_tag *queue_tags; |
343 | struct list_head tag_busy_list; | 343 | struct list_head tag_busy_list; |
344 | 344 | ||
345 | unsigned int nr_sorted; | 345 | unsigned int nr_sorted; |
346 | unsigned int in_flight[2]; | 346 | unsigned int in_flight[2]; |
347 | 347 | ||
348 | unsigned int rq_timeout; | 348 | unsigned int rq_timeout; |
349 | struct timer_list timeout; | 349 | struct timer_list timeout; |
350 | struct list_head timeout_list; | 350 | struct list_head timeout_list; |
351 | 351 | ||
352 | struct queue_limits limits; | 352 | struct queue_limits limits; |
353 | 353 | ||
354 | /* | 354 | /* |
355 | * sg stuff | 355 | * sg stuff |
356 | */ | 356 | */ |
357 | unsigned int sg_timeout; | 357 | unsigned int sg_timeout; |
358 | unsigned int sg_reserved_size; | 358 | unsigned int sg_reserved_size; |
359 | int node; | 359 | int node; |
360 | #ifdef CONFIG_BLK_DEV_IO_TRACE | 360 | #ifdef CONFIG_BLK_DEV_IO_TRACE |
361 | struct blk_trace *blk_trace; | 361 | struct blk_trace *blk_trace; |
362 | #endif | 362 | #endif |
363 | /* | 363 | /* |
364 | * for flush operations | 364 | * for flush operations |
365 | */ | 365 | */ |
366 | unsigned int flush_flags; | 366 | unsigned int flush_flags; |
367 | unsigned int flush_not_queueable:1; | ||
367 | unsigned int flush_pending_idx:1; | 368 | unsigned int flush_pending_idx:1; |
368 | unsigned int flush_running_idx:1; | 369 | unsigned int flush_running_idx:1; |
369 | unsigned long flush_pending_since; | 370 | unsigned long flush_pending_since; |
370 | struct list_head flush_queue[2]; | 371 | struct list_head flush_queue[2]; |
371 | struct list_head flush_data_in_flight; | 372 | struct list_head flush_data_in_flight; |
372 | struct request flush_rq; | 373 | struct request flush_rq; |
373 | 374 | ||
374 | struct mutex sysfs_lock; | 375 | struct mutex sysfs_lock; |
375 | 376 | ||
376 | #if defined(CONFIG_BLK_DEV_BSG) | 377 | #if defined(CONFIG_BLK_DEV_BSG) |
377 | struct bsg_class_device bsg_dev; | 378 | struct bsg_class_device bsg_dev; |
378 | #endif | 379 | #endif |
379 | 380 | ||
380 | #ifdef CONFIG_BLK_DEV_THROTTLING | 381 | #ifdef CONFIG_BLK_DEV_THROTTLING |
381 | /* Throttle data */ | 382 | /* Throttle data */ |
382 | struct throtl_data *td; | 383 | struct throtl_data *td; |
383 | #endif | 384 | #endif |
384 | }; | 385 | }; |
385 | 386 | ||
386 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ | 387 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ |
387 | #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ | 388 | #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ |
388 | #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ | 389 | #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ |
389 | #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ | 390 | #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ |
390 | #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ | 391 | #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ |
391 | #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ | 392 | #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ |
392 | #define QUEUE_FLAG_ELVSWITCH 7 /* don't use elevator, just do FIFO */ | 393 | #define QUEUE_FLAG_ELVSWITCH 7 /* don't use elevator, just do FIFO */ |
393 | #define QUEUE_FLAG_BIDI 8 /* queue supports bidi requests */ | 394 | #define QUEUE_FLAG_BIDI 8 /* queue supports bidi requests */ |
394 | #define QUEUE_FLAG_NOMERGES 9 /* disable merge attempts */ | 395 | #define QUEUE_FLAG_NOMERGES 9 /* disable merge attempts */ |
395 | #define QUEUE_FLAG_SAME_COMP 10 /* force complete on same CPU */ | 396 | #define QUEUE_FLAG_SAME_COMP 10 /* force complete on same CPU */ |
396 | #define QUEUE_FLAG_FAIL_IO 11 /* fake timeout */ | 397 | #define QUEUE_FLAG_FAIL_IO 11 /* fake timeout */ |
397 | #define QUEUE_FLAG_STACKABLE 12 /* supports request stacking */ | 398 | #define QUEUE_FLAG_STACKABLE 12 /* supports request stacking */ |
398 | #define QUEUE_FLAG_NONROT 13 /* non-rotational device (SSD) */ | 399 | #define QUEUE_FLAG_NONROT 13 /* non-rotational device (SSD) */ |
399 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ | 400 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ |
400 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ | 401 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ |
401 | #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ | 402 | #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ |
402 | #define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ | 403 | #define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ |
403 | #define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */ | 404 | #define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */ |
404 | #define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */ | 405 | #define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */ |
405 | 406 | ||
406 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 407 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
407 | (1 << QUEUE_FLAG_STACKABLE) | \ | 408 | (1 << QUEUE_FLAG_STACKABLE) | \ |
408 | (1 << QUEUE_FLAG_SAME_COMP) | \ | 409 | (1 << QUEUE_FLAG_SAME_COMP) | \ |
409 | (1 << QUEUE_FLAG_ADD_RANDOM)) | 410 | (1 << QUEUE_FLAG_ADD_RANDOM)) |
410 | 411 | ||
411 | static inline int queue_is_locked(struct request_queue *q) | 412 | static inline int queue_is_locked(struct request_queue *q) |
412 | { | 413 | { |
413 | #ifdef CONFIG_SMP | 414 | #ifdef CONFIG_SMP |
414 | spinlock_t *lock = q->queue_lock; | 415 | spinlock_t *lock = q->queue_lock; |
415 | return lock && spin_is_locked(lock); | 416 | return lock && spin_is_locked(lock); |
416 | #else | 417 | #else |
417 | return 1; | 418 | return 1; |
418 | #endif | 419 | #endif |
419 | } | 420 | } |
420 | 421 | ||
421 | static inline void queue_flag_set_unlocked(unsigned int flag, | 422 | static inline void queue_flag_set_unlocked(unsigned int flag, |
422 | struct request_queue *q) | 423 | struct request_queue *q) |
423 | { | 424 | { |
424 | __set_bit(flag, &q->queue_flags); | 425 | __set_bit(flag, &q->queue_flags); |
425 | } | 426 | } |
426 | 427 | ||
427 | static inline int queue_flag_test_and_clear(unsigned int flag, | 428 | static inline int queue_flag_test_and_clear(unsigned int flag, |
428 | struct request_queue *q) | 429 | struct request_queue *q) |
429 | { | 430 | { |
430 | WARN_ON_ONCE(!queue_is_locked(q)); | 431 | WARN_ON_ONCE(!queue_is_locked(q)); |
431 | 432 | ||
432 | if (test_bit(flag, &q->queue_flags)) { | 433 | if (test_bit(flag, &q->queue_flags)) { |
433 | __clear_bit(flag, &q->queue_flags); | 434 | __clear_bit(flag, &q->queue_flags); |
434 | return 1; | 435 | return 1; |
435 | } | 436 | } |
436 | 437 | ||
437 | return 0; | 438 | return 0; |
438 | } | 439 | } |
439 | 440 | ||
440 | static inline int queue_flag_test_and_set(unsigned int flag, | 441 | static inline int queue_flag_test_and_set(unsigned int flag, |
441 | struct request_queue *q) | 442 | struct request_queue *q) |
442 | { | 443 | { |
443 | WARN_ON_ONCE(!queue_is_locked(q)); | 444 | WARN_ON_ONCE(!queue_is_locked(q)); |
444 | 445 | ||
445 | if (!test_bit(flag, &q->queue_flags)) { | 446 | if (!test_bit(flag, &q->queue_flags)) { |
446 | __set_bit(flag, &q->queue_flags); | 447 | __set_bit(flag, &q->queue_flags); |
447 | return 0; | 448 | return 0; |
448 | } | 449 | } |
449 | 450 | ||
450 | return 1; | 451 | return 1; |
451 | } | 452 | } |
452 | 453 | ||
453 | static inline void queue_flag_set(unsigned int flag, struct request_queue *q) | 454 | static inline void queue_flag_set(unsigned int flag, struct request_queue *q) |
454 | { | 455 | { |
455 | WARN_ON_ONCE(!queue_is_locked(q)); | 456 | WARN_ON_ONCE(!queue_is_locked(q)); |
456 | __set_bit(flag, &q->queue_flags); | 457 | __set_bit(flag, &q->queue_flags); |
457 | } | 458 | } |
458 | 459 | ||
459 | static inline void queue_flag_clear_unlocked(unsigned int flag, | 460 | static inline void queue_flag_clear_unlocked(unsigned int flag, |
460 | struct request_queue *q) | 461 | struct request_queue *q) |
461 | { | 462 | { |
462 | __clear_bit(flag, &q->queue_flags); | 463 | __clear_bit(flag, &q->queue_flags); |
463 | } | 464 | } |
464 | 465 | ||
465 | static inline int queue_in_flight(struct request_queue *q) | 466 | static inline int queue_in_flight(struct request_queue *q) |
466 | { | 467 | { |
467 | return q->in_flight[0] + q->in_flight[1]; | 468 | return q->in_flight[0] + q->in_flight[1]; |
468 | } | 469 | } |
469 | 470 | ||
470 | static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) | 471 | static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) |
471 | { | 472 | { |
472 | WARN_ON_ONCE(!queue_is_locked(q)); | 473 | WARN_ON_ONCE(!queue_is_locked(q)); |
473 | __clear_bit(flag, &q->queue_flags); | 474 | __clear_bit(flag, &q->queue_flags); |
474 | } | 475 | } |
475 | 476 | ||
476 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) | 477 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) |
477 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) | 478 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) |
478 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) | 479 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) |
479 | #define blk_queue_noxmerges(q) \ | 480 | #define blk_queue_noxmerges(q) \ |
480 | test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) | 481 | test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) |
481 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) | 482 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) |
482 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) | 483 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) |
483 | #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) | 484 | #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) |
484 | #define blk_queue_stackable(q) \ | 485 | #define blk_queue_stackable(q) \ |
485 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) | 486 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) |
486 | #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) | 487 | #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) |
487 | #define blk_queue_secdiscard(q) (blk_queue_discard(q) && \ | 488 | #define blk_queue_secdiscard(q) (blk_queue_discard(q) && \ |
488 | test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags)) | 489 | test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags)) |
489 | 490 | ||
490 | #define blk_noretry_request(rq) \ | 491 | #define blk_noretry_request(rq) \ |
491 | ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ | 492 | ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ |
492 | REQ_FAILFAST_DRIVER)) | 493 | REQ_FAILFAST_DRIVER)) |
493 | 494 | ||
494 | #define blk_account_rq(rq) \ | 495 | #define blk_account_rq(rq) \ |
495 | (((rq)->cmd_flags & REQ_STARTED) && \ | 496 | (((rq)->cmd_flags & REQ_STARTED) && \ |
496 | ((rq)->cmd_type == REQ_TYPE_FS || \ | 497 | ((rq)->cmd_type == REQ_TYPE_FS || \ |
497 | ((rq)->cmd_flags & REQ_DISCARD))) | 498 | ((rq)->cmd_flags & REQ_DISCARD))) |
498 | 499 | ||
499 | #define blk_pm_request(rq) \ | 500 | #define blk_pm_request(rq) \ |
500 | ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \ | 501 | ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \ |
501 | (rq)->cmd_type == REQ_TYPE_PM_RESUME) | 502 | (rq)->cmd_type == REQ_TYPE_PM_RESUME) |
502 | 503 | ||
503 | #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) | 504 | #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) |
504 | #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) | 505 | #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) |
505 | /* rq->queuelist of dequeued request must be list_empty() */ | 506 | /* rq->queuelist of dequeued request must be list_empty() */ |
506 | #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) | 507 | #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) |
507 | 508 | ||
508 | #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) | 509 | #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) |
509 | 510 | ||
510 | #define rq_data_dir(rq) ((rq)->cmd_flags & 1) | 511 | #define rq_data_dir(rq) ((rq)->cmd_flags & 1) |
511 | 512 | ||
512 | static inline unsigned int blk_queue_cluster(struct request_queue *q) | 513 | static inline unsigned int blk_queue_cluster(struct request_queue *q) |
513 | { | 514 | { |
514 | return q->limits.cluster; | 515 | return q->limits.cluster; |
515 | } | 516 | } |
516 | 517 | ||
517 | /* | 518 | /* |
518 | * We regard a request as sync, if either a read or a sync write | 519 | * We regard a request as sync, if either a read or a sync write |
519 | */ | 520 | */ |
520 | static inline bool rw_is_sync(unsigned int rw_flags) | 521 | static inline bool rw_is_sync(unsigned int rw_flags) |
521 | { | 522 | { |
522 | return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC); | 523 | return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC); |
523 | } | 524 | } |
524 | 525 | ||
525 | static inline bool rq_is_sync(struct request *rq) | 526 | static inline bool rq_is_sync(struct request *rq) |
526 | { | 527 | { |
527 | return rw_is_sync(rq->cmd_flags); | 528 | return rw_is_sync(rq->cmd_flags); |
528 | } | 529 | } |
529 | 530 | ||
530 | static inline int blk_queue_full(struct request_queue *q, int sync) | 531 | static inline int blk_queue_full(struct request_queue *q, int sync) |
531 | { | 532 | { |
532 | if (sync) | 533 | if (sync) |
533 | return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags); | 534 | return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags); |
534 | return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags); | 535 | return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags); |
535 | } | 536 | } |
536 | 537 | ||
537 | static inline void blk_set_queue_full(struct request_queue *q, int sync) | 538 | static inline void blk_set_queue_full(struct request_queue *q, int sync) |
538 | { | 539 | { |
539 | if (sync) | 540 | if (sync) |
540 | queue_flag_set(QUEUE_FLAG_SYNCFULL, q); | 541 | queue_flag_set(QUEUE_FLAG_SYNCFULL, q); |
541 | else | 542 | else |
542 | queue_flag_set(QUEUE_FLAG_ASYNCFULL, q); | 543 | queue_flag_set(QUEUE_FLAG_ASYNCFULL, q); |
543 | } | 544 | } |
544 | 545 | ||
545 | static inline void blk_clear_queue_full(struct request_queue *q, int sync) | 546 | static inline void blk_clear_queue_full(struct request_queue *q, int sync) |
546 | { | 547 | { |
547 | if (sync) | 548 | if (sync) |
548 | queue_flag_clear(QUEUE_FLAG_SYNCFULL, q); | 549 | queue_flag_clear(QUEUE_FLAG_SYNCFULL, q); |
549 | else | 550 | else |
550 | queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q); | 551 | queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q); |
551 | } | 552 | } |
552 | 553 | ||
553 | 554 | ||
554 | /* | 555 | /* |
555 | * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may | 556 | * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may |
556 | * it already have been started by the driver. | 557 | * it already have been started by the driver.
557 | */ | 558 | */ |
558 | #define RQ_NOMERGE_FLAGS \ | 559 | #define RQ_NOMERGE_FLAGS \ |
559 | (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) | 560 | (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) |
560 | #define rq_mergeable(rq) \ | 561 | #define rq_mergeable(rq) \ |
561 | (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ | 562 | (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ |
562 | (((rq)->cmd_flags & REQ_DISCARD) || \ | 563 | (((rq)->cmd_flags & REQ_DISCARD) || \ |
563 | (rq)->cmd_type == REQ_TYPE_FS)) | 564 | (rq)->cmd_type == REQ_TYPE_FS)) |
564 | 565 | ||
565 | /* | 566 | /* |
566 | * q->prep_rq_fn return values | 567 | * q->prep_rq_fn return values |
567 | */ | 568 | */ |
568 | #define BLKPREP_OK 0 /* serve it */ | 569 | #define BLKPREP_OK 0 /* serve it */ |
569 | #define BLKPREP_KILL 1 /* fatal error, kill */ | 570 | #define BLKPREP_KILL 1 /* fatal error, kill */ |
570 | #define BLKPREP_DEFER 2 /* leave on queue */ | 571 | #define BLKPREP_DEFER 2 /* leave on queue */ |
571 | 572 | ||
572 | extern unsigned long blk_max_low_pfn, blk_max_pfn; | 573 | extern unsigned long blk_max_low_pfn, blk_max_pfn; |
573 | 574 | ||
574 | /* | 575 | /* |
575 | * standard bounce addresses: | 576 | * standard bounce addresses: |
576 | * | 577 | * |
577 | * BLK_BOUNCE_HIGH : bounce all highmem pages | 578 | * BLK_BOUNCE_HIGH : bounce all highmem pages |
578 | * BLK_BOUNCE_ANY : don't bounce anything | 579 | * BLK_BOUNCE_ANY : don't bounce anything |
579 | * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary | 580 | * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary |
580 | */ | 581 | */ |
581 | 582 | ||
582 | #if BITS_PER_LONG == 32 | 583 | #if BITS_PER_LONG == 32 |
583 | #define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT) | 584 | #define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT) |
584 | #else | 585 | #else |
585 | #define BLK_BOUNCE_HIGH -1ULL | 586 | #define BLK_BOUNCE_HIGH -1ULL |
586 | #endif | 587 | #endif |
587 | #define BLK_BOUNCE_ANY (-1ULL) | 588 | #define BLK_BOUNCE_ANY (-1ULL) |
588 | #define BLK_BOUNCE_ISA (DMA_BIT_MASK(24)) | 589 | #define BLK_BOUNCE_ISA (DMA_BIT_MASK(24)) |
589 | 590 | ||
590 | /* | 591 | /* |
591 | * default timeout for SG_IO if none specified | 592 | * default timeout for SG_IO if none specified |
592 | */ | 593 | */ |
593 | #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) | 594 | #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) |
594 | #define BLK_MIN_SG_TIMEOUT (7 * HZ) | 595 | #define BLK_MIN_SG_TIMEOUT (7 * HZ) |
595 | 596 | ||
596 | #ifdef CONFIG_BOUNCE | 597 | #ifdef CONFIG_BOUNCE |
597 | extern int init_emergency_isa_pool(void); | 598 | extern int init_emergency_isa_pool(void); |
598 | extern void blk_queue_bounce(struct request_queue *q, struct bio **bio); | 599 | extern void blk_queue_bounce(struct request_queue *q, struct bio **bio); |
599 | #else | 600 | #else |
600 | static inline int init_emergency_isa_pool(void) | 601 | static inline int init_emergency_isa_pool(void) |
601 | { | 602 | { |
602 | return 0; | 603 | return 0; |
603 | } | 604 | } |
604 | static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) | 605 | static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) |
605 | { | 606 | { |
606 | } | 607 | } |
607 | #endif /* CONFIG_BOUNCE */ | 608 | #endif /* CONFIG_BOUNCE */
608 | 609 | ||
609 | struct rq_map_data { | 610 | struct rq_map_data { |
610 | struct page **pages; | 611 | struct page **pages; |
611 | int page_order; | 612 | int page_order; |
612 | int nr_entries; | 613 | int nr_entries; |
613 | unsigned long offset; | 614 | unsigned long offset; |
614 | int null_mapped; | 615 | int null_mapped; |
615 | int from_user; | 616 | int from_user; |
616 | }; | 617 | }; |
617 | 618 | ||
618 | struct req_iterator { | 619 | struct req_iterator { |
619 | int i; | 620 | int i; |
620 | struct bio *bio; | 621 | struct bio *bio; |
621 | }; | 622 | }; |
622 | 623 | ||
623 | /* This should not be used directly - use rq_for_each_segment */ | 624 | /* This should not be used directly - use rq_for_each_segment */ |
624 | #define for_each_bio(_bio) \ | 625 | #define for_each_bio(_bio) \ |
625 | for (; _bio; _bio = _bio->bi_next) | 626 | for (; _bio; _bio = _bio->bi_next) |
626 | #define __rq_for_each_bio(_bio, rq) \ | 627 | #define __rq_for_each_bio(_bio, rq) \ |
627 | if ((rq->bio)) \ | 628 | if ((rq->bio)) \ |
628 | for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) | 629 | for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) |
629 | 630 | ||
630 | #define rq_for_each_segment(bvl, _rq, _iter) \ | 631 | #define rq_for_each_segment(bvl, _rq, _iter) \ |
631 | __rq_for_each_bio(_iter.bio, _rq) \ | 632 | __rq_for_each_bio(_iter.bio, _rq) \ |
632 | bio_for_each_segment(bvl, _iter.bio, _iter.i) | 633 | bio_for_each_segment(bvl, _iter.bio, _iter.i) |
633 | 634 | ||
634 | #define rq_iter_last(rq, _iter) \ | 635 | #define rq_iter_last(rq, _iter) \ |
635 | (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1) | 636 | (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1) |
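A minimal sketch of walking a request with these iterators (assuming lowmem pages so page_address() is usable; rq is a struct request * supplied by the driver):

	struct req_iterator iter;
	struct bio_vec *bvec;

	rq_for_each_segment(bvec, rq, iter) {
		void *addr = page_address(bvec->bv_page) + bvec->bv_offset;
		/* transfer bvec->bv_len bytes starting at addr */
	}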
636 | 637 | ||
637 | #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE | 638 | #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE |
638 | # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" | 639 | # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" |
639 | #endif | 640 | #endif |
640 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE | 641 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE |
641 | extern void rq_flush_dcache_pages(struct request *rq); | 642 | extern void rq_flush_dcache_pages(struct request *rq); |
642 | #else | 643 | #else |
643 | static inline void rq_flush_dcache_pages(struct request *rq) | 644 | static inline void rq_flush_dcache_pages(struct request *rq) |
644 | { | 645 | { |
645 | } | 646 | } |
646 | #endif | 647 | #endif |
647 | 648 | ||
648 | extern int blk_register_queue(struct gendisk *disk); | 649 | extern int blk_register_queue(struct gendisk *disk); |
649 | extern void blk_unregister_queue(struct gendisk *disk); | 650 | extern void blk_unregister_queue(struct gendisk *disk); |
650 | extern void generic_make_request(struct bio *bio); | 651 | extern void generic_make_request(struct bio *bio); |
651 | extern void blk_rq_init(struct request_queue *q, struct request *rq); | 652 | extern void blk_rq_init(struct request_queue *q, struct request *rq); |
652 | extern void blk_put_request(struct request *); | 653 | extern void blk_put_request(struct request *); |
653 | extern void __blk_put_request(struct request_queue *, struct request *); | 654 | extern void __blk_put_request(struct request_queue *, struct request *); |
654 | extern struct request *blk_get_request(struct request_queue *, int, gfp_t); | 655 | extern struct request *blk_get_request(struct request_queue *, int, gfp_t); |
655 | extern struct request *blk_make_request(struct request_queue *, struct bio *, | 656 | extern struct request *blk_make_request(struct request_queue *, struct bio *, |
656 | gfp_t); | 657 | gfp_t); |
657 | extern void blk_insert_request(struct request_queue *, struct request *, int, void *); | 658 | extern void blk_insert_request(struct request_queue *, struct request *, int, void *); |
658 | extern void blk_requeue_request(struct request_queue *, struct request *); | 659 | extern void blk_requeue_request(struct request_queue *, struct request *); |
659 | extern void blk_add_request_payload(struct request *rq, struct page *page, | 660 | extern void blk_add_request_payload(struct request *rq, struct page *page, |
660 | unsigned int len); | 661 | unsigned int len); |
661 | extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); | 662 | extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); |
662 | extern int blk_lld_busy(struct request_queue *q); | 663 | extern int blk_lld_busy(struct request_queue *q); |
663 | extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, | 664 | extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, |
664 | struct bio_set *bs, gfp_t gfp_mask, | 665 | struct bio_set *bs, gfp_t gfp_mask, |
665 | int (*bio_ctr)(struct bio *, struct bio *, void *), | 666 | int (*bio_ctr)(struct bio *, struct bio *, void *), |
666 | void *data); | 667 | void *data); |
667 | extern void blk_rq_unprep_clone(struct request *rq); | 668 | extern void blk_rq_unprep_clone(struct request *rq); |
668 | extern int blk_insert_cloned_request(struct request_queue *q, | 669 | extern int blk_insert_cloned_request(struct request_queue *q, |
669 | struct request *rq); | 670 | struct request *rq); |
670 | extern void blk_delay_queue(struct request_queue *, unsigned long); | 671 | extern void blk_delay_queue(struct request_queue *, unsigned long); |
671 | extern void blk_recount_segments(struct request_queue *, struct bio *); | 672 | extern void blk_recount_segments(struct request_queue *, struct bio *); |
672 | extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, | 673 | extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, |
673 | unsigned int, void __user *); | 674 | unsigned int, void __user *); |
674 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, | 675 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, |
675 | struct scsi_ioctl_command __user *); | 676 | struct scsi_ioctl_command __user *); |
676 | 677 | ||
677 | /* | 678 | /* |
678 | * A queue has just exited congestion. Note this in the global counter of | 679 | * A queue has just exited congestion. Note this in the global counter of
679 | * congested queues, and wake up anyone who was waiting for requests to be | 680 | * congested queues, and wake up anyone who was waiting for requests to be |
680 | * put back. | 681 | * put back. |
681 | */ | 682 | */ |
682 | static inline void blk_clear_queue_congested(struct request_queue *q, int sync) | 683 | static inline void blk_clear_queue_congested(struct request_queue *q, int sync) |
683 | { | 684 | { |
684 | clear_bdi_congested(&q->backing_dev_info, sync); | 685 | clear_bdi_congested(&q->backing_dev_info, sync); |
685 | } | 686 | } |
686 | 687 | ||
687 | /* | 688 | /* |
688 | * A queue has just entered congestion. Flag that in the queue's VM-visible | 689 | * A queue has just entered congestion. Flag that in the queue's VM-visible |
689 | * state flags and increment the global counter of congested queues. | 690 | * state flags and increment the global counter of congested queues.
690 | */ | 691 | */ |
691 | static inline void blk_set_queue_congested(struct request_queue *q, int sync) | 692 | static inline void blk_set_queue_congested(struct request_queue *q, int sync) |
692 | { | 693 | { |
693 | set_bdi_congested(&q->backing_dev_info, sync); | 694 | set_bdi_congested(&q->backing_dev_info, sync); |
694 | } | 695 | } |
695 | 696 | ||
696 | extern void blk_start_queue(struct request_queue *q); | 697 | extern void blk_start_queue(struct request_queue *q); |
697 | extern void blk_stop_queue(struct request_queue *q); | 698 | extern void blk_stop_queue(struct request_queue *q); |
698 | extern void blk_sync_queue(struct request_queue *q); | 699 | extern void blk_sync_queue(struct request_queue *q); |
699 | extern void __blk_stop_queue(struct request_queue *q); | 700 | extern void __blk_stop_queue(struct request_queue *q); |
700 | extern void __blk_run_queue(struct request_queue *q); | 701 | extern void __blk_run_queue(struct request_queue *q); |
701 | extern void blk_run_queue(struct request_queue *); | 702 | extern void blk_run_queue(struct request_queue *); |
702 | extern int blk_rq_map_user(struct request_queue *, struct request *, | 703 | extern int blk_rq_map_user(struct request_queue *, struct request *, |
703 | struct rq_map_data *, void __user *, unsigned long, | 704 | struct rq_map_data *, void __user *, unsigned long, |
704 | gfp_t); | 705 | gfp_t); |
705 | extern int blk_rq_unmap_user(struct bio *); | 706 | extern int blk_rq_unmap_user(struct bio *); |
706 | extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); | 707 | extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); |
707 | extern int blk_rq_map_user_iov(struct request_queue *, struct request *, | 708 | extern int blk_rq_map_user_iov(struct request_queue *, struct request *, |
708 | struct rq_map_data *, struct sg_iovec *, int, | 709 | struct rq_map_data *, struct sg_iovec *, int, |
709 | unsigned int, gfp_t); | 710 | unsigned int, gfp_t); |
710 | extern int blk_execute_rq(struct request_queue *, struct gendisk *, | 711 | extern int blk_execute_rq(struct request_queue *, struct gendisk *, |
711 | struct request *, int); | 712 | struct request *, int); |
712 | extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, | 713 | extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, |
713 | struct request *, int, rq_end_io_fn *); | 714 | struct request *, int, rq_end_io_fn *); |
714 | 715 | ||
715 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) | 716 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) |
716 | { | 717 | { |
717 | return bdev->bd_disk->queue; | 718 | return bdev->bd_disk->queue; |
718 | } | 719 | } |
719 | 720 | ||
720 | /* | 721 | /* |
721 | * blk_rq_pos() : the current sector | 722 | * blk_rq_pos() : the current sector |
722 | * blk_rq_bytes() : bytes left in the entire request | 723 | * blk_rq_bytes() : bytes left in the entire request |
723 | * blk_rq_cur_bytes() : bytes left in the current segment | 724 | * blk_rq_cur_bytes() : bytes left in the current segment |
724 | * blk_rq_err_bytes() : bytes left till the next error boundary | 725 | * blk_rq_err_bytes() : bytes left till the next error boundary |
725 | * blk_rq_sectors() : sectors left in the entire request | 726 | * blk_rq_sectors() : sectors left in the entire request |
726 | * blk_rq_cur_sectors() : sectors left in the current segment | 727 | * blk_rq_cur_sectors() : sectors left in the current segment |
727 | */ | 728 | */ |
728 | static inline sector_t blk_rq_pos(const struct request *rq) | 729 | static inline sector_t blk_rq_pos(const struct request *rq) |
729 | { | 730 | { |
730 | return rq->__sector; | 731 | return rq->__sector; |
731 | } | 732 | } |
732 | 733 | ||
733 | static inline unsigned int blk_rq_bytes(const struct request *rq) | 734 | static inline unsigned int blk_rq_bytes(const struct request *rq) |
734 | { | 735 | { |
735 | return rq->__data_len; | 736 | return rq->__data_len; |
736 | } | 737 | } |
737 | 738 | ||
738 | static inline int blk_rq_cur_bytes(const struct request *rq) | 739 | static inline int blk_rq_cur_bytes(const struct request *rq) |
739 | { | 740 | { |
740 | return rq->bio ? bio_cur_bytes(rq->bio) : 0; | 741 | return rq->bio ? bio_cur_bytes(rq->bio) : 0; |
741 | } | 742 | } |
742 | 743 | ||
743 | extern unsigned int blk_rq_err_bytes(const struct request *rq); | 744 | extern unsigned int blk_rq_err_bytes(const struct request *rq); |
744 | 745 | ||
745 | static inline unsigned int blk_rq_sectors(const struct request *rq) | 746 | static inline unsigned int blk_rq_sectors(const struct request *rq) |
746 | { | 747 | { |
747 | return blk_rq_bytes(rq) >> 9; | 748 | return blk_rq_bytes(rq) >> 9; |
748 | } | 749 | } |
749 | 750 | ||
750 | static inline unsigned int blk_rq_cur_sectors(const struct request *rq) | 751 | static inline unsigned int blk_rq_cur_sectors(const struct request *rq) |
751 | { | 752 | { |
752 | return blk_rq_cur_bytes(rq) >> 9; | 753 | return blk_rq_cur_bytes(rq) >> 9; |
753 | } | 754 | } |
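As a quick illustration of the accessors listed in the comment above (not part of this diff; the mydev name is hypothetical), a driver that wants to trace a request's progress never reads rq->__sector or rq->__data_len directly:

#include <linux/blkdev.h>

/* Illustrative sketch only: log where a request currently stands. */
static void mydev_trace_rq(struct request *rq)
{
        pr_debug("mydev: at sector %llu, %u bytes left in request, %d in current segment\n",
                 (unsigned long long)blk_rq_pos(rq),
                 blk_rq_bytes(rq),
                 blk_rq_cur_bytes(rq));
}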
754 | 755 | ||
755 | /* | 756 | /* |
756 | * Request issue related functions. | 757 | * Request issue related functions. |
757 | */ | 758 | */ |
758 | extern struct request *blk_peek_request(struct request_queue *q); | 759 | extern struct request *blk_peek_request(struct request_queue *q); |
759 | extern void blk_start_request(struct request *rq); | 760 | extern void blk_start_request(struct request *rq); |
760 | extern struct request *blk_fetch_request(struct request_queue *q); | 761 | extern struct request *blk_fetch_request(struct request_queue *q); |
761 | 762 | ||
762 | /* | 763 | /* |
763 | * Request completion related functions. | 764 | * Request completion related functions. |
764 | * | 765 | * |
765 | * blk_update_request() completes given number of bytes and updates | 766 | * blk_update_request() completes given number of bytes and updates |
766 | * the request without completing it. | 767 | * the request without completing it. |
767 | * | 768 | * |
768 | * blk_end_request() and friends. __blk_end_request() must be called | 769 | * blk_end_request() and friends. __blk_end_request() must be called |
769 | * with the request queue spinlock acquired. | 770 | * with the request queue spinlock acquired. |
770 | * | 771 | * |
771 | * Several drivers define their own end_request and call | 772 | * Several drivers define their own end_request and call |
772 | * blk_end_request() for parts of the original function. | 773 | * blk_end_request() for parts of the original function. |
773 | * This prevents code duplication in drivers. | 774 | * This prevents code duplication in drivers. |
774 | */ | 775 | */ |
775 | extern bool blk_update_request(struct request *rq, int error, | 776 | extern bool blk_update_request(struct request *rq, int error, |
776 | unsigned int nr_bytes); | 777 | unsigned int nr_bytes); |
777 | extern bool blk_end_request(struct request *rq, int error, | 778 | extern bool blk_end_request(struct request *rq, int error, |
778 | unsigned int nr_bytes); | 779 | unsigned int nr_bytes); |
779 | extern void blk_end_request_all(struct request *rq, int error); | 780 | extern void blk_end_request_all(struct request *rq, int error); |
780 | extern bool blk_end_request_cur(struct request *rq, int error); | 781 | extern bool blk_end_request_cur(struct request *rq, int error); |
781 | extern bool blk_end_request_err(struct request *rq, int error); | 782 | extern bool blk_end_request_err(struct request *rq, int error); |
782 | extern bool __blk_end_request(struct request *rq, int error, | 783 | extern bool __blk_end_request(struct request *rq, int error, |
783 | unsigned int nr_bytes); | 784 | unsigned int nr_bytes); |
784 | extern void __blk_end_request_all(struct request *rq, int error); | 785 | extern void __blk_end_request_all(struct request *rq, int error); |
785 | extern bool __blk_end_request_cur(struct request *rq, int error); | 786 | extern bool __blk_end_request_cur(struct request *rq, int error); |
786 | extern bool __blk_end_request_err(struct request *rq, int error); | 787 | extern bool __blk_end_request_err(struct request *rq, int error); |
787 | 788 | ||
788 | extern void blk_complete_request(struct request *); | 789 | extern void blk_complete_request(struct request *); |
789 | extern void __blk_complete_request(struct request *); | 790 | extern void __blk_complete_request(struct request *); |
790 | extern void blk_abort_request(struct request *); | 791 | extern void blk_abort_request(struct request *); |
791 | extern void blk_abort_queue(struct request_queue *); | 792 | extern void blk_abort_queue(struct request_queue *); |
792 | extern void blk_unprep_request(struct request *); | 793 | extern void blk_unprep_request(struct request *); |
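The comment above carries the key constraint: ->request_fn is entered with the queue spinlock held, so inside it only the __-prefixed completion helpers are legal. A minimal sketch of such a request_fn, assuming a hypothetical mydev_do_transfer() that performs the I/O synchronously and returns 0 or a negative errno:

/* Sketch only; a real driver would normally drop the queue lock (or
 * defer to an IRQ/workqueue path) around the actual transfer.
 */
static void mydev_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                int err = mydev_do_transfer(rq);        /* hypothetical */

                /* the queue lock is held here, so use the __ variant */
                __blk_end_request_all(rq, err);
        }
}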
793 | 794 | ||
794 | /* | 795 | /* |
795 | * Access functions for manipulating queue properties | 796 | * Access functions for manipulating queue properties |
796 | */ | 797 | */ |
797 | extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, | 798 | extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, |
798 | spinlock_t *lock, int node_id); | 799 | spinlock_t *lock, int node_id); |
799 | extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *, | 800 | extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *, |
800 | request_fn_proc *, | 801 | request_fn_proc *, |
801 | spinlock_t *, int node_id); | 802 | spinlock_t *, int node_id); |
802 | extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); | 803 | extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); |
803 | extern struct request_queue *blk_init_allocated_queue(struct request_queue *, | 804 | extern struct request_queue *blk_init_allocated_queue(struct request_queue *, |
804 | request_fn_proc *, spinlock_t *); | 805 | request_fn_proc *, spinlock_t *); |
805 | extern void blk_cleanup_queue(struct request_queue *); | 806 | extern void blk_cleanup_queue(struct request_queue *); |
806 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); | 807 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); |
807 | extern void blk_queue_bounce_limit(struct request_queue *, u64); | 808 | extern void blk_queue_bounce_limit(struct request_queue *, u64); |
808 | extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); | 809 | extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); |
809 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); | 810 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); |
810 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); | 811 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); |
811 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 812 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); |
812 | extern void blk_queue_max_discard_sectors(struct request_queue *q, | 813 | extern void blk_queue_max_discard_sectors(struct request_queue *q, |
813 | unsigned int max_discard_sectors); | 814 | unsigned int max_discard_sectors); |
814 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); | 815 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); |
815 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); | 816 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); |
816 | extern void blk_queue_alignment_offset(struct request_queue *q, | 817 | extern void blk_queue_alignment_offset(struct request_queue *q, |
817 | unsigned int alignment); | 818 | unsigned int alignment); |
818 | extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); | 819 | extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); |
819 | extern void blk_queue_io_min(struct request_queue *q, unsigned int min); | 820 | extern void blk_queue_io_min(struct request_queue *q, unsigned int min); |
820 | extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); | 821 | extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); |
821 | extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); | 822 | extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); |
822 | extern void blk_set_default_limits(struct queue_limits *lim); | 823 | extern void blk_set_default_limits(struct queue_limits *lim); |
823 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | 824 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, |
824 | sector_t offset); | 825 | sector_t offset); |
825 | extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, | 826 | extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, |
826 | sector_t offset); | 827 | sector_t offset); |
827 | extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, | 828 | extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, |
828 | sector_t offset); | 829 | sector_t offset); |
829 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); | 830 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); |
830 | extern void blk_queue_dma_pad(struct request_queue *, unsigned int); | 831 | extern void blk_queue_dma_pad(struct request_queue *, unsigned int); |
831 | extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); | 832 | extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); |
832 | extern int blk_queue_dma_drain(struct request_queue *q, | 833 | extern int blk_queue_dma_drain(struct request_queue *q, |
833 | dma_drain_needed_fn *dma_drain_needed, | 834 | dma_drain_needed_fn *dma_drain_needed, |
834 | void *buf, unsigned int size); | 835 | void *buf, unsigned int size); |
835 | extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); | 836 | extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); |
836 | extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); | 837 | extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); |
837 | extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); | 838 | extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); |
838 | extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); | 839 | extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); |
839 | extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); | 840 | extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); |
840 | extern void blk_queue_dma_alignment(struct request_queue *, int); | 841 | extern void blk_queue_dma_alignment(struct request_queue *, int); |
841 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); | 842 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); |
842 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); | 843 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); |
843 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); | 844 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); |
844 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); | 845 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); |
845 | extern void blk_queue_flush(struct request_queue *q, unsigned int flush); | 846 | extern void blk_queue_flush(struct request_queue *q, unsigned int flush); |
847 | extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); | ||
846 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); | 848 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); |
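The one new declaration in this block, blk_queue_flush_queueable(), joins the other queue-property setters: a driver whose cache-flush command cannot be queued alongside normal I/O announces that once, at queue-setup time. A hedged setup sketch (driver name and limit values are made up; only the blk_queue_* calls come from this header):

static void mydev_init_queue(struct request_queue *q)
{
        blk_queue_logical_block_size(q, 512);
        blk_queue_max_hw_sectors(q, 1024);

        /* the device has a volatile write cache, so advertise flushes */
        blk_queue_flush(q, REQ_FLUSH);

        /* new with this commit: mark the flush command as not queueable;
         * queue_flush_queueable(), added further down in this header,
         * then reports false for this queue
         */
        blk_queue_flush_queueable(q, false);
}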
847 | 849 | ||
848 | extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); | 850 | extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); |
849 | extern void blk_dump_rq_flags(struct request *, char *); | 851 | extern void blk_dump_rq_flags(struct request *, char *); |
850 | extern long nr_blockdev_pages(void); | 852 | extern long nr_blockdev_pages(void); |
851 | 853 | ||
852 | int blk_get_queue(struct request_queue *); | 854 | int blk_get_queue(struct request_queue *); |
853 | struct request_queue *blk_alloc_queue(gfp_t); | 855 | struct request_queue *blk_alloc_queue(gfp_t); |
854 | struct request_queue *blk_alloc_queue_node(gfp_t, int); | 856 | struct request_queue *blk_alloc_queue_node(gfp_t, int); |
855 | extern void blk_put_queue(struct request_queue *); | 857 | extern void blk_put_queue(struct request_queue *); |
856 | 858 | ||
857 | struct blk_plug { | 859 | struct blk_plug { |
858 | unsigned long magic; | 860 | unsigned long magic; |
859 | struct list_head list; | 861 | struct list_head list; |
860 | struct list_head cb_list; | 862 | struct list_head cb_list; |
861 | unsigned int should_sort; | 863 | unsigned int should_sort; |
862 | }; | 864 | }; |
863 | struct blk_plug_cb { | 865 | struct blk_plug_cb { |
864 | struct list_head list; | 866 | struct list_head list; |
865 | void (*callback)(struct blk_plug_cb *); | 867 | void (*callback)(struct blk_plug_cb *); |
866 | }; | 868 | }; |
867 | 869 | ||
868 | extern void blk_start_plug(struct blk_plug *); | 870 | extern void blk_start_plug(struct blk_plug *); |
869 | extern void blk_finish_plug(struct blk_plug *); | 871 | extern void blk_finish_plug(struct blk_plug *); |
870 | extern void blk_flush_plug_list(struct blk_plug *, bool); | 872 | extern void blk_flush_plug_list(struct blk_plug *, bool); |
871 | 873 | ||
872 | static inline void blk_flush_plug(struct task_struct *tsk) | 874 | static inline void blk_flush_plug(struct task_struct *tsk) |
873 | { | 875 | { |
874 | struct blk_plug *plug = tsk->plug; | 876 | struct blk_plug *plug = tsk->plug; |
875 | 877 | ||
876 | if (plug) | 878 | if (plug) |
877 | blk_flush_plug_list(plug, false); | 879 | blk_flush_plug_list(plug, false); |
878 | } | 880 | } |
879 | 881 | ||
880 | static inline void blk_schedule_flush_plug(struct task_struct *tsk) | 882 | static inline void blk_schedule_flush_plug(struct task_struct *tsk) |
881 | { | 883 | { |
882 | struct blk_plug *plug = tsk->plug; | 884 | struct blk_plug *plug = tsk->plug; |
883 | 885 | ||
884 | if (plug) | 886 | if (plug) |
885 | blk_flush_plug_list(plug, true); | 887 | blk_flush_plug_list(plug, true); |
886 | } | 888 | } |
887 | 889 | ||
888 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) | 890 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) |
889 | { | 891 | { |
890 | struct blk_plug *plug = tsk->plug; | 892 | struct blk_plug *plug = tsk->plug; |
891 | 893 | ||
892 | return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list)); | 894 | return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list)); |
893 | } | 895 | } |
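Plugging is a submitter-side API: batching several bios under one on-stack plug lets the block layer merge and sort them when the plug is flushed rather than dispatching each one immediately. A minimal usage sketch (the caller is assumed to have prepared the bios already; submit_bio() is declared elsewhere):

#include <linux/blkdev.h>
#include <linux/fs.h>

static void submit_read_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                submit_bio(READ, bios[i]);
        blk_finish_plug(&plug);         /* flush the plugged requests */
}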
894 | 896 | ||
895 | /* | 897 | /* |
896 | * tag stuff | 898 | * tag stuff |
897 | */ | 899 | */ |
898 | #define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) | 900 | #define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) |
899 | extern int blk_queue_start_tag(struct request_queue *, struct request *); | 901 | extern int blk_queue_start_tag(struct request_queue *, struct request *); |
900 | extern struct request *blk_queue_find_tag(struct request_queue *, int); | 902 | extern struct request *blk_queue_find_tag(struct request_queue *, int); |
901 | extern void blk_queue_end_tag(struct request_queue *, struct request *); | 903 | extern void blk_queue_end_tag(struct request_queue *, struct request *); |
902 | extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *); | 904 | extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *); |
903 | extern void blk_queue_free_tags(struct request_queue *); | 905 | extern void blk_queue_free_tags(struct request_queue *); |
904 | extern int blk_queue_resize_tags(struct request_queue *, int); | 906 | extern int blk_queue_resize_tags(struct request_queue *, int); |
905 | extern void blk_queue_invalidate_tags(struct request_queue *); | 907 | extern void blk_queue_invalidate_tags(struct request_queue *); |
906 | extern struct blk_queue_tag *blk_init_tags(int); | 908 | extern struct blk_queue_tag *blk_init_tags(int); |
907 | extern void blk_free_tags(struct blk_queue_tag *); | 909 | extern void blk_free_tags(struct blk_queue_tag *); |
908 | 910 | ||
909 | static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, | 911 | static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, |
910 | int tag) | 912 | int tag) |
911 | { | 913 | { |
912 | if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) | 914 | if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) |
913 | return NULL; | 915 | return NULL; |
914 | return bqt->tag_index[tag]; | 916 | return bqt->tag_index[tag]; |
915 | } | 917 | } |
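For hardware with its own command queue, the tagging helpers above manage the tag map: initialize it once, then attach a tag to each request before it goes to the device and release the tag on completion. A rough sketch (the depth of 32 is arbitrary; the mydev names are hypothetical):

static int mydev_setup_tags(struct request_queue *q)
{
        /* NULL: let the block layer allocate the blk_queue_tag map */
        return blk_queue_init_tags(q, 32, NULL);
}

static int mydev_issue(struct request_queue *q, struct request *rq)
{
        if (blk_queue_start_tag(q, rq))
                return 1;               /* no free tag, retry later */

        /* ... pass rq->tag to the hardware; on completion the driver
         * calls blk_queue_end_tag(q, rq) to release the tag ...
         */
        return 0;
}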
916 | 918 | ||
917 | #define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */ | 919 | #define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */ |
918 | 920 | ||
919 | extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); | 921 | extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); |
920 | extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | 922 | extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, |
921 | sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); | 923 | sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); |
922 | extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | 924 | extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, |
923 | sector_t nr_sects, gfp_t gfp_mask); | 925 | sector_t nr_sects, gfp_t gfp_mask); |
924 | static inline int sb_issue_discard(struct super_block *sb, sector_t block, | 926 | static inline int sb_issue_discard(struct super_block *sb, sector_t block, |
925 | sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) | 927 | sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) |
926 | { | 928 | { |
927 | return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), | 929 | return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), |
928 | nr_blocks << (sb->s_blocksize_bits - 9), | 930 | nr_blocks << (sb->s_blocksize_bits - 9), |
929 | gfp_mask, flags); | 931 | gfp_mask, flags); |
930 | } | 932 | } |
931 | static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, | 933 | static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, |
932 | sector_t nr_blocks, gfp_t gfp_mask) | 934 | sector_t nr_blocks, gfp_t gfp_mask) |
933 | { | 935 | { |
934 | return blkdev_issue_zeroout(sb->s_bdev, | 936 | return blkdev_issue_zeroout(sb->s_bdev, |
935 | block << (sb->s_blocksize_bits - 9), | 937 | block << (sb->s_blocksize_bits - 9), |
936 | nr_blocks << (sb->s_blocksize_bits - 9), | 938 | nr_blocks << (sb->s_blocksize_bits - 9), |
937 | gfp_mask); | 939 | gfp_mask); |
938 | } | 940 | } |
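The blkdev_issue_* helpers above are synchronous conveniences for callers that operate on a whole block device rather than on individual bios. A small sketch combining two of them (GFP_KERNEL is assumed to be a safe allocation context here; the error_sector argument to blkdev_issue_flush may be NULL when the caller does not need it):

static int mydev_flush_and_trim(struct block_device *bdev, sector_t start,
                                sector_t nr_sects)
{
        int ret;

        /* flush the device's write cache first */
        ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
        if (ret)
                return ret;

        /* then discard the range; pass BLKDEV_DISCARD_SECURE instead of 0
         * if a secure discard is required and supported
         */
        return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);
}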
939 | 941 | ||
940 | extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); | 942 | extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); |
941 | 943 | ||
942 | enum blk_default_limits { | 944 | enum blk_default_limits { |
943 | BLK_MAX_SEGMENTS = 128, | 945 | BLK_MAX_SEGMENTS = 128, |
944 | BLK_SAFE_MAX_SECTORS = 255, | 946 | BLK_SAFE_MAX_SECTORS = 255, |
945 | BLK_DEF_MAX_SECTORS = 1024, | 947 | BLK_DEF_MAX_SECTORS = 1024, |
946 | BLK_MAX_SEGMENT_SIZE = 65536, | 948 | BLK_MAX_SEGMENT_SIZE = 65536, |
947 | BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, | 949 | BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, |
948 | }; | 950 | }; |
949 | 951 | ||
950 | #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) | 952 | #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) |
951 | 953 | ||
952 | static inline unsigned long queue_bounce_pfn(struct request_queue *q) | 954 | static inline unsigned long queue_bounce_pfn(struct request_queue *q) |
953 | { | 955 | { |
954 | return q->limits.bounce_pfn; | 956 | return q->limits.bounce_pfn; |
955 | } | 957 | } |
956 | 958 | ||
957 | static inline unsigned long queue_segment_boundary(struct request_queue *q) | 959 | static inline unsigned long queue_segment_boundary(struct request_queue *q) |
958 | { | 960 | { |
959 | return q->limits.seg_boundary_mask; | 961 | return q->limits.seg_boundary_mask; |
960 | } | 962 | } |
961 | 963 | ||
962 | static inline unsigned int queue_max_sectors(struct request_queue *q) | 964 | static inline unsigned int queue_max_sectors(struct request_queue *q) |
963 | { | 965 | { |
964 | return q->limits.max_sectors; | 966 | return q->limits.max_sectors; |
965 | } | 967 | } |
966 | 968 | ||
967 | static inline unsigned int queue_max_hw_sectors(struct request_queue *q) | 969 | static inline unsigned int queue_max_hw_sectors(struct request_queue *q) |
968 | { | 970 | { |
969 | return q->limits.max_hw_sectors; | 971 | return q->limits.max_hw_sectors; |
970 | } | 972 | } |
971 | 973 | ||
972 | static inline unsigned short queue_max_segments(struct request_queue *q) | 974 | static inline unsigned short queue_max_segments(struct request_queue *q) |
973 | { | 975 | { |
974 | return q->limits.max_segments; | 976 | return q->limits.max_segments; |
975 | } | 977 | } |
976 | 978 | ||
977 | static inline unsigned int queue_max_segment_size(struct request_queue *q) | 979 | static inline unsigned int queue_max_segment_size(struct request_queue *q) |
978 | { | 980 | { |
979 | return q->limits.max_segment_size; | 981 | return q->limits.max_segment_size; |
980 | } | 982 | } |
981 | 983 | ||
982 | static inline unsigned short queue_logical_block_size(struct request_queue *q) | 984 | static inline unsigned short queue_logical_block_size(struct request_queue *q) |
983 | { | 985 | { |
984 | int retval = 512; | 986 | int retval = 512; |
985 | 987 | ||
986 | if (q && q->limits.logical_block_size) | 988 | if (q && q->limits.logical_block_size) |
987 | retval = q->limits.logical_block_size; | 989 | retval = q->limits.logical_block_size; |
988 | 990 | ||
989 | return retval; | 991 | return retval; |
990 | } | 992 | } |
991 | 993 | ||
992 | static inline unsigned short bdev_logical_block_size(struct block_device *bdev) | 994 | static inline unsigned short bdev_logical_block_size(struct block_device *bdev) |
993 | { | 995 | { |
994 | return queue_logical_block_size(bdev_get_queue(bdev)); | 996 | return queue_logical_block_size(bdev_get_queue(bdev)); |
995 | } | 997 | } |
996 | 998 | ||
997 | static inline unsigned int queue_physical_block_size(struct request_queue *q) | 999 | static inline unsigned int queue_physical_block_size(struct request_queue *q) |
998 | { | 1000 | { |
999 | return q->limits.physical_block_size; | 1001 | return q->limits.physical_block_size; |
1000 | } | 1002 | } |
1001 | 1003 | ||
1002 | static inline unsigned int bdev_physical_block_size(struct block_device *bdev) | 1004 | static inline unsigned int bdev_physical_block_size(struct block_device *bdev) |
1003 | { | 1005 | { |
1004 | return queue_physical_block_size(bdev_get_queue(bdev)); | 1006 | return queue_physical_block_size(bdev_get_queue(bdev)); |
1005 | } | 1007 | } |
1006 | 1008 | ||
1007 | static inline unsigned int queue_io_min(struct request_queue *q) | 1009 | static inline unsigned int queue_io_min(struct request_queue *q) |
1008 | { | 1010 | { |
1009 | return q->limits.io_min; | 1011 | return q->limits.io_min; |
1010 | } | 1012 | } |
1011 | 1013 | ||
1012 | static inline int bdev_io_min(struct block_device *bdev) | 1014 | static inline int bdev_io_min(struct block_device *bdev) |
1013 | { | 1015 | { |
1014 | return queue_io_min(bdev_get_queue(bdev)); | 1016 | return queue_io_min(bdev_get_queue(bdev)); |
1015 | } | 1017 | } |
1016 | 1018 | ||
1017 | static inline unsigned int queue_io_opt(struct request_queue *q) | 1019 | static inline unsigned int queue_io_opt(struct request_queue *q) |
1018 | { | 1020 | { |
1019 | return q->limits.io_opt; | 1021 | return q->limits.io_opt; |
1020 | } | 1022 | } |
1021 | 1023 | ||
1022 | static inline int bdev_io_opt(struct block_device *bdev) | 1024 | static inline int bdev_io_opt(struct block_device *bdev) |
1023 | { | 1025 | { |
1024 | return queue_io_opt(bdev_get_queue(bdev)); | 1026 | return queue_io_opt(bdev_get_queue(bdev)); |
1025 | } | 1027 | } |
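Stacked drivers and filesystems usually reach these limits through the bdev_* wrappers. A tiny sketch of one common pattern, choosing a preferred I/O size and falling back to the physical block size when the device reports no optimal size (io_opt of 0):

static unsigned int mydev_pick_io_size(struct block_device *bdev)
{
        unsigned int opt = bdev_io_opt(bdev);   /* 0 when not reported */

        return opt ? opt : bdev_physical_block_size(bdev);
}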
1026 | 1028 | ||
1027 | static inline int queue_alignment_offset(struct request_queue *q) | 1029 | static inline int queue_alignment_offset(struct request_queue *q) |
1028 | { | 1030 | { |
1029 | if (q->limits.misaligned) | 1031 | if (q->limits.misaligned) |
1030 | return -1; | 1032 | return -1; |
1031 | 1033 | ||
1032 | return q->limits.alignment_offset; | 1034 | return q->limits.alignment_offset; |
1033 | } | 1035 | } |
1034 | 1036 | ||
1035 | static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) | 1037 | static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) |
1036 | { | 1038 | { |
1037 | unsigned int granularity = max(lim->physical_block_size, lim->io_min); | 1039 | unsigned int granularity = max(lim->physical_block_size, lim->io_min); |
1038 | unsigned int alignment = (sector << 9) & (granularity - 1); | 1040 | unsigned int alignment = (sector << 9) & (granularity - 1); |
1039 | 1041 | ||
1040 | return (granularity + lim->alignment_offset - alignment) | 1042 | return (granularity + lim->alignment_offset - alignment) |
1041 | & (granularity - 1); | 1043 | & (granularity - 1); |
1042 | } | 1044 | } |
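A worked example of the arithmetic above, with illustrative numbers: take granularity = max(physical_block_size, io_min) = 4096, a device alignment_offset of 0, and a range starting at sector 63. Then alignment = (63 << 9) & 4095 = 32256 & 4095 = 3584, and the helper returns (4096 + 0 - 3584) & 4095 = 512: the start of the range sits 3584 bytes into a 4 KiB granule, so another 512 bytes are needed to reach the next naturally aligned boundary.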
1043 | 1045 | ||
1044 | static inline int bdev_alignment_offset(struct block_device *bdev) | 1046 | static inline int bdev_alignment_offset(struct block_device *bdev) |
1045 | { | 1047 | { |
1046 | struct request_queue *q = bdev_get_queue(bdev); | 1048 | struct request_queue *q = bdev_get_queue(bdev); |
1047 | 1049 | ||
1048 | if (q->limits.misaligned) | 1050 | if (q->limits.misaligned) |
1049 | return -1; | 1051 | return -1; |
1050 | 1052 | ||
1051 | if (bdev != bdev->bd_contains) | 1053 | if (bdev != bdev->bd_contains) |
1052 | return bdev->bd_part->alignment_offset; | 1054 | return bdev->bd_part->alignment_offset; |
1053 | 1055 | ||
1054 | return q->limits.alignment_offset; | 1056 | return q->limits.alignment_offset; |
1055 | } | 1057 | } |
1056 | 1058 | ||
1057 | static inline int queue_discard_alignment(struct request_queue *q) | 1059 | static inline int queue_discard_alignment(struct request_queue *q) |
1058 | { | 1060 | { |
1059 | if (q->limits.discard_misaligned) | 1061 | if (q->limits.discard_misaligned) |
1060 | return -1; | 1062 | return -1; |
1061 | 1063 | ||
1062 | return q->limits.discard_alignment; | 1064 | return q->limits.discard_alignment; |
1063 | } | 1065 | } |
1064 | 1066 | ||
1065 | static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) | 1067 | static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) |
1066 | { | 1068 | { |
1067 | unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); | 1069 | unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); |
1068 | 1070 | ||
1069 | return (lim->discard_granularity + lim->discard_alignment - alignment) | 1071 | return (lim->discard_granularity + lim->discard_alignment - alignment) |
1070 | & (lim->discard_granularity - 1); | 1072 | & (lim->discard_granularity - 1); |
1071 | } | 1073 | } |
1072 | 1074 | ||
1073 | static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) | 1075 | static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) |
1074 | { | 1076 | { |
1075 | if (q->limits.discard_zeroes_data == 1) | 1077 | if (q->limits.discard_zeroes_data == 1) |
1076 | return 1; | 1078 | return 1; |
1077 | 1079 | ||
1078 | return 0; | 1080 | return 0; |
1079 | } | 1081 | } |
1080 | 1082 | ||
1081 | static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev) | 1083 | static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev) |
1082 | { | 1084 | { |
1083 | return queue_discard_zeroes_data(bdev_get_queue(bdev)); | 1085 | return queue_discard_zeroes_data(bdev_get_queue(bdev)); |
1084 | } | 1086 | } |
1085 | 1087 | ||
1086 | static inline int queue_dma_alignment(struct request_queue *q) | 1088 | static inline int queue_dma_alignment(struct request_queue *q) |
1087 | { | 1089 | { |
1088 | return q ? q->dma_alignment : 511; | 1090 | return q ? q->dma_alignment : 511; |
1089 | } | 1091 | } |
1090 | 1092 | ||
1091 | static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, | 1093 | static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, |
1092 | unsigned int len) | 1094 | unsigned int len) |
1093 | { | 1095 | { |
1094 | unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; | 1096 | unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; |
1095 | return !(addr & alignment) && !(len & alignment); | 1097 | return !(addr & alignment) && !(len & alignment); |
1096 | } | 1098 | } |
1097 | 1099 | ||
1098 | /* assumes size > 256 */ | 1100 | /* assumes size > 256 */ |
1099 | static inline unsigned int blksize_bits(unsigned int size) | 1101 | static inline unsigned int blksize_bits(unsigned int size) |
1100 | { | 1102 | { |
1101 | unsigned int bits = 8; | 1103 | unsigned int bits = 8; |
1102 | do { | 1104 | do { |
1103 | bits++; | 1105 | bits++; |
1104 | size >>= 1; | 1106 | size >>= 1; |
1105 | } while (size > 256); | 1107 | } while (size > 256); |
1106 | return bits; | 1108 | return bits; |
1107 | } | 1109 | } |
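For example, blksize_bits(4096) halves the size four times (4096 → 2048 → 1024 → 512 → 256) while incrementing bits from 8 to 12, and returns 12 = log2(4096). The "assumes size > 256" note is there because the do/while always runs at least once: a 256-byte argument would come back as 9 rather than 8, so the smallest block size this handles correctly is 512.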
1108 | 1110 | ||
1109 | static inline unsigned int block_size(struct block_device *bdev) | 1111 | static inline unsigned int block_size(struct block_device *bdev) |
1110 | { | 1112 | { |
1111 | return bdev->bd_block_size; | 1113 | return bdev->bd_block_size; |
1114 | } | ||
1115 | |||
1116 | static inline bool queue_flush_queueable(struct request_queue *q) | ||
1117 | { | ||
1118 | return !q->flush_not_queueable; | ||
1112 | } | 1119 | } |
1113 | 1120 | ||
1114 | typedef struct {struct page *v;} Sector; | 1121 | typedef struct {struct page *v;} Sector; |
1115 | 1122 | ||
1116 | unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); | 1123 | unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); |
1117 | 1124 | ||
1118 | static inline void put_dev_sector(Sector p) | 1125 | static inline void put_dev_sector(Sector p) |
1119 | { | 1126 | { |
1120 | page_cache_release(p.v); | 1127 | page_cache_release(p.v); |
1121 | } | 1128 | } |
1122 | 1129 | ||
1123 | struct work_struct; | 1130 | struct work_struct; |
1124 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); | 1131 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); |
1125 | 1132 | ||
1126 | #ifdef CONFIG_BLK_CGROUP | 1133 | #ifdef CONFIG_BLK_CGROUP |
1127 | /* | 1134 | /* |
1128 | * This should not be using sched_clock(). A real patch is in progress | 1135 | * This should not be using sched_clock(). A real patch is in progress |
1129 | * to fix this up, until that is in place we need to disable preemption | 1136 | * to fix this up, until that is in place we need to disable preemption |
1130 | * around sched_clock() in this function and set_io_start_time_ns(). | 1137 | * around sched_clock() in this function and set_io_start_time_ns(). |
1131 | */ | 1138 | */ |
1132 | static inline void set_start_time_ns(struct request *req) | 1139 | static inline void set_start_time_ns(struct request *req) |
1133 | { | 1140 | { |
1134 | preempt_disable(); | 1141 | preempt_disable(); |
1135 | req->start_time_ns = sched_clock(); | 1142 | req->start_time_ns = sched_clock(); |
1136 | preempt_enable(); | 1143 | preempt_enable(); |
1137 | } | 1144 | } |
1138 | 1145 | ||
1139 | static inline void set_io_start_time_ns(struct request *req) | 1146 | static inline void set_io_start_time_ns(struct request *req) |
1140 | { | 1147 | { |
1141 | preempt_disable(); | 1148 | preempt_disable(); |
1142 | req->io_start_time_ns = sched_clock(); | 1149 | req->io_start_time_ns = sched_clock(); |
1143 | preempt_enable(); | 1150 | preempt_enable(); |
1144 | } | 1151 | } |
1145 | 1152 | ||
1146 | static inline uint64_t rq_start_time_ns(struct request *req) | 1153 | static inline uint64_t rq_start_time_ns(struct request *req) |
1147 | { | 1154 | { |
1148 | return req->start_time_ns; | 1155 | return req->start_time_ns; |
1149 | } | 1156 | } |
1150 | 1157 | ||
1151 | static inline uint64_t rq_io_start_time_ns(struct request *req) | 1158 | static inline uint64_t rq_io_start_time_ns(struct request *req) |
1152 | { | 1159 | { |
1153 | return req->io_start_time_ns; | 1160 | return req->io_start_time_ns; |
1154 | } | 1161 | } |
1155 | #else | 1162 | #else |
1156 | static inline void set_start_time_ns(struct request *req) {} | 1163 | static inline void set_start_time_ns(struct request *req) {} |
1157 | static inline void set_io_start_time_ns(struct request *req) {} | 1164 | static inline void set_io_start_time_ns(struct request *req) {} |
1158 | static inline uint64_t rq_start_time_ns(struct request *req) | 1165 | static inline uint64_t rq_start_time_ns(struct request *req) |
1159 | { | 1166 | { |
1160 | return 0; | 1167 | return 0; |
1161 | } | 1168 | } |
1162 | static inline uint64_t rq_io_start_time_ns(struct request *req) | 1169 | static inline uint64_t rq_io_start_time_ns(struct request *req) |
1163 | { | 1170 | { |
1164 | return 0; | 1171 | return 0; |
1165 | } | 1172 | } |
1166 | #endif | 1173 | #endif |
1167 | 1174 | ||
1168 | #ifdef CONFIG_BLK_DEV_THROTTLING | 1175 | #ifdef CONFIG_BLK_DEV_THROTTLING |
1169 | extern int blk_throtl_init(struct request_queue *q); | 1176 | extern int blk_throtl_init(struct request_queue *q); |
1170 | extern void blk_throtl_exit(struct request_queue *q); | 1177 | extern void blk_throtl_exit(struct request_queue *q); |
1171 | extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); | 1178 | extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); |
1172 | #else /* CONFIG_BLK_DEV_THROTTLING */ | 1179 | #else /* CONFIG_BLK_DEV_THROTTLING */ |
1173 | static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) | 1180 | static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) |
1174 | { | 1181 | { |
1175 | return 0; | 1182 | return 0; |
1176 | } | 1183 | } |
1177 | 1184 | ||
1178 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } | 1185 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } |
1179 | static inline int blk_throtl_exit(struct request_queue *q) { return 0; } | 1186 | static inline int blk_throtl_exit(struct request_queue *q) { return 0; } |
1180 | #endif /* CONFIG_BLK_DEV_THROTTLING */ | 1187 | #endif /* CONFIG_BLK_DEV_THROTTLING */ |
1181 | 1188 | ||
1182 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ | 1189 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ |
1183 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) | 1190 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) |
1184 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ | 1191 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ |
1185 | MODULE_ALIAS("block-major-" __stringify(major) "-*") | 1192 | MODULE_ALIAS("block-major-" __stringify(major) "-*") |
1186 | 1193 | ||
1187 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | 1194 | #if defined(CONFIG_BLK_DEV_INTEGRITY) |
1188 | 1195 | ||
1189 | #define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */ | 1196 | #define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */ |
1190 | #define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */ | 1197 | #define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */ |
1191 | 1198 | ||
1192 | struct blk_integrity_exchg { | 1199 | struct blk_integrity_exchg { |
1193 | void *prot_buf; | 1200 | void *prot_buf; |
1194 | void *data_buf; | 1201 | void *data_buf; |
1195 | sector_t sector; | 1202 | sector_t sector; |
1196 | unsigned int data_size; | 1203 | unsigned int data_size; |
1197 | unsigned short sector_size; | 1204 | unsigned short sector_size; |
1198 | const char *disk_name; | 1205 | const char *disk_name; |
1199 | }; | 1206 | }; |
1200 | 1207 | ||
1201 | typedef void (integrity_gen_fn) (struct blk_integrity_exchg *); | 1208 | typedef void (integrity_gen_fn) (struct blk_integrity_exchg *); |
1202 | typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *); | 1209 | typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *); |
1203 | typedef void (integrity_set_tag_fn) (void *, void *, unsigned int); | 1210 | typedef void (integrity_set_tag_fn) (void *, void *, unsigned int); |
1204 | typedef void (integrity_get_tag_fn) (void *, void *, unsigned int); | 1211 | typedef void (integrity_get_tag_fn) (void *, void *, unsigned int); |
1205 | 1212 | ||
1206 | struct blk_integrity { | 1213 | struct blk_integrity { |
1207 | integrity_gen_fn *generate_fn; | 1214 | integrity_gen_fn *generate_fn; |
1208 | integrity_vrfy_fn *verify_fn; | 1215 | integrity_vrfy_fn *verify_fn; |
1209 | integrity_set_tag_fn *set_tag_fn; | 1216 | integrity_set_tag_fn *set_tag_fn; |
1210 | integrity_get_tag_fn *get_tag_fn; | 1217 | integrity_get_tag_fn *get_tag_fn; |
1211 | 1218 | ||
1212 | unsigned short flags; | 1219 | unsigned short flags; |
1213 | unsigned short tuple_size; | 1220 | unsigned short tuple_size; |
1214 | unsigned short sector_size; | 1221 | unsigned short sector_size; |
1215 | unsigned short tag_size; | 1222 | unsigned short tag_size; |
1216 | 1223 | ||
1217 | const char *name; | 1224 | const char *name; |
1218 | 1225 | ||
1219 | struct kobject kobj; | 1226 | struct kobject kobj; |
1220 | }; | 1227 | }; |
1221 | 1228 | ||
1222 | extern bool blk_integrity_is_initialized(struct gendisk *); | 1229 | extern bool blk_integrity_is_initialized(struct gendisk *); |
1223 | extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); | 1230 | extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); |
1224 | extern void blk_integrity_unregister(struct gendisk *); | 1231 | extern void blk_integrity_unregister(struct gendisk *); |
1225 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); | 1232 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); |
1226 | extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, | 1233 | extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, |
1227 | struct scatterlist *); | 1234 | struct scatterlist *); |
1228 | extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); | 1235 | extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); |
1229 | extern int blk_integrity_merge_rq(struct request_queue *, struct request *, | 1236 | extern int blk_integrity_merge_rq(struct request_queue *, struct request *, |
1230 | struct request *); | 1237 | struct request *); |
1231 | extern int blk_integrity_merge_bio(struct request_queue *, struct request *, | 1238 | extern int blk_integrity_merge_bio(struct request_queue *, struct request *, |
1232 | struct bio *); | 1239 | struct bio *); |
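A gendisk opts into integrity support by registering a blk_integrity template through blk_integrity_register(); the generate/verify callbacks then see one blk_integrity_exchg per data segment. A hedged sketch (the protection format, names and 8-byte tuple size are illustrative, not taken from any real driver):

static void mydev_prot_generate(struct blk_integrity_exchg *bix)
{
        /* compute protection bytes into bix->prot_buf from bix->data_buf,
         * bix->data_size bytes at a time (details omitted in this sketch)
         */
}

static int mydev_prot_verify(struct blk_integrity_exchg *bix)
{
        /* recompute and compare against bix->prot_buf; return 0 on match */
        return 0;
}

static struct blk_integrity mydev_integrity = {
        .name           = "MYDEV-PROT",
        .generate_fn    = mydev_prot_generate,
        .verify_fn      = mydev_prot_verify,
        .tuple_size     = 8,
        .tag_size       = 0,
};

static int mydev_register_integrity(struct gendisk *disk)
{
        return blk_integrity_register(disk, &mydev_integrity);
}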
1233 | 1240 | ||
1234 | static inline | 1241 | static inline |
1235 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) | 1242 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) |
1236 | { | 1243 | { |
1237 | return bdev->bd_disk->integrity; | 1244 | return bdev->bd_disk->integrity; |
1238 | } | 1245 | } |
1239 | 1246 | ||
1240 | static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) | 1247 | static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) |
1241 | { | 1248 | { |
1242 | return disk->integrity; | 1249 | return disk->integrity; |
1243 | } | 1250 | } |
1244 | 1251 | ||
1245 | static inline int blk_integrity_rq(struct request *rq) | 1252 | static inline int blk_integrity_rq(struct request *rq) |
1246 | { | 1253 | { |
1247 | if (rq->bio == NULL) | 1254 | if (rq->bio == NULL) |
1248 | return 0; | 1255 | return 0; |
1249 | 1256 | ||
1250 | return bio_integrity(rq->bio); | 1257 | return bio_integrity(rq->bio); |
1251 | } | 1258 | } |
1252 | 1259 | ||
1253 | static inline void blk_queue_max_integrity_segments(struct request_queue *q, | 1260 | static inline void blk_queue_max_integrity_segments(struct request_queue *q, |
1254 | unsigned int segs) | 1261 | unsigned int segs) |
1255 | { | 1262 | { |
1256 | q->limits.max_integrity_segments = segs; | 1263 | q->limits.max_integrity_segments = segs; |
1257 | } | 1264 | } |
1258 | 1265 | ||
1259 | static inline unsigned short | 1266 | static inline unsigned short |
1260 | queue_max_integrity_segments(struct request_queue *q) | 1267 | queue_max_integrity_segments(struct request_queue *q) |
1261 | { | 1268 | { |
1262 | return q->limits.max_integrity_segments; | 1269 | return q->limits.max_integrity_segments; |
1263 | } | 1270 | } |
1264 | 1271 | ||
1265 | #else /* CONFIG_BLK_DEV_INTEGRITY */ | 1272 | #else /* CONFIG_BLK_DEV_INTEGRITY */ |
1266 | 1273 | ||
1267 | #define blk_integrity_rq(rq) (0) | 1274 | #define blk_integrity_rq(rq) (0) |
1268 | #define blk_rq_count_integrity_sg(a, b) (0) | 1275 | #define blk_rq_count_integrity_sg(a, b) (0) |
1269 | #define blk_rq_map_integrity_sg(a, b, c) (0) | 1276 | #define blk_rq_map_integrity_sg(a, b, c) (0) |
1270 | #define bdev_get_integrity(a) (0) | 1277 | #define bdev_get_integrity(a) (0) |
1271 | #define blk_get_integrity(a) (0) | 1278 | #define blk_get_integrity(a) (0) |
1272 | #define blk_integrity_compare(a, b) (0) | 1279 | #define blk_integrity_compare(a, b) (0) |
1273 | #define blk_integrity_register(a, b) (0) | 1280 | #define blk_integrity_register(a, b) (0) |
1274 | #define blk_integrity_unregister(a) do { } while (0); | 1281 | #define blk_integrity_unregister(a) do { } while (0); |
1275 | #define blk_queue_max_integrity_segments(a, b) do { } while (0); | 1282 | #define blk_queue_max_integrity_segments(a, b) do { } while (0); |
1276 | #define queue_max_integrity_segments(a) (0) | 1283 | #define queue_max_integrity_segments(a) (0) |
1277 | #define blk_integrity_merge_rq(a, b, c) (0) | 1284 | #define blk_integrity_merge_rq(a, b, c) (0) |
1278 | #define blk_integrity_merge_bio(a, b, c) (0) | 1285 | #define blk_integrity_merge_bio(a, b, c) (0) |
1279 | #define blk_integrity_is_initialized(a) (0) | 1286 | #define blk_integrity_is_initialized(a) (0) |
1280 | 1287 | ||
1281 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 1288 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
1282 | 1289 | ||
1283 | struct block_device_operations { | 1290 | struct block_device_operations { |
1284 | int (*open) (struct block_device *, fmode_t); | 1291 | int (*open) (struct block_device *, fmode_t); |
1285 | int (*release) (struct gendisk *, fmode_t); | 1292 | int (*release) (struct gendisk *, fmode_t); |
1286 | int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 1293 | int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); |
1287 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 1294 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); |
1288 | int (*direct_access) (struct block_device *, sector_t, | 1295 | int (*direct_access) (struct block_device *, sector_t, |
1289 | void **, unsigned long *); | 1296 | void **, unsigned long *); |
1290 | unsigned int (*check_events) (struct gendisk *disk, | 1297 | unsigned int (*check_events) (struct gendisk *disk, |
1291 | unsigned int clearing); | 1298 | unsigned int clearing); |
1292 | /* ->media_changed() is DEPRECATED, use ->check_events() instead */ | 1299 | /* ->media_changed() is DEPRECATED, use ->check_events() instead */ |
1293 | int (*media_changed) (struct gendisk *); | 1300 | int (*media_changed) (struct gendisk *); |
1294 | void (*unlock_native_capacity) (struct gendisk *); | 1301 | void (*unlock_native_capacity) (struct gendisk *); |
1295 | int (*revalidate_disk) (struct gendisk *); | 1302 | int (*revalidate_disk) (struct gendisk *); |
1296 | int (*getgeo)(struct block_device *, struct hd_geometry *); | 1303 | int (*getgeo)(struct block_device *, struct hd_geometry *); |
1297 | /* this callback is with swap_lock and sometimes page table lock held */ | 1304 | /* this callback is with swap_lock and sometimes page table lock held */ |
1298 | void (*swap_slot_free_notify) (struct block_device *, unsigned long); | 1305 | void (*swap_slot_free_notify) (struct block_device *, unsigned long); |
1299 | struct module *owner; | 1306 | struct module *owner; |
1300 | }; | 1307 | }; |
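A driver fills in only the callbacks it needs and points its gendisk's fops at the result; callbacks left NULL are simply skipped. A minimal sketch (handlers hypothetical, geometry values made up):

static int mydev_open(struct block_device *bdev, fmode_t mode)
{
        return 0;
}

static int mydev_release(struct gendisk *disk, fmode_t mode)
{
        return 0;
}

static int mydev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        /* fake a CHS geometry for legacy tools: 4 heads x 16 sectors */
        geo->heads = 4;
        geo->sectors = 16;
        geo->cylinders = get_capacity(bdev->bd_disk) >> 6; /* / (4 * 16) */
        return 0;
}

static const struct block_device_operations mydev_fops = {
        .owner          = THIS_MODULE,
        .open           = mydev_open,
        .release        = mydev_release,
        .getgeo         = mydev_getgeo,
};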
1301 | 1308 | ||
1302 | extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, | 1309 | extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, |
1303 | unsigned long); | 1310 | unsigned long); |
1304 | #else /* CONFIG_BLOCK */ | 1311 | #else /* CONFIG_BLOCK */ |
1305 | /* | 1312 | /* |
1306 | * stubs for when the block layer is configured out | 1313 | * stubs for when the block layer is configured out |
1307 | */ | 1314 | */ |
1308 | #define buffer_heads_over_limit 0 | 1315 | #define buffer_heads_over_limit 0 |
1309 | 1316 | ||
1310 | static inline long nr_blockdev_pages(void) | 1317 | static inline long nr_blockdev_pages(void) |
1311 | { | 1318 | { |
1312 | return 0; | 1319 | return 0; |
1313 | } | 1320 | } |
1314 | 1321 | ||
1315 | struct blk_plug { | 1322 | struct blk_plug { |
1316 | }; | 1323 | }; |
1317 | 1324 | ||
1318 | static inline void blk_start_plug(struct blk_plug *plug) | 1325 | static inline void blk_start_plug(struct blk_plug *plug) |
1319 | { | 1326 | { |
1320 | } | 1327 | } |
1321 | 1328 | ||
1322 | static inline void blk_finish_plug(struct blk_plug *plug) | 1329 | static inline void blk_finish_plug(struct blk_plug *plug) |
1323 | { | 1330 | { |
1324 | } | 1331 | } |
1325 | 1332 | ||
1326 | static inline void blk_flush_plug(struct task_struct *task) | 1333 | static inline void blk_flush_plug(struct task_struct *task) |
1327 | { | 1334 | { |
1328 | } | 1335 | } |
1329 | 1336 | ||
1330 | static inline void blk_schedule_flush_plug(struct task_struct *task) | 1337 | static inline void blk_schedule_flush_plug(struct task_struct *task) |
1331 | { | 1338 | { |
1332 | } | 1339 | } |
1333 | 1340 | ||
1334 | 1341 | ||
1335 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) | 1342 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) |
1336 | { | 1343 | { |
1337 | return false; | 1344 | return false; |
1338 | } | 1345 | } |
1339 | 1346 | ||
1340 | #endif /* CONFIG_BLOCK */ | 1347 | #endif /* CONFIG_BLOCK */ |
1341 | 1348 | ||
1342 | #endif | 1349 | #endif |
1343 | 1350 |