Blame view
include/linux/blkdev.h
55.1 KB
1da177e4c Linux-2.6.12-rc2 |
1 2 |
#ifndef _LINUX_BLKDEV_H #define _LINUX_BLKDEV_H |
85fd0bc95 Fix blkdev.h buil... |
3 |
#include <linux/sched.h> |
f5ff8422b Fix warnings with... |
4 |
#ifdef CONFIG_BLOCK |
1da177e4c Linux-2.6.12-rc2 |
5 6 7 |
#include <linux/major.h> #include <linux/genhd.h> #include <linux/list.h> |
320ae51fe blk-mq: new multi... |
8 |
#include <linux/llist.h> |
1da177e4c Linux-2.6.12-rc2 |
9 10 11 |
#include <linux/timer.h> #include <linux/workqueue.h> #include <linux/pagemap.h> |
66114cad6 writeback: separa... |
12 |
#include <linux/backing-dev-defs.h> |
1da177e4c Linux-2.6.12-rc2 |
13 14 |
#include <linux/wait.h> #include <linux/mempool.h> |
34c0fd540 mm, dax, pmem: in... |
15 |
#include <linux/pfn.h> |
1da177e4c Linux-2.6.12-rc2 |
16 |
#include <linux/bio.h> |
1da177e4c Linux-2.6.12-rc2 |
17 |
#include <linux/stringify.h> |
3e6053d76 block: adjust blk... |
18 |
#include <linux/gfp.h> |
d351af01b bsg: bind bsg to ... |
19 |
#include <linux/bsg.h> |
c7c22e4d5 block: add suppor... |
20 |
#include <linux/smp.h> |
548bc8e1b block: RCU free r... |
21 |
#include <linux/rcupdate.h> |
add703fda blk-mq: use percp... |
22 |
#include <linux/percpu-refcount.h> |
84be456f8 remove <asm/scatt... |
23 |
#include <linux/scatterlist.h> |
6a0cb1bc1 block: Implement ... |
24 |
#include <linux/blkzoned.h> |
1da177e4c Linux-2.6.12-rc2 |
25 |
|
de4772542 include: replace ... |
26 |
struct module; |
21b2f0c80 [SCSI] unify SCSI... |
27 |
struct scsi_ioctl_command; |
1da177e4c Linux-2.6.12-rc2 |
28 |
struct request_queue; |
1da177e4c Linux-2.6.12-rc2 |
29 |
struct elevator_queue; |
2056a782f [PATCH] Block que... |
30 |
struct blk_trace; |
3d6392cfb bsg: support for ... |
31 32 |
struct request; struct sg_io_hdr; |
aa387cc89 block: add bsg he... |
33 |
struct bsg_job; |
3c798398e blkcg: mass renam... |
34 |
struct blkcg_gq; |
7c94e1c15 block: introduce ... |
35 |
struct blk_flush_queue; |
bbd3e0643 block: add an API... |
36 |
struct pr_ops; |
87760e5ee block: hook up wr... |
37 |
struct rq_wb; |
1da177e4c Linux-2.6.12-rc2 |
38 39 40 |
#define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ |
8bd435b30 blkcg: remove sta... |
41 42 43 44 45 |
/* * Maximum number of blkcg policies allowed to be registered concurrently. * Defined here to simplify include dependency. */ #define BLKCG_MAX_POLS 2 |
8ffdc6550 [BLOCK] add @upto... |
46 |
typedef void (rq_end_io_fn)(struct request *, int); |
1da177e4c Linux-2.6.12-rc2 |
47 |
|
5b788ce3e block: prepare fo... |
48 49 |
#define BLK_RL_SYNCFULL (1U << 0) #define BLK_RL_ASYNCFULL (1U << 1) |
1da177e4c Linux-2.6.12-rc2 |
50 |
struct request_list { |
5b788ce3e block: prepare fo... |
51 |
struct request_queue *q; /* the queue this rl belongs to */ |
a051661ca blkcg: implement ... |
52 53 54 |
#ifdef CONFIG_BLK_CGROUP struct blkcg_gq *blkg; /* blkg this request pool belongs to */ #endif |
1faa16d22 block: change the... |
55 56 57 58 |
/* * count[], starved[], and wait[] are indexed by * BLK_RW_SYNC/BLK_RW_ASYNC */ |
8a5ecdd42 block: add q->nr_... |
59 60 61 62 |
int count[2]; int starved[2]; mempool_t *rq_pool; wait_queue_head_t wait[2]; |
5b788ce3e block: prepare fo... |
63 |
unsigned int flags; |
1da177e4c Linux-2.6.12-rc2 |
64 |
}; |
4aff5e233 [PATCH] Split str... |
65 66 67 68 69 70 |
/* * request command types */ enum rq_cmd_type_bits { REQ_TYPE_FS = 1, /* fs request */ REQ_TYPE_BLOCK_PC, /* scsi command */ |
b42171ef7 block: move REQ_T... |
71 |
REQ_TYPE_DRV_PRIV, /* driver defined types from here */ |
4aff5e233 [PATCH] Split str... |
72 |
}; |
e80640213 block: split out ... |
73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 |
/* * request flags */ typedef __u32 __bitwise req_flags_t; /* elevator knows about this request */ #define RQF_SORTED ((__force req_flags_t)(1 << 0)) /* drive already may have started this one */ #define RQF_STARTED ((__force req_flags_t)(1 << 1)) /* uses tagged queueing */ #define RQF_QUEUED ((__force req_flags_t)(1 << 2)) /* may not be passed by ioscheduler */ #define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) /* request for flush sequence */ #define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) /* merge of different types, fail separately */ #define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) /* track inflight for MQ */ #define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) /* don't call prep for this one */ #define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) /* set for "ide_preempt" requests and also for requests for which the SCSI "quiesce" state must be ignored. */ #define RQF_PREEMPT ((__force req_flags_t)(1 << 8)) /* contains copies of user pages */ #define RQF_COPY_USER ((__force req_flags_t)(1 << 9)) /* vaguely specified driver internal error. Ignored by the block layer */ #define RQF_FAILED ((__force req_flags_t)(1 << 10)) /* don't warn about errors */ #define RQF_QUIET ((__force req_flags_t)(1 << 11)) /* elevator private data attached */ #define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) /* account I/O stat */ #define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) /* request came from our alloc pool */ #define RQF_ALLOCED ((__force req_flags_t)(1 << 14)) /* runtime pm request */ #define RQF_PM ((__force req_flags_t)(1 << 15)) /* on IO scheduler merge hash */ #define RQF_HASHED ((__force req_flags_t)(1 << 16)) |
cf43e6be8 block: add scalab... |
112 113 |
/* IO stats tracking on */ #define RQF_STATS ((__force req_flags_t)(1 << 17)) |
f9d03f96b block: improve ha... |
114 115 116 |
/* Look at ->special_vec for the actual data payload instead of the bio chain. */ #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) |
e80640213 block: split out ... |
117 118 119 |
/* flags that prevent us from merging requests: */ #define RQF_NOMERGE_FLAGS \ |
f9d03f96b block: improve ha... |
120 |
(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) |
e80640213 block: split out ... |
121 |
|
1da177e4c Linux-2.6.12-rc2 |
122 123 124 |
#define BLK_MAX_CDB 16 /* |
af76e555e blk-mq: initializ... |
125 126 127 128 |
* Try to put the fields that are referenced together in the same cacheline. * * If you modify this structure, make sure to update blk_rq_init() and * especially blk_mq_rq_ctx_init() to take care of the added fields. |
1da177e4c Linux-2.6.12-rc2 |
129 130 |
*/ struct request { |
6897fc22e kernel: use lockl... |
131 |
struct list_head queuelist; |
320ae51fe blk-mq: new multi... |
132 133 |
union { struct call_single_data csd; |
9828c2c6c block: Convert fi... |
134 |
u64 fifo_time; |
320ae51fe blk-mq: new multi... |
135 |
}; |
ff856bad6 [BLOCK] ll_rw_blk... |
136 |
|
165125e1e [BLOCK] Get rid o... |
137 |
struct request_queue *q; |
320ae51fe blk-mq: new multi... |
138 |
struct blk_mq_ctx *mq_ctx; |
e6a1c874a [PATCH] struct re... |
139 |
|
ca93e4534 block: better pac... |
140 |
int cpu; |
b42171ef7 block: move REQ_T... |
141 |
unsigned cmd_type; |
ef295ecf0 block: better op ... |
142 |
unsigned int cmd_flags; /* op and common flags */ |
e80640213 block: split out ... |
143 |
req_flags_t rq_flags; |
242f9dcb8 block: unify requ... |
144 |
unsigned long atomic_flags; |
1da177e4c Linux-2.6.12-rc2 |
145 |
|
a2dec7b36 block: hide reque... |
146 |
/* the following two fields are internal, NEVER access directly */ |
a2dec7b36 block: hide reque... |
147 |
unsigned int __data_len; /* total data len */ |
181fdde3b block: remove 16 ... |
148 |
sector_t __sector; /* sector cursor */ |
1da177e4c Linux-2.6.12-rc2 |
149 150 151 |
struct bio *bio; struct bio *biotail; |
360f92c24 block: fix regres... |
152 153 154 155 156 157 158 159 160 161 162 |
/* * The hash is used inside the scheduler, and killed once the * request reaches the dispatch list. The ipi_list is only used * to queue the request for softirq completion, which is long * after the request has been unhashed (and even removed from * the dispatch list). */ union { struct hlist_node hash; /* merge hash */ struct list_head ipi_list; }; |
e6a1c874a [PATCH] struct re... |
163 164 165 |
/* * The rb_node is only used inside the io scheduler, requests * are pruned when moved to the dispatch queue. So let the |
c186794db block: share requ... |
166 |
* completion_data share space with the rb_node. |
e6a1c874a [PATCH] struct re... |
167 168 169 |
*/ union { struct rb_node rb_node; /* sort/lookup */ |
f9d03f96b block: improve ha... |
170 |
struct bio_vec special_vec; |
c186794db block: share requ... |
171 |
void *completion_data; |
e6a1c874a [PATCH] struct re... |
172 |
}; |
9817064b6 [PATCH] elevator:... |
173 |
|
ff7d145fd [PATCH] Add one m... |
174 |
/* |
7f1dc8a2d blkio: Fix blkio ... |
175 |
* Three pointers are available for the IO schedulers, if they need |
c186794db block: share requ... |
176 177 |
* more they have to dynamically allocate it. Flush requests are * never put on the IO scheduler. So let the flush fields share |
a612fddf0 block, cfq: move ... |
178 |
* space with the elevator data. |
ff7d145fd [PATCH] Add one m... |
179 |
*/ |
c186794db block: share requ... |
180 |
union { |
a612fddf0 block, cfq: move ... |
181 182 183 184 |
struct { struct io_cq *icq; void *priv[2]; } elv; |
c186794db block: share requ... |
185 186 187 |
struct { unsigned int seq; struct list_head list; |
4853abaae block: fix flush ... |
188 |
rq_end_io_fn *saved_end_io; |
c186794db block: share requ... |
189 190 |
} flush; }; |
ff7d145fd [PATCH] Add one m... |
191 |
|
8f34ee75d [PATCH] Rearrange... |
192 |
struct gendisk *rq_disk; |
09e099d4b block: fix accoun... |
193 |
struct hd_struct *part; |
1da177e4c Linux-2.6.12-rc2 |
194 |
unsigned long start_time; |
cf43e6be8 block: add scalab... |
195 |
struct blk_issue_stat issue_stat; |
9195291e5 blkio: Increment ... |
196 |
#ifdef CONFIG_BLK_CGROUP |
a051661ca blkcg: implement ... |
197 |
struct request_list *rl; /* rl this rq is alloced from */ |
9195291e5 blkio: Increment ... |
198 199 200 |
unsigned long long start_time_ns; unsigned long long io_start_time_ns; /* when passed to hardware */ #endif |
1da177e4c Linux-2.6.12-rc2 |
201 202 203 204 |
/* Number of scatter-gather DMA addr+len pairs after * physical address coalescing is performed. */ unsigned short nr_phys_segments; |
13f05c8d8 block/scsi: Provi... |
205 206 207 |
#if defined(CONFIG_BLK_DEV_INTEGRITY) unsigned short nr_integrity_segments; #endif |
1da177e4c Linux-2.6.12-rc2 |
208 |
|
8f34ee75d [PATCH] Rearrange... |
209 |
unsigned short ioprio; |
731ec497e block: kill rq->data |
210 |
void *special; /* opaque pointer available for LLD use */ |
1da177e4c Linux-2.6.12-rc2 |
211 |
|
cdd602621 [PATCH] Remove ->... |
212 213 |
int tag; int errors; |
1da177e4c Linux-2.6.12-rc2 |
214 215 216 |
/* * when request is used as a packet command carrier */ |
d7e3c3249 block: add large ... |
217 218 |
unsigned char __cmd[BLK_MAX_CDB]; unsigned char *cmd; |
181fdde3b block: remove 16 ... |
219 |
unsigned short cmd_len; |
1da177e4c Linux-2.6.12-rc2 |
220 |
|
7a85f8896 block: restore th... |
221 |
unsigned int extra_len; /* length of alignment and padding */ |
1da177e4c Linux-2.6.12-rc2 |
222 |
unsigned int sense_len; |
c3a4d78c5 block: add rq->re... |
223 |
unsigned int resid_len; /* residual count */ |
1da177e4c Linux-2.6.12-rc2 |
224 |
void *sense; |
242f9dcb8 block: unify requ... |
225 226 |
unsigned long deadline; struct list_head timeout_list; |
1da177e4c Linux-2.6.12-rc2 |
227 |
unsigned int timeout; |
17e01f216 [SCSI] add retrie... |
228 |
int retries; |
1da177e4c Linux-2.6.12-rc2 |
229 230 |
/* |
c00895ab2 [PATCH] Remove ->... |
231 |
* completion callback. |
1da177e4c Linux-2.6.12-rc2 |
232 233 234 |
*/ rq_end_io_fn *end_io; void *end_io_data; |
abae1fde6 add a struct requ... |
235 236 237 |
/* for bidi */ struct request *next_rq; |
1da177e4c Linux-2.6.12-rc2 |
238 |
}; |
766ca4428 virtio_blk: use a... |
239 240 241 242 |
static inline unsigned short req_get_ioprio(struct request *req) { return req->ioprio; } |
1da177e4c Linux-2.6.12-rc2 |
243 |
#include <linux/elevator.h> |
320ae51fe blk-mq: new multi... |
244 |
struct blk_queue_ctx; |
165125e1e [BLOCK] Get rid o... |
245 |
typedef void (request_fn_proc) (struct request_queue *q); |
dece16353 block: change ->m... |
246 |
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio); |
165125e1e [BLOCK] Get rid o... |
247 |
typedef int (prep_rq_fn) (struct request_queue *, struct request *); |
28018c242 block: implement ... |
248 |
typedef void (unprep_rq_fn) (struct request_queue *, struct request *); |
1da177e4c Linux-2.6.12-rc2 |
249 250 |
struct bio_vec; |
ff856bad6 [BLOCK] ll_rw_blk... |
251 |
typedef void (softirq_done_fn)(struct request *); |
2fb98e841 block: implement ... |
252 |
typedef int (dma_drain_needed_fn)(struct request *); |
ef9e3facd block: add lld bu... |
253 |
typedef int (lld_busy_fn) (struct request_queue *q); |
aa387cc89 block: add bsg he... |
254 |
typedef int (bsg_job_fn) (struct bsg_job *); |
1da177e4c Linux-2.6.12-rc2 |
255 |
|
242f9dcb8 block: unify requ... |
256 257 258 259 260 261 262 |
enum blk_eh_timer_return { BLK_EH_NOT_HANDLED, BLK_EH_HANDLED, BLK_EH_RESET_TIMER, }; typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); |
1da177e4c Linux-2.6.12-rc2 |
263 264 265 266 |
enum blk_queue_state { Queue_down, Queue_up, }; |
1da177e4c Linux-2.6.12-rc2 |
267 268 269 |
struct blk_queue_tag { struct request **tag_index; /* map of busy tags */ unsigned long *tag_map; /* bit map of free/busy tags */ |
1da177e4c Linux-2.6.12-rc2 |
270 |
int max_depth; /* what we will send to device */ |
ba0250824 [PATCH] blk: fix ... |
271 |
int real_max_depth; /* what the array can hold */ |
1da177e4c Linux-2.6.12-rc2 |
272 |
atomic_t refcnt; /* map can be shared */ |
ee1b6f7af block: support di... |
273 274 |
int alloc_policy; /* tag allocation policy */ int next_tag; /* next tag */ |
1da177e4c Linux-2.6.12-rc2 |
275 |
}; |
ee1b6f7af block: support di... |
276 277 |
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ |
1da177e4c Linux-2.6.12-rc2 |
278 |
|
abf543937 block: move cmdfi... |
279 280 |
#define BLK_SCSI_MAX_CMDS (256) #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) |
797476b88 block: Add 'zoned... |
281 282 283 284 285 286 287 288 |
/* * Zoned block device models (zoned limit). */ enum blk_zoned_model { BLK_ZONED_NONE, /* Regular block device */ BLK_ZONED_HA, /* Host-aware zoned block device */ BLK_ZONED_HM, /* Host-managed zoned block device */ }; |
025146e13 block: Move queue... |
289 290 291 |
struct queue_limits { unsigned long bounce_pfn; unsigned long seg_boundary_mask; |
03100aada block: Replace SG... |
292 |
unsigned long virt_boundary_mask; |
025146e13 block: Move queue... |
293 294 |
unsigned int max_hw_sectors; |
ca369d51b block/sd: Fix dev... |
295 |
unsigned int max_dev_sectors; |
762380ad9 block: add notion... |
296 |
unsigned int chunk_sectors; |
025146e13 block: Move queue... |
297 298 |
unsigned int max_sectors; unsigned int max_segment_size; |
c72758f33 block: Export I/O... |
299 300 301 302 |
unsigned int physical_block_size; unsigned int alignment_offset; unsigned int io_min; unsigned int io_opt; |
67efc9258 block: allow larg... |
303 |
unsigned int max_discard_sectors; |
0034af036 block: make /sys/... |
304 |
unsigned int max_hw_discard_sectors; |
4363ac7c1 block: Implement ... |
305 |
unsigned int max_write_same_sectors; |
a6f0788ec block: add suppor... |
306 |
unsigned int max_write_zeroes_sectors; |
86b372814 block: Expose dis... |
307 308 |
unsigned int discard_granularity; unsigned int discard_alignment; |
025146e13 block: Move queue... |
309 310 |
unsigned short logical_block_size; |
8a78362c4 block: Consolidat... |
311 |
unsigned short max_segments; |
13f05c8d8 block/scsi: Provi... |
312 |
unsigned short max_integrity_segments; |
025146e13 block: Move queue... |
313 |
|
c72758f33 block: Export I/O... |
314 |
unsigned char misaligned; |
86b372814 block: Expose dis... |
315 |
unsigned char discard_misaligned; |
e692cb668 block: Deprecate ... |
316 |
unsigned char cluster; |
a934a00a6 block: Fix discar... |
317 |
unsigned char discard_zeroes_data; |
c78afc626 bcache/md: Use ra... |
318 |
unsigned char raid_partial_stripes_expensive; |
797476b88 block: Add 'zoned... |
319 |
enum blk_zoned_model zoned; |
025146e13 block: Move queue... |
320 |
}; |
6a0cb1bc1 block: Implement ... |
321 322 323 324 325 326 327 328 329 330 331 332 |
#ifdef CONFIG_BLK_DEV_ZONED struct blk_zone_report_hdr { unsigned int nr_zones; u8 padding[60]; }; extern int blkdev_report_zones(struct block_device *bdev, sector_t sector, struct blk_zone *zones, unsigned int *nr_zones, gfp_t gfp_mask); extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, sector_t nr_sectors, gfp_t gfp_mask); |
3ed05a987 blk-zoned: implem... |
333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 |
extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); #else /* CONFIG_BLK_DEV_ZONED */ static inline int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { return -ENOTTY; } static inline int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { return -ENOTTY; } |
6a0cb1bc1 block: Implement ... |
353 |
#endif /* CONFIG_BLK_DEV_ZONED */ |
d7b763013 block: reorder re... |
354 |
struct request_queue { |
1da177e4c Linux-2.6.12-rc2 |
355 356 357 358 359 |
/* * Together with queue_head for cacheline sharing */ struct list_head queue_head; struct request *last_merge; |
b374d18a4 block: get rid of... |
360 |
struct elevator_queue *elevator; |
8a5ecdd42 block: add q->nr_... |
361 362 |
int nr_rqs[2]; /* # allocated [a]sync rqs */ int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ |
1da177e4c Linux-2.6.12-rc2 |
363 |
|
87760e5ee block: hook up wr... |
364 |
struct rq_wb *rq_wb; |
1da177e4c Linux-2.6.12-rc2 |
365 |
/* |
a051661ca blkcg: implement ... |
366 367 368 369 |
* If blkcg is not used, @q->root_rl serves all requests. If blkcg * is used, root blkg allocates from @q->root_rl and all other * blkgs from their own blkg->rl. Which one to use should be * determined using bio_request_list(). |
1da177e4c Linux-2.6.12-rc2 |
370 |
*/ |
a051661ca blkcg: implement ... |
371 |
struct request_list root_rl; |
1da177e4c Linux-2.6.12-rc2 |
372 373 |
request_fn_proc *request_fn; |
1da177e4c Linux-2.6.12-rc2 |
374 375 |
make_request_fn *make_request_fn; prep_rq_fn *prep_rq_fn; |
28018c242 block: implement ... |
376 |
unprep_rq_fn *unprep_rq_fn; |
ff856bad6 [BLOCK] ll_rw_blk... |
377 |
softirq_done_fn *softirq_done_fn; |
242f9dcb8 block: unify requ... |
378 |
rq_timed_out_fn *rq_timed_out_fn; |
2fb98e841 block: implement ... |
379 |
dma_drain_needed_fn *dma_drain_needed; |
ef9e3facd block: add lld bu... |
380 |
lld_busy_fn *lld_busy_fn; |
1da177e4c Linux-2.6.12-rc2 |
381 |
|
320ae51fe blk-mq: new multi... |
382 383 384 385 386 |
struct blk_mq_ops *mq_ops; unsigned int *mq_map; /* sw queues */ |
e6cdb0929 blk-mq: fix spars... |
387 |
struct blk_mq_ctx __percpu *queue_ctx; |
320ae51fe blk-mq: new multi... |
388 |
unsigned int nr_queues; |
d278d4a88 block: add code t... |
389 |
unsigned int queue_depth; |
320ae51fe blk-mq: new multi... |
390 391 392 |
/* hw dispatch queues */ struct blk_mq_hw_ctx **queue_hw_ctx; unsigned int nr_hw_queues; |
1da177e4c Linux-2.6.12-rc2 |
393 |
/* |
8922e16cf [PATCH] 01/05 Imp... |
394 395 |
* Dispatch queue sorting */ |
1b47f531e [PATCH] generic d... |
396 |
sector_t end_sector; |
8922e16cf [PATCH] 01/05 Imp... |
397 |
struct request *boundary_rq; |
8922e16cf [PATCH] 01/05 Imp... |
398 399 |
/* |
3cca6dc1c block: add API fo... |
400 |
* Delayed queue handling |
1da177e4c Linux-2.6.12-rc2 |
401 |
*/ |
3cca6dc1c block: add API fo... |
402 |
struct delayed_work delay_work; |
1da177e4c Linux-2.6.12-rc2 |
403 404 405 406 407 408 409 410 |
struct backing_dev_info backing_dev_info; /* * The queue owner gets to use this for whatever they like. * ll_rw_blk doesn't touch it. */ void *queuedata; |
1da177e4c Linux-2.6.12-rc2 |
411 |
/* |
d7b763013 block: reorder re... |
412 |
* various queue flags, see QUEUE_* below |
1da177e4c Linux-2.6.12-rc2 |
413 |
*/ |
d7b763013 block: reorder re... |
414 |
unsigned long queue_flags; |
1da177e4c Linux-2.6.12-rc2 |
415 416 |
/* |
a73f730d0 block, cfq: move ... |
417 418 419 420 421 422 |
* ida allocated id for this queue. Used to index queues from * ioctx. */ int id; /* |
d7b763013 block: reorder re... |
423 |
* queue needs bounce pages for pages above this limit |
1da177e4c Linux-2.6.12-rc2 |
424 |
*/ |
d7b763013 block: reorder re... |
425 |
gfp_t bounce_gfp; |
1da177e4c Linux-2.6.12-rc2 |
426 427 |
/* |
152587deb [PATCH] fix NMI l... |
428 429 430 |
* protects queue structures from reentrancy. ->__queue_lock should * _never_ be used directly, it is queue private. always use * ->queue_lock. |
1da177e4c Linux-2.6.12-rc2 |
431 |
*/ |
152587deb [PATCH] fix NMI l... |
432 |
spinlock_t __queue_lock; |
1da177e4c Linux-2.6.12-rc2 |
433 434 435 436 437 438 |
spinlock_t *queue_lock; /* * queue kobject */ struct kobject kobj; |
320ae51fe blk-mq: new multi... |
439 440 441 442 |
/* * mq queue kobject */ struct kobject mq_kobj; |
ac6fc48c9 block: move blk_i... |
443 444 445 |
#ifdef CONFIG_BLK_DEV_INTEGRITY struct blk_integrity integrity; #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
47fafbc70 block / PM: Repla... |
446 |
#ifdef CONFIG_PM |
6c9546675 block: add runtim... |
447 448 449 450 |
struct device *dev; int rpm_status; unsigned int nr_pending; #endif |
1da177e4c Linux-2.6.12-rc2 |
451 452 453 454 455 456 457 |
/* * queue settings */ unsigned long nr_requests; /* Max # of requests */ unsigned int nr_congestion_on; unsigned int nr_congestion_off; unsigned int nr_batching; |
fa0ccd837 block: implement ... |
458 |
unsigned int dma_drain_size; |
d7b763013 block: reorder re... |
459 |
void *dma_drain_buffer; |
e3790c7d4 block: separate o... |
460 |
unsigned int dma_pad_mask; |
1da177e4c Linux-2.6.12-rc2 |
461 462 463 |
unsigned int dma_alignment; struct blk_queue_tag *queue_tags; |
6eca9004d [BLOCK] Fix bad s... |
464 |
struct list_head tag_busy_list; |
1da177e4c Linux-2.6.12-rc2 |
465 |
|
15853af9f [BLOCK] Implement... |
466 |
unsigned int nr_sorted; |
0a7ae2ff0 block: change the... |
467 |
unsigned int in_flight[2]; |
cf43e6be8 block: add scalab... |
468 469 |
struct blk_rq_stat rq_stats[2]; |
24faf6f60 block: Make blk_c... |
470 471 472 473 474 475 |
/* * Number of active block driver functions for which blk_drain_queue() * must wait. Must be incremented around functions that unlock the * queue_lock internally, e.g. scsi_request_fn(). */ unsigned int request_fn_active; |
1da177e4c Linux-2.6.12-rc2 |
476 |
|
242f9dcb8 block: unify requ... |
477 |
unsigned int rq_timeout; |
64f1c21e8 blk-mq: make the ... |
478 |
int poll_nsec; |
242f9dcb8 block: unify requ... |
479 |
struct timer_list timeout; |
287922eb0 block: defer time... |
480 |
struct work_struct timeout_work; |
242f9dcb8 block: unify requ... |
481 |
struct list_head timeout_list; |
a612fddf0 block, cfq: move ... |
482 |
struct list_head icq_list; |
4eef30499 blkcg: move per-q... |
483 |
#ifdef CONFIG_BLK_CGROUP |
a2b1693ba blkcg: implement ... |
484 |
DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS); |
3c798398e blkcg: mass renam... |
485 |
struct blkcg_gq *root_blkg; |
03aa264ac blkcg: let blkcg ... |
486 |
struct list_head blkg_list; |
4eef30499 blkcg: move per-q... |
487 |
#endif |
a612fddf0 block, cfq: move ... |
488 |
|
025146e13 block: Move queue... |
489 |
struct queue_limits limits; |
1da177e4c Linux-2.6.12-rc2 |
490 491 492 493 494 |
/* * sg stuff */ unsigned int sg_timeout; unsigned int sg_reserved_size; |
1946089a1 [PATCH] NUMA awar... |
495 |
int node; |
6c5c93415 [PATCH] ifdef blk... |
496 |
#ifdef CONFIG_BLK_DEV_IO_TRACE |
2056a782f [PATCH] Block que... |
497 |
struct blk_trace *blk_trace; |
6c5c93415 [PATCH] ifdef blk... |
498 |
#endif |
1da177e4c Linux-2.6.12-rc2 |
499 |
/* |
4913efe45 block: deprecate ... |
500 |
* for flush operations |
1da177e4c Linux-2.6.12-rc2 |
501 |
*/ |
7c94e1c15 block: introduce ... |
502 |
struct blk_flush_queue *fq; |
483f4afc4 [PATCH] fix sysfs... |
503 |
|
6fca6a611 blk-mq: add helpe... |
504 505 |
struct list_head requeue_list; spinlock_t requeue_lock; |
2849450ad blk-mq: introduce... |
506 |
struct delayed_work requeue_work; |
6fca6a611 blk-mq: add helpe... |
507 |
|
483f4afc4 [PATCH] fix sysfs... |
508 |
struct mutex sysfs_lock; |
d351af01b bsg: bind bsg to ... |
509 |
|
d732580b4 block: implement ... |
510 |
int bypass_depth; |
4ecd4fef3 block: use an ato... |
511 |
atomic_t mq_freeze_depth; |
d732580b4 block: implement ... |
512 |
|
d351af01b bsg: bind bsg to ... |
513 |
#if defined(CONFIG_BLK_DEV_BSG) |
aa387cc89 block: add bsg he... |
514 515 |
bsg_job_fn *bsg_job_fn; int bsg_job_size; |
d351af01b bsg: bind bsg to ... |
516 517 |
struct bsg_class_device bsg_dev; #endif |
e43473b7f blkio: Core imple... |
518 519 520 521 522 |
#ifdef CONFIG_BLK_DEV_THROTTLING /* Throttle data */ struct throtl_data *td; #endif |
548bc8e1b block: RCU free r... |
523 |
struct rcu_head rcu_head; |
320ae51fe blk-mq: new multi... |
524 |
wait_queue_head_t mq_freeze_wq; |
3ef28e83a block: generic re... |
525 |
struct percpu_ref q_usage_counter; |
320ae51fe blk-mq: new multi... |
526 |
struct list_head all_q_node; |
0d2602ca3 blk-mq: improve s... |
527 528 529 |
struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; |
54efd50bf block: make gener... |
530 |
struct bio_set *bio_split; |
4593fdbe7 blk-mq: fix sysfs... |
531 532 |
bool mq_sysfs_init_done; |
1da177e4c Linux-2.6.12-rc2 |
533 |
}; |
1da177e4c Linux-2.6.12-rc2 |
534 535 |
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ |
1faa16d22 block: change the... |
536 537 |
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ |
3f3299d5c block: Rename que... |
538 |
#define QUEUE_FLAG_DYING 5 /* queue being torn down */ |
d732580b4 block: implement ... |
539 |
#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */ |
c21e6beba block: get rid of... |
540 541 |
#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ #define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ |
5757a6d76 block: strict rq_... |
542 |
#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */ |
c21e6beba block: get rid of... |
543 544 545 |
#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */ #define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */ #define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */ |
88e740f16 block: add queue ... |
546 |
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ |
c21e6beba block: get rid of... |
547 548 549 550 |
#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */ #define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */ #define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ |
288dab8a3 block: add a sepa... |
551 |
#define QUEUE_FLAG_SECERASE 17 /* supports secure erase */ |
5757a6d76 block: strict rq_... |
552 |
#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ |
c246e80d8 block: Avoid that... |
553 |
#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ |
320ae51fe blk-mq: new multi... |
554 |
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ |
05f1dd531 block: add queue ... |
555 |
#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ |
05229beed block: add block ... |
556 |
#define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */ |
93e9d8e83 block: add abilit... |
557 558 |
#define QUEUE_FLAG_WC 23 /* Write back caching */ #define QUEUE_FLAG_FUA 24 /* device supports FUA writes */ |
c888a8f95 block: kill off q... |
559 |
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ |
163d4baae block: add QUEUE_... |
560 |
#define QUEUE_FLAG_DAX 26 /* device supports DAX */ |
cf43e6be8 block: add scalab... |
561 |
#define QUEUE_FLAG_STATS 27 /* track rq completion times */ |
bc58ba946 block: add sysfs ... |
562 563 |
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
01e97f6b8 block: enable rq ... |
564 |
(1 << QUEUE_FLAG_STACKABLE) | \ |
e2e1a148b block: add sysfs ... |
565 566 |
(1 << QUEUE_FLAG_SAME_COMP) | \ (1 << QUEUE_FLAG_ADD_RANDOM)) |
797e7dbbe [BLOCK] reimpleme... |
567 |
|
94eddfbea blk-mq: ensure th... |
568 |
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
ad9cf3bbd block: mark blk-m... |
569 |
(1 << QUEUE_FLAG_STACKABLE) | \ |
8e0b60b96 blk-mq: enable po... |
570 571 |
(1 << QUEUE_FLAG_SAME_COMP) | \ (1 << QUEUE_FLAG_POLL)) |
94eddfbea blk-mq: ensure th... |
572 |
|
8bcb6c7d4 block: use lockde... |
573 |
static inline void queue_lockdep_assert_held(struct request_queue *q) |
8f45c1a58 block: fix queue ... |
574 |
{ |
8bcb6c7d4 block: use lockde... |
575 576 |
if (q->queue_lock) lockdep_assert_held(q->queue_lock); |
8f45c1a58 block: fix queue ... |
577 |
} |
75ad23bc0 block: make queue... |
578 579 580 581 582 |
static inline void queue_flag_set_unlocked(unsigned int flag, struct request_queue *q) { __set_bit(flag, &q->queue_flags); } |
e48ec6900 block: extend que... |
583 584 585 |
static inline int queue_flag_test_and_clear(unsigned int flag, struct request_queue *q) { |
8bcb6c7d4 block: use lockde... |
586 |
queue_lockdep_assert_held(q); |
e48ec6900 block: extend que... |
587 588 589 590 591 592 593 594 595 596 597 598 |
if (test_bit(flag, &q->queue_flags)) { __clear_bit(flag, &q->queue_flags); return 1; } return 0; } static inline int queue_flag_test_and_set(unsigned int flag, struct request_queue *q) { |
8bcb6c7d4 block: use lockde... |
599 |
queue_lockdep_assert_held(q); |
e48ec6900 block: extend que... |
600 601 602 603 604 605 606 607 |
if (!test_bit(flag, &q->queue_flags)) { __set_bit(flag, &q->queue_flags); return 0; } return 1; } |
75ad23bc0 block: make queue... |
608 609 |
static inline void queue_flag_set(unsigned int flag, struct request_queue *q) { |
8bcb6c7d4 block: use lockde... |
610 |
queue_lockdep_assert_held(q); |
75ad23bc0 block: make queue... |
611 612 613 614 615 616 617 618 |
__set_bit(flag, &q->queue_flags); } static inline void queue_flag_clear_unlocked(unsigned int flag, struct request_queue *q) { __clear_bit(flag, &q->queue_flags); } |
0a7ae2ff0 block: change the... |
619 620 621 622 |
static inline int queue_in_flight(struct request_queue *q) { return q->in_flight[0] + q->in_flight[1]; } |
75ad23bc0 block: make queue... |
623 624 |
static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) { |
8bcb6c7d4 block: use lockde... |
625 |
queue_lockdep_assert_held(q); |
75ad23bc0 block: make queue... |
626 627 |
__clear_bit(flag, &q->queue_flags); } |
1da177e4c Linux-2.6.12-rc2 |
628 629 |
/* Queue state predicates: each tests one QUEUE_FLAG_* bit in queue_flags. */
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
1da177e4c Linux-2.6.12-rc2 |
646 |
|
33659ebba block: remove wra... |
647 648 649 650 651 |
/* True if the request must not be retried on any class of failure. */
#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

/* Started filesystem requests are the ones we account I/O statistics for. */
#define blk_account_rq(rq) \
	(((rq)->rq_flags & RQF_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))
33659ebba block: remove wra... |
654 |
|
ab780f1ec block: inherit CP... |
655 |
#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

/* Data direction of a request, derived from its operation. */
#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)
1da177e4c Linux-2.6.12-rc2 |
662 |
|
49fd524f9 bsg: update check... |
663 664 665 666 667 668 669 670 |
/*
 * Driver can handle struct request, if it either has an old style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}
e692cb668 block: Deprecate ... |
671 672 673 674 |
/* Whether the queue allows segment clustering (from queue limits). */
static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}
797476b88 block: Add 'zoned... |
675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 |
static inline enum blk_zoned_model blk_queue_zoned_model(struct request_queue *q) { return q->limits.zoned; } static inline bool blk_queue_is_zoned(struct request_queue *q) { switch (blk_queue_zoned_model(q)) { case BLK_ZONED_HA: case BLK_ZONED_HM: return true; default: return false; } } |
6a0cb1bc1 block: Implement ... |
691 692 693 694 |
/* Zone size in sectors; 0 if the device is not zoned. */
static inline unsigned int blk_queue_zone_size(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
1faa16d22 block: change the... |
695 696 |
static inline bool rq_is_sync(struct request *rq) { |
ef295ecf0 block: better op ... |
697 |
return op_is_sync(rq->cmd_flags); |
1faa16d22 block: change the... |
698 |
} |
5b788ce3e block: prepare fo... |
699 |
/* Test whether the sync or async half of a request list is marked full. */
static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}
5b788ce3e block: prepare fo... |
705 |
/* Mark the sync or async half of a request list as full. */
static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}
5b788ce3e block: prepare fo... |
711 |
/* Clear the full mark on the sync or async half of a request list. */
static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}
e2a60da74 block: Clean up s... |
717 718 719 720 |
static inline bool rq_mergeable(struct request *rq) { if (rq->cmd_type != REQ_TYPE_FS) return false; |
1da177e4c Linux-2.6.12-rc2 |
721 |
|
3a5e02ced block, drivers: a... |
722 723 |
if (req_op(rq) == REQ_OP_FLUSH) return false; |
a6f0788ec block: add suppor... |
724 725 |
if (req_op(rq) == REQ_OP_WRITE_ZEROES) return false; |
e2a60da74 block: Clean up s... |
726 |
if (rq->cmd_flags & REQ_NOMERGE_FLAGS) |
e80640213 block: split out ... |
727 728 |
return false; if (rq->rq_flags & RQF_NOMERGE_FLAGS) |
e2a60da74 block: Clean up s... |
729 730 731 732 |
return false; return true; } |
1da177e4c Linux-2.6.12-rc2 |
733 |
|
4363ac7c1 block: Implement ... |
734 735 736 737 738 739 740 |
/* Two WRITE SAME bios may merge only if they carry the same payload page. */
static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	return bio_data(a) == bio_data(b);
}
d278d4a88 block: add code t... |
741 742 743 744 745 746 747 |
/* Effective queue depth: explicit queue_depth if set, else nr_requests. */
static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}
1da177e4c Linux-2.6.12-rc2 |
748 |
/*
 * q->prep_rq_fn return values
 */
enum {
	BLKPREP_OK,		/* serve it */
	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
	BLKPREP_DEFER,		/* leave on queue */
	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
};
1da177e4c Linux-2.6.12-rc2 |
757 758 759 760 761 762 763 764 765 766 |
extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */
#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))
1da177e4c Linux-2.6.12-rc2 |
775 |
|
3d6392cfb bsg: support for ... |
776 777 778 779 |
/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)
3d6392cfb bsg: support for ... |
781 |
|
2a7326b5b CONFIG_BOUNCE to ... |
782 |
/*
 * Bounce buffering is only compiled in with CONFIG_BOUNCE; otherwise the
 * helpers collapse to no-ops.
 */
#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */
152e283fd block: introduce ... |
794 795 796 797 |
/*
 * Pre-allocated page pool description passed to blk_rq_map_user() and
 * blk_rq_map_user_iov() for mapping data into a request.
 */
struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};
5705f7021 Introduce rq_for_... |
802 |
struct req_iterator { |
7988613b0 block: Convert bi... |
803 |
struct bvec_iter iter; |
5705f7021 Introduce rq_for_... |
804 805 806 807 |
struct bio *bio; }; /* This should not be used directly - use rq_for_each_segment */ |
1e4280791 block: reduce sta... |
808 809 |
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

/* Walk every bio_vec segment of a request through a struct req_iterator. */
#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

/* True when the iterator sits on the final segment of the final bio. */
#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
5705f7021 Introduce rq_for_... |
820 |
|
2d4dc890b block: add helper... |
821 822 823 824 825 826 827 828 829 830 |
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" #endif #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE extern void rq_flush_dcache_pages(struct request *rq); #else static inline void rq_flush_dcache_pages(struct request *rq) { } #endif |
2af3a8159 block: Add vfs_ms... |
831 832 833 834 835 836 837 838 839 840 |
#ifdef CONFIG_PRINTK
#define vfs_msg(sb, level, fmt, ...)				\
	__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
#else
#define vfs_msg(sb, level, fmt, ...)				\
do {								\
	no_printk(fmt, ##__VA_ARGS__);				\
	__vfs_msg(sb, "", " ");					\
} while (0)
#endif
1da177e4c Linux-2.6.12-rc2 |
841 842 |
extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); |
dece16353 block: change ->m... |
843 |
extern blk_qc_t generic_make_request(struct bio *bio); |
2a4aa30c5 block: rename and... |
844 |
extern void blk_rq_init(struct request_queue *q, struct request *rq); |
1da177e4c Linux-2.6.12-rc2 |
845 |
extern void blk_put_request(struct request *); |
165125e1e [BLOCK] Get rid o... |
846 |
extern void __blk_put_request(struct request_queue *, struct request *); |
165125e1e [BLOCK] Get rid o... |
847 |
extern struct request *blk_get_request(struct request_queue *, int, gfp_t); |
f27b087b8 block: add blk_rq... |
848 |
extern void blk_rq_set_block_pc(struct request *); |
165125e1e [BLOCK] Get rid o... |
849 |
extern void blk_requeue_request(struct request_queue *, struct request *); |
ef9e3facd block: add lld bu... |
850 |
extern int blk_lld_busy(struct request_queue *q); |
78d8e58a0 Revert "block, dm... |
851 852 853 854 855 |
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, int (*bio_ctr)(struct bio *, struct bio *, void *), void *data); extern void blk_rq_unprep_clone(struct request *rq); |
82124d603 block: add reques... |
856 857 |
extern int blk_insert_cloned_request(struct request_queue *q, struct request *rq); |
98d61d5b1 block: simplify a... |
858 |
extern int blk_rq_append_bio(struct request *rq, struct bio *bio); |
3cca6dc1c block: add API fo... |
859 |
extern void blk_delay_queue(struct request_queue *, unsigned long); |
54efd50bf block: make gener... |
860 861 |
extern void blk_queue_split(struct request_queue *, struct bio **, struct bio_set *); |
165125e1e [BLOCK] Get rid o... |
862 |
extern void blk_recount_segments(struct request_queue *, struct bio *); |
0bfc96cb7 block: fail SCSI ... |
863 |
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); |
577ebb374 block: add and us... |
864 865 |
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, unsigned int, void __user *); |
74f3c8aff [PATCH] switch sc... |
866 867 |
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, unsigned int, void __user *); |
e915e872e [PATCH] switch sg... |
868 869 |
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, struct scsi_ioctl_command __user *); |
3fcfab16c [PATCH] separate ... |
870 |
|
6f3b0e8bc blk-mq: add a fla... |
871 |
extern int blk_queue_enter(struct request_queue *q, bool nowait); |
2e6edc953 block: protect rw... |
872 |
extern void blk_queue_exit(struct request_queue *q); |
165125e1e [BLOCK] Get rid o... |
873 |
extern void blk_start_queue(struct request_queue *q); |
21491412f block: add blk_st... |
874 |
extern void blk_start_queue_async(struct request_queue *q); |
165125e1e [BLOCK] Get rid o... |
875 |
extern void blk_stop_queue(struct request_queue *q); |
1da177e4c Linux-2.6.12-rc2 |
876 |
extern void blk_sync_queue(struct request_queue *q); |
165125e1e [BLOCK] Get rid o... |
877 |
extern void __blk_stop_queue(struct request_queue *q); |
24ecfbe27 block: add blk_ru... |
878 |
extern void __blk_run_queue(struct request_queue *q); |
a7928c157 block: move PM re... |
879 |
extern void __blk_run_queue_uncond(struct request_queue *q); |
165125e1e [BLOCK] Get rid o... |
880 |
extern void blk_run_queue(struct request_queue *); |
c21e6beba block: get rid of... |
881 |
extern void blk_run_queue_async(struct request_queue *q); |
6a83e74d2 blk-mq: Introduce... |
882 |
extern void blk_mq_quiesce_queue(struct request_queue *q); |
a3bce90ed block: add gfp_ma... |
883 |
extern int blk_rq_map_user(struct request_queue *, struct request *, |
152e283fd block: introduce ... |
884 885 |
struct rq_map_data *, void __user *, unsigned long, gfp_t); |
8e5cfc45e [PATCH] Fixup blk... |
886 |
extern int blk_rq_unmap_user(struct bio *); |
165125e1e [BLOCK] Get rid o... |
887 888 |
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); extern int blk_rq_map_user_iov(struct request_queue *, struct request *, |
26e49cfc7 block: pass iov_i... |
889 890 |
struct rq_map_data *, const struct iov_iter *, gfp_t); |
165125e1e [BLOCK] Get rid o... |
891 |
extern int blk_execute_rq(struct request_queue *, struct gendisk *, |
994ca9a19 [PATCH] update bl... |
892 |
struct request *, int); |
165125e1e [BLOCK] Get rid o... |
893 |
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, |
15fc858a0 [BLOCK] Correct b... |
894 |
struct request *, int, rq_end_io_fn *); |
6e39b69e7 [SCSI] export blk... |
895 |
|
bbd7bb701 block: move poll ... |
896 |
bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie); |
05229beed block: add block ... |
897 |
|
165125e1e [BLOCK] Get rid o... |
898 |
static inline struct request_queue *bdev_get_queue(struct block_device *bdev) |
1da177e4c Linux-2.6.12-rc2 |
899 |
{ |
ff9ea3238 block, bdi: an ac... |
900 |
return bdev->bd_disk->queue; /* this is never NULL */ |
1da177e4c Linux-2.6.12-rc2 |
901 |
} |
1da177e4c Linux-2.6.12-rc2 |
902 |
/* |
80a761fd3 block: implement ... |
903 904 905 906 907 908 |
* blk_rq_pos() : the current sector * blk_rq_bytes() : bytes left in the entire request * blk_rq_cur_bytes() : bytes left in the current segment * blk_rq_err_bytes() : bytes left till the next error boundary * blk_rq_sectors() : sectors left in the entire request * blk_rq_cur_sectors() : sectors left in the current segment |
5efccd17c block: reorder re... |
909 |
*/ |
5b93629b4 block: implement ... |
910 911 |
static inline sector_t blk_rq_pos(const struct request *rq) { |
a2dec7b36 block: hide reque... |
912 |
return rq->__sector; |
2e46e8b27 block: drop reque... |
913 914 915 916 |
} static inline unsigned int blk_rq_bytes(const struct request *rq) { |
a2dec7b36 block: hide reque... |
917 |
return rq->__data_len; |
5b93629b4 block: implement ... |
918 |
} |
2e46e8b27 block: drop reque... |
919 920 921 922 |
static inline int blk_rq_cur_bytes(const struct request *rq) { return rq->bio ? bio_cur_bytes(rq->bio) : 0; } |
5efccd17c block: reorder re... |
923 |
|
80a761fd3 block: implement ... |
924 |
extern unsigned int blk_rq_err_bytes(const struct request *rq);

/* Remaining length of the whole request, in 512-byte sectors. */
static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

/* Remaining length of the current segment, in 512-byte sectors. */
static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}
f31dc1cd4 block: Consolidat... |
934 |
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, |
8fe0d473f block: convert me... |
935 |
int op) |
f31dc1cd4 block: Consolidat... |
936 |
{ |
7afafc8a4 block: Fix secure... |
937 |
if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) |
871dd9286 block: fix max di... |
938 |
return min(q->limits.max_discard_sectors, UINT_MAX >> 9); |
f31dc1cd4 block: Consolidat... |
939 |
|
8fe0d473f block: convert me... |
940 |
if (unlikely(op == REQ_OP_WRITE_SAME)) |
4363ac7c1 block: Implement ... |
941 |
return q->limits.max_write_same_sectors; |
a6f0788ec block: add suppor... |
942 943 |
if (unlikely(op == REQ_OP_WRITE_ZEROES)) return q->limits.max_write_zeroes_sectors; |
f31dc1cd4 block: Consolidat... |
944 945 |
return q->limits.max_sectors; } |
762380ad9 block: add notion... |
946 947 948 949 950 951 952 953 |
/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	/* chunk_sectors is a power of two: remaining room in this chunk */
	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}
17007f399 block: Fix front ... |
959 960 |
/*
 * Maximum number of sectors this request may carry, taking both the
 * per-operation limit and (for chunked devices) the chunk boundary at
 * @offset into account.
 */
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset),
			blk_queue_get_max_sectors(q, req_op(rq)));
}
75afb3529 block: Add nr_bio... |
973 974 975 976 977 978 979 980 981 982 |
/* Count the bios chained on a request. */
static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}
5efccd17c block: reorder re... |
983 |
/*
 * blk_rq_set_prio - associate a request with prio from ioc
 * @rq: request of interest
 * @ioc: target iocontext
 *
 * Associate request prio with ioc prio so request based drivers
 * can leverage priority information.
 */
static inline void blk_rq_set_prio(struct request *rq, struct io_context *ioc)
{
	if (ioc)
		rq->ioprio = ioc->ioprio;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
2e60e0229 block: clean up r... |
1017 1018 |
extern bool blk_update_request(struct request *rq, int error, unsigned int nr_bytes); |
12120077b block: export blk... |
1019 |
extern void blk_finish_request(struct request *rq, int error); |
b1f744937 block: move compl... |
1020 1021 1022 1023 |
extern bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes); extern void blk_end_request_all(struct request *rq, int error); extern bool blk_end_request_cur(struct request *rq, int error); |
80a761fd3 block: implement ... |
1024 |
extern bool blk_end_request_err(struct request *rq, int error); |
b1f744937 block: move compl... |
1025 1026 1027 1028 |
extern bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes); extern void __blk_end_request_all(struct request *rq, int error); extern bool __blk_end_request_cur(struct request *rq, int error); |
80a761fd3 block: implement ... |
1029 |
extern bool __blk_end_request_err(struct request *rq, int error); |
2e60e0229 block: clean up r... |
1030 |
|
ff856bad6 [BLOCK] ll_rw_blk... |
1031 |
extern void blk_complete_request(struct request *); |
242f9dcb8 block: unify requ... |
1032 1033 |
extern void __blk_complete_request(struct request *); extern void blk_abort_request(struct request *); |
28018c242 block: implement ... |
1034 |
extern void blk_unprep_request(struct request *); |
ff856bad6 [BLOCK] ll_rw_blk... |
1035 |
|
1da177e4c Linux-2.6.12-rc2 |
1036 |
/* |
1da177e4c Linux-2.6.12-rc2 |
1037 1038 |
* Access functions for manipulating queue properties */ |
165125e1e [BLOCK] Get rid o... |
1039 |
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, |
1946089a1 [PATCH] NUMA awar... |
1040 |
spinlock_t *lock, int node_id); |
165125e1e [BLOCK] Get rid o... |
1041 |
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); |
01effb0dc block: allow init... |
1042 1043 |
extern struct request_queue *blk_init_allocated_queue(struct request_queue *, request_fn_proc *, spinlock_t *); |
165125e1e [BLOCK] Get rid o... |
1044 1045 1046 |
extern void blk_cleanup_queue(struct request_queue *); extern void blk_queue_make_request(struct request_queue *, make_request_fn *); extern void blk_queue_bounce_limit(struct request_queue *, u64); |
086fa5ff0 block: Rename blk... |
1047 |
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); |
762380ad9 block: add notion... |
1048 |
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); |
8a78362c4 block: Consolidat... |
1049 |
extern void blk_queue_max_segments(struct request_queue *, unsigned short); |
165125e1e [BLOCK] Get rid o... |
1050 |
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); |
67efc9258 block: allow larg... |
1051 1052 |
extern void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors); |
4363ac7c1 block: Implement ... |
1053 1054 |
extern void blk_queue_max_write_same_sectors(struct request_queue *q, unsigned int max_write_same_sectors); |
a6f0788ec block: add suppor... |
1055 1056 |
/* Parameter renamed: was misleadingly called max_write_same_sectors. */
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors);
e1defc4ff block: Do away wi... |
1057 |
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); |
892b6f90d block: Ensure phy... |
1058 |
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); |
c72758f33 block: Export I/O... |
1059 1060 |
extern void blk_queue_alignment_offset(struct request_queue *q, unsigned int alignment); |
7c958e326 block: Add a wrap... |
1061 |
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); |
c72758f33 block: Export I/O... |
1062 |
extern void blk_queue_io_min(struct request_queue *q, unsigned int min); |
3c5820c74 block: Optimal I/... |
1063 |
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); |
c72758f33 block: Export I/O... |
1064 |
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); |
d278d4a88 block: add code t... |
1065 |
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); |
e475bba2f block: Introduce ... |
1066 |
extern void blk_set_default_limits(struct queue_limits *lim); |
b1bd055d3 block: Introduce ... |
1067 |
extern void blk_set_stacking_limits(struct queue_limits *lim); |
c72758f33 block: Export I/O... |
1068 1069 |
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, sector_t offset); |
17be8c245 block: bdev_stack... |
1070 1071 |
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, sector_t offset); |
c72758f33 block: Export I/O... |
1072 1073 |
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, sector_t offset); |
165125e1e [BLOCK] Get rid o... |
1074 |
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); |
e3790c7d4 block: separate o... |
1075 |
extern void blk_queue_dma_pad(struct request_queue *, unsigned int); |
27f8221af block: add blk_qu... |
1076 |
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); |
2fb98e841 block: implement ... |
1077 1078 1079 |
extern int blk_queue_dma_drain(struct request_queue *q, dma_drain_needed_fn *dma_drain_needed, void *buf, unsigned int size); |
ef9e3facd block: add lld bu... |
1080 |
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); |
165125e1e [BLOCK] Get rid o... |
1081 |
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); |
03100aada block: Replace SG... |
1082 |
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); |
165125e1e [BLOCK] Get rid o... |
1083 |
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); |
28018c242 block: implement ... |
1084 |
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); |
165125e1e [BLOCK] Get rid o... |
1085 |
extern void blk_queue_dma_alignment(struct request_queue *, int); |
11c3e689f [SCSI] block: Int... |
1086 |
extern void blk_queue_update_dma_alignment(struct request_queue *, int); |
165125e1e [BLOCK] Get rid o... |
1087 |
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); |
242f9dcb8 block: unify requ... |
1088 1089 |
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); |
f38769309 block: add a non-... |
1090 |
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); |
93e9d8e83 block: add abilit... |
1091 |
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); |
1da177e4c Linux-2.6.12-rc2 |
1092 |
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); |
1da177e4c Linux-2.6.12-rc2 |
1093 |
|
f9d03f96b block: improve ha... |
1094 1095 1096 1097 1098 1099 |
/*
 * Number of physical segments of a request.  Requests carrying a special
 * payload are reported as a single segment regardless of nr_phys_segments.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}
165125e1e [BLOCK] Get rid o... |
1100 |
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); |
1da177e4c Linux-2.6.12-rc2 |
1101 |
extern void blk_dump_rq_flags(struct request *, char *); |
1da177e4c Linux-2.6.12-rc2 |
1102 |
extern long nr_blockdev_pages(void); |
1da177e4c Linux-2.6.12-rc2 |
1103 |
|
09ac46c42 block: misc updat... |
1104 |
bool __must_check blk_get_queue(struct request_queue *); |
165125e1e [BLOCK] Get rid o... |
1105 1106 1107 |
struct request_queue *blk_alloc_queue(gfp_t); struct request_queue *blk_alloc_queue_node(gfp_t, int); extern void blk_put_queue(struct request_queue *); |
3f21c265c block: add blk_se... |
1108 |
extern void blk_set_queue_dying(struct request_queue *); |
1da177e4c Linux-2.6.12-rc2 |
1109 |
|
316cc67d5 block: document b... |
1110 |
/* |
6c9546675 block: add runtim... |
1111 1112 |
* block layer runtime pm functions */ |
47fafbc70 block / PM: Repla... |
1113 |
#ifdef CONFIG_PM |
6c9546675 block: add runtim... |
1114 1115 1116 1117 1118 |
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); extern int blk_pre_runtime_suspend(struct request_queue *q); extern void blk_post_runtime_suspend(struct request_queue *q, int err); extern void blk_pre_runtime_resume(struct request_queue *q); extern void blk_post_runtime_resume(struct request_queue *q, int err); |
d07ab6d11 block: Add blk_se... |
1119 |
extern void blk_set_runtime_active(struct request_queue *q); |
6c9546675 block: add runtim... |
1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 |
#else static inline void blk_pm_runtime_init(struct request_queue *q, struct device *dev) {} static inline int blk_pre_runtime_suspend(struct request_queue *q) { return -ENOSYS; } static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} static inline void blk_pre_runtime_resume(struct request_queue *q) {} static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} |
9a05e7541 block: Change ext... |
1130 |
static inline void blk_set_runtime_active(struct request_queue *q) {} |
6c9546675 block: add runtim... |
1131 1132 1133 |
#endif /* |
75df71362 block: document b... |
1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 |
* blk_plug permits building a queue of related requests by holding the I/O * fragments for a short period. This allows merging of sequential requests * into single larger request. As the requests are moved from a per-task list to * the device's request_queue in a batch, this results in improved scalability * as the lock contention for request_queue lock is reduced. * * It is ok not to disable preemption when adding the request to the plug list * or when attempting a merge, because blk_schedule_flush_list() will only flush * the plug list when the task sleeps by itself. For details, please see * schedule() where blk_schedule_flush_plug() is called. |
316cc67d5 block: document b... |
1144 |
*/ |
73c101011 block: initial pa... |
1145 |
struct blk_plug { |
75df71362 block: document b... |
1146 |
struct list_head list; /* requests */ |
320ae51fe blk-mq: new multi... |
1147 |
struct list_head mq_list; /* blk-mq requests */ |
75df71362 block: document b... |
1148 |
struct list_head cb_list; /* md requires an unplug callback */ |
73c101011 block: initial pa... |
1149 |
}; |
55c022bbd block: avoid buil... |
1150 |
#define BLK_MAX_REQUEST_COUNT	16
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)
55c022bbd block: avoid buil... |
1152 |
|
9cbb17508 blk: centralize n... |
1153 |
struct blk_plug_cb; |
74018dc30 blk: pass from_sc... |
1154 |
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); |
048c9374a block: Enhance ne... |
1155 1156 |
struct blk_plug_cb { struct list_head list; |
9cbb17508 blk: centralize n... |
1157 1158 |
blk_plug_cb_fn callback; void *data; |
048c9374a block: Enhance ne... |
1159 |
}; |
9cbb17508 blk: centralize n... |
1160 1161 |
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, int size); |
73c101011 block: initial pa... |
1162 1163 |
extern void blk_start_plug(struct blk_plug *); extern void blk_finish_plug(struct blk_plug *); |
f6603783f block: only force... |
1164 |
extern void blk_flush_plug_list(struct blk_plug *, bool); |
73c101011 block: initial pa... |
1165 1166 1167 1168 |
static inline void blk_flush_plug(struct task_struct *tsk) { struct blk_plug *plug = tsk->plug; |
88b996cd0 block: cleanup th... |
1169 |
if (plug) |
a237c1c5b block: let io_sch... |
1170 1171 1172 1173 1174 1175 1176 1177 |
blk_flush_plug_list(plug, false); } static inline void blk_schedule_flush_plug(struct task_struct *tsk) { struct blk_plug *plug = tsk->plug; if (plug) |
f6603783f block: only force... |
1178 |
blk_flush_plug_list(plug, true); |
73c101011 block: initial pa... |
1179 1180 1181 1182 1183 |
} static inline bool blk_needs_flush_plug(struct task_struct *tsk) { struct blk_plug *plug = tsk->plug; |
320ae51fe blk-mq: new multi... |
1184 1185 1186 1187 |
return plug && (!list_empty(&plug->list) || !list_empty(&plug->mq_list) || !list_empty(&plug->cb_list)); |
73c101011 block: initial pa... |
1188 |
} |
1da177e4c Linux-2.6.12-rc2 |
1189 1190 1191 |
/* * tag stuff */ |
165125e1e [BLOCK] Get rid o... |
1192 1193 1194 |
extern int blk_queue_start_tag(struct request_queue *, struct request *); extern struct request *blk_queue_find_tag(struct request_queue *, int); extern void blk_queue_end_tag(struct request_queue *, struct request *); |
ee1b6f7af block: support di... |
1195 |
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int); |
165125e1e [BLOCK] Get rid o... |
1196 1197 1198 |
extern void blk_queue_free_tags(struct request_queue *); extern int blk_queue_resize_tags(struct request_queue *, int); extern void blk_queue_invalidate_tags(struct request_queue *); |
ee1b6f7af block: support di... |
1199 |
extern struct blk_queue_tag *blk_init_tags(int, int); |
492dfb489 [SCSI] block: add... |
1200 |
extern void blk_free_tags(struct blk_queue_tag *); |
1da177e4c Linux-2.6.12-rc2 |
1201 |
|
f583f4924 [PATCH] helper fu... |
1202 1203 1204 1205 1206 1207 1208 |
/*
 * Look up the request currently associated with @tag in @bqt, or NULL when
 * the tag map is absent or the tag is out of range.
 */
static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (likely(bqt != NULL && tag < bqt->real_max_depth))
		return bqt->tag_index[tag];

	return NULL;
}
dd3932edd block: remove BLK... |
1209 |
|
e950fdf71 block: introduce ... |
1210 1211 1212 |
#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ #define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */ |
dd3932edd block: remove BLK... |
1213 1214 |
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); |
fbd9b09a1 blkdev: generaliz... |
1215 1216 |
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); |
38f252553 block: add __blkd... |
1217 |
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, |
288dab8a3 block: add a sepa... |
1218 |
sector_t nr_sects, gfp_t gfp_mask, int flags, |
469e3216e block discard: us... |
1219 |
struct bio **biop); |
4363ac7c1 block: Implement ... |
1220 1221 |
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct page *page); |
e73c23ff7 block: add async ... |
1222 1223 1224 |
extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, bool discard); |
3f14d792f blkdev: add blkde... |
1225 |
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, |
d93ba7a5a block: Add discar... |
1226 |
sector_t nr_sects, gfp_t gfp_mask, bool discard); |
2cf6d26a3 block: pass gfp_m... |
1227 1228 |
/*
 * Issue a discard for @nr_blocks filesystem blocks starting at @block,
 * converting filesystem block units to 512-byte sectors.
 */
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	unsigned int sects_per_block_shift = sb->s_blocksize_bits - 9;

	return blkdev_issue_discard(sb->s_bdev,
				    block << sects_per_block_shift,
				    nr_blocks << sects_per_block_shift,
				    gfp_mask, flags);
}

/*
 * Zero out @nr_blocks filesystem blocks starting at @block; discard-based
 * zeroing is allowed (last argument is true).
 */
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	unsigned int sects_per_block_shift = sb->s_blocksize_bits - 9;

	return blkdev_issue_zeroout(sb->s_bdev,
				    block << sects_per_block_shift,
				    nr_blocks << sects_per_block_shift,
				    gfp_mask, true);
}
1da177e4c Linux-2.6.12-rc2 |
1242 |
|
018e04468 block: get rid of... |
1243 |
extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); |
0b07de85a allow userspace t... |
1244 |
|
eb28d31bc block: Add BLK_ p... |
1245 1246 1247 |
/* Default queue limits, used to seed struct queue_limits. */
enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,		/* default max_segments */
	BLK_SAFE_MAX_SECTORS	= 255,		/* conservative max_sectors */
	BLK_DEF_MAX_SECTORS	= 2560,		/* default max_sectors (1280k) */
	BLK_MAX_SEGMENT_SIZE	= 65536,	/* default max_segment_size */
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,	/* default seg_boundary_mask */
};
0e435ac26 block: fix settin... |
1252 |
|
1da177e4c Linux-2.6.12-rc2 |
1253 |
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) |
ae03bf639 block: Use access... |
1254 1255 |
/*
 * Accessors for struct queue_limits fields.  None of these accept a NULL
 * queue; callers must hold a valid request_queue.
 */

/* Highest pfn the device can DMA to; pages above it are bounced. */
static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

/* Mask a single segment's physical address range may not cross. */
static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

/* Virtual-address boundary between bvecs; 0 if the device has none. */
static inline unsigned long queue_virt_boundary(struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

/* Soft cap on sectors per request (<= max_hw_sectors). */
static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

/* Hard hardware cap on sectors per request. */
static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

/* Max scatter-gather segments per request. */
static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

/* Max size in bytes of a single scatter-gather segment. */
static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}
e1defc4ff block: Do away wi... |
1285 |
static inline unsigned short queue_logical_block_size(struct request_queue *q) |
1da177e4c Linux-2.6.12-rc2 |
1286 1287 |
{ int retval = 512; |
025146e13 block: Move queue... |
1288 1289 |
if (q && q->limits.logical_block_size) retval = q->limits.logical_block_size; |
1da177e4c Linux-2.6.12-rc2 |
1290 1291 1292 |
return retval; } |
e1defc4ff block: Do away wi... |
1293 |
static inline unsigned short bdev_logical_block_size(struct block_device *bdev) |
1da177e4c Linux-2.6.12-rc2 |
1294 |
{ |
e1defc4ff block: Do away wi... |
1295 |
return queue_logical_block_size(bdev_get_queue(bdev)); |
1da177e4c Linux-2.6.12-rc2 |
1296 |
} |
c72758f33 block: Export I/O... |
1297 1298 1299 1300 |
/* Smallest unit the device can write without read-modify-write, in bytes. */
static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

/* Preferred minimum I/O size, in bytes. */
static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

/* Optimal I/O size (e.g. RAID stripe width), in bytes; 0 if unknown. */
static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

/* Alignment offset in bytes, or -1 if the device is known-misaligned. */
static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

/*
 * Alignment offset of @sector relative to @lim's reporting granularity.
 * Granularity is the larger of physical block size and io_min, in bytes;
 * the >> 9 converts it to sectors for sector_div().
 */
static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

/*
 * Alignment offset for @bdev; a partition reports its own precomputed
 * offset, the whole device reports the queue limit.
 */
static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

/* Discard alignment in bytes, or -1 if discard-misaligned. */
static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

/*
 * Discard alignment of @sector relative to @lim, in bytes.
 * Returns 0 when the device does not support discard or reports no
 * discard granularity.
 */
static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> 9;
	granularity = lim->discard_granularity >> 9;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << 9;
}

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->discard_alignment;

	return q->limits.discard_alignment;
}

/* 1 if the device supports discard and guarantees discarded data reads as zero. */
static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}
4363ac7c1 block: Implement ... |
1395 1396 1397 1398 1399 1400 1401 1402 1403 |
static inline unsigned int bdev_write_same(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); if (q) return q->limits.max_write_same_sectors; return 0; } |
a6f0788ec block: add suppor... |
1404 1405 1406 1407 1408 1409 1410 1411 1412 |
static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); if (q) return q->limits.max_write_zeroes_sectors; return 0; } |
797476b88 block: Add 'zoned... |
1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 |
static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); if (q) return blk_queue_zoned_model(q); return BLK_ZONED_NONE; } static inline bool bdev_is_zoned(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); if (q) return blk_queue_is_zoned(q); return false; } |
6a0cb1bc1 block: Implement ... |
1432 1433 1434 1435 1436 1437 1438 1439 1440 |
static inline unsigned int bdev_zone_size(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); if (q) return blk_queue_zone_size(q); return 0; } |
165125e1e [BLOCK] Get rid o... |
1441 |
static inline int queue_dma_alignment(struct request_queue *q) |
1da177e4c Linux-2.6.12-rc2 |
1442 |
{ |
482eb6891 block: allow queu... |
1443 |
return q ? q->dma_alignment : 511; |
1da177e4c Linux-2.6.12-rc2 |
1444 |
} |
144177991 block: fix an add... |
1445 |
static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, |
879040742 block: add blk_rq... |
1446 1447 1448 |
unsigned int len) { unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; |
144177991 block: fix an add... |
1449 |
return !(addr & alignment) && !(len & alignment); |
879040742 block: add blk_rq... |
1450 |
} |
1da177e4c Linux-2.6.12-rc2 |
1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 |
/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	/* log2 of the block size; starts at 9 because size is at least 512 */
	unsigned int bits = 9;

	size >>= 1;
	while (size > 256) {
		bits++;
		size >>= 1;
	}
	return bits;
}
2befb9e36 [PATCH] include/l... |
1461 |
/* Current soft block size of @bdev, in bytes. */
static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

/* True unless the driver declared flushes non-queueable (QUEUE_FLAG_FLUSH_NQ). */
static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
}

/* Page reference returned by read_dev_sector(); release with put_dev_sector(). */
typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	put_page(p.v);
}
e0af29171 block: check virt... |
1477 1478 1479 1480 1481 1482 |
/*
 * Core gap test: a new bvec at @offset after @bprv creates a gap unless it
 * starts at offset 0 and @bprv ends on the queue's virt boundary.
 * Callers must have checked that the queue has a virt boundary.
 */
static inline bool __bvec_gap_to_prev(struct request_queue *q,
				struct bio_vec *bprv, unsigned int offset)
{
	return offset ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
				struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}

/* Would appending @next after @prev create a virt-boundary gap? */
static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
			 struct bio *next)
{
	if (bio_has_data(prev) && queue_virt_boundary(q)) {
		struct bio_vec pb, nb;

		/* only the last bvec of prev and first of next matter */
		bio_get_last_bvec(prev, &pb);
		bio_get_first_bvec(next, &nb);

		return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
	}

	return false;
}

/* Gap check for merging @bio at the back of @req. */
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req->biotail, bio);
}

/* Gap check for merging @bio at the front of @req. */
static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, bio, req->bio);
}
59c3d45e4 block: remove 'q'... |
1518 |
int kblockd_schedule_work(struct work_struct *work); |
ee63cfa7f block: add kblock... |
1519 |
int kblockd_schedule_work_on(int cpu, struct work_struct *work); |
59c3d45e4 block: remove 'q'... |
1520 |
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); |
8ab14595b block: add kblock... |
1521 |
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); |
1da177e4c Linux-2.6.12-rc2 |
1522 |
|
9195291e5 blkio: Increment ... |
1523 |
#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up, until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
/* Record when the request entered the block layer (cgroup accounting). */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

/* Record when the request was issued to the driver (cgroup accounting). */
static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
/* Without CONFIG_BLK_CGROUP the timestamps are not tracked at all. */
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif
1da177e4c Linux-2.6.12-rc2 |
1564 1565 1566 1567 |
#define MODULE_ALIAS_BLOCKDEV(major,minor) \ MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ MODULE_ALIAS("block-major-" __stringify(major) "-*") |
7ba1ba12e block: Block laye... |
1568 |
#if defined(CONFIG_BLK_DEV_INTEGRITY) |
8288f496e block: Add prefix... |
1569 1570 1571 |
/* Flags stored in a gendisk's blk_integrity profile. */
enum blk_integrity_flags {
	BLK_INTEGRITY_VERIFY		= 1 << 0, /* verify PI on read completion */
	BLK_INTEGRITY_GENERATE		= 1 << 1, /* generate PI on write submission */
	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2, /* device itself checks PI */
	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3, /* IP checksum instead of CRC */
};

/*
 * Per-call state handed to an integrity generate/verify function; walks
 * @data_size bytes of @data_buf in @interval-sized chunks, with protection
 * information in @prot_buf and @seed as the first reference tag.
 */
struct blk_integrity_iter {
	void			*prot_buf;
	void			*data_buf;
	sector_t		seed;
	unsigned int		data_size;
	unsigned short		interval;
	const char		*disk_name;	/* for error reporting */
};

typedef int (integrity_processing_fn) (struct blk_integrity_iter *);

/* Named pair of generate/verify callbacks registered for a disk. */
struct blk_integrity_profile {
	integrity_processing_fn		*generate_fn;
	integrity_processing_fn		*verify_fn;
	const char			*name;
};
7ba1ba12e block: Block laye... |
1591 |
|
25520d55c block: Inline blk... |
1592 |
extern void blk_integrity_register(struct gendisk *, struct blk_integrity *); |
7ba1ba12e block: Block laye... |
1593 |
extern void blk_integrity_unregister(struct gendisk *); |
ad7fce931 block: Switch blk... |
1594 |
extern int blk_integrity_compare(struct gendisk *, struct gendisk *); |
13f05c8d8 block/scsi: Provi... |
1595 1596 1597 |
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, struct scatterlist *); extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); |
4eaf99bea block: Don't merg... |
1598 1599 1600 1601 |
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, struct request *); extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, struct bio *); |
7ba1ba12e block: Block laye... |
1602 |
|
25520d55c block: Inline blk... |
1603 |
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) |
b04accc42 block: revert par... |
1604 |
{ |
ac6fc48c9 block: move blk_i... |
1605 |
struct blk_integrity *bi = &disk->queue->integrity; |
25520d55c block: Inline blk... |
1606 1607 1608 1609 1610 |
if (!bi->profile) return NULL; return bi; |
b04accc42 block: revert par... |
1611 |
} |
25520d55c block: Inline blk... |
1612 1613 |
static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev) |
b02739b01 block: gendisk in... |
1614 |
{ |
25520d55c block: Inline blk... |
1615 |
return blk_get_integrity(bdev->bd_disk); |
b02739b01 block: gendisk in... |
1616 |
} |
180b2f95d block: Replace bi... |
1617 |
static inline bool blk_integrity_rq(struct request *rq) |
7ba1ba12e block: Block laye... |
1618 |
{ |
180b2f95d block: Replace bi... |
1619 |
return rq->cmd_flags & REQ_INTEGRITY; |
7ba1ba12e block: Block laye... |
1620 |
} |
13f05c8d8 block/scsi: Provi... |
1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 |
static inline void blk_queue_max_integrity_segments(struct request_queue *q, unsigned int segs) { q->limits.max_integrity_segments = segs; } static inline unsigned short queue_max_integrity_segments(struct request_queue *q) { return q->limits.max_integrity_segments; } |
7f39add3b block: Refuse req... |
1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 |
/*
 * Virt-boundary gap checks for the integrity payloads when merging.
 * NOTE(review): both helpers dereference the bio_integrity() results
 * without NULL checks — presumably callers only invoke them for bios
 * that carry integrity payloads; confirm at the call sites.
 */
static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}
7ba1ba12e block: Block laye... |
1651 |
#else /* CONFIG_BLK_DEV_INTEGRITY */ |
fd83240a6 blockdev: convert... |
1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 |
/*
 * Stubs for !CONFIG_BLK_DEV_INTEGRITY: integrity is never present, merges
 * are never blocked on integrity grounds.
 */
struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
					 struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
/* Merging is always allowed when integrity support is compiled out. */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
					  struct request *r1,
					  struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
					   struct request *r,
					   struct bio *b)
{
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	return false;
}
7ba1ba12e block: Block laye... |
1722 1723 |
#endif /* CONFIG_BLK_DEV_INTEGRITY */ |
b2e0d1625 dax: fix lifetime... |
1724 1725 1726 1727 1728 1729 1730 1731 1732 |
/**
 * struct blk_dax_ctl - control and output parameters for ->direct_access
 * @sector: (input) offset relative to a block_device
 * @addr: (output) kernel virtual address for @sector populated by driver
 * @pfn: (output) page frame number for @addr populated by driver
 * @size: (input) number of bytes requested
 *
 * See bdev_direct_access(), which takes this struct and returns the
 * number of bytes actually available at @addr.
 */
struct blk_dax_ctl {
	sector_t sector;
	void *addr;
	long size;
	pfn_t pfn;
};
08f858512 [PATCH] move bloc... |
1737 |
/* Driver entry points for a block device; all fields may be NULL. */
struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	/* synchronous single-page read/write; last arg selects write */
	int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	/* DAX: map @sector to a kernel address and pfn; see bdev_direct_access() */
	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
			long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
	/* persistent reservation handlers */
	const struct pr_ops *pr_ops;
};
633a08b81 [PATCH] introduce... |
1757 1758 |
extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long); |
47a191fd3 fs/block_dev.c: a... |
1759 1760 1761 |
extern int bdev_read_page(struct block_device *, sector_t, struct page *); extern int bdev_write_page(struct block_device *, sector_t, struct page *, struct writeback_control *); |
b2e0d1625 dax: fix lifetime... |
1762 |
extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *); |
2d96afc8f block: Add bdev_d... |
1763 |
extern int bdev_dax_supported(struct super_block *, int); |
a8078b1fc block: Update blk... |
1764 |
extern bool bdev_dax_capable(struct block_device *); |
9361401eb [PATCH] BLOCK: Ma... |
1765 |
#else /* CONFIG_BLOCK */ |
ac13a829f fs/libfs.c: add g... |
1766 1767 |
struct block_device; |
9361401eb [PATCH] BLOCK: Ma... |
1768 1769 1770 1771 |
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

/* Plugging degenerates to no-ops without the block layer. */
struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}


static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

/* No devices to flush; report success. */
static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
				     sector_t *error_sector)
{
	return 0;
}
9361401eb [PATCH] BLOCK: Ma... |
1800 |
#endif /* CONFIG_BLOCK */ |
1da177e4c Linux-2.6.12-rc2 |
1801 |
#endif |