block/blk.h
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H
/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32
extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);
/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
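/*
 * Usage sketch (hypothetical caller, for illustration only): the normal
 * completion path and the error-handling timer both try to grab the request
 * with blk_mark_rq_complete(), and only the winner finishes it.  A completion
 * path could look roughly like this:
 *
 *	if (blk_mark_rq_complete(rq))
 *		return;		-- the timeout handler already owns rq
 *	__blk_end_request_all(rq, error);
 *
 * blk_clear_rq_complete() then hands the request back to the driver, e.g.
 * when a timed-out request is retried instead of failed.
 */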

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
struct request *blk_do_flush(struct request_queue *q, struct request *rq);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) ||
			    rq == &q->flush_rq)
				return rq;
			rq = blk_do_flush(q, rq);
			if (rq)
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}
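/*
 * Sketch of intended use (assumed from the blk-core.c helpers of this era,
 * for illustration): drivers do not call __elv_next_request() directly; a
 * request_fn pulls work through the higher-level helpers, which loop over it:
 *
 *	while ((rq = blk_fetch_request(q)) != NULL)
 *		-- hand rq to the hardware --
 *
 * Requests carrying REQ_FLUSH/REQ_FUA are diverted through blk_do_flush()
 * above, so callers only see requests that are actually ready to issue.
 */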

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif
struct io_context *current_io_context(gfp_t gfp_flags, int node);
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);

void blk_queue_congestion_threshold(struct request_queue *q);
int blk_dev_init(void);
void elv_quiesce_start(struct request_queue *q);
void elv_quiesce_end(struct request_queue *q);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

static inline int blk_cpu_to_group(int cpu)
{
	int group = NR_CPUS;
#ifdef CONFIG_SCHED_MC
	const struct cpumask *mask = cpu_coregroup_mask(cpu);
	group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	group = cpumask_first(topology_thread_cpumask(cpu));
#else
	return cpu;
#endif
	if (likely(group < NR_CPUS))
		return group;
	return cpu;
}

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request or a discard request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
	       (rq->cmd_type == REQ_TYPE_FS ||
		(rq->cmd_flags & REQ_DISCARD));
}
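/*
 * Usage sketch (assuming the accounting helpers in blk-core.c): the per-disk
 * statistics code checks this predicate before touching any counters,
 *
 *	if (blk_do_io_stat(req))
 *		-- update the per-partition counters for req --
 *
 * so passthrough requests, and requests on queues with iostats disabled,
 * never show up in the disk statistics.
 */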
#endif |