Blame view
block/blk.h
8324aa91d
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

86db1e297
/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

8324aa91d
extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;

86db1e297
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);

8324aa91d
void __blk_queue_free_tags(struct request_queue *q);

86db1e297
void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);

242f9dcb8
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

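A minimal sketch (not part of this file) of how the completion path and the timeout handler are expected to race on REQ_ATOM_COMPLETE; __blk_complete_request() stands in here for whatever actually finishes the request:

/*
 * Hedged sketch: both sides call blk_mark_rq_complete() first, so only
 * the winner of the test_and_set_bit() goes on to finish the request.
 */
static void example_try_complete(struct request *rq)
{
	if (blk_mark_rq_complete(rq))
		return;			/* the other path got here first */

	__blk_complete_request(rq);	/* assumed completion helper */
}
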
86db1e297
581d4e28d
#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

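A hedged sketch of how blk_should_fake_timeout() can be used for fault injection; blk_complete_request() is the assumed normal completion entry point:

/*
 * Hedged sketch: when fault injection says so, drop the completion on
 * the floor so the request later appears to have timed out.
 */
static void example_fake_timeout_complete(struct request *rq)
{
	if (blk_should_fake_timeout(rq->q))
		return;			/* pretend the completion was lost */

	blk_complete_request(rq);
}
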
86db1e297
struct io_context *current_io_context(gfp_t gfp_flags, int node);

d6d481969
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
void blk_recalc_rq_sectors(struct request *rq, int nsect);

8324aa91d
void blk_queue_congestion_threshold(struct request_queue *q);

ff88972c8
int blk_dev_init(void);

8324aa91d
/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

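A hedged sketch of the hysteresis the comment above describes: congestion is flagged at the higher "on" threshold and only cleared below the lower "off" threshold, so the state does not flap; blk_set_queue_congested()/blk_clear_queue_congested() are assumed helpers:

static void example_update_congestion(struct request_queue *q, int rw,
				      int in_flight)
{
	if (in_flight >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, rw);
	else if (in_flight < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, rw);
	/* between the two thresholds: leave the current state alone */
}
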
7ba1ba12e
#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define rq_for_each_integrity_segment(bvl, _rq, _iter)		\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)

#endif /* BLK_DEV_INTEGRITY */

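A hedged sketch of walking a request's integrity segments with the macro above (assumes CONFIG_BLK_DEV_INTEGRITY and the struct req_iterator layout of this era, with .bio and .i fields):

static unsigned int example_count_integrity_segments(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned int segs = 0;

	rq_for_each_integrity_segment(bvec, rq, iter)
		segs++;

	return segs;
}
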
c7c22e4d5
static inline int blk_cpu_to_group(int cpu)
{
#ifdef CONFIG_SCHED_MC
	cpumask_t mask = cpu_coregroup_map(cpu);
	return first_cpu(mask);
#elif defined(CONFIG_SCHED_SMT)
	return first_cpu(per_cpu(cpu_sibling_map, cpu));
#else
	return cpu;
#endif
}

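A hedged sketch of how a completion-affinity path might use blk_cpu_to_group(): two CPUs in the same core/SMT group can share completions without redirecting across groups:

static int example_same_completion_group(int cpu_a, int cpu_b)
{
	return blk_cpu_to_group(cpu_a) == blk_cpu_to_group(cpu_b);
}
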
8324aa91d
#endif