#ifndef IOCONTEXT_H
#define IOCONTEXT_H
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

enum {
	ICQ_EXITED		= 1 << 2,
};

/*
 * An io_cq (icq) is an association between an io_context (ioc) and a
 * request_queue (q).  This is used by elevators which need to track
 * information per ioc - q pair.
 *
 * An elevator can request use of icq's by setting elevator_type->icq_size
 * and ->icq_align.  Both size and align must be larger than those of
 * struct io_cq, and the elevator can use the tail area for private
 * information.  The recommended way to do this is defining a struct
 * which contains io_cq as the first member followed by private members
 * and using its size and align.  For example,
 *
 *	struct snail_io_cq {
 *		struct io_cq	icq;
 *		int		poke_snail;
 *		int		feed_snail;
 *	};
 *
 *	struct elevator_type snail_elv_type = {
 *		.ops =		{ ... },
 *		.icq_size =	sizeof(struct snail_io_cq),
 *		.icq_align =	__alignof__(struct snail_io_cq),
 *		...
 *	};
 *
 * If icq_size is set, block core will manage icq's.  All requests will
 * have their ->elv.icq field set before elevator_ops->elevator_set_req_fn()
 * is called, and each will hold a reference to the associated io_context.
 *
 * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
 * called and, on destruction, ->elevator_exit_icq_fn().  Both functions
 * are called with both the associated io_context and queue locks held.
 *
 * An elevator may look up an icq using ioc_lookup_icq() while holding the
 * queue lock, but the returned icq is valid only until the queue lock is
 * released.  Elevators cannot and should not try to create or destroy
 * icq's themselves.
 *
 * As icq's are linked from both ioc and q, the locking rules are a bit
 * complex.
 *
 * - ioc lock nests inside q lock.
 *
 * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
 *   q->icq_list and icq->q_node by q lock.
 *
 * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
 *   itself is protected by q lock.  However, both the indexes and icq
 *   itself are also RCU managed and lookup can be performed holding only
 *   the q lock.
 *
 * - icq's are not reference counted.  They are destroyed when either the
 *   ioc or q goes away.  Each request with icq set holds an extra
 *   reference to ioc to ensure it stays until the request is completed.
 *
 * - Linking and unlinking icq's are performed while holding both ioc and q
 *   locks.  Due to the lock ordering, q exit is simple but ioc exit
 *   requires a reverse-order double lock dance.
 */
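
/*
 * A minimal lookup sketch (illustrative only; it reuses the hypothetical
 * snail_io_cq from the comment above, and ioc_lookup_icq() is provided
 * by block core in block/blk.h):
 *
 *	struct io_cq *icq;
 *
 *	spin_lock_irq(q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	if (icq) {
 *		struct snail_io_cq *sic =
 *			container_of(icq, struct snail_io_cq, icq);
 *		sic->poke_snail++;
 *	}
 *	spin_unlock_irq(q->queue_lock);
 *
 * The icq must not be dereferenced once the queue lock is dropped.
 */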

struct io_cq {
	struct request_queue	*q;
	struct io_context	*ioc;

	/*
	 * q_node and ioc_node link io_cq through icq_list of q and ioc
	 * respectively.  Both fields are unused once ioc_exit_icq() is
	 * called and are shared with __rcu_icq_cache and __rcu_head, which
	 * are used for the RCU free of io_cq (see the sketch after this
	 * struct).
	 */
	union {
		struct list_head	q_node;
		struct kmem_cache	*__rcu_icq_cache;
	};
	union {
		struct hlist_node	ioc_node;
		struct rcu_head		__rcu_head;
	};

	unsigned int		flags;
};
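
/*
 * A sketch of that RCU free path, modeled on icq_free_icq_rcu() in
 * block/blk-ioc.c: by the time the RCU callback runs, q_node and
 * ioc_node are no longer used, so the same storage can carry the
 * kmem_cache pointer and the rcu_head:
 *
 *	static void icq_free_icq_rcu(struct rcu_head *head)
 *	{
 *		struct io_cq *icq = container_of(head, struct io_cq,
 *						 __rcu_head);
 *
 *		kmem_cache_free(icq->__rcu_icq_cache, icq);
 *	}
 */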

/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed, and may be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;
	atomic_t active_ref;
	atomic_t nr_tasks;

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;

	/*
	 * For request batching
	 */
	int nr_batch_requests;	/* Number of requests left in the batch */
	unsigned long last_waited; /* Time last woken after wait for request */

	struct radix_tree_root	icq_tree;
	struct io_cq __rcu	*icq_hint;
	struct hlist_head	icq_list;

	struct work_struct release_work;
};

/**
 * get_io_context_active - get active reference on ioc
 * @ioc: ioc of interest
 *
 * Only iocs with an active reference can issue new IOs.  This function
 * acquires an active reference on @ioc.  The caller must already have an
 * active reference on @ioc.
 */
static inline void get_io_context_active(struct io_context *ioc)
{
	WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
	WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
	atomic_long_inc(&ioc->refcount);
	atomic_inc(&ioc->active_ref);
}

static inline void ioc_task_link(struct io_context *ioc)
{
	get_io_context_active(ioc);

	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
	atomic_inc(&ioc->nr_tasks);
}
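
/*
 * Usage sketch, modeled on the CLONE_IO handling in copy_io() in
 * kernel/fork.c: a child that shares its parent's io_context takes a
 * task link, which also grabs active and plain references:
 *
 *	struct io_context *ioc = current->io_context;
 *
 *	if (ioc && (clone_flags & CLONE_IO)) {
 *		ioc_task_link(ioc);
 *		tsk->io_context = ioc;
 *	}
 */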

struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
void put_io_context_active(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif	/* CONFIG_BLOCK */
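
/*
 * Typical caller pattern for the CONFIG_BLOCK declarations above (a
 * sketch; the GFP flags and NUMA node depend on the call site):
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(current, GFP_NOIO, NUMA_NO_NODE);
 *	if (ioc) {
 *		... inspect or modify ioc, e.g. ioc->ioprio ...
 *		put_io_context(ioc);
 *	}
 */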

#endif	/* IOCONTEXT_H */