include/linux/iocontext.h (2.01 KB) — blame-view capture; per-commit hashes interleaved below.
1 2 |
#ifndef IOCONTEXT_H #define IOCONTEXT_H |
4ac845a2e
|
3 |
#include <linux/radix-tree.h> |
34e6bbf23
|
4 |
#include <linux/rcupdate.h> |
4ac845a2e
|
5 |
|
fd0928df9
|
6 |
struct cfq_queue; |
383cd7213
|
7 8 9 10 11 12 13 |
/*
 * CFQ "think time" bookkeeping.  All values are raw unsigned longs;
 * presumably jiffies — confirm against the cfq-iosched.c users.
 */
struct cfq_ttime {
	unsigned long last_end_request;	/* when the last request completed */

	unsigned long ttime_total;	/* accumulated think time */
	unsigned long ttime_samples;	/* number of samples folded in */
	unsigned long ttime_mean;	/* running mean of the above */
};
dc86900e0
|
14 15 16 17 |
/*
 * Change-notification bit numbers.  NOTE(review): the names and the
 * `unsigned long changed` field in cfq_io_context suggest these index
 * bits in that mask — confirm against the set_bit/test_bit callers.
 */
enum {
	CIC_IOPRIO_CHANGED,	/* = 0: I/O priority was updated */
	CIC_CGROUP_CHANGED,	/* = 1: owning cgroup was updated */
};
fd0928df9
|
18 |
struct cfq_io_context { |
fd0928df9
|
19 |
void *key; |
283287a52
|
20 |
struct request_queue *q; |
fd0928df9
|
21 22 23 24 |
struct cfq_queue *cfqq[2]; struct io_context *ioc; |
383cd7213
|
25 |
struct cfq_ttime ttime; |
fd0928df9
|
26 |
|
fd0928df9
|
27 |
struct list_head queue_list; |
ffc4e7595
|
28 |
struct hlist_node cic_list; |
fd0928df9
|
29 |
|
dc86900e0
|
30 |
unsigned long changed; |
fd0928df9
|
31 32 |
void (*dtor)(struct io_context *); /* destructor */ void (*exit)(struct io_context *); /* called on task exit */ |
34e6bbf23
|
33 34 |
struct rcu_head rcu_head; |
fd0928df9
|
35 36 37 |
}; /* |
d38ecf935
|
38 39 |
* I/O subsystem state of the associated processes. It is refcounted * and kmalloc'ed. These could be shared between processes. |
fd0928df9
|
40 41 |
*/ struct io_context { |
d9c7d394a
|
42 |
atomic_long_t refcount; |
d38ecf935
|
43 44 45 46 |
atomic_t nr_tasks; /* all the fields below are protected by this lock */ spinlock_t lock; |
fd0928df9
|
47 48 |
unsigned short ioprio; |
31e4c28d9
|
49 |
|
fd0928df9
|
50 51 52 |
/* * For request batching */ |
fd0928df9
|
53 |
int nr_batch_requests; /* Number of requests left in the batch */ |
58c24a616
|
54 |
unsigned long last_waited; /* Time last woken after wait for request */ |
fd0928df9
|
55 |
|
4ac845a2e
|
56 |
struct radix_tree_root radix_root; |
ffc4e7595
|
57 |
struct hlist_head cic_list; |
4d2deb40b
|
58 |
void __rcu *ioc_data; |
fd0928df9
|
59 |
}; |
d38ecf935
|
60 61 62 63 64 65 |
static inline struct io_context *ioc_task_link(struct io_context *ioc) { /* * if ref count is zero, don't allow sharing (ioc is going away, it's * a race). */ |
d9c7d394a
|
66 |
if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) { |
cbb4f2646
|
67 |
atomic_inc(&ioc->nr_tasks); |
d38ecf935
|
68 |
return ioc; |
d237e5c7c
|
69 |
} |
d38ecf935
|
70 71 72 |
return NULL; } |
b69f22920
|
73 |
struct task_struct;

#ifdef CONFIG_BLOCK
/* Drop one reference on @ioc; frees it when the count hits zero. */
void put_io_context(struct io_context *ioc);
/* Detach and release @task's io_context at task exit. */
void exit_io_context(struct task_struct *task);
/* Get (creating if needed) @task's io_context, with a reference held. */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
/* Propagate an ioprio / cgroup change to the schedulers using @ioc. */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
void ioc_cgroup_changed(struct io_context *ioc);
#else /* !CONFIG_BLOCK */
struct io_context;
/* No block layer: io_context handling collapses to no-ops. */
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif /* CONFIG_BLOCK */
fd0928df9
|
86 |
#endif |