include/linux/iocontext.h

  #ifndef IOCONTEXT_H
  #define IOCONTEXT_H
  #include <linux/radix-tree.h>
  #include <linux/rcupdate.h>
  #include <linux/workqueue.h>

  /* bits in io_cq->changed, set by ioc_ioprio_changed()/ioc_cgroup_changed() */
  enum {
  	ICQ_IOPRIO_CHANGED,
  	ICQ_CGROUP_CHANGED,
  };
  /*
   * An io_cq (icq) is an association between an io_context (ioc) and a
   * request_queue (q).  It is used by elevators which need to track
   * information per ioc-q pair.
   *
   * An elevator can request use of icqs by setting elevator_type->icq_size
   * and ->icq_align.  Both size and align must be at least as large as
   * those of struct io_cq, and the elevator can use the tail area for
   * private information.  The recommended way to do this is to define a
   * struct which contains io_cq as the first member followed by the
   * private members, and to use its size and alignment.  For example,
   *
   *	struct snail_io_cq {
   *		struct io_cq	icq;
   *		int		poke_snail;
   *		int		feed_snail;
   *	};
   *
   *	struct elevator_type snail_elv_type = {
   *		.ops =		{ ... },
   *		.icq_size =	sizeof(struct snail_io_cq),
   *		.icq_align =	__alignof__(struct snail_io_cq),
   *		...
   *	};
   *
   * If icq_size is set, block core will manage the icqs.  Every request
   * will have its ->elv.icq field set before
   * elevator_ops->elevator_set_req_fn() is called, and will hold a
   * reference to the associated io_context.
   *
   * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
   * called and, on destruction, ->elevator_exit_icq_fn().  Both functions
   * are called with both the associated io_context and queue locks held.
   * (A sketch of such a callback follows struct io_cq below.)
   *
   * An elevator is allowed to look up an icq using ioc_lookup_icq() while
   * holding the queue lock, but the returned icq is valid only until the
   * queue lock is released (sketched below, after struct io_context).
   * Elevators cannot and should not try to create or destroy icqs
   * themselves.
   *
   * As icqs are linked from both the ioc and the q, the locking rules are
   * a bit complex.
   *
   * - The ioc lock nests inside the q lock.
   *
   * - ioc->icq_list and icq->ioc_node are protected by the ioc lock;
   *   q->icq_list and icq->q_node by the q lock.
   *
   * - ioc->icq_tree and ioc->icq_hint are protected by the ioc lock, while
   *   the icq itself is protected by the q lock.  However, both the
   *   indexes and the icq itself are also RCU managed, so lookup can be
   *   performed holding only the q lock.
   *
   * - icqs are not reference counted.  They are destroyed when either the
   *   ioc or the q goes away.  Each request with its icq set holds an
   *   extra reference to the ioc to ensure the ioc stays until the request
   *   is completed.
   *
   * - Linking and unlinking of icqs are performed while holding both the
   *   ioc and q locks.  Due to the lock ordering, q exit is simple but ioc
   *   exit requires a reverse-order double lock dance.
   */
  struct io_cq {
  	struct request_queue	*q;
  	struct io_context	*ioc;

  	/*
  	 * q_node and ioc_node link this io_cq through the icq_list of the
  	 * q and the ioc respectively.  Both fields are unused once
  	 * ioc_exit_icq() has been called, so they share storage with
  	 * __rcu_icq_cache and __rcu_head, which are used for RCU free of
  	 * the io_cq.
  	 */
  	union {
  		struct list_head	q_node;
  		struct kmem_cache	*__rcu_icq_cache;
  	};
  	union {
  		struct hlist_node	ioc_node;
  		struct rcu_head		__rcu_head;
  	};

  	unsigned long		changed;	/* ICQ_*_CHANGED bits */
  };
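
  /*
   * Illustrative sketch, not part of this header.  Continuing the
   * hypothetical "snail" elevator from the comment above: block core
   * allocates icq_size bytes with io_cq as the first member, so a
   * callback can recover its private wrapper with container_of():
   *
   *	static void snail_init_icq(struct io_cq *icq)
   *	{
   *		struct snail_io_cq *sic =
   *			container_of(icq, struct snail_io_cq, icq);
   *
   *		sic->poke_snail = 0;
   *		sic->feed_snail = 0;
   *	}
   *
   * Once an icq is unlinked from both lists, the unions above let block
   * core free it via RCU without extra fields (modeled on
   * icq_free_icq_rcu() in block/blk-ioc.c):
   *
   *	static void icq_free_icq_rcu(struct rcu_head *head)
   *	{
   *		struct io_cq *icq =
   *			container_of(head, struct io_cq, __rcu_head);
   *
   *		kmem_cache_free(icq->__rcu_icq_cache, icq);
   *	}
   */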
  
  /*
   * I/O subsystem state of the associated processes.  It is refcounted
   * and kmalloc'ed, and may be shared between processes.
   */
  struct io_context {
  	atomic_long_t refcount;
  	atomic_t nr_tasks;
  
  	/* all the fields below are protected by this lock */
  	spinlock_t lock;
  
  	unsigned short ioprio;

  	/*
  	 * For request batching
  	 */
  	int nr_batch_requests;     /* Number of requests left in the batch */
  	unsigned long last_waited; /* Time last woken after wait for request */

  	struct radix_tree_root	icq_tree;
  	struct io_cq __rcu	*icq_hint;
  	struct hlist_head	icq_list;
  
  	struct work_struct release_work;
  };
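
  /*
   * Illustrative sketch, not part of this header: per the lookup rule in
   * the icq comment above, an elevator may find its icq under the queue
   * lock only, and the result is stable only while q->queue_lock is held.
   * ioc_lookup_icq() is declared in block core (block/blk.h), and the
   * "snail" names are hypothetical:
   *
   *	struct io_cq *icq = ioc_lookup_icq(ioc, q);	(q lock held)
   *
   *	if (icq && test_and_clear_bit(ICQ_IOPRIO_CHANGED, &icq->changed))
   *		snail_update_ioprio(
   *			container_of(icq, struct snail_io_cq, icq));
   */
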
  static inline struct io_context *ioc_task_link(struct io_context *ioc)
  {
  	/*
  	 * If the refcount is already zero, don't allow sharing (the ioc
  	 * is going away; it's a race).
  	 */
  	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
  		atomic_inc(&ioc->nr_tasks);
  		return ioc;
  	}
  
  	return NULL;
  }
  struct task_struct;
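
  /*
   * Illustrative sketch, not part of this header (modeled on copy_io() in
   * kernel/fork.c): a CLONE_IO child shares the parent's io_context via
   * ioc_task_link(), and a NULL return for a non-NULL ioc means the
   * context was already dying, so the fork must fail:
   *
   *	if (clone_flags & CLONE_IO) {
   *		tsk->io_context = ioc_task_link(ioc);
   *		if (unlikely(!tsk->io_context))
   *			return -ENOMEM;
   *	}
   */
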
  #ifdef CONFIG_BLOCK
  void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
  void exit_io_context(struct task_struct *task);
  struct io_context *get_task_io_context(struct task_struct *task,
  				       gfp_t gfp_flags, int node);
  void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
  void ioc_cgroup_changed(struct io_context *ioc);
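
  /*
   * Illustrative sketch, not part of this header (modeled on
   * set_task_ioprio() in fs/ioprio.c): pin a task's ioc, flag all of its
   * icqs for ioprio revalidation, then drop the reference.  The second
   * argument of put_io_context() is a request_queue whose lock is already
   * held, or NULL when no queue lock is held:
   *
   *	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
   *	if (ioc) {
   *		ioc_ioprio_changed(ioc, ioprio);
   *		put_io_context(ioc, NULL);
   *	}
   */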
  #else
  struct io_context;
  static inline void put_io_context(struct io_context *ioc,
  				  struct request_queue *locked_q) { }
  static inline void exit_io_context(struct task_struct *task) { }
  #endif /* CONFIG_BLOCK */
  #endif /* IOCONTEXT_H */