Blame view

include/linux/iocontext.h 4.74 KB
b24413180   Greg Kroah-Hartman   License cleanup: ...
1
  /* SPDX-License-Identifier: GPL-2.0 */
fd0928df9   Jens Axboe   ioprio: move io p...
2
3
  #ifndef IOCONTEXT_H
  #define IOCONTEXT_H
4ac845a2e   Jens Axboe   block: cfq: make ...
4
  #include <linux/radix-tree.h>
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
5
  #include <linux/rcupdate.h>
b2efa0526   Tejun Heo   block, cfq: unlin...
6
  #include <linux/workqueue.h>
4ac845a2e   Jens Axboe   block: cfq: make ...
7

/*
 * io_cq->flags bits.  ICQ_EXITED marks an icq whose exit callback has
 * already been invoked; checked to avoid exiting an icq twice.
 */
enum {
	ICQ_EXITED		= 1 << 2,
};
f1f8cc946   Tejun Heo   block, cfq: move ...
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
  /*
   * An io_cq (icq) is association between an io_context (ioc) and a
   * request_queue (q).  This is used by elevators which need to track
   * information per ioc - q pair.
   *
   * Elevator can request use of icq by setting elevator_type->icq_size and
   * ->icq_align.  Both size and align must be larger than that of struct
   * io_cq and elevator can use the tail area for private information.  The
   * recommended way to do this is defining a struct which contains io_cq as
   * the first member followed by private members and using its size and
   * align.  For example,
   *
   *	struct snail_io_cq {
   *		struct io_cq	icq;
   *		int		poke_snail;
   *		int		feed_snail;
   *	};
   *
   *	struct elevator_type snail_elv_type {
   *		.ops =		{ ... },
   *		.icq_size =	sizeof(struct snail_io_cq),
   *		.icq_align =	__alignof__(struct snail_io_cq),
   *		...
   *	};
   *
   * If icq_size is set, block core will manage icq's.  All requests will
   * have its ->elv.icq field set before elevator_ops->elevator_set_req_fn()
   * is called and be holding a reference to the associated io_context.
   *
   * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
   * called and, on destruction, ->elevator_exit_icq_fn().  Both functions
   * are called with both the associated io_context and queue locks held.
   *
   * Elevator is allowed to lookup icq using ioc_lookup_icq() while holding
   * queue lock but the returned icq is valid only until the queue lock is
   * released.  Elevators can not and should not try to create or destroy
   * icq's.
   *
   * As icq's are linked from both ioc and q, the locking rules are a bit
   * complex.
   *
   * - ioc lock nests inside q lock.
   *
   * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
   *   q->icq_list and icq->q_node by q lock.
   *
   * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
   *   itself is protected by q lock.  However, both the indexes and icq
   *   itself are also RCU managed and lookup can be performed holding only
   *   the q lock.
   *
   * - icq's are not reference counted.  They are destroyed when either the
   *   ioc or q goes away.  Each request with icq set holds an extra
   *   reference to ioc to ensure it stays until the request is completed.
   *
   * - Linking and unlinking icq's are performed while holding both ioc and q
   *   locks.  Due to the lock ordering, q exit is simple but ioc exit
   *   requires reverse-order double lock dance.
   */
c58698073   Tejun Heo   block, cfq: reorg...
70
71
72
  struct io_cq {
  	struct request_queue	*q;
  	struct io_context	*ioc;
fd0928df9   Jens Axboe   ioprio: move io p...
73

7e5a87944   Tejun Heo   block, cfq: move ...
74
75
76
77
78
79
80
81
82
83
84
85
86
87
  	/*
  	 * q_node and ioc_node link io_cq through icq_list of q and ioc
  	 * respectively.  Both fields are unused once ioc_exit_icq() is
  	 * called and shared with __rcu_icq_cache and __rcu_head which are
  	 * used for RCU free of io_cq.
  	 */
  	union {
  		struct list_head	q_node;
  		struct kmem_cache	*__rcu_icq_cache;
  	};
  	union {
  		struct hlist_node	ioc_node;
  		struct rcu_head		__rcu_head;
  	};
dc86900e0   Tejun Heo   block, cfq: move ...
88

d705ae6b1   Tejun Heo   block: replace ic...
89
  	unsigned int		flags;
fd0928df9   Jens Axboe   ioprio: move io p...
90
91
92
  };
  
  /*
d38ecf935   Jens Axboe   io context sharin...
93
94
   * I/O subsystem state of the associated processes.  It is refcounted
   * and kmalloc'ed. These could be shared between processes.
fd0928df9   Jens Axboe   ioprio: move io p...
95
96
   */
  struct io_context {
d9c7d394a   Nikanth Karthikesan   block: prevent po...
97
  	atomic_long_t refcount;
f6e8d01be   Tejun Heo   block: add io_con...
98
  	atomic_t active_ref;
d38ecf935   Jens Axboe   io context sharin...
99
100
101
102
  	atomic_t nr_tasks;
  
  	/* all the fields below are protected by this lock */
  	spinlock_t lock;
fd0928df9   Jens Axboe   ioprio: move io p...
103
104
  
  	unsigned short ioprio;
31e4c28d9   Vivek Goyal   blkio: Introduce ...
105

fd0928df9   Jens Axboe   ioprio: move io p...
106
107
108
  	/*
  	 * For request batching
  	 */
fd0928df9   Jens Axboe   ioprio: move io p...
109
  	int nr_batch_requests;     /* Number of requests left in the batch */
58c24a616   Richard Kennedy   block: remove pad...
110
  	unsigned long last_waited; /* Time last woken after wait for request */
fd0928df9   Jens Axboe   ioprio: move io p...
111

c58698073   Tejun Heo   block, cfq: reorg...
112
113
114
  	struct radix_tree_root	icq_tree;
  	struct io_cq __rcu	*icq_hint;
  	struct hlist_head	icq_list;
b2efa0526   Tejun Heo   block, cfq: unlin...
115
116
  
  	struct work_struct release_work;
fd0928df9   Jens Axboe   ioprio: move io p...
117
  };
f6e8d01be   Tejun Heo   block: add io_con...
118
119
120
121
122
123
124
125
126
  /**
   * get_io_context_active - get active reference on ioc
   * @ioc: ioc of interest
   *
   * Only iocs with active reference can issue new IOs.  This function
   * acquires an active reference on @ioc.  The caller must already have an
   * active reference on @ioc.
   */
  static inline void get_io_context_active(struct io_context *ioc)
d38ecf935   Jens Axboe   io context sharin...
127
  {
3d48749d9   Tejun Heo   block: ioc_task_l...
128
  	WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
f6e8d01be   Tejun Heo   block: add io_con...
129
  	WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
3d48749d9   Tejun Heo   block: ioc_task_l...
130
  	atomic_long_inc(&ioc->refcount);
f6e8d01be   Tejun Heo   block: add io_con...
131
132
133
134
135
136
137
138
  	atomic_inc(&ioc->active_ref);
  }
  
  static inline void ioc_task_link(struct io_context *ioc)
  {
  	get_io_context_active(ioc);
  
  	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
3d48749d9   Tejun Heo   block: ioc_task_l...
139
  	atomic_inc(&ioc->nr_tasks);
d38ecf935   Jens Axboe   io context sharin...
140
  }
struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
void put_io_context_active(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
#else
/* !CONFIG_BLOCK: provide no-op stubs so callers compile unchanged */
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif

#endif /* IOCONTEXT_H */