drivers/md/dm-queue-length.c

  /*
   * Copyright (C) 2004-2005 IBM Corp.  All Rights Reserved.
   * Copyright (C) 2006-2009 NEC Corporation.
   *
   * dm-queue-length.c
   *
   * Module Author: Stefan Bader, IBM
   * Modified by: Kiyoshi Ueda, NEC
   *
   * This file is released under the GPL.
   *
   * queue-length path selector - choose a path with the least number of
   * in-flight I/Os.
   */
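
/*
 * Usage sketch (illustrative only; device numbers and sizes are made
 * up): a dm-multipath table line that selects "queue-length" with two
 * paths, each carrying the optional per-path <repeat_count> argument:
 *
 *   0 1024 multipath 0 0 1 1 queue-length 0 2 1 8:16 1 8:32 1
 *
 * After "queue-length": 0 selector args, 2 paths, 1 path arg each,
 * then <dev> <repeat_count> pairs.
 */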
  
  #include "dm.h"
  #include "dm-path-selector.h"
  
  #include <linux/slab.h>
  #include <linux/ctype.h>
  #include <linux/errno.h>
  #include <linux/module.h>
  #include <linux/atomic.h>
  
  #define DM_MSG_PREFIX	"multipath queue-length"
  #define QL_MIN_IO	1
  #define QL_VERSION	"0.2.0"
  
  struct selector {
  	struct list_head	valid_paths;
  	struct list_head	failed_paths;
  	spinlock_t lock;
  };
  
  struct path_info {
  	struct list_head	list;
  	struct dm_path		*path;
  	unsigned		repeat_count;
  	atomic_t		qlen;	/* the number of in-flight I/Os */
  };
  
  static struct selector *alloc_selector(void)
  {
  	struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
  
  	if (s) {
  		INIT_LIST_HEAD(&s->valid_paths);
  		INIT_LIST_HEAD(&s->failed_paths);
  		spin_lock_init(&s->lock);
  	}
  
  	return s;
  }
  
  static int ql_create(struct path_selector *ps, unsigned argc, char **argv)
  {
  	struct selector *s = alloc_selector();
  
  	if (!s)
  		return -ENOMEM;
  
  	ps->context = s;
  	return 0;
  }
  
  static void ql_free_paths(struct list_head *paths)
  {
  	struct path_info *pi, *next;
  
  	list_for_each_entry_safe(pi, next, paths, list) {
  		list_del(&pi->list);
  		kfree(pi);
  	}
  }
  
  static void ql_destroy(struct path_selector *ps)
  {
  	struct selector *s = ps->context;
  
  	ql_free_paths(&s->valid_paths);
  	ql_free_paths(&s->failed_paths);
  	kfree(s);
  	ps->context = NULL;
  }
  
  static int ql_status(struct path_selector *ps, struct dm_path *path,
  		     status_type_t type, char *result, unsigned maxlen)
  {
  	unsigned sz = 0;
  	struct path_info *pi;
  
  	/* When called with NULL path, return selector status/args. */
  	if (!path)
  		DMEMIT("0 ");
  	else {
  		pi = path->pscontext;
  
  		switch (type) {
  		case STATUSTYPE_INFO:
  			DMEMIT("%d ", atomic_read(&pi->qlen));
  			break;
  		case STATUSTYPE_TABLE:
  			DMEMIT("%u ", pi->repeat_count);
  			break;
  		}
  	}
  
  	return sz;
  }
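
/*
 * Orientation note (output framing belongs to dm-mpath, not this file):
 * "dmsetup status" surfaces each path's in-flight count via
 * STATUSTYPE_INFO, while "dmsetup table" echoes the configured
 * repeat_count via STATUSTYPE_TABLE.
 */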
  
  static int ql_add_path(struct path_selector *ps, struct dm_path *path,
  		       int argc, char **argv, char **error)
  {
  	struct selector *s = ps->context;
  	struct path_info *pi;
  	unsigned repeat_count = QL_MIN_IO;
  	char dummy;
  	unsigned long flags;
  
  	/*
  	 * Arguments: [<repeat_count>]
  	 * 	<repeat_count>: The number of I/Os before switching path.
  	 * 			If not given, default (QL_MIN_IO) is used.
  	 */
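	/*
	 * E.g. (hypothetical table fragment) per-path args "8:16 10"
	 * arrive here as argc == 1, argv[0] == "10", so repeat_count is
	 * parsed as 10 and then capped to 1 below.
	 */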
  	if (argc > 1) {
  		*error = "queue-length ps: incorrect number of arguments";
  		return -EINVAL;
  	}
  	if ((argc == 1) && (sscanf(argv[0], "%u%c", &repeat_count, &dummy) != 1)) {
  		*error = "queue-length ps: invalid repeat count";
  		return -EINVAL;
  	}
  	if (repeat_count > 1) {
  		DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
  		repeat_count = 1;
  	}
  	/* Allocate the path information structure */
  	pi = kmalloc(sizeof(*pi), GFP_KERNEL);
  	if (!pi) {
  		*error = "queue-length ps: Error allocating path information";
  		return -ENOMEM;
  	}
  
  	pi->path = path;
  	pi->repeat_count = repeat_count;
  	atomic_set(&pi->qlen, 0);
  
  	path->pscontext = pi;
  	spin_lock_irqsave(&s->lock, flags);
  	list_add_tail(&pi->list, &s->valid_paths);
  	spin_unlock_irqrestore(&s->lock, flags);
  
  	return 0;
  }
  
  static void ql_fail_path(struct path_selector *ps, struct dm_path *path)
  {
  	struct selector *s = ps->context;
  	struct path_info *pi = path->pscontext;
  	unsigned long flags;

  	spin_lock_irqsave(&s->lock, flags);
  	list_move(&pi->list, &s->failed_paths);
  	spin_unlock_irqrestore(&s->lock, flags);
  }
  
  static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path)
  {
  	struct selector *s = ps->context;
  	struct path_info *pi = path->pscontext;
  	unsigned long flags;

  	spin_lock_irqsave(&s->lock, flags);
  	list_move_tail(&pi->list, &s->valid_paths);
  	spin_unlock_irqrestore(&s->lock, flags);
  
  	return 0;
  }
  
  /*
   * Select a path having the minimum number of in-flight I/Os
   */
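/*
 * Worked example (hypothetical counts): with valid paths A (qlen 3),
 * B (qlen 0) and C (qlen 2), the scan keeps A as the initial best,
 * switches to B, and stops early because B's queue is already empty;
 * B is returned and rotated to the list tail.
 */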
  static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
  {
  	struct selector *s = ps->context;
  	struct path_info *pi = NULL, *best = NULL;
  	struct dm_path *ret = NULL;
  	unsigned long flags;

  	spin_lock_irqsave(&s->lock, flags);
  	if (list_empty(&s->valid_paths))
  		goto out;

  	list_for_each_entry(pi, &s->valid_paths, list) {
  		if (!best ||
  		    (atomic_read(&pi->qlen) < atomic_read(&best->qlen)))
  			best = pi;
  
  		if (!atomic_read(&best->qlen))
  			break;
  	}
  
  	if (!best)
  		goto out;

  	/* Move most recently used to least preferred to evenly balance. */
  	list_move_tail(&best->list, &s->valid_paths);
  	ret = best->path;
  out:
  	spin_unlock_irqrestore(&s->lock, flags);
  	return ret;
  }
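
/*
 * dm-mpath brackets each I/O it dispatches to a path with
 * start_io/end_io, so pi->qlen tracks that path's in-flight count.
 */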
  
  static int ql_start_io(struct path_selector *ps, struct dm_path *path,
  		       size_t nr_bytes)
  {
  	struct path_info *pi = path->pscontext;
  
  	atomic_inc(&pi->qlen);
  
  	return 0;
  }
  
  static int ql_end_io(struct path_selector *ps, struct dm_path *path,
  		     size_t nr_bytes)
  {
  	struct path_info *pi = path->pscontext;
  
  	atomic_dec(&pi->qlen);
  
  	return 0;
  }
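
/*
 * table_args/info_args advertise how many per-path arguments
 * ql_status() emits for each status type (repeat_count for the table
 * line, qlen for info).
 */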
  
  static struct path_selector_type ql_ps = {
  	.name		= "queue-length",
  	.module		= THIS_MODULE,
  	.table_args	= 1,
  	.info_args	= 1,
  	.create		= ql_create,
  	.destroy	= ql_destroy,
  	.status		= ql_status,
  	.add_path	= ql_add_path,
  	.fail_path	= ql_fail_path,
  	.reinstate_path	= ql_reinstate_path,
  	.select_path	= ql_select_path,
  	.start_io	= ql_start_io,
  	.end_io		= ql_end_io,
  };
  
  static int __init dm_ql_init(void)
  {
  	int r = dm_register_path_selector(&ql_ps);
  
  	if (r < 0)
  		DMERR("register failed %d", r);
  
  	DMINFO("version " QL_VERSION " loaded");
  
  	return r;
  }
  
  static void __exit dm_ql_exit(void)
  {
  	int r = dm_unregister_path_selector(&ql_ps);
  
  	if (r < 0)
  		DMERR("unregister failed %d", r);
  }
  
  module_init(dm_ql_init);
  module_exit(dm_ql_exit);
  
  MODULE_AUTHOR("Stefan Bader <Stefan.Bader at de.ibm.com>");
  MODULE_DESCRIPTION(
  	"(C) Copyright IBM Corp. 2004,2005   All Rights Reserved.
  "
  	DM_NAME " path selector to balance the number of in-flight I/Os"
  );
  MODULE_LICENSE("GPL");