Blame view
drivers/md/dm-delay.c
8.37 KB
26b9f2287 dm: delay target |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 |
/* * Copyright (C) 2005-2007 Red Hat GmbH * * A target that delays reads and/or writes and can send * them to different devices. * * This file is released under the GPL. */ #include <linux/module.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/slab.h> |
586e80e6e dm: remove dm hea... |
15 |
#include <linux/device-mapper.h> |
26b9f2287 dm: delay target |
16 17 18 19 |
#define DM_MSG_PREFIX "delay"

/*
 * Per-target context.  One of these is allocated in delay_ctr() and
 * stored in ti->private.  Reads and writes may be sent to different
 * devices with different delays (see the mapping-parameter comment
 * above delay_ctr()).
 */
struct delay_c {
	struct timer_list delay_timer;		/* fires when the earliest queued bio expires */
	struct mutex timer_lock;		/* serialises mod_timer() calls in queue_timeout() */
	struct work_struct flush_expired_bios;	/* resubmits expired bios from workqueue context */
	struct list_head delayed_bios;		/* dm_delay_info entries; guarded by delayed_bios_lock */
	atomic_t may_delay;			/* cleared in presuspend so new bios bypass the delay */

	mempool_t *delayed_pool;		/* pool of struct dm_delay_info */

	struct dm_dev *dev_read;		/* device for reads (and writes when dev_write is NULL) */
	sector_t start_read;			/* sector offset into dev_read */
	unsigned read_delay;			/* read delay in milliseconds */
	unsigned reads;				/* reads currently queued, reported by STATUSTYPE_INFO */

	struct dm_dev *dev_write;		/* optional separate write device, NULL when unused */
	sector_t start_write;			/* sector offset into dev_write */
	unsigned write_delay;			/* write delay in milliseconds */
	unsigned writes;			/* writes currently queued, reported by STATUSTYPE_INFO */
};
028867ac2 dm: use kmem_cach... |
36 |
/*
 * Bookkeeping for one delayed bio, allocated from dc->delayed_pool and
 * linked on dc->delayed_bios until its expiry time is reached.
 */
struct dm_delay_info {
	struct delay_c *context;	/* owning target, for the read/write counters */
	struct list_head list;		/* entry on dc->delayed_bios */
	struct bio *bio;		/* the held-back bio */
	unsigned long expires;		/* jiffies value at which the bio may be issued */
};

/* Protects every dc->delayed_bios list (shared across all delay targets). */
static DEFINE_MUTEX(delayed_bios_lock);

static struct workqueue_struct *kdelayd_wq;	/* runs flush_expired_bios() */
static struct kmem_cache *delayed_cache;	/* slab backing dc->delayed_pool */

/*
 * Timer callback: runs in timer (atomic) context, so it only kicks the
 * workqueue; the actual bio resubmission happens in flush_expired_bios().
 */
static void handle_delayed_timer(unsigned long data)
{
	struct delay_c *dc = (struct delay_c *)data;

	queue_work(kdelayd_wq, &dc->flush_expired_bios);
}

/*
 * (Re)arm the delay timer so it fires no later than @expires.  Only
 * shortens a pending timer, never lengthens it.  Called from process
 * context (delay_bio/flush_delayed_bios), hence the mutex is safe here.
 */
static void queue_timeout(struct delay_c *dc, unsigned long expires)
{
	mutex_lock(&dc->timer_lock);

	if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
		mod_timer(&dc->delay_timer, expires);

	mutex_unlock(&dc->timer_lock);
}

/* Submit a singly-linked (bi_next) chain of bios to the block layer. */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

/*
 * Collect every expired delayed bio (or all of them when @flush_all is
 * set, as in presuspend) and return them as a chain for flush_bios().
 * If unexpired entries remain, re-arm the timer for the soonest one.
 */
static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
{
	struct dm_delay_info *delayed, *next;
	unsigned long next_expires = 0;
	int start_timer = 0;
	struct bio_list flush_bios = { };

	mutex_lock(&delayed_bios_lock);
	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
			list_del(&delayed->list);
			bio_list_add(&flush_bios, delayed->bio);
			if ((bio_data_dir(delayed->bio) == WRITE))
				delayed->context->writes--;
			else
				delayed->context->reads--;
			mempool_free(delayed, dc->delayed_pool);
			continue;
		}

		/* Still pending: track the earliest remaining expiry. */
		if (!start_timer) {
			start_timer = 1;
			next_expires = delayed->expires;
		} else
			next_expires = min(next_expires, delayed->expires);
	}

	mutex_unlock(&delayed_bios_lock);

	if (start_timer)
		queue_timeout(dc, next_expires);

	return bio_list_get(&flush_bios);
}

/* Workqueue handler: issue whatever has expired by now. */
static void flush_expired_bios(struct work_struct *work)
{
	struct delay_c *dc;

	dc = container_of(work, struct delay_c, flush_expired_bios);
	flush_bios(flush_delayed_bios(dc, 0));
}

/*
 * Mapping parameters:
 *    <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
 *
 * With separate write parameters, the first set is only used for reads.
 * Delays are specified in milliseconds.
 */
static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct delay_c *dc;
	unsigned long long tmpll;

	if (argc != 3 && argc != 6) {
		ti->error = "requires exactly 3 or 6 arguments";
		return -EINVAL;
	}

	dc = kmalloc(sizeof(*dc), GFP_KERNEL);
	if (!dc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	dc->reads = dc->writes = 0;

	if (sscanf(argv[1], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	dc->start_read = tmpll;

	if (sscanf(argv[2], "%u", &dc->read_delay) != 1) {
		ti->error = "Invalid delay";
		goto bad;
	}

	if (dm_get_device(ti, argv[0], dc->start_read, ti->len,
			  dm_table_get_mode(ti->table), &dc->dev_read)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	/* No separate write parameters: dev_read handles everything. */
	dc->dev_write = NULL;
	if (argc == 3)
		goto out;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid write device sector";
		goto bad_dev_read;
	}
	dc->start_write = tmpll;

	if (sscanf(argv[5], "%u", &dc->write_delay) != 1) {
		ti->error = "Invalid write delay";
		goto bad_dev_read;
	}

	if (dm_get_device(ti, argv[3], dc->start_write, ti->len,
			  dm_table_get_mode(ti->table), &dc->dev_write)) {
		ti->error = "Write device lookup failed";
		goto bad_dev_read;
	}

out:
	dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache);
	if (!dc->delayed_pool) {
		DMERR("Couldn't create delayed bio pool.");
		goto bad_dev_write;
	}

	setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);

	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
	INIT_LIST_HEAD(&dc->delayed_bios);
	mutex_init(&dc->timer_lock);
	atomic_set(&dc->may_delay, 1);

	ti->num_flush_requests = 1;
	ti->private = dc;
	return 0;

	/* Unwind in reverse acquisition order; dev_write may be NULL. */
bad_dev_write:
	if (dc->dev_write)
		dm_put_device(ti, dc->dev_write);
bad_dev_read:
	dm_put_device(ti, dc->dev_read);
bad:
	kfree(dc);
	return -EINVAL;
}

/*
 * Destructor: drain the workqueue first so no flush work can touch dc
 * after it is freed, then release devices, pool and the context.
 */
static void delay_dtr(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	flush_workqueue(kdelayd_wq);

	dm_put_device(ti, dc->dev_read);

	if (dc->dev_write)
		dm_put_device(ti, dc->dev_write);

	mempool_destroy(dc->delayed_pool);
	kfree(dc);
}

/*
 * Queue @bio for @delay milliseconds.  Returns 1 when the bio should be
 * issued immediately (zero delay or delaying disabled during suspend),
 * 0 when it has been queued — delay_map() passes this straight back as
 * its mapping result.
 */
static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
{
	struct dm_delay_info *delayed;
	unsigned long expires = 0;

	if (!delay || !atomic_read(&dc->may_delay))
		return 1;

	delayed = mempool_alloc(dc->delayed_pool, GFP_NOIO);

	delayed->context = dc;
	delayed->bio = bio;
	/* delay is in ms; convert to jiffies from now. */
	delayed->expires = expires = jiffies + (delay * HZ / 1000);

	mutex_lock(&delayed_bios_lock);

	if (bio_data_dir(bio) == WRITE)
		dc->writes++;
	else
		dc->reads++;

	list_add_tail(&delayed->list, &dc->delayed_bios);

	mutex_unlock(&delayed_bios_lock);

	queue_timeout(dc, expires);

	return 0;
}

/*
 * Presuspend: stop accepting new delayed bios, kill the timer, and
 * flush everything still queued so suspend cannot strand bios.
 */
static void delay_presuspend(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	atomic_set(&dc->may_delay, 0);
	del_timer_sync(&dc->delay_timer);
	flush_bios(flush_delayed_bios(dc, 1));
}

/* Resume: re-enable delaying after a suspend. */
static void delay_resume(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	atomic_set(&dc->may_delay, 1);
}

/*
 * Map a bio: remap it onto the read or write device (writes use
 * dev_write only when one was configured) and hand it to delay_bio().
 * Empty (flush) bios keep their zero sector on the write path.
 */
static int delay_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct delay_c *dc = ti->private;

	if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
		bio->bi_bdev = dc->dev_write->bdev;
		if (bio_sectors(bio))
			bio->bi_sector = dc->start_write +
					 (bio->bi_sector - ti->begin);

		return delay_bio(dc, dc->write_delay, bio);
	}

	bio->bi_bdev = dc->dev_read->bdev;
	bio->bi_sector = dc->start_read +
			 (bio->bi_sector - ti->begin);

	return delay_bio(dc, dc->read_delay, bio);
}

/*
 * Status: INFO reports the current delayed read/write counts; TABLE
 * echoes the constructor arguments (write triple only when present).
 */
static int delay_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned maxlen)
{
	struct delay_c *dc = ti->private;
	int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%u %u", dc->reads, dc->writes);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u", dc->dev_read->name,
		       (unsigned long long) dc->start_read, dc->read_delay);
		if (dc->dev_write)
			DMEMIT(" %s %llu %u", dc->dev_write->name,
			       (unsigned long long) dc->start_write,
			       dc->write_delay);
		break;
	}

	return 0;
}
af4874e03 dm target:s intro... |
312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 |
static int delay_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct delay_c *dc = ti->private; int ret = 0; ret = fn(ti, dc->dev_read, dc->start_read, data); if (ret) goto out; if (dc->dev_write) ret = fn(ti, dc->dev_write, dc->start_write, data); out: return ret; } |
26b9f2287 dm: delay target |
328 329 |
/* Target registration table: ties the "delay" target name to its ops. */
static struct target_type delay_target = {
	.name	     = "delay",
	.version     = {1, 1, 0},
	.module      = THIS_MODULE,
	.ctr	     = delay_ctr,
	.dtr	     = delay_dtr,
	.map	     = delay_map,
	.presuspend  = delay_presuspend,
	.resume	     = delay_resume,
	.status	     = delay_status,
	.iterate_devices = delay_iterate_devices,
};

/*
 * Module init: bring up the workqueue and slab cache before registering
 * the target, so a constructed target can always allocate from them.
 */
static int __init dm_delay_init(void)
{
	int r = -ENOMEM;

	kdelayd_wq = create_workqueue("kdelayd");
	if (!kdelayd_wq) {
		DMERR("Couldn't start kdelayd");
		goto bad_queue;
	}

	delayed_cache = KMEM_CACHE(dm_delay_info, 0);
	if (!delayed_cache) {
		DMERR("Couldn't create delayed bio cache.");
		goto bad_memcache;
	}

	r = dm_register_target(&delay_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		goto bad_register;
	}

	return 0;

	/* Tear down in reverse order of creation. */
bad_register:
	kmem_cache_destroy(delayed_cache);
bad_memcache:
	destroy_workqueue(kdelayd_wq);
bad_queue:
	return r;
}

/*
 * Module exit: unregister first so no new targets can be built, then
 * free the cache and workqueue.
 */
static void __exit dm_delay_exit(void)
{
	dm_unregister_target(&delay_target);
	kmem_cache_destroy(delayed_cache);
	destroy_workqueue(kdelayd_wq);
}

/* Module hooks */
module_init(dm_delay_init);
module_exit(dm_delay_exit);

MODULE_DESCRIPTION(DM_NAME " delay target");
MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
MODULE_LICENSE("GPL");