Commit 44feb387f6f5584535bd6e3ad7ccfdce715d7dba

Authored by Mike Snitzer
Committed by Alasdair G Kergon
1 parent 28eed34e76

dm thin: prepare to separate bio_prison code

The bio prison code will be useful to share with future DM targets.

Prepare to move this code into a separate module, adding a dm prefix
to structures and functions that will be exported.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
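
For context, the dm_-prefixed symbols introduced in this patch form the interface that a standalone bio prison module could later export. Below is a minimal header sketch, assuming a hypothetical dm-bio-prison.h and using only the signatures visible in this diff; the header eventually added to the tree may differ.

/*
 * Hypothetical sketch, not part of this commit: declarations inferred
 * from the dm_-prefixed functions renamed below.
 */
struct dm_bio_prison;                   /* opaque to callers */
struct dm_bio_prison_cell;
struct dm_deferred_set;
struct dm_deferred_entry;

struct dm_cell_key {                    /* as defined in dm-thin.c at this point */
        int virtual;
        dm_thin_id dev;
        dm_block_t block;
};

struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);

/* Returns 1 if the cell was already held, 0 if @inmate is the new holder. */
int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
                  struct bio *inmate, struct dm_bio_prison_cell **ref);

void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio);
void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell,
                               struct bio_list *inmates);
void dm_cell_error(struct dm_bio_prison_cell *cell);

struct dm_deferred_set *dm_deferred_set_create(void);
void dm_deferred_set_destroy(struct dm_deferred_set *ds);
struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
void dm_deferred_entry_dec(struct dm_deferred_entry *entry,
                           struct list_head *head);
/* Returns 1 if deferred or 0 if no pending items to delay job. */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);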

Showing 1 changed file with 131 additions and 90 deletions

drivers/md/dm-thin.c
... ... @@ -58,7 +58,7 @@
58 58 * i) plug io further to this physical block. (see bio_prison code).
59 59 *
60 60 * ii) quiesce any read io to that shared data block. Obviously
61   - * including all devices that share this block. (see deferred_set code)
  61 + * including all devices that share this block. (see dm_deferred_set code)
62 62 *
63 63 * iii) copy the data block to a newly allocate block. This step can be
64 64 * missed out if the io covers the block. (schedule_copy).
65 65  
... ... @@ -104,9 +104,9 @@
104 104 * by a key, multiple bios can be in the same cell. When the cell is
105 105 * subsequently unlocked the bios become available.
106 106 */
107   -struct bio_prison;
  107 +struct dm_bio_prison;
108 108  
109   -struct cell_key {
  109 +struct dm_cell_key {
110 110 int virtual;
111 111 dm_thin_id dev;
112 112 dm_block_t block;
113 113  
... ... @@ -114,13 +114,13 @@
114 114  
115 115 struct dm_bio_prison_cell {
116 116 struct hlist_node list;
117   - struct bio_prison *prison;
118   - struct cell_key key;
  117 + struct dm_bio_prison *prison;
  118 + struct dm_cell_key key;
119 119 struct bio *holder;
120 120 struct bio_list bios;
121 121 };
122 122  
123   -struct bio_prison {
  123 +struct dm_bio_prison {
124 124 spinlock_t lock;
125 125 mempool_t *cell_pool;
126 126  
127 127  
128 128  
... ... @@ -148,13 +148,13 @@
148 148 * @nr_cells should be the number of cells you want in use _concurrently_.
149 149 * Don't confuse it with the number of distinct keys.
150 150 */
151   -static struct bio_prison *prison_create(unsigned nr_cells)
  151 +static struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
152 152 {
153 153 unsigned i;
154 154 uint32_t nr_buckets = calc_nr_buckets(nr_cells);
155   - size_t len = sizeof(struct bio_prison) +
  155 + size_t len = sizeof(struct dm_bio_prison) +
156 156 (sizeof(struct hlist_head) * nr_buckets);
157   - struct bio_prison *prison = kmalloc(len, GFP_KERNEL);
  157 + struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);
158 158  
159 159 if (!prison)
160 160 return NULL;
161 161  
... ... @@ -175,13 +175,13 @@
175 175 return prison;
176 176 }
177 177  
178   -static void prison_destroy(struct bio_prison *prison)
  178 +static void dm_bio_prison_destroy(struct dm_bio_prison *prison)
179 179 {
180 180 mempool_destroy(prison->cell_pool);
181 181 kfree(prison);
182 182 }
183 183  
184   -static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
  184 +static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
185 185 {
186 186 const unsigned long BIG_PRIME = 4294967291UL;
187 187 uint64_t hash = key->block * BIG_PRIME;
... ... @@ -189,7 +189,7 @@
189 189 return (uint32_t) (hash & prison->hash_mask);
190 190 }
191 191  
192   -static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
  192 +static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
193 193 {
194 194 return (lhs->virtual == rhs->virtual) &&
195 195 (lhs->dev == rhs->dev) &&
... ... @@ -197,7 +197,7 @@
197 197 }
198 198  
199 199 static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
200   - struct cell_key *key)
  200 + struct dm_cell_key *key)
201 201 {
202 202 struct dm_bio_prison_cell *cell;
203 203 struct hlist_node *tmp;
... ... @@ -215,8 +215,8 @@
215 215 *
216 216 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
217 217 */
218   -static int bio_detain(struct bio_prison *prison, struct cell_key *key,
219   - struct bio *inmate, struct dm_bio_prison_cell **ref)
  218 +static int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
  219 + struct bio *inmate, struct dm_bio_prison_cell **ref)
220 220 {
221 221 int r = 1;
222 222 unsigned long flags;
... ... @@ -277,7 +277,7 @@
277 277 */
278 278 static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
279 279 {
280   - struct bio_prison *prison = cell->prison;
  280 + struct dm_bio_prison *prison = cell->prison;
281 281  
282 282 hlist_del(&cell->list);
283 283  
284 284  
... ... @@ -289,10 +289,10 @@
289 289 mempool_free(cell, prison->cell_pool);
290 290 }
291 291  
292   -static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
  292 +static void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
293 293 {
294 294 unsigned long flags;
295   - struct bio_prison *prison = cell->prison;
  295 + struct dm_bio_prison *prison = cell->prison;
296 296  
297 297 spin_lock_irqsave(&prison->lock, flags);
298 298 __cell_release(cell, bios);
299 299  
... ... @@ -313,10 +313,10 @@
313 313 __cell_release(cell, NULL);
314 314 }
315 315  
316   -static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
  316 +static void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
317 317 {
318 318 unsigned long flags;
319   - struct bio_prison *prison = cell->prison;
  319 + struct dm_bio_prison *prison = cell->prison;
320 320  
321 321 spin_lock_irqsave(&prison->lock, flags);
322 322 __cell_release_singleton(cell, bio);
... ... @@ -329,7 +329,7 @@
329 329 static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
330 330 struct bio_list *inmates)
331 331 {
332   - struct bio_prison *prison = cell->prison;
  332 + struct dm_bio_prison *prison = cell->prison;
333 333  
334 334 hlist_del(&cell->list);
335 335 bio_list_merge(inmates, &cell->bios);
336 336  
337 337  
338 338  
... ... @@ -337,20 +337,20 @@
337 337 mempool_free(cell, prison->cell_pool);
338 338 }
339 339  
340   -static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
341   - struct bio_list *inmates)
  340 +static void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell,
  341 + struct bio_list *inmates)
342 342 {
343 343 unsigned long flags;
344   - struct bio_prison *prison = cell->prison;
  344 + struct dm_bio_prison *prison = cell->prison;
345 345  
346 346 spin_lock_irqsave(&prison->lock, flags);
347 347 __cell_release_no_holder(cell, inmates);
348 348 spin_unlock_irqrestore(&prison->lock, flags);
349 349 }
350 350  
351   -static void cell_error(struct dm_bio_prison_cell *cell)
  351 +static void dm_cell_error(struct dm_bio_prison_cell *cell)
352 352 {
353   - struct bio_prison *prison = cell->prison;
  353 + struct dm_bio_prison *prison = cell->prison;
354 354 struct bio_list bios;
355 355 struct bio *bio;
356 356 unsigned long flags;
357 357  
358 358  
359 359  
360 360  
361 361  
... ... @@ -374,24 +374,29 @@
374 374 * new mapping could free the old block that the read bios are mapped to.
375 375 */
376 376  
377   -struct deferred_set;
378   -struct deferred_entry {
379   - struct deferred_set *ds;
  377 +struct dm_deferred_set;
  378 +struct dm_deferred_entry {
  379 + struct dm_deferred_set *ds;
380 380 unsigned count;
381 381 struct list_head work_items;
382 382 };
383 383  
384   -struct deferred_set {
  384 +struct dm_deferred_set {
385 385 spinlock_t lock;
386 386 unsigned current_entry;
387 387 unsigned sweeper;
388   - struct deferred_entry entries[DEFERRED_SET_SIZE];
  388 + struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
389 389 };
390 390  
391   -static void ds_init(struct deferred_set *ds)
  391 +static struct dm_deferred_set *dm_deferred_set_create(void)
392 392 {
393 393 int i;
  394 + struct dm_deferred_set *ds;
394 395  
  396 + ds = kmalloc(sizeof(*ds), GFP_KERNEL);
  397 + if (!ds)
  398 + return NULL;
  399 +
395 400 spin_lock_init(&ds->lock);
396 401 ds->current_entry = 0;
397 402 ds->sweeper = 0;
398 403  
399 404  
400 405  
... ... @@ -400,12 +405,19 @@
400 405 ds->entries[i].count = 0;
401 406 INIT_LIST_HEAD(&ds->entries[i].work_items);
402 407 }
  408 +
  409 + return ds;
403 410 }
404 411  
405   -static struct deferred_entry *ds_inc(struct deferred_set *ds)
  412 +static void dm_deferred_set_destroy(struct dm_deferred_set *ds)
406 413 {
  414 + kfree(ds);
  415 +}
  416 +
  417 +static struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
  418 +{
407 419 unsigned long flags;
408   - struct deferred_entry *entry;
  420 + struct dm_deferred_entry *entry;
409 421  
410 422 spin_lock_irqsave(&ds->lock, flags);
411 423 entry = ds->entries + ds->current_entry;
... ... @@ -420,7 +432,7 @@
420 432 return (index + 1) % DEFERRED_SET_SIZE;
421 433 }
422 434  
423   -static void __sweep(struct deferred_set *ds, struct list_head *head)
  435 +static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
424 436 {
425 437 while ((ds->sweeper != ds->current_entry) &&
426 438 !ds->entries[ds->sweeper].count) {
... ... @@ -432,7 +444,7 @@
432 444 list_splice_init(&ds->entries[ds->sweeper].work_items, head);
433 445 }
434 446  
435   -static void ds_dec(struct deferred_entry *entry, struct list_head *head)
  447 +static void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
436 448 {
437 449 unsigned long flags;
438 450  
... ... @@ -446,7 +458,7 @@
446 458 /*
447 459 * Returns 1 if deferred or 0 if no pending items to delay job.
448 460 */
449   -static int ds_add_work(struct deferred_set *ds, struct list_head *work)
  461 +static int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
450 462 {
451 463 int r = 1;
452 464 unsigned long flags;
453 465  
... ... @@ -467,13 +479,28 @@
467 479 return r;
468 480 }
469 481  
  482 +static int __init dm_bio_prison_init(void)
  483 +{
  484 + _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
  485 + if (!_cell_cache)
  486 + return -ENOMEM;
  487 +
  488 + return 0;
  489 +}
  490 +
  491 +static void __exit dm_bio_prison_exit(void)
  492 +{
  493 + kmem_cache_destroy(_cell_cache);
  494 + _cell_cache = NULL;
  495 +}
  496 +
470 497 /*----------------------------------------------------------------*/
471 498  
472 499 /*
473 500 * Key building.
474 501 */
475 502 static void build_data_key(struct dm_thin_device *td,
476   - dm_block_t b, struct cell_key *key)
  503 + dm_block_t b, struct dm_cell_key *key)
477 504 {
478 505 key->virtual = 0;
479 506 key->dev = dm_thin_dev_id(td);
... ... @@ -481,7 +508,7 @@
481 508 }
482 509  
483 510 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
484   - struct cell_key *key)
  511 + struct dm_cell_key *key)
485 512 {
486 513 key->virtual = 1;
487 514 key->dev = dm_thin_dev_id(td);
... ... @@ -534,7 +561,7 @@
534 561 unsigned low_water_triggered:1; /* A dm event has been sent */
535 562 unsigned no_free_space:1; /* A -ENOSPC warning has been issued */
536 563  
537   - struct bio_prison *prison;
  564 + struct dm_bio_prison *prison;
538 565 struct dm_kcopyd_client *copier;
539 566  
540 567 struct workqueue_struct *wq;
... ... @@ -552,8 +579,8 @@
552 579  
553 580 struct bio_list retry_on_resume_list;
554 581  
555   - struct deferred_set shared_read_ds;
556   - struct deferred_set all_io_ds;
  582 + struct dm_deferred_set *shared_read_ds;
  583 + struct dm_deferred_set *all_io_ds;
557 584  
558 585 struct dm_thin_new_mapping *next_mapping;
559 586 mempool_t *mapping_pool;
... ... @@ -660,8 +687,8 @@
660 687  
661 688 struct dm_thin_endio_hook {
662 689 struct thin_c *tc;
663   - struct deferred_entry *shared_read_entry;
664   - struct deferred_entry *all_io_entry;
  690 + struct dm_deferred_entry *shared_read_entry;
  691 + struct dm_deferred_entry *all_io_entry;
665 692 struct dm_thin_new_mapping *overwrite_mapping;
666 693 };
667 694  
... ... @@ -877,7 +904,7 @@
877 904 unsigned long flags;
878 905  
879 906 spin_lock_irqsave(&pool->lock, flags);
880   - cell_release(cell, &pool->deferred_bios);
  907 + dm_cell_release(cell, &pool->deferred_bios);
881 908 spin_unlock_irqrestore(&tc->pool->lock, flags);
882 909  
883 910 wake_worker(pool);
... ... @@ -896,7 +923,7 @@
896 923 bio_list_init(&bios);
897 924  
898 925 spin_lock_irqsave(&pool->lock, flags);
899   - cell_release_no_holder(cell, &pool->deferred_bios);
  926 + dm_cell_release_no_holder(cell, &pool->deferred_bios);
900 927 spin_unlock_irqrestore(&pool->lock, flags);
901 928  
902 929 wake_worker(pool);
... ... @@ -906,7 +933,7 @@
906 933 {
907 934 if (m->bio)
908 935 m->bio->bi_end_io = m->saved_bi_end_io;
909   - cell_error(m->cell);
  936 + dm_cell_error(m->cell);
910 937 list_del(&m->list);
911 938 mempool_free(m, m->tc->pool->mapping_pool);
912 939 }
... ... @@ -921,7 +948,7 @@
921 948 bio->bi_end_io = m->saved_bi_end_io;
922 949  
923 950 if (m->err) {
924   - cell_error(m->cell);
  951 + dm_cell_error(m->cell);
925 952 goto out;
926 953 }
927 954  
... ... @@ -933,7 +960,7 @@
933 960 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
934 961 if (r) {
935 962 DMERR("dm_thin_insert_block() failed");
936   - cell_error(m->cell);
  963 + dm_cell_error(m->cell);
937 964 goto out;
938 965 }
939 966  
... ... @@ -1067,7 +1094,7 @@
1067 1094 m->err = 0;
1068 1095 m->bio = NULL;
1069 1096  
1070   - if (!ds_add_work(&pool->shared_read_ds, &m->list))
  1097 + if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
1071 1098 m->quiesced = 1;
1072 1099  
1073 1100 /*
... ... @@ -1099,7 +1126,7 @@
1099 1126 if (r < 0) {
1100 1127 mempool_free(m, pool->mapping_pool);
1101 1128 DMERR("dm_kcopyd_copy() failed");
1102   - cell_error(cell);
  1129 + dm_cell_error(cell);
1103 1130 }
1104 1131 }
1105 1132 }
... ... @@ -1164,7 +1191,7 @@
1164 1191 if (r < 0) {
1165 1192 mempool_free(m, pool->mapping_pool);
1166 1193 DMERR("dm_kcopyd_zero() failed");
1167   - cell_error(cell);
  1194 + dm_cell_error(cell);
1168 1195 }
1169 1196 }
1170 1197 }
... ... @@ -1276,7 +1303,7 @@
1276 1303 struct bio_list bios;
1277 1304  
1278 1305 bio_list_init(&bios);
1279   - cell_release(cell, &bios);
  1306 + dm_cell_release(cell, &bios);
1280 1307  
1281 1308 while ((bio = bio_list_pop(&bios)))
1282 1309 retry_on_resume(bio);
1283 1310  
... ... @@ -1288,13 +1315,13 @@
1288 1315 unsigned long flags;
1289 1316 struct pool *pool = tc->pool;
1290 1317 struct dm_bio_prison_cell *cell, *cell2;
1291   - struct cell_key key, key2;
  1318 + struct dm_cell_key key, key2;
1292 1319 dm_block_t block = get_bio_block(tc, bio);
1293 1320 struct dm_thin_lookup_result lookup_result;
1294 1321 struct dm_thin_new_mapping *m;
1295 1322  
1296 1323 build_virtual_key(tc->td, block, &key);
1297   - if (bio_detain(tc->pool->prison, &key, bio, &cell))
  1324 + if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
1298 1325 return;
1299 1326  
1300 1327 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
... ... @@ -1306,8 +1333,8 @@
1306 1333 * on this block.
1307 1334 */
1308 1335 build_data_key(tc->td, lookup_result.block, &key2);
1309   - if (bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
1310   - cell_release_singleton(cell, bio);
  1336 + if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
  1337 + dm_cell_release_singleton(cell, bio);
1311 1338 break;
1312 1339 }
1313 1340  
... ... @@ -1326,7 +1353,7 @@
1326 1353 m->err = 0;
1327 1354 m->bio = bio;
1328 1355  
1329   - if (!ds_add_work(&pool->all_io_ds, &m->list)) {
  1356 + if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
1330 1357 spin_lock_irqsave(&pool->lock, flags);
1331 1358 list_add(&m->list, &pool->prepared_discards);
1332 1359 spin_unlock_irqrestore(&pool->lock, flags);
... ... @@ -1338,8 +1365,8 @@
1338 1365 * a block boundary. So we submit the discard of a
1339 1366 * partial block appropriately.
1340 1367 */
1341   - cell_release_singleton(cell, bio);
1342   - cell_release_singleton(cell2, bio);
  1368 + dm_cell_release_singleton(cell, bio);
  1369 + dm_cell_release_singleton(cell2, bio);
1343 1370 if ((!lookup_result.shared) && pool->pf.discard_passdown)
1344 1371 remap_and_issue(tc, bio, lookup_result.block);
1345 1372 else
1346 1373  
1347 1374  
... ... @@ -1351,20 +1378,20 @@
1351 1378 /*
1352 1379 * It isn't provisioned, just forget it.
1353 1380 */
1354   - cell_release_singleton(cell, bio);
  1381 + dm_cell_release_singleton(cell, bio);
1355 1382 bio_endio(bio, 0);
1356 1383 break;
1357 1384  
1358 1385 default:
1359 1386 DMERR("discard: find block unexpectedly returned %d", r);
1360   - cell_release_singleton(cell, bio);
  1387 + dm_cell_release_singleton(cell, bio);
1361 1388 bio_io_error(bio);
1362 1389 break;
1363 1390 }
1364 1391 }
1365 1392  
1366 1393 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1367   - struct cell_key *key,
  1394 + struct dm_cell_key *key,
1368 1395 struct dm_thin_lookup_result *lookup_result,
1369 1396 struct dm_bio_prison_cell *cell)
1370 1397 {
... ... @@ -1384,7 +1411,7 @@
1384 1411  
1385 1412 default:
1386 1413 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1387   - cell_error(cell);
  1414 + dm_cell_error(cell);
1388 1415 break;
1389 1416 }
1390 1417 }
1391 1418  
... ... @@ -1395,14 +1422,14 @@
1395 1422 {
1396 1423 struct dm_bio_prison_cell *cell;
1397 1424 struct pool *pool = tc->pool;
1398   - struct cell_key key;
  1425 + struct dm_cell_key key;
1399 1426  
1400 1427 /*
1401 1428 * If cell is already occupied, then sharing is already in the process
1402 1429 * of being broken so we have nothing further to do here.
1403 1430 */
1404 1431 build_data_key(tc->td, lookup_result->block, &key);
1405   - if (bio_detain(pool->prison, &key, bio, &cell))
  1432 + if (dm_bio_detain(pool->prison, &key, bio, &cell))
1406 1433 return;
1407 1434  
1408 1435 if (bio_data_dir(bio) == WRITE && bio->bi_size)
1409 1436  
... ... @@ -1410,9 +1437,9 @@
1410 1437 else {
1411 1438 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1412 1439  
1413   - h->shared_read_entry = ds_inc(&pool->shared_read_ds);
  1440 + h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1414 1441  
1415   - cell_release_singleton(cell, bio);
  1442 + dm_cell_release_singleton(cell, bio);
1416 1443 remap_and_issue(tc, bio, lookup_result->block);
1417 1444 }
1418 1445 }
... ... @@ -1427,7 +1454,7 @@
1427 1454 * Remap empty bios (flushes) immediately, without provisioning.
1428 1455 */
1429 1456 if (!bio->bi_size) {
1430   - cell_release_singleton(cell, bio);
  1457 + dm_cell_release_singleton(cell, bio);
1431 1458 remap_and_issue(tc, bio, 0);
1432 1459 return;
1433 1460 }
... ... @@ -1437,7 +1464,7 @@
1437 1464 */
1438 1465 if (bio_data_dir(bio) == READ) {
1439 1466 zero_fill_bio(bio);
1440   - cell_release_singleton(cell, bio);
  1467 + dm_cell_release_singleton(cell, bio);
1441 1468 bio_endio(bio, 0);
1442 1469 return;
1443 1470 }
... ... @@ -1458,7 +1485,7 @@
1458 1485 default:
1459 1486 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1460 1487 set_pool_mode(tc->pool, PM_READ_ONLY);
1461   - cell_error(cell);
  1488 + dm_cell_error(cell);
1462 1489 break;
1463 1490 }
1464 1491 }
... ... @@ -1468,7 +1495,7 @@
1468 1495 int r;
1469 1496 dm_block_t block = get_bio_block(tc, bio);
1470 1497 struct dm_bio_prison_cell *cell;
1471   - struct cell_key key;
  1498 + struct dm_cell_key key;
1472 1499 struct dm_thin_lookup_result lookup_result;
1473 1500  
1474 1501 /*
... ... @@ -1476,7 +1503,7 @@
1476 1503 * being provisioned so we have nothing further to do here.
1477 1504 */
1478 1505 build_virtual_key(tc->td, block, &key);
1479   - if (bio_detain(tc->pool->prison, &key, bio, &cell))
  1506 + if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
1480 1507 return;
1481 1508  
1482 1509 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
... ... @@ -1491,7 +1518,7 @@
1491 1518 * TODO: this will probably have to change when discard goes
1492 1519 * back in.
1493 1520 */
1494   - cell_release_singleton(cell, bio);
  1521 + dm_cell_release_singleton(cell, bio);
1495 1522  
1496 1523 if (lookup_result.shared)
1497 1524 process_shared_bio(tc, bio, block, &lookup_result);
... ... @@ -1501,7 +1528,7 @@
1501 1528  
1502 1529 case -ENODATA:
1503 1530 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1504   - cell_release_singleton(cell, bio);
  1531 + dm_cell_release_singleton(cell, bio);
1505 1532 remap_to_origin_and_issue(tc, bio);
1506 1533 } else
1507 1534 provision_block(tc, bio, block, cell);
... ... @@ -1509,7 +1536,7 @@
1509 1536  
1510 1537 default:
1511 1538 DMERR("dm_thin_find_block() failed, error = %d", r);
1512   - cell_release_singleton(cell, bio);
  1539 + dm_cell_release_singleton(cell, bio);
1513 1540 bio_io_error(bio);
1514 1541 break;
1515 1542 }
... ... @@ -1718,7 +1745,7 @@
1718 1745  
1719 1746 h->tc = tc;
1720 1747 h->shared_read_entry = NULL;
1721   - h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : ds_inc(&pool->all_io_ds);
  1748 + h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds);
1722 1749 h->overwrite_mapping = NULL;
1723 1750  
1724 1751 return h;
... ... @@ -1928,7 +1955,7 @@
1928 1955 if (dm_pool_metadata_close(pool->pmd) < 0)
1929 1956 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1930 1957  
1931   - prison_destroy(pool->prison);
  1958 + dm_bio_prison_destroy(pool->prison);
1932 1959 dm_kcopyd_client_destroy(pool->copier);
1933 1960  
1934 1961 if (pool->wq)
... ... @@ -1938,6 +1965,8 @@
1938 1965 mempool_free(pool->next_mapping, pool->mapping_pool);
1939 1966 mempool_destroy(pool->mapping_pool);
1940 1967 mempool_destroy(pool->endio_hook_pool);
  1968 + dm_deferred_set_destroy(pool->shared_read_ds);
  1969 + dm_deferred_set_destroy(pool->all_io_ds);
1941 1970 kfree(pool);
1942 1971 }
1943 1972  
... ... @@ -1976,7 +2005,7 @@
1976 2005 pool->sectors_per_block_shift = __ffs(block_size);
1977 2006 pool->low_water_blocks = 0;
1978 2007 pool_features_init(&pool->pf);
1979   - pool->prison = prison_create(PRISON_CELLS);
  2008 + pool->prison = dm_bio_prison_create(PRISON_CELLS);
1980 2009 if (!pool->prison) {
1981 2010 *error = "Error creating pool's bio prison";
1982 2011 err_p = ERR_PTR(-ENOMEM);
1983 2012  
... ... @@ -2012,9 +2041,21 @@
2012 2041 pool->low_water_triggered = 0;
2013 2042 pool->no_free_space = 0;
2014 2043 bio_list_init(&pool->retry_on_resume_list);
2015   - ds_init(&pool->shared_read_ds);
2016   - ds_init(&pool->all_io_ds);
2017 2044  
  2045 + pool->shared_read_ds = dm_deferred_set_create();
  2046 + if (!pool->shared_read_ds) {
  2047 + *error = "Error creating pool's shared read deferred set";
  2048 + err_p = ERR_PTR(-ENOMEM);
  2049 + goto bad_shared_read_ds;
  2050 + }
  2051 +
  2052 + pool->all_io_ds = dm_deferred_set_create();
  2053 + if (!pool->all_io_ds) {
  2054 + *error = "Error creating pool's all io deferred set";
  2055 + err_p = ERR_PTR(-ENOMEM);
  2056 + goto bad_all_io_ds;
  2057 + }
  2058 +
2018 2059 pool->next_mapping = NULL;
2019 2060 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
2020 2061 _new_mapping_cache);
2021 2062  
... ... @@ -2042,11 +2083,15 @@
2042 2083 bad_endio_hook_pool:
2043 2084 mempool_destroy(pool->mapping_pool);
2044 2085 bad_mapping_pool:
  2086 + dm_deferred_set_destroy(pool->all_io_ds);
  2087 +bad_all_io_ds:
  2088 + dm_deferred_set_destroy(pool->shared_read_ds);
  2089 +bad_shared_read_ds:
2045 2090 destroy_workqueue(pool->wq);
2046 2091 bad_wq:
2047 2092 dm_kcopyd_client_destroy(pool->copier);
2048 2093 bad_kcopyd_client:
2049   - prison_destroy(pool->prison);
  2094 + dm_bio_prison_destroy(pool->prison);
2050 2095 bad_prison:
2051 2096 kfree(pool);
2052 2097 bad_pool:
... ... @@ -2982,7 +3027,7 @@
2982 3027  
2983 3028 if (h->shared_read_entry) {
2984 3029 INIT_LIST_HEAD(&work);
2985   - ds_dec(h->shared_read_entry, &work);
  3030 + dm_deferred_entry_dec(h->shared_read_entry, &work);
2986 3031  
2987 3032 spin_lock_irqsave(&pool->lock, flags);
2988 3033 list_for_each_entry_safe(m, tmp, &work, list) {
... ... @@ -2995,7 +3040,7 @@
2995 3040  
2996 3041 if (h->all_io_entry) {
2997 3042 INIT_LIST_HEAD(&work);
2998   - ds_dec(h->all_io_entry, &work);
  3043 + dm_deferred_entry_dec(h->all_io_entry, &work);
2999 3044 spin_lock_irqsave(&pool->lock, flags);
3000 3045 list_for_each_entry_safe(m, tmp, &work, list)
3001 3046 list_add(&m->list, &pool->prepared_discards);
... ... @@ -3128,9 +3173,7 @@
3128 3173  
3129 3174 r = -ENOMEM;
3130 3175  
3131   - _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
3132   - if (!_cell_cache)
3133   - goto bad_cell_cache;
  3176 + dm_bio_prison_init();
3134 3177  
3135 3178 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
3136 3179 if (!_new_mapping_cache)
... ... @@ -3145,8 +3188,6 @@
3145 3188 bad_endio_hook_cache:
3146 3189 kmem_cache_destroy(_new_mapping_cache);
3147 3190 bad_new_mapping_cache:
3148   - kmem_cache_destroy(_cell_cache);
3149   -bad_cell_cache:
3150 3191 dm_unregister_target(&pool_target);
3151 3192 bad_pool_target:
3152 3193 dm_unregister_target(&thin_target);
... ... @@ -3159,7 +3200,7 @@
3159 3200 dm_unregister_target(&thin_target);
3160 3201 dm_unregister_target(&pool_target);
3161 3202  
3162   - kmem_cache_destroy(_cell_cache);
  3203 + dm_bio_prison_exit();
3163 3204 kmem_cache_destroy(_new_mapping_cache);
3164 3205 kmem_cache_destroy(_endio_hook_cache);
3165 3206 }
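
Beyond the renames, the deferred sets change from structs embedded in the pool to pointers returned by dm_deferred_set_create(), so teardown and the pool_create() error path must now free them explicitly. A minimal caller-side sketch of that lifecycle, using a hypothetical my_pool type and following the ordering shown in pool_create()/__pool_destroy() above:

/*
 * Illustrative sketch only (my_pool and my_pool_init/exit are hypothetical):
 * allocation and teardown order for the prison and deferred sets.
 */
struct my_pool {
        struct dm_bio_prison *prison;
        struct dm_deferred_set *shared_read_ds;
        struct dm_deferred_set *all_io_ds;
};

static int my_pool_init(struct my_pool *pool)
{
        pool->prison = dm_bio_prison_create(PRISON_CELLS);
        if (!pool->prison)
                return -ENOMEM;

        pool->shared_read_ds = dm_deferred_set_create();
        if (!pool->shared_read_ds)
                goto bad_shared_read_ds;

        pool->all_io_ds = dm_deferred_set_create();
        if (!pool->all_io_ds)
                goto bad_all_io_ds;

        return 0;

bad_all_io_ds:
        dm_deferred_set_destroy(pool->shared_read_ds);
bad_shared_read_ds:
        dm_bio_prison_destroy(pool->prison);
        return -ENOMEM;
}

static void my_pool_exit(struct my_pool *pool)
{
        dm_deferred_set_destroy(pool->all_io_ds);
        dm_deferred_set_destroy(pool->shared_read_ds);
        dm_bio_prison_destroy(pool->prison);
}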