Commit 538c2bc4ec400e9d8eda31246c7537cabc398dd0

Authored by Joe Thornber
Committed by Greg Kroah-Hartman
1 parent 7bf9ed7e78

dm cache: fix problematic dual use of a single migration count variable

commit a59db67656021fa212e9b95a583f13c34eb67cd9 upstream.

Introduce a new variable to count the number of allocated migration
structures.  The existing variable cache->nr_migrations became
overloaded.  It was used to:

 i) track the number of migrations in flight for the purposes of
    quiescing during suspend.

 ii) estimate the amount of background IO occurring.

Recent discard changes meant that REQ_DISCARD bios are processed with
a migration.  Discards are not background IO, so nr_migrations was not
incremented for them; however, quiescing waits for nr_migrations to
reach zero, so it could complete early while a discard migration was
still in flight.
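
To make the hazard concrete, here is a minimal sketch of the old
single-counter scheme (condensed, not the driver's verbatim code;
process_discard_bio_sketch() is a hypothetical reduction of the real
discard path):

	/* Old scheme: nr_migrations counts background io only. */

	static void process_discard_bio_sketch(struct cache *cache)
	{
		struct dm_cache_migration *mg;

		mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
		if (!mg)
			return;

		/*
		 * A discard is foreground io, so nr_migrations is
		 * deliberately NOT incremented here ...
		 */
		quiesce_migration(mg);
	}

	static void wait_for_migrations(struct cache *cache)
	{
		/*
		 * ... which means this wait can return while the
		 * discard migration above is still in flight.
		 */
		wait_event(cache->migration_wait,
			   !atomic_read(&cache->nr_migrations));
	}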

(i) is now handled with a new variable cache->nr_allocated_migrations.
cache->nr_migrations has been renamed cache->nr_io_migrations.
cleanup_migration() is now called free_io_migration(), since it
decrements that variable.
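
Condensed to the call pattern (identifiers as in the patch below; the
ellipsis stands for the migration actually running):

	mg = alloc_migration(cache);  /* nr_allocated_migrations++ */
	inc_io_migrations(cache);     /* background io only, e.g. promotion,
	                                 writeback; skipped for discards */
	...
	free_io_migration(mg);        /* dec_io_migrations(), then
	                                 free_migration(), which wakes
	                                 migration_wait on the last free */

Suspend-time quiescing waits on nr_allocated_migrations, so it now covers
every allocated migration, discards included, while the bandwidth check in
spare_migration_bandwidth() reads only the background-io count.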

Also, remove the unused cache->next_migration variable that got replaced
with prealloc_structs a while ago.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Showing 1 changed file with 50 additions and 39 deletions

drivers/md/dm-cache-target.c
... ... @@ -222,8 +222,14 @@
222 222 struct list_head need_commit_migrations;
223 223 sector_t migration_threshold;
224 224 wait_queue_head_t migration_wait;
225   - atomic_t nr_migrations;
  225 + atomic_t nr_allocated_migrations;
226 226  
  227 + /*
  228 + * The number of in flight migrations that are performing
  229 + * background io. eg, promotion, writeback.
  230 + */
  231 + atomic_t nr_io_migrations;
  232 +
227 233 wait_queue_head_t quiescing_wait;
228 234 atomic_t quiescing;
229 235 atomic_t quiescing_ack;
... ... @@ -258,7 +264,6 @@
258 264 struct dm_deferred_set *all_io_ds;
259 265  
260 266 mempool_t *migration_pool;
261   - struct dm_cache_migration *next_migration;
262 267  
263 268 struct dm_cache_policy *policy;
264 269 unsigned policy_nr_args;
... ... @@ -349,10 +354,31 @@
349 354 dm_bio_prison_free_cell(cache->prison, cell);
350 355 }
351 356  
  357 +static struct dm_cache_migration *alloc_migration(struct cache *cache)
  358 +{
  359 + struct dm_cache_migration *mg;
  360 +
  361 + mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
  362 + if (mg) {
  363 + mg->cache = cache;
  364 + atomic_inc(&mg->cache->nr_allocated_migrations);
  365 + }
  366 +
  367 + return mg;
  368 +}
  369 +
  370 +static void free_migration(struct dm_cache_migration *mg)
  371 +{
  372 + if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
  373 + wake_up(&mg->cache->migration_wait);
  374 +
  375 + mempool_free(mg, mg->cache->migration_pool);
  376 +}
  377 +
352 378 static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
353 379 {
354 380 if (!p->mg) {
355   - p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
  381 + p->mg = alloc_migration(cache);
356 382 if (!p->mg)
357 383 return -ENOMEM;
358 384 }
... ... @@ -381,7 +407,7 @@
381 407 free_prison_cell(cache, p->cell1);
382 408  
383 409 if (p->mg)
384   - mempool_free(p->mg, cache->migration_pool);
  410 + free_migration(p->mg);
385 411 }
386 412  
387 413 static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
... ... @@ -817,26 +843,16 @@
817 843 * Migration covers moving data from the origin device to the cache, or
818 844 * vice versa.
819 845 *--------------------------------------------------------------*/
820   -static void free_migration(struct dm_cache_migration *mg)
  846 +static void inc_io_migrations(struct cache *cache)
821 847 {
822   - mempool_free(mg, mg->cache->migration_pool);
  848 + atomic_inc(&cache->nr_io_migrations);
823 849 }
824 850  
825   -static void inc_nr_migrations(struct cache *cache)
  851 +static void dec_io_migrations(struct cache *cache)
826 852 {
827   - atomic_inc(&cache->nr_migrations);
  853 + atomic_dec(&cache->nr_io_migrations);
828 854 }
829 855  
830   -static void dec_nr_migrations(struct cache *cache)
831   -{
832   - atomic_dec(&cache->nr_migrations);
833   -
834   - /*
835   - * Wake the worker in case we're suspending the target.
836   - */
837   - wake_up(&cache->migration_wait);
838   -}
839   -
840 856 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
841 857 bool holder)
842 858 {
... ... @@ -857,11 +873,10 @@
857 873 wake_worker(cache);
858 874 }
859 875  
860   -static void cleanup_migration(struct dm_cache_migration *mg)
  876 +static void free_io_migration(struct dm_cache_migration *mg)
861 877 {
862   - struct cache *cache = mg->cache;
  878 + dec_io_migrations(mg->cache);
863 879 free_migration(mg);
864   - dec_nr_migrations(cache);
865 880 }
866 881  
867 882 static void migration_failure(struct dm_cache_migration *mg)
... ... @@ -886,7 +901,7 @@
886 901 cell_defer(cache, mg->new_ocell, true);
887 902 }
888 903  
889   - cleanup_migration(mg);
  904 + free_io_migration(mg);
890 905 }
891 906  
892 907 static void migration_success_pre_commit(struct dm_cache_migration *mg)
... ... @@ -897,7 +912,7 @@
897 912 if (mg->writeback) {
898 913 clear_dirty(cache, mg->old_oblock, mg->cblock);
899 914 cell_defer(cache, mg->old_ocell, false);
900   - cleanup_migration(mg);
  915 + free_io_migration(mg);
901 916 return;
902 917  
903 918 } else if (mg->demote) {
... ... @@ -907,14 +922,14 @@
907 922 mg->old_oblock);
908 923 if (mg->promote)
909 924 cell_defer(cache, mg->new_ocell, true);
910   - cleanup_migration(mg);
  925 + free_io_migration(mg);
911 926 return;
912 927 }
913 928 } else {
914 929 if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
915 930 DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
916 931 policy_remove_mapping(cache->policy, mg->new_oblock);
917   - cleanup_migration(mg);
  932 + free_io_migration(mg);
918 933 return;
919 934 }
920 935 }
... ... @@ -947,7 +962,7 @@
947 962 } else {
948 963 if (mg->invalidate)
949 964 policy_remove_mapping(cache->policy, mg->old_oblock);
950   - cleanup_migration(mg);
  965 + free_io_migration(mg);
951 966 }
952 967  
953 968 } else {
... ... @@ -962,7 +977,7 @@
962 977 bio_endio(mg->new_ocell->holder, 0);
963 978 cell_defer(cache, mg->new_ocell, false);
964 979 }
965   - cleanup_migration(mg);
  980 + free_io_migration(mg);
966 981 }
967 982 }
968 983  
... ... @@ -1178,7 +1193,7 @@
1178 1193 mg->new_ocell = cell;
1179 1194 mg->start_jiffies = jiffies;
1180 1195  
1181   - inc_nr_migrations(cache);
  1196 + inc_io_migrations(cache);
1182 1197 quiesce_migration(mg);
1183 1198 }
1184 1199  
... ... @@ -1201,7 +1216,7 @@
1201 1216 mg->new_ocell = NULL;
1202 1217 mg->start_jiffies = jiffies;
1203 1218  
1204   - inc_nr_migrations(cache);
  1219 + inc_io_migrations(cache);
1205 1220 quiesce_migration(mg);
1206 1221 }
1207 1222  
... ... @@ -1227,7 +1242,7 @@
1227 1242 mg->new_ocell = new_ocell;
1228 1243 mg->start_jiffies = jiffies;
1229 1244  
1230   - inc_nr_migrations(cache);
  1245 + inc_io_migrations(cache);
1231 1246 quiesce_migration(mg);
1232 1247 }
1233 1248  
... ... @@ -1254,7 +1269,7 @@
1254 1269 mg->new_ocell = NULL;
1255 1270 mg->start_jiffies = jiffies;
1256 1271  
1257   - inc_nr_migrations(cache);
  1272 + inc_io_migrations(cache);
1258 1273 quiesce_migration(mg);
1259 1274 }
1260 1275  
... ... @@ -1320,7 +1335,7 @@
1320 1335  
1321 1336 static bool spare_migration_bandwidth(struct cache *cache)
1322 1337 {
1323   - sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
  1338 + sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
1324 1339 cache->sectors_per_block;
1325 1340 return current_volume < cache->migration_threshold;
1326 1341 }
... ... @@ -1670,7 +1685,7 @@
1670 1685  
1671 1686 static void wait_for_migrations(struct cache *cache)
1672 1687 {
1673   - wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
  1688 + wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
1674 1689 }
1675 1690  
1676 1691 static void stop_worker(struct cache *cache)
... ... @@ -1782,9 +1797,6 @@
1782 1797 {
1783 1798 unsigned i;
1784 1799  
1785   - if (cache->next_migration)
1786   - mempool_free(cache->next_migration, cache->migration_pool);
1787   -
1788 1800 if (cache->migration_pool)
1789 1801 mempool_destroy(cache->migration_pool);
1790 1802  
... ... @@ -2292,7 +2304,8 @@
2292 2304 INIT_LIST_HEAD(&cache->quiesced_migrations);
2293 2305 INIT_LIST_HEAD(&cache->completed_migrations);
2294 2306 INIT_LIST_HEAD(&cache->need_commit_migrations);
2295   - atomic_set(&cache->nr_migrations, 0);
  2307 + atomic_set(&cache->nr_allocated_migrations, 0);
  2308 + atomic_set(&cache->nr_io_migrations, 0);
2296 2309 init_waitqueue_head(&cache->migration_wait);
2297 2310  
2298 2311 init_waitqueue_head(&cache->quiescing_wait);
... ... @@ -2350,8 +2363,6 @@
2350 2363 *error = "Error creating cache's migration mempool";
2351 2364 goto bad;
2352 2365 }
2353   -
2354   - cache->next_migration = NULL;
2355 2366  
2356 2367 cache->need_tick_bio = true;
2357 2368 cache->sized = false;