Commit b50b636bce6293fa858cc7ff6c3ffe4920d90006

Authored by Tejun Heo
Committed by Jens Axboe
1 parent b9a1920837

block, cfq: kill ioc_gone

Now that cic's are immediately unlinked under both locks, there's no
need to count and drain cic's before module unload.  Instead, RCU
callback completion is waited for with rcu_barrier().

While at it, remove residual RCU operations on cic_list.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
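
For background, the pattern this commit moves to, as a minimal sketch
(hypothetical foo_* names, not the CFQ code itself): objects are returned
to a slab cache from an RCU callback, and module exit only needs
rcu_barrier(), which blocks until all previously queued RCU callbacks
have finished, before the cache and the module text go away.

struct foo {
	struct rcu_head rcu_head;
	/* ... payload ... */
};

static struct kmem_cache *foo_pool;

/* RCU callback: runs once a grace period has elapsed */
static void foo_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(foo_pool, container_of(head, struct foo, rcu_head));
}

static void foo_release(struct foo *f)
{
	call_rcu(&f->rcu_head, foo_free_rcu);
}

static void __exit foo_exit(void)
{
	/* wait out every pending foo_free_rcu() invocation */
	rcu_barrier();
	kmem_cache_destroy(foo_pool);
}

This is why the per-cpu cfq_ioc_count and the ioc_gone completion removed
below are unnecessary: rcu_barrier() already guarantees that no
cfq_cic_free_rcu() call is still in flight when cfq_slab_kill() runs.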

Showing 2 changed files with 5 additions and 55 deletions

block/cfq-iosched.c

@@ -62,10 +62,6 @@
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
 
-static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
-static struct completion *ioc_gone;
-static DEFINE_SPINLOCK(ioc_gone_lock);
-
 #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
@@ -2671,26 +2667,8 @@
 
 static void cfq_cic_free_rcu(struct rcu_head *head)
 {
-	struct cfq_io_context *cic;
-
-	cic = container_of(head, struct cfq_io_context, rcu_head);
-
-	kmem_cache_free(cfq_ioc_pool, cic);
-	elv_ioc_count_dec(cfq_ioc_count);
-
-	if (ioc_gone) {
-		/*
-		 * CFQ scheduler is exiting, grab exit lock and check
-		 * the pending io context count.  If it hits zero,
-		 * complete ioc_gone and set it back to NULL
-		 */
-		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
-			complete(ioc_gone);
-			ioc_gone = NULL;
-		}
-		spin_unlock(&ioc_gone_lock);
-	}
+	kmem_cache_free(cfq_ioc_pool,
+			container_of(head, struct cfq_io_context, rcu_head));
 }
 
 static void cfq_cic_free(struct cfq_io_context *cic)
@@ -2705,7 +2683,7 @@
 
 	BUG_ON(!(dead_key & CIC_DEAD_KEY));
 	radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
-	hlist_del_rcu(&cic->cic_list);
+	hlist_del(&cic->cic_list);
 	cfq_cic_free(cic);
 }
 
@@ -2782,7 +2760,6 @@
 		INIT_HLIST_NODE(&cic->cic_list);
 		cic->exit = cfq_exit_cic;
 		cic->release = cfq_release_cic;
-		elv_ioc_count_inc(cfq_ioc_count);
 	}
 
 	return cic;
@@ -3072,7 +3049,7 @@
 
 	ret = radix_tree_insert(&ioc->radix_root, q->id, cic);
 	if (likely(!ret)) {
-		hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
+		hlist_add_head(&cic->cic_list, &ioc->cic_list);
 		list_add(&cic->queue_list, &cfqd->cic_list);
 		cic = NULL;
 	} else if (ret == -EEXIST) {
 
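The two cic_list hunks above are the "residual RCU operations" from the
commit message: with cic's now only added and removed under the relevant
locks and no lockless traversal remaining, the plain hlist primitives are
enough. A hedged sketch of that pattern, with hypothetical bar_* names:

static DEFINE_SPINLOCK(bar_lock);
static HLIST_HEAD(bar_list);

struct bar {
	struct hlist_node node;
};

/*
 * Every traversal of bar_list also takes bar_lock, so there is no
 * RCU reader that hlist_add_head_rcu()/hlist_del_rcu() would need
 * to publish against; the plain variants are sufficient.
 */
static void bar_insert(struct bar *b)
{
	spin_lock(&bar_lock);
	hlist_add_head(&b->node, &bar_list);
	spin_unlock(&bar_lock);
}

static void bar_remove(struct bar *b)
{
	spin_lock(&bar_lock);
	hlist_del(&b->node);
	spin_unlock(&bar_lock);
}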
@@ -4156,19 +4133,9 @@
 
 static void __exit cfq_exit(void)
 {
-	DECLARE_COMPLETION_ONSTACK(all_gone);
 	blkio_policy_unregister(&blkio_policy_cfq);
 	elv_unregister(&iosched_cfq);
-	ioc_gone = &all_gone;
-	/* ioc_gone's update must be visible before reading ioc_count */
-	smp_wmb();
-
-	/*
-	 * this also protects us from entering cfq_slab_kill() with
-	 * pending RCU callbacks
-	 */
-	if (elv_ioc_count_read(cfq_ioc_count))
-		wait_for_completion(&all_gone);
+	rcu_barrier();	/* make sure all cic RCU frees are complete */
 	cfq_slab_kill();
 }
 
include/linux/elevator.h
@@ -196,23 +196,6 @@
 	INIT_LIST_HEAD(&(rq)->csd.list);	\
 	} while (0)
 
-/*
- * io context count accounting
- */
-#define elv_ioc_count_mod(name, __val)	this_cpu_add(name, __val)
-#define elv_ioc_count_inc(name)		this_cpu_inc(name)
-#define elv_ioc_count_dec(name)		this_cpu_dec(name)
-
-#define elv_ioc_count_read(name)				\
-({								\
-	unsigned long __val = 0;				\
-	int __cpu;						\
-	smp_wmb();						\
-	for_each_possible_cpu(__cpu)				\
-		__val += per_cpu(name, __cpu);			\
-	__val;							\
-})
-
 #endif /* CONFIG_BLOCK */
 #endif