Commit 9b84cacd013996f244d85b3d873287c2a8f88658

Authored by Tejun Heo
Committed by Jens Axboe
1 parent 7e5a879449

block, cfq: restructure io_cq creation path for io_context interface cleanup

Add elevator_ops->elevator_init_icq_fn(), and restructure
cfq_create_cic(), renaming it to ioc_create_icq().

The new function takes the request_queue of interest, ensures
%current's io_context exists, allocates the icq from
elevator_type->icq_cache, handles generic init, calls the new elevator
operation for elevator-specific initialization, and returns a pointer
to the created or looked-up icq.  This leaves the cfq_icq_pool
variable without any user, so it is removed.

This prepares for io_context interface cleanup and doesn't introduce
any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Showing 2 changed files with 43 additions and 53 deletions Side-by-side Diff

... ... @@ -59,7 +59,6 @@
59 59 #define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1])
60 60  
61 61 static struct kmem_cache *cfq_pool;
62   -static struct kmem_cache *cfq_icq_pool;
63 62  
64 63 #define CFQ_PRIO_LISTS IOPRIO_BE_NR
65 64 #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
... ... @@ -2707,6 +2706,13 @@
2707 2706 cfq_put_queue(cfqq);
2708 2707 }
2709 2708  
  2709 +static void cfq_init_icq(struct io_cq *icq)
  2710 +{
  2711 + struct cfq_io_cq *cic = icq_to_cic(icq);
  2712 +
  2713 + cic->ttime.last_end_request = jiffies;
  2714 +}
  2715 +
2710 2716 static void cfq_exit_icq(struct io_cq *icq)
2711 2717 {
2712 2718 struct cfq_io_cq *cic = icq_to_cic(icq);
... ... @@ -2723,21 +2729,6 @@
2723 2729 }
2724 2730 }
2725 2731  
2726   -static struct cfq_io_cq *cfq_alloc_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
2727   -{
2728   - struct cfq_io_cq *cic;
2729   -
2730   - cic = kmem_cache_alloc_node(cfq_icq_pool, gfp_mask | __GFP_ZERO,
2731   - cfqd->queue->node);
2732   - if (cic) {
2733   - cic->ttime.last_end_request = jiffies;
2734   - INIT_LIST_HEAD(&cic->icq.q_node);
2735   - INIT_HLIST_NODE(&cic->icq.ioc_node);
2736   - }
2737   -
2738   - return cic;
2739   -}
2740   -
2741 2732 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2742 2733 {
2743 2734 struct task_struct *tsk = current;
2744 2735  
2745 2736  
2746 2737  
2747 2738  
2748 2739  
2749 2740  
2750 2741  
2751 2742  
2752 2743  
2753 2744  
2754 2745  
2755 2746  
2756 2747  
... ... @@ -2945,64 +2936,62 @@
2945 2936 }
2946 2937  
2947 2938 /**
2948   - * cfq_create_cic - create and link a cfq_io_cq
2949   - * @cfqd: cfqd of interest
  2939 + * ioc_create_icq - create and link io_cq
  2940 + * @q: request_queue of interest
2950 2941 * @gfp_mask: allocation mask
2951 2942 *
2952   - * Make sure cfq_io_cq linking %current->io_context and @cfqd exists. If
2953   - * ioc and/or cic doesn't exist, they will be created using @gfp_mask.
  2943 + * Make sure io_cq linking %current->io_context and @q exists. If either
  2944 + * io_context and/or icq don't exist, they will be created using @gfp_mask.
  2945 + *
  2946 + * The caller is responsible for ensuring @ioc won't go away and @q is
  2947 + * alive and will stay alive until this function returns.
2954 2948 */
2955   -static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
  2949 +static struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
2956 2950 {
2957   - struct request_queue *q = cfqd->queue;
2958   - struct io_cq *icq = NULL;
2959   - struct cfq_io_cq *cic;
  2951 + struct elevator_type *et = q->elevator->type;
2960 2952 struct io_context *ioc;
2961   - int ret = -ENOMEM;
  2953 + struct io_cq *icq;
2962 2954  
2963   - might_sleep_if(gfp_mask & __GFP_WAIT);
2964   -
2965 2955 /* allocate stuff */
2966 2956 ioc = create_io_context(current, gfp_mask, q->node);
2967 2957 if (!ioc)
2968   - goto out;
  2958 + return NULL;
2969 2959  
2970   - cic = cfq_alloc_cic(cfqd, gfp_mask);
2971   - if (!cic)
2972   - goto out;
2973   - icq = &cic->icq;
  2960 + icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
  2961 + q->node);
  2962 + if (!icq)
  2963 + return NULL;
2974 2964  
2975   - ret = radix_tree_preload(gfp_mask);
2976   - if (ret)
2977   - goto out;
  2965 + if (radix_tree_preload(gfp_mask) < 0) {
  2966 + kmem_cache_free(et->icq_cache, icq);
  2967 + return NULL;
  2968 + }
2978 2969  
2979 2970 icq->ioc = ioc;
2980   - icq->q = cfqd->queue;
  2971 + icq->q = q;
  2972 + INIT_LIST_HEAD(&icq->q_node);
  2973 + INIT_HLIST_NODE(&icq->ioc_node);
2981 2974  
2982 2975 /* lock both q and ioc and try to link @icq */
2983 2976 spin_lock_irq(q->queue_lock);
2984 2977 spin_lock(&ioc->lock);
2985 2978  
2986   - ret = radix_tree_insert(&ioc->icq_tree, q->id, icq);
2987   - if (likely(!ret)) {
  2979 + if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
2988 2980 hlist_add_head(&icq->ioc_node, &ioc->icq_list);
2989 2981 list_add(&icq->q_node, &q->icq_list);
2990   - icq = NULL;
2991   - } else if (ret == -EEXIST) {
2992   - /* someone else already did it */
2993   - ret = 0;
  2982 + if (et->ops.elevator_init_icq_fn)
  2983 + et->ops.elevator_init_icq_fn(icq);
  2984 + } else {
  2985 + kmem_cache_free(et->icq_cache, icq);
  2986 + icq = ioc_lookup_icq(ioc, q);
  2987 + if (!icq)
  2988 + printk(KERN_ERR "cfq: icq link failed!\n");
2994 2989 }
2995 2990  
2996 2991 spin_unlock(&ioc->lock);
2997 2992 spin_unlock_irq(q->queue_lock);
2998   -
2999 2993 radix_tree_preload_end();
3000   -out:
3001   - if (ret)
3002   - printk(KERN_ERR "cfq: icq link failed!\n");
3003   - if (icq)
3004   - kmem_cache_free(cfq_icq_pool, icq);
3005   - return ret;
  2994 + return icq;
3006 2995 }
3007 2996  
3008 2997 /**
... ... @@ -3022,7 +3011,6 @@
3022 3011 struct request_queue *q = cfqd->queue;
3023 3012 struct cfq_io_cq *cic = NULL;
3024 3013 struct io_context *ioc;
3025   - int err;
3026 3014  
3027 3015 lockdep_assert_held(q->queue_lock);
3028 3016  
3029 3017  
... ... @@ -3037,9 +3025,9 @@
3037 3025  
3038 3026 /* slow path - unlock, create missing ones and retry */
3039 3027 spin_unlock_irq(q->queue_lock);
3040   - err = cfq_create_cic(cfqd, gfp_mask);
  3028 + cic = icq_to_cic(ioc_create_icq(q, gfp_mask));
3041 3029 spin_lock_irq(q->queue_lock);
3042   - if (err)
  3030 + if (!cic)
3043 3031 return NULL;
3044 3032 }
3045 3033  
... ... @@ -3975,6 +3963,7 @@
3975 3963 .elevator_completed_req_fn = cfq_completed_request,
3976 3964 .elevator_former_req_fn = elv_rb_former_request,
3977 3965 .elevator_latter_req_fn = elv_rb_latter_request,
  3966 + .elevator_init_icq_fn = cfq_init_icq,
3978 3967 .elevator_exit_icq_fn = cfq_exit_icq,
3979 3968 .elevator_set_req_fn = cfq_set_request,
3980 3969 .elevator_put_req_fn = cfq_put_request,
... ... @@ -4028,7 +4017,6 @@
4028 4017 kmem_cache_destroy(cfq_pool);
4029 4018 return ret;
4030 4019 }
4031   - cfq_icq_pool = iosched_cfq.icq_cache;
4032 4020  
4033 4021 blkio_policy_register(&blkio_policy_cfq);
4034 4022  
include/linux/elevator.h
... ... @@ -26,6 +26,7 @@
26 26 typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
27 27 typedef int (elevator_may_queue_fn) (struct request_queue *, int);
28 28  
  29 +typedef void (elevator_init_icq_fn) (struct io_cq *);
29 30 typedef void (elevator_exit_icq_fn) (struct io_cq *);
30 31 typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
31 32 typedef void (elevator_put_req_fn) (struct request *);
... ... @@ -59,6 +60,7 @@
59 60 elevator_request_list_fn *elevator_former_req_fn;
60 61 elevator_request_list_fn *elevator_latter_req_fn;
61 62  
  63 + elevator_init_icq_fn *elevator_init_icq_fn;
62 64 elevator_exit_icq_fn *elevator_exit_icq_fn;
63 65  
64 66 elevator_set_req_fn *elevator_set_req_fn;