Commit ca32aefc7f2539ed88d42763330d54ee3e61769a

Authored by Tejun Heo
Committed by Jens Axboe
1 parent 0a5a7d0e32

blkcg: use q and plid instead of opaque void * for blkio_group association

blkio_group is the association between a block cgroup and a queue for a
given policy.  Using an opaque void * for the association makes things
confusing and hinders factoring of common code.  Use request_queue *
and, if necessary, the policy id instead.

This will help block cgroup API cleanup.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
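
To make the shape of the change concrete, here is a minimal, compilable
user-space sketch (illustrative only; the struct and functions below are
simplified stand-ins, not the real kernel definitions): a blkio_group used
to be found by comparing an opaque void *key, and after this commit it is
found by comparing the request_queue pointer together with the policy id.

    #include <stddef.h>
    #include <stdio.h>

    struct request_queue { int id; };      /* stand-in for the kernel struct */

    enum blkio_policy_id { BLKIO_POLICY_PROP, BLKIO_POLICY_THROTL };

    struct blkio_group {
            struct request_queue *q;       /* was: void *key */
            enum blkio_policy_id plid;
            struct blkio_group *next;      /* stand-in for the hlist linkage */
    };

    /* New-style lookup: match on (queue, policy id) instead of an opaque key. */
    static struct blkio_group *
    lookup_group(struct blkio_group *head, struct request_queue *q,
                 enum blkio_policy_id plid)
    {
            struct blkio_group *blkg;

            for (blkg = head; blkg; blkg = blkg->next)
                    if (blkg->q == q && blkg->plid == plid)
                            return blkg;
            return NULL;
    }

    int main(void)
    {
            struct request_queue q0 = { 0 };
            struct blkio_group throtl = { &q0, BLKIO_POLICY_THROTL, NULL };
            struct blkio_group prop   = { &q0, BLKIO_POLICY_PROP, &throtl };

            /* The same queue can host one group per policy. */
            printf("%d\n", lookup_group(&prop, &q0, BLKIO_POLICY_THROTL) == &throtl);
            return 0;
    }

One upshot of keying on (q, plid) rather than on a per-policy private
pointer is that lookup no longer needs to know which private structure
(throtl_data or cfq_data) backs the group, which is the factoring of
common code the commit message refers to.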

Showing 5 changed files with 70 additions and 71 deletions

block/blk-cgroup.c
... ... @@ -129,7 +129,7 @@
129 129 if (blkiop->plid != blkg->plid)
130 130 continue;
131 131 if (blkiop->ops.blkio_update_group_weight_fn)
132   - blkiop->ops.blkio_update_group_weight_fn(blkg->key,
  132 + blkiop->ops.blkio_update_group_weight_fn(blkg->q,
133 133 blkg, weight);
134 134 }
135 135 }
136 136  
... ... @@ -147,12 +147,12 @@
147 147  
148 148 if (fileid == BLKIO_THROTL_read_bps_device
149 149 && blkiop->ops.blkio_update_group_read_bps_fn)
150   - blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
  150 + blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
151 151 blkg, bps);
152 152  
153 153 if (fileid == BLKIO_THROTL_write_bps_device
154 154 && blkiop->ops.blkio_update_group_write_bps_fn)
155   - blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
  155 + blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
156 156 blkg, bps);
157 157 }
158 158 }
159 159  
... ... @@ -170,12 +170,12 @@
170 170  
171 171 if (fileid == BLKIO_THROTL_read_iops_device
172 172 && blkiop->ops.blkio_update_group_read_iops_fn)
173   - blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
  173 + blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
174 174 blkg, iops);
175 175  
176 176 if (fileid == BLKIO_THROTL_write_iops_device
177 177 && blkiop->ops.blkio_update_group_write_iops_fn)
178   - blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
  178 + blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
179 179 blkg,iops);
180 180 }
181 181 }
182 182  
... ... @@ -478,14 +478,14 @@
478 478 EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
479 479  
480 480 void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
481   - struct blkio_group *blkg, void *key, dev_t dev,
  481 + struct blkio_group *blkg, struct request_queue *q, dev_t dev,
482 482 enum blkio_policy_id plid)
483 483 {
484 484 unsigned long flags;
485 485  
486 486 spin_lock_irqsave(&blkcg->lock, flags);
487 487 spin_lock_init(&blkg->stats_lock);
488   - rcu_assign_pointer(blkg->key, key);
  488 + rcu_assign_pointer(blkg->q, q);
489 489 blkg->blkcg_id = css_id(&blkcg->css);
490 490 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
491 491 blkg->plid = plid;
... ... @@ -531,18 +531,16 @@
531 531 EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
532 532  
533 533 /* called under rcu_read_lock(). */
534   -struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
  534 +struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
  535 + struct request_queue *q,
  536 + enum blkio_policy_id plid)
535 537 {
536 538 struct blkio_group *blkg;
537 539 struct hlist_node *n;
538   - void *__key;
539 540  
540   - hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
541   - __key = blkg->key;
542   - if (__key == key)
  541 + hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
  542 + if (blkg->q == q && blkg->plid == plid)
543 543 return blkg;
544   - }
545   -
546 544 return NULL;
547 545 }
548 546 EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
... ... @@ -1582,7 +1580,7 @@
1582 1580 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1583 1581 unsigned long flags;
1584 1582 struct blkio_group *blkg;
1585   - void *key;
  1583 + struct request_queue *q;
1586 1584 struct blkio_policy_type *blkiop;
1587 1585 struct blkio_policy_node *pn, *pntmp;
1588 1586  
... ... @@ -1597,7 +1595,7 @@
1597 1595  
1598 1596 blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
1599 1597 blkcg_node);
1600   - key = rcu_dereference(blkg->key);
  1598 + q = rcu_dereference(blkg->q);
1601 1599 __blkiocg_del_blkio_group(blkg);
1602 1600  
1603 1601 spin_unlock_irqrestore(&blkcg->lock, flags);
... ... @@ -1611,7 +1609,7 @@
1611 1609 list_for_each_entry(blkiop, &blkio_list, list) {
1612 1610 if (blkiop->plid != blkg->plid)
1613 1611 continue;
1614   - blkiop->ops.blkio_unlink_group_fn(key, blkg);
  1612 + blkiop->ops.blkio_unlink_group_fn(q, blkg);
1615 1613 }
1616 1614 spin_unlock(&blkio_list_lock);
1617 1615 } while (1);
block/blk-cgroup.h
... ... @@ -153,8 +153,8 @@
153 153 };
154 154  
155 155 struct blkio_group {
156   - /* An rcu protected unique identifier for the group */
157   - void *key;
  156 + /* Pointer to the associated request_queue, RCU protected */
  157 + struct request_queue __rcu *q;
158 158 struct hlist_node blkcg_node;
159 159 unsigned short blkcg_id;
160 160 /* Store cgroup path */
... ... @@ -202,17 +202,18 @@
202 202 extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
203 203 dev_t dev);
204 204  
205   -typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
  205 +typedef void (blkio_unlink_group_fn)(struct request_queue *q,
  206 + struct blkio_group *blkg);
206 207 typedef bool (blkio_clear_queue_fn)(struct request_queue *q);
207   -typedef void (blkio_update_group_weight_fn) (void *key,
  208 +typedef void (blkio_update_group_weight_fn)(struct request_queue *q,
208 209 struct blkio_group *blkg, unsigned int weight);
209   -typedef void (blkio_update_group_read_bps_fn) (void * key,
  210 +typedef void (blkio_update_group_read_bps_fn)(struct request_queue *q,
210 211 struct blkio_group *blkg, u64 read_bps);
211   -typedef void (blkio_update_group_write_bps_fn) (void *key,
  212 +typedef void (blkio_update_group_write_bps_fn)(struct request_queue *q,
212 213 struct blkio_group *blkg, u64 write_bps);
213   -typedef void (blkio_update_group_read_iops_fn) (void *key,
  214 +typedef void (blkio_update_group_read_iops_fn)(struct request_queue *q,
214 215 struct blkio_group *blkg, unsigned int read_iops);
215   -typedef void (blkio_update_group_write_iops_fn) (void *key,
  216 +typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q,
216 217 struct blkio_group *blkg, unsigned int write_iops);
217 218  
218 219 struct blkio_policy_ops {
... ... @@ -305,12 +306,13 @@
305 306 extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
306 307 extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
307 308 extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
308   - struct blkio_group *blkg, void *key, dev_t dev,
  309 + struct blkio_group *blkg, struct request_queue *q, dev_t dev,
309 310 enum blkio_policy_id plid);
310 311 extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
311 312 extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
312 313 extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
313   - void *key);
  314 + struct request_queue *q,
  315 + enum blkio_policy_id plid);
314 316 void blkiocg_update_timeslice_used(struct blkio_group *blkg,
315 317 unsigned long time,
316 318 unsigned long unaccounted_time);
block/blk-throttle.c
... ... @@ -252,7 +252,7 @@
252 252 __throtl_tg_fill_dev_details(td, tg);
253 253  
254 254 /* Add group onto cgroup list */
255   - blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
  255 + blkiocg_add_blkio_group(blkcg, &tg->blkg, td->queue,
256 256 tg->blkg.dev, BLKIO_POLICY_THROTL);
257 257  
258 258 tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
... ... @@ -288,7 +288,6 @@
288 288 throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
289 289 {
290 290 struct throtl_grp *tg = NULL;
291   - void *key = td;
292 291  
293 292 /*
294 293 * This is the common case when there are no blkio cgroups.
... ... @@ -297,7 +296,8 @@
297 296 if (blkcg == &blkio_root_cgroup)
298 297 tg = td->root_tg;
299 298 else
300   - tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
  299 + tg = tg_of_blkg(blkiocg_lookup_group(blkcg, td->queue,
  300 + BLKIO_POLICY_THROTL));
301 301  
302 302 __throtl_tg_fill_dev_details(td, tg);
303 303 return tg;
... ... @@ -1012,22 +1012,22 @@
1012 1012 * no new IO will come in this group. So get rid of this group as soon as
1013 1013 * any pending IO in the group is finished.
1014 1014 *
1015   - * This function is called under rcu_read_lock(). key is the rcu protected
1016   - * pointer. That means "key" is a valid throtl_data pointer as long as we are
1017   - * rcu read lock.
  1015 + * This function is called under rcu_read_lock(). @q is the rcu protected
  1016 + * pointer. That means @q is a valid request_queue pointer as long as we
  1017 + * are rcu read lock.
1018 1018 *
1019   - * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
  1019 + * @q was fetched from blkio_group under blkio_cgroup->lock. That means
1020 1020 * it should not be NULL as even if queue was going away, cgroup deltion
1021 1021 * path got to it first.
1022 1022 */
1023   -void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
  1023 +void throtl_unlink_blkio_group(struct request_queue *q,
  1024 + struct blkio_group *blkg)
1024 1025 {
1025 1026 unsigned long flags;
1026   - struct throtl_data *td = key;
1027 1027  
1028   - spin_lock_irqsave(td->queue->queue_lock, flags);
1029   - throtl_destroy_tg(td, tg_of_blkg(blkg));
1030   - spin_unlock_irqrestore(td->queue->queue_lock, flags);
  1028 + spin_lock_irqsave(q->queue_lock, flags);
  1029 + throtl_destroy_tg(q->td, tg_of_blkg(blkg));
  1030 + spin_unlock_irqrestore(q->queue_lock, flags);
1031 1031 }
1032 1032  
1033 1033 static bool throtl_clear_queue(struct request_queue *q)
... ... @@ -1054,52 +1054,48 @@
1054 1054 }
1055 1055  
1056 1056 /*
1057   - * For all update functions, key should be a valid pointer because these
  1057 + * For all update functions, @q should be a valid pointer because these
1058 1058 * update functions are called under blkcg_lock, that means, blkg is
1059   - * valid and in turn key is valid. queue exit path can not race because
  1059 + * valid and in turn @q is valid. queue exit path can not race because
1060 1060 * of blkcg_lock
1061 1061 *
1062 1062 * Can not take queue lock in update functions as queue lock under blkcg_lock
1063 1063 * is not allowed. Under other paths we take blkcg_lock under queue_lock.
1064 1064 */
1065   -static void throtl_update_blkio_group_read_bps(void *key,
  1065 +static void throtl_update_blkio_group_read_bps(struct request_queue *q,
1066 1066 struct blkio_group *blkg, u64 read_bps)
1067 1067 {
1068   - struct throtl_data *td = key;
1069 1068 struct throtl_grp *tg = tg_of_blkg(blkg);
1070 1069  
1071 1070 tg->bps[READ] = read_bps;
1072   - throtl_update_blkio_group_common(td, tg);
  1071 + throtl_update_blkio_group_common(q->td, tg);
1073 1072 }
1074 1073  
1075   -static void throtl_update_blkio_group_write_bps(void *key,
  1074 +static void throtl_update_blkio_group_write_bps(struct request_queue *q,
1076 1075 struct blkio_group *blkg, u64 write_bps)
1077 1076 {
1078   - struct throtl_data *td = key;
1079 1077 struct throtl_grp *tg = tg_of_blkg(blkg);
1080 1078  
1081 1079 tg->bps[WRITE] = write_bps;
1082   - throtl_update_blkio_group_common(td, tg);
  1080 + throtl_update_blkio_group_common(q->td, tg);
1083 1081 }
1084 1082  
1085   -static void throtl_update_blkio_group_read_iops(void *key,
  1083 +static void throtl_update_blkio_group_read_iops(struct request_queue *q,
1086 1084 struct blkio_group *blkg, unsigned int read_iops)
1087 1085 {
1088   - struct throtl_data *td = key;
1089 1086 struct throtl_grp *tg = tg_of_blkg(blkg);
1090 1087  
1091 1088 tg->iops[READ] = read_iops;
1092   - throtl_update_blkio_group_common(td, tg);
  1089 + throtl_update_blkio_group_common(q->td, tg);
1093 1090 }
1094 1091  
1095   -static void throtl_update_blkio_group_write_iops(void *key,
  1092 +static void throtl_update_blkio_group_write_iops(struct request_queue *q,
1096 1093 struct blkio_group *blkg, unsigned int write_iops)
1097 1094 {
1098   - struct throtl_data *td = key;
1099 1095 struct throtl_grp *tg = tg_of_blkg(blkg);
1100 1096  
1101 1097 tg->iops[WRITE] = write_iops;
1102   - throtl_update_blkio_group_common(td, tg);
  1098 + throtl_update_blkio_group_common(q->td, tg);
1103 1099 }
1104 1100  
1105 1101 static void throtl_shutdown_wq(struct request_queue *q)
... ... @@ -1306,7 +1302,7 @@
1306 1302 spin_unlock_irq(q->queue_lock);
1307 1303  
1308 1304 /*
1309   - * Wait for tg->blkg->key accessors to exit their grace periods.
  1305 + * Wait for tg->blkg->q accessors to exit their grace periods.
1310 1306 * Do this wait only if there are other undestroyed groups out
1311 1307 * there (other than root group). This can happen if cgroup deletion
1312 1308 * path claimed the responsibility of cleaning up a group before
block/cfq-iosched.c
... ... @@ -1020,7 +1020,8 @@
1020 1020 return NULL;
1021 1021 }
1022 1022  
1023   -static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
  1023 +static void cfq_update_blkio_group_weight(struct request_queue *q,
  1024 + struct blkio_group *blkg,
1024 1025 unsigned int weight)
1025 1026 {
1026 1027 struct cfq_group *cfqg = cfqg_of_blkg(blkg);
1027 1028  
... ... @@ -1043,10 +1044,10 @@
1043 1044 if (bdi->dev) {
1044 1045 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1045 1046 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
1046   - (void *)cfqd, MKDEV(major, minor));
  1047 + cfqd->queue, MKDEV(major, minor));
1047 1048 } else
1048 1049 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
1049   - (void *)cfqd, 0);
  1050 + cfqd->queue, 0);
1050 1051  
1051 1052 cfqd->nr_blkcg_linked_grps++;
1052 1053 cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
... ... @@ -1097,7 +1098,6 @@
1097 1098 cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
1098 1099 {
1099 1100 struct cfq_group *cfqg = NULL;
1100   - void *key = cfqd;
1101 1101 struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
1102 1102 unsigned int major, minor;
1103 1103  
... ... @@ -1108,7 +1108,8 @@
1108 1108 if (blkcg == &blkio_root_cgroup)
1109 1109 cfqg = &cfqd->root_group;
1110 1110 else
1111   - cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
  1111 + cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, cfqd->queue,
  1112 + BLKIO_POLICY_PROP));
1112 1113  
1113 1114 if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
1114 1115 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
... ... @@ -1247,21 +1248,22 @@
1247 1248 * any pending IO in the group is finished.
1248 1249 *
1249 1250 * This function is called under rcu_read_lock(). key is the rcu protected
1250   - * pointer. That means "key" is a valid cfq_data pointer as long as we are rcu
1251   - * read lock.
  1251 + * pointer. That means @q is a valid request_queue pointer as long as we
  1252 + * are rcu read lock.
1252 1253 *
1253   - * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
  1254 + * @q was fetched from blkio_group under blkio_cgroup->lock. That means
1254 1255 * it should not be NULL as even if elevator was exiting, cgroup deltion
1255 1256 * path got to it first.
1256 1257 */
1257   -static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
  1258 +static void cfq_unlink_blkio_group(struct request_queue *q,
  1259 + struct blkio_group *blkg)
1258 1260 {
1259   - unsigned long flags;
1260   - struct cfq_data *cfqd = key;
  1261 + struct cfq_data *cfqd = q->elevator->elevator_data;
  1262 + unsigned long flags;
1261 1263  
1262   - spin_lock_irqsave(cfqd->queue->queue_lock, flags);
  1264 + spin_lock_irqsave(q->queue_lock, flags);
1263 1265 cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
1264   - spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
  1266 + spin_unlock_irqrestore(q->queue_lock, flags);
1265 1267 }
1266 1268  
1267 1269 static struct elevator_type iosched_cfq;
... ... @@ -3718,7 +3720,7 @@
3718 3720 rcu_read_lock();
3719 3721  
3720 3722 cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
3721   - (void *)cfqd, 0);
  3723 + cfqd->queue, 0);
3722 3724 rcu_read_unlock();
3723 3725 cfqd->nr_blkcg_linked_grps++;
3724 3726  
block/cfq.h
... ... @@ -68,8 +68,9 @@
68 68 }
69 69  
70 70 static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
71   - struct blkio_group *blkg, void *key, dev_t dev) {
72   - blkiocg_add_blkio_group(blkcg, blkg, key, dev, BLKIO_POLICY_PROP);
  71 + struct blkio_group *blkg, struct request_queue *q, dev_t dev)
  72 +{
  73 + blkiocg_add_blkio_group(blkcg, blkg, q, dev, BLKIO_POLICY_PROP);
73 74 }
74 75  
75 76 static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
... ... @@ -105,7 +106,7 @@
105 106 static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
106 107  
107 108 static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
108   - struct blkio_group *blkg, void *key, dev_t dev) {}
  109 + struct blkio_group *blkg, struct request_queue *q, dev_t dev) {}
109 110 static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
110 111 {
111 112 return 0;