Commit 3f5e0a34daed197aa55d0c6b466bb4cd03babb4f
1 parent: 3f6ef38110
Exists in master and in 13 other branches
bcache: Kill dead cgroup code
This hasn't been used or even enabled in ages.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Showing 5 changed files with 0 additions and 202 deletions (side-by-side diff)
drivers/md/bcache/Kconfig
... | ... | @@ -24,12 +24,4 @@ |
24 | 24 | Keeps all active closures in a linked list and provides a debugfs |
25 | 25 | interface to list them, which makes it possible to see asynchronous |
26 | 26 | operations that get stuck. |
27 | - | |
28 | -# cgroup code needs to be updated: | |
29 | -# | |
30 | -#config CGROUP_BCACHE | |
31 | -# bool "Cgroup controls for bcache" | |
32 | -# depends on BCACHE && BLK_CGROUP | |
33 | -# ---help--- | |
34 | -# TODO |
drivers/md/bcache/btree.c
... | ... | @@ -68,14 +68,10 @@ |
68 | 68 | * alloc_bucket() cannot fail. This should be true but is not completely |
69 | 69 | * obvious. |
70 | 70 | * |
71 | - * Make sure all allocations get charged to the root cgroup | |
72 | - * | |
73 | 71 | * Plugging? |
74 | 72 | * |
75 | 73 | * If data write is less than hard sector size of ssd, round up offset in open |
76 | 74 | * bucket to the next whole sector |
77 | - * | |
78 | - * Also lookup by cgroup in get_open_bucket() | |
79 | 75 | * |
80 | 76 | * Superblock needs to be fleshed out for multiple cache devices |
81 | 77 | * |
drivers/md/bcache/request.c
... | ... | @@ -12,11 +12,9 @@ |
12 | 12 | #include "request.h" |
13 | 13 | #include "writeback.h" |
14 | 14 | |
15 | -#include <linux/cgroup.h> | |
16 | 15 | #include <linux/module.h> |
17 | 16 | #include <linux/hash.h> |
18 | 17 | #include <linux/random.h> |
19 | -#include "blk-cgroup.h" | |
20 | 18 | |
21 | 19 | #include <trace/events/bcache.h> |
22 | 20 | |
23 | 21 | |
24 | 22 | |
... | ... | @@ -27,171 +25,13 @@ |
27 | 25 | |
28 | 26 | static void bch_data_insert_start(struct closure *); |
29 | 27 | |
30 | -/* Cgroup interface */ | |
31 | - | |
32 | -#ifdef CONFIG_CGROUP_BCACHE | |
33 | -static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 }; | |
34 | - | |
35 | -static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup) | |
36 | -{ | |
37 | - struct cgroup_subsys_state *css; | |
38 | - return cgroup && | |
39 | - (css = cgroup_subsys_state(cgroup, bcache_subsys_id)) | |
40 | - ? container_of(css, struct bch_cgroup, css) | |
41 | - : &bcache_default_cgroup; | |
42 | -} | |
43 | - | |
44 | -struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio) | |
45 | -{ | |
46 | - struct cgroup_subsys_state *css = bio->bi_css | |
47 | - ? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id) | |
48 | - : task_subsys_state(current, bcache_subsys_id); | |
49 | - | |
50 | - return css | |
51 | - ? container_of(css, struct bch_cgroup, css) | |
52 | - : &bcache_default_cgroup; | |
53 | -} | |
54 | - | |
55 | -static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft, | |
56 | - struct file *file, | |
57 | - char __user *buf, size_t nbytes, loff_t *ppos) | |
58 | -{ | |
59 | - char tmp[1024]; | |
60 | - int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes, | |
61 | - cgroup_to_bcache(cgrp)->cache_mode + 1); | |
62 | - | |
63 | - if (len < 0) | |
64 | - return len; | |
65 | - | |
66 | - return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); | |
67 | -} | |
68 | - | |
69 | -static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft, | |
70 | - const char *buf) | |
71 | -{ | |
72 | - int v = bch_read_string_list(buf, bch_cache_modes); | |
73 | - if (v < 0) | |
74 | - return v; | |
75 | - | |
76 | - cgroup_to_bcache(cgrp)->cache_mode = v - 1; | |
77 | - return 0; | |
78 | -} | |
79 | - | |
80 | -static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft) | |
81 | -{ | |
82 | - return cgroup_to_bcache(cgrp)->verify; | |
83 | -} | |
84 | - | |
85 | -static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val) | |
86 | -{ | |
87 | - cgroup_to_bcache(cgrp)->verify = val; | |
88 | - return 0; | |
89 | -} | |
90 | - | |
91 | -static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft) | |
92 | -{ | |
93 | - struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); | |
94 | - return atomic_read(&bcachecg->stats.cache_hits); | |
95 | -} | |
96 | - | |
97 | -static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft) | |
98 | -{ | |
99 | - struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); | |
100 | - return atomic_read(&bcachecg->stats.cache_misses); | |
101 | -} | |
102 | - | |
103 | -static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp, | |
104 | - struct cftype *cft) | |
105 | -{ | |
106 | - struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); | |
107 | - return atomic_read(&bcachecg->stats.cache_bypass_hits); | |
108 | -} | |
109 | - | |
110 | -static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp, | |
111 | - struct cftype *cft) | |
112 | -{ | |
113 | - struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); | |
114 | - return atomic_read(&bcachecg->stats.cache_bypass_misses); | |
115 | -} | |
116 | - | |
117 | -static struct cftype bch_files[] = { | |
118 | - { | |
119 | - .name = "cache_mode", | |
120 | - .read = cache_mode_read, | |
121 | - .write_string = cache_mode_write, | |
122 | - }, | |
123 | - { | |
124 | - .name = "verify", | |
125 | - .read_u64 = bch_verify_read, | |
126 | - .write_u64 = bch_verify_write, | |
127 | - }, | |
128 | - { | |
129 | - .name = "cache_hits", | |
130 | - .read_u64 = bch_cache_hits_read, | |
131 | - }, | |
132 | - { | |
133 | - .name = "cache_misses", | |
134 | - .read_u64 = bch_cache_misses_read, | |
135 | - }, | |
136 | - { | |
137 | - .name = "cache_bypass_hits", | |
138 | - .read_u64 = bch_cache_bypass_hits_read, | |
139 | - }, | |
140 | - { | |
141 | - .name = "cache_bypass_misses", | |
142 | - .read_u64 = bch_cache_bypass_misses_read, | |
143 | - }, | |
144 | - { } /* terminate */ | |
145 | -}; | |
146 | - | |
147 | -static void init_bch_cgroup(struct bch_cgroup *cg) | |
148 | -{ | |
149 | - cg->cache_mode = -1; | |
150 | -} | |
151 | - | |
152 | -static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup) | |
153 | -{ | |
154 | - struct bch_cgroup *cg; | |
155 | - | |
156 | - cg = kzalloc(sizeof(*cg), GFP_KERNEL); | |
157 | - if (!cg) | |
158 | - return ERR_PTR(-ENOMEM); | |
159 | - init_bch_cgroup(cg); | |
160 | - return &cg->css; | |
161 | -} | |
162 | - | |
163 | -static void bcachecg_destroy(struct cgroup *cgroup) | |
164 | -{ | |
165 | - struct bch_cgroup *cg = cgroup_to_bcache(cgroup); | |
166 | - kfree(cg); | |
167 | -} | |
168 | - | |
169 | -struct cgroup_subsys bcache_subsys = { | |
170 | - .create = bcachecg_create, | |
171 | - .destroy = bcachecg_destroy, | |
172 | - .subsys_id = bcache_subsys_id, | |
173 | - .name = "bcache", | |
174 | - .module = THIS_MODULE, | |
175 | -}; | |
176 | -EXPORT_SYMBOL_GPL(bcache_subsys); | |
177 | -#endif | |
178 | - | |
179 | 28 | static unsigned cache_mode(struct cached_dev *dc, struct bio *bio) |
180 | 29 | { |
181 | -#ifdef CONFIG_CGROUP_BCACHE | |
182 | - int r = bch_bio_to_cgroup(bio)->cache_mode; | |
183 | - if (r >= 0) | |
184 | - return r; | |
185 | -#endif | |
186 | 30 | return BDEV_CACHE_MODE(&dc->sb); |
187 | 31 | } |
188 | 32 | |
189 | 33 | static bool verify(struct cached_dev *dc, struct bio *bio) |
190 | 34 | { |
191 | -#ifdef CONFIG_CGROUP_BCACHE | |
192 | - if (bch_bio_to_cgroup(bio)->verify) | |
193 | - return true; | |
194 | -#endif | |
195 | 35 | return dc->verify; |
196 | 36 | } |
197 | 37 | |
... | ... | @@ -1305,9 +1145,6 @@ |
1305 | 1145 | |
1306 | 1146 | void bch_request_exit(void) |
1307 | 1147 | { |
1308 | -#ifdef CONFIG_CGROUP_BCACHE | |
1309 | - cgroup_unload_subsys(&bcache_subsys); | |
1310 | -#endif | |
1311 | 1148 | if (bch_search_cache) |
1312 | 1149 | kmem_cache_destroy(bch_search_cache); |
1313 | 1150 | } |
... | ... | @@ -1318,12 +1155,6 @@ |
1318 | 1155 | if (!bch_search_cache) |
1319 | 1156 | return -ENOMEM; |
1320 | 1157 | |
1321 | -#ifdef CONFIG_CGROUP_BCACHE | |
1322 | - cgroup_load_subsys(&bcache_subsys); | |
1323 | - init_bch_cgroup(&bcache_default_cgroup); | |
1324 | - | |
1325 | - cgroup_add_cftypes(&bcache_subsys, bch_files); | |
1326 | -#endif | |
1327 | 1158 | return 0; |
1328 | 1159 | } |
drivers/md/bcache/request.h
1 | 1 | #ifndef _BCACHE_REQUEST_H_ |
2 | 2 | #define _BCACHE_REQUEST_H_ |
3 | 3 | |
4 | -#include <linux/cgroup.h> | |
5 | - | |
6 | 4 | struct data_insert_op { |
7 | 5 | struct closure cl; |
8 | 6 | struct cache_set *c; |
... | ... | @@ -41,22 +39,6 @@ |
41 | 39 | void bch_flash_dev_request_init(struct bcache_device *d); |
42 | 40 | |
43 | 41 | extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache; |
44 | - | |
45 | -struct bch_cgroup { | |
46 | -#ifdef CONFIG_CGROUP_BCACHE | |
47 | - struct cgroup_subsys_state css; | |
48 | -#endif | |
49 | - /* | |
50 | - * We subtract one from the index into bch_cache_modes[], so that | |
51 | - * default == -1; this makes it so the rest match up with d->cache_mode, | |
52 | - * and we use d->cache_mode if cgrp->cache_mode < 0 | |
53 | - */ | |
54 | - short cache_mode; | |
55 | - bool verify; | |
56 | - struct cache_stat_collector stats; | |
57 | -}; | |
58 | - | |
59 | -struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio); | |
60 | 42 | |
61 | 43 | #endif /* _BCACHE_REQUEST_H_ */ |
drivers/md/bcache/stats.c
... | ... | @@ -201,9 +201,6 @@ |
201 | 201 | struct cached_dev *dc = container_of(d, struct cached_dev, disk); |
202 | 202 | mark_cache_stats(&dc->accounting.collector, hit, bypass); |
203 | 203 | mark_cache_stats(&c->accounting.collector, hit, bypass); |
204 | -#ifdef CONFIG_CGROUP_BCACHE | |
205 | - mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass); | |
206 | -#endif | |
207 | 204 | } |
208 | 205 | |
209 | 206 | void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d) |