Commit 1e01c968db3d0aebd48e31db15f24516b03128df

Authored by Konrad Rzeszutek Wilk
Committed by Linus Torvalds
1 parent 905cd0e1bf

frontswap: make frontswap_init use a pointer for the ops

This simplifies the code in frontswap - we can get rid of the
'backend_registered' test and instead check against frontswap_ops.

[v1: Rebase on top of 703ba7fe5e0 (ramster->zcache move)]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Bob Liu <lliubbo@gmail.com>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Andor Daam <andor.daam@googlemail.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Florian Schmaus <fschmaus@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Stefan Hengelein <ilendir@googlemail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
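
Before the diff, a condensed, self-contained sketch of the pattern this commit moves to. The struct layout and the register_ops/do_store helpers below are illustrative stand-ins, not the kernel code itself; only the idea - a NULL-testable frontswap_ops pointer replacing the backend_registered flag plus a by-value ops copy - comes from the commit.

    struct frontswap_ops {
    	void (*init)(unsigned type);
    	int (*store)(unsigned type, unsigned long offset, void *page);
    };

    /* Before: a by-value copy of the ops plus a separate registration flag.
     *   static struct frontswap_ops frontswap_ops;
     *   static bool backend_registered;
     * After: a single pointer; NULL means "no backend registered yet".
     */
    static struct frontswap_ops *frontswap_ops;

    static struct frontswap_ops *register_ops(struct frontswap_ops *ops)
    {
    	struct frontswap_ops *old = frontswap_ops;

    	/* ... run ops->init() for swap areas that showed up early ... */

    	/* Publish the pointer only after the init calls, so a store/load
    	 * path that tests frontswap_ops != NULL never sees a backend whose
    	 * init has not run (the role barrier() plays in mm/frontswap.c).
    	 */
    	frontswap_ops = ops;
    	return old;		/* non-NULL: we overrode a previous backend */
    }

    static int do_store(unsigned type, unsigned long offset, void *page)
    {
    	if (!frontswap_ops)	/* replaces the backend_registered test */
    		return -1;
    	return frontswap_ops->store(type, offset, page);
    }

The ordering comment mirrors the reasoning kept in mm/frontswap.c below: the pointer is assigned only after the deferred init calls have run.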

Showing 4 changed files with 26 additions and 28 deletions

drivers/staging/zcache/zcache-main.c
... ... @@ -1707,9 +1707,9 @@
1707 1707 .init = zcache_frontswap_init
1708 1708 };
1709 1709  
1710   -struct frontswap_ops zcache_frontswap_register_ops(void)
  1710 +struct frontswap_ops *zcache_frontswap_register_ops(void)
1711 1711 {
1712   - struct frontswap_ops old_ops =
  1712 + struct frontswap_ops *old_ops =
1713 1713 frontswap_register_ops(&zcache_frontswap_ops);
1714 1714  
1715 1715 return old_ops;
... ... @@ -1874,7 +1874,7 @@
1874 1874 pr_warn("%s: cleancache_ops overridden\n", namestr);
1875 1875 }
1876 1876 if (zcache_enabled && !disable_frontswap) {
1877   - struct frontswap_ops old_ops;
  1877 + struct frontswap_ops *old_ops;
1878 1878  
1879 1879 old_ops = zcache_frontswap_register_ops();
1880 1880 if (frontswap_has_exclusive_gets)
... ... @@ -1886,7 +1886,7 @@
1886 1886 namestr, frontswap_has_exclusive_gets,
1887 1887 !disable_frontswap_ignore_nonactive);
1888 1888 #endif
1889   - if (old_ops.init != NULL)
  1889 + if (old_ops != NULL)
1890 1890 pr_warn("%s: frontswap_ops overridden\n", namestr);
1891 1891 }
1892 1892 if (ramster_enabled)
drivers/xen/tmem.c
... ... @@ -362,7 +362,7 @@
362 362 }
363 363 __setup("nofrontswap", no_frontswap);
364 364  
365   -static struct frontswap_ops __initdata tmem_frontswap_ops = {
  365 +static struct frontswap_ops tmem_frontswap_ops = {
366 366 .store = tmem_frontswap_store,
367 367 .load = tmem_frontswap_load,
368 368 .invalidate_page = tmem_frontswap_flush_page,
369 369  
... ... @@ -378,11 +378,11 @@
378 378 #ifdef CONFIG_FRONTSWAP
379 379 if (tmem_enabled && use_frontswap) {
380 380 char *s = "";
381   - struct frontswap_ops old_ops =
  381 + struct frontswap_ops *old_ops =
382 382 frontswap_register_ops(&tmem_frontswap_ops);
383 383  
384 384 tmem_frontswap_poolid = -1;
385   - if (old_ops.init != NULL)
  385 + if (old_ops)
386 386 s = " (WARNING: frontswap_ops overridden)";
387 387 printk(KERN_INFO "frontswap enabled, RAM provided by "
388 388 "Xen Transcendent Memory%s\n", s);
include/linux/frontswap.h
... ... @@ -14,7 +14,7 @@
14 14 };
15 15  
16 16 extern bool frontswap_enabled;
17   -extern struct frontswap_ops
  17 +extern struct frontswap_ops *
18 18 frontswap_register_ops(struct frontswap_ops *ops);
19 19 extern void frontswap_shrink(unsigned long);
20 20 extern unsigned long frontswap_curr_pages(void);
mm/frontswap.c
... ... @@ -24,7 +24,7 @@
24 24 * frontswap_ops is set by frontswap_register_ops to contain the pointers
25 25 * to the frontswap "backend" implementation functions.
26 26 */
27   -static struct frontswap_ops frontswap_ops __read_mostly;
  27 +static struct frontswap_ops *frontswap_ops __read_mostly;
28 28  
29 29 /*
30 30 * This global enablement flag reduces overhead on systems where frontswap_ops
... ... @@ -108,41 +108,39 @@
108 108 *
109 109 * The time between the backend being registered and the swap file system
110 110 * calling the backend (via the frontswap_* functions) is indeterminate as
111   - * backend_registered is not atomic_t (or a value guarded by a spinlock).
  111 + * frontswap_ops is not atomic_t (or a value guarded by a spinlock).
112 112 * That is OK as we are comfortable missing some of these calls to the newly
113 113 * registered backend.
114 114 *
115 115 * Obviously the opposite (unloading the backend) must be done after all
116 116 * the frontswap_[store|load|invalidate_area|invalidate_page] start
117   - * ignorning or failing the requests - at which point backend_registered
  117 + * ignorning or failing the requests - at which point frontswap_ops
118 118 * would have to be made in some fashion atomic.
119 119 */
120 120 static DECLARE_BITMAP(need_init, MAX_SWAPFILES);
121   -static bool backend_registered __read_mostly;
122 121  
123 122 /*
124 123 * Register operations for frontswap, returning previous thus allowing
125 124 * detection of multiple backends and possible nesting.
126 125 */
127   -struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
  126 +struct frontswap_ops *frontswap_register_ops(struct frontswap_ops *ops)
128 127 {
129   - struct frontswap_ops old = frontswap_ops;
  128 + struct frontswap_ops *old = frontswap_ops;
130 129 int i;
131 130  
132   - frontswap_ops = *ops;
133 131 frontswap_enabled = true;
134 132  
135 133 for (i = 0; i < MAX_SWAPFILES; i++) {
136 134 if (test_and_clear_bit(i, need_init))
137   - (*frontswap_ops.init)(i);
  135 + ops->init(i);
138 136 }
139 137 /*
140   - * We MUST have backend_registered set _after_ the frontswap_init's
  138 + * We MUST have frontswap_ops set _after_ the frontswap_init's
141 139 * have been called. Otherwise __frontswap_store might fail. Hence
142 140 * the barrier to make sure compiler does not re-order us.
143 141 */
144 142 barrier();
145   - backend_registered = true;
  143 + frontswap_ops = ops;
146 144 return old;
147 145 }
148 146 EXPORT_SYMBOL(frontswap_register_ops);
149 147  
... ... @@ -172,11 +170,11 @@
172 170 {
173 171 struct swap_info_struct *sis = swap_info[type];
174 172  
175   - if (backend_registered) {
  173 + if (frontswap_ops) {
176 174 BUG_ON(sis == NULL);
177 175 if (sis->frontswap_map == NULL)
178 176 return;
179   - (*frontswap_ops.init)(type);
  177 + frontswap_ops->init(type);
180 178 } else {
181 179 BUG_ON(type > MAX_SWAPFILES);
182 180 set_bit(type, need_init);
... ... @@ -206,7 +204,7 @@
206 204 struct swap_info_struct *sis = swap_info[type];
207 205 pgoff_t offset = swp_offset(entry);
208 206  
209   - if (!backend_registered) {
  207 + if (!frontswap_ops) {
210 208 inc_frontswap_failed_stores();
211 209 return ret;
212 210 }
... ... @@ -215,7 +213,7 @@
215 213 BUG_ON(sis == NULL);
216 214 if (frontswap_test(sis, offset))
217 215 dup = 1;
218   - ret = frontswap_ops.store(type, offset, page);
  216 + ret = frontswap_ops->store(type, offset, page);
219 217 if (ret == 0) {
220 218 frontswap_set(sis, offset);
221 219 inc_frontswap_succ_stores();
222 220  
... ... @@ -250,13 +248,13 @@
250 248 struct swap_info_struct *sis = swap_info[type];
251 249 pgoff_t offset = swp_offset(entry);
252 250  
253   - if (!backend_registered)
  251 + if (!frontswap_ops)
254 252 return ret;
255 253  
256 254 BUG_ON(!PageLocked(page));
257 255 BUG_ON(sis == NULL);
258 256 if (frontswap_test(sis, offset))
259   - ret = frontswap_ops.load(type, offset, page);
  257 + ret = frontswap_ops->load(type, offset, page);
260 258 if (ret == 0) {
261 259 inc_frontswap_loads();
262 260 if (frontswap_tmem_exclusive_gets_enabled) {
263 261  
... ... @@ -276,12 +274,12 @@
276 274 {
277 275 struct swap_info_struct *sis = swap_info[type];
278 276  
279   - if (!backend_registered)
  277 + if (!frontswap_ops)
280 278 return;
281 279  
282 280 BUG_ON(sis == NULL);
283 281 if (frontswap_test(sis, offset)) {
284   - frontswap_ops.invalidate_page(type, offset);
  282 + frontswap_ops->invalidate_page(type, offset);
285 283 __frontswap_clear(sis, offset);
286 284 inc_frontswap_invalidates();
287 285 }
288 286  
... ... @@ -296,11 +294,11 @@
296 294 {
297 295 struct swap_info_struct *sis = swap_info[type];
298 296  
299   - if (backend_registered) {
  297 + if (frontswap_ops) {
300 298 BUG_ON(sis == NULL);
301 299 if (sis->frontswap_map == NULL)
302 300 return;
303   - (*frontswap_ops.invalidate_area)(type);
  301 + frontswap_ops->invalidate_area(type);
304 302 atomic_set(&sis->frontswap_pages, 0);
305 303 memset(sis->frontswap_map, 0, sis->max / sizeof(long));
306 304 }
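
For completeness, a tiny user-space stand-in for the backend side of the new interface. The stubbed frontswap_register_ops() below only mimics the return-a-pointer behaviour this commit adds, so "did we override another backend?" becomes a plain NULL test instead of peeking at old_ops.init, exactly as the zcache and Xen tmem hunks above now do. Everything here is illustrative scaffolding, not kernel code.

    #include <stdio.h>

    struct frontswap_ops {
    	void (*init)(unsigned type);
    };

    static struct frontswap_ops *frontswap_ops;	/* stand-in for the core's pointer */

    /* Stub with the new signature: returns the previously registered ops, or NULL. */
    static struct frontswap_ops *frontswap_register_ops(struct frontswap_ops *ops)
    {
    	struct frontswap_ops *old = frontswap_ops;

    	frontswap_ops = ops;
    	return old;
    }

    static struct frontswap_ops my_backend_ops = { .init = NULL };

    int main(void)
    {
    	struct frontswap_ops *old_ops = frontswap_register_ops(&my_backend_ops);

    	if (old_ops)		/* was: old_ops.init != NULL */
    		printf("frontswap_ops overridden\n");
    	return 0;
    }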