Commit 33c2a174120b2c1baec9d1dac513f9d4b761b26a

Authored by Linus Torvalds

Merge tag 'stable/for-linus-3.7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/mm

Pull frontswap update from Konrad Rzeszutek Wilk:
 "Features:
   - Support exclusive gets if backend is capable.
  Bug-fixes:
   - Fix compile warnings
   - Add comments/cleanup doc
   - Fix wrong if condition"

* tag 'stable/for-linus-3.7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/mm:
  frontswap: support exclusive gets if tmem backend is capable
  mm: frontswap: fix a wrong if condition in frontswap_shrink
  mm/frontswap: fix uninit'ed variable warning
  mm/frontswap: cleanup doc and comment error
  mm: frontswap: remove unneeded headers

Showing 2 changed files:

include/linux/frontswap.h
... ... @@ -19,6 +19,8 @@
19 19 extern void frontswap_shrink(unsigned long);
20 20 extern unsigned long frontswap_curr_pages(void);
21 21 extern void frontswap_writethrough(bool);
  22 +#define FRONTSWAP_HAS_EXCLUSIVE_GETS
  23 +extern void frontswap_tmem_exclusive_gets(bool);
22 24  
23 25 extern void __frontswap_init(unsigned type);
24 26 extern int __frontswap_store(struct page *page);
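
For a tmem backend, the new interface is opt-in: it checks FRONTSWAP_HAS_EXCLUSIVE_GETS at build time and calls frontswap_tmem_exclusive_gets(true) if its gets consume the stored copy. A minimal sketch of such an opt-in (example_backend_init() is a hypothetical backend init function; only the #define and the setter above come from this commit):

  #include <linux/frontswap.h>

  static void example_backend_init(void)
  {
  #ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
          /* This backend frees its copy of a page on every successful get,
           * so have frontswap dirty the page and clear its map bit on load.
           */
          frontswap_tmem_exclusive_gets(true);
  #endif
  }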
mm/frontswap.c
... ... @@ -44,6 +44,13 @@
44 44 */
45 45 static bool frontswap_writethrough_enabled __read_mostly;
46 46  
  47 +/*
  48 + * If enabled, the underlying tmem implementation is capable of doing
  49 + * exclusive gets, so frontswap_load, on a successful tmem_get must
  50 + * mark the page as no longer in frontswap AND mark it dirty.
  51 + */
  52 +static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;
  53 +
47 54 #ifdef CONFIG_DEBUG_FS
48 55 /*
49 56 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
... ... @@ -97,6 +104,15 @@
97 104 EXPORT_SYMBOL(frontswap_writethrough);
98 105  
99 106 /*
  107 + * Enable/disable frontswap exclusive gets (see above).
  108 + */
  109 +void frontswap_tmem_exclusive_gets(bool enable)
  110 +{
  111 + frontswap_tmem_exclusive_gets_enabled = enable;
  112 +}
  113 +EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
  114 +
  115 +/*
100 116 * Called when a swap device is swapon'd.
101 117 */
102 118 void __frontswap_init(unsigned type)
... ... @@ -174,8 +190,13 @@
174 190 BUG_ON(sis == NULL);
175 191 if (frontswap_test(sis, offset))
176 192 ret = frontswap_ops.load(type, offset, page);
177   - if (ret == 0)
  193 + if (ret == 0) {
178 194 inc_frontswap_loads();
  195 + if (frontswap_tmem_exclusive_gets_enabled) {
  196 + SetPageDirty(page);
  197 + frontswap_clear(sis, offset);
  198 + }
  199 + }
179 200 return ret;
180 201 }
181 202 EXPORT_SYMBOL(__frontswap_load);
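
With exclusive gets enabled, a hit in __frontswap_load() now dirties the page and clears its frontswap map bit, because the backend no longer keeps a copy after the get; if the page is reclaimed again it has to be written out (or re-stored) rather than silently dropped. The backend half of that contract might look roughly like this (struct example_entry and the example_* helpers are made up; only the load() signature shape matches the frontswap_ops.load() call above):

  /* Hypothetical per-type storage helpers, not part of this commit. */
  struct example_entry *example_tree_lookup(unsigned type, pgoff_t offset);
  void example_copy_to_page(struct example_entry *e, struct page *page);
  void example_tree_delete(unsigned type, pgoff_t offset);

  /* Sketch of a backend .load op when exclusive gets are enabled. */
  static int example_load(unsigned type, pgoff_t offset, struct page *page)
  {
          struct example_entry *e = example_tree_lookup(type, offset);

          if (!e)
                  return -1;                      /* miss: fall back to the swap device */
          example_copy_to_page(e, page);
          example_tree_delete(type, offset);      /* the get consumed the backend copy */
          return 0;                               /* frontswap will SetPageDirty() the page */
  }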
... ... @@ -263,6 +284,11 @@
263 284 return ret;
264 285 }
265 286  
  287 +/*
  288 + * Used to check if it's necessary and feasible to unuse pages.
  289 + * Return 1 when there is nothing to do, 0 when pages need to be shrunk,
  290 + * error code when there is an error.
  291 + */
266 292 static int __frontswap_shrink(unsigned long target_pages,
267 293 unsigned long *pages_to_unuse,
268 294 int *type)
... ... @@ -275,7 +301,7 @@
275 301 if (total_pages <= target_pages) {
276 302 /* Nothing to do */
277 303 *pages_to_unuse = 0;
278   - return 0;
  304 + return 1;
279 305 }
280 306 total_pages_to_unuse = total_pages - target_pages;
281 307 return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
... ... @@ -292,7 +318,7 @@
292 318 void frontswap_shrink(unsigned long target_pages)
293 319 {
294 320 unsigned long pages_to_unuse = 0;
295   - int type, ret;
  321 + int uninitialized_var(type), ret;
296 322  
297 323 /*
298 324 * we don't want to hold swap_lock while doing a very
... ... @@ -302,7 +328,7 @@
302 328 spin_lock(&swap_lock);
303 329 ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
304 330 spin_unlock(&swap_lock);
305   - if (ret == 0 && pages_to_unuse)
  331 + if (ret == 0)
306 332 try_to_unuse(type, true, pages_to_unuse);
307 333 return;
308 334 }
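
Taken together, the shrink fixes mean __frontswap_shrink() reports "nothing to do" as 1 and frontswap_shrink() only calls try_to_unuse() when a swap type and page count were actually computed (ret == 0), instead of testing pages_to_unuse while type could still be uninitialized. None of this changes the exported interface; a caller-side sketch, using only the frontswap_curr_pages() and frontswap_shrink() declarations shown in frontswap.h above (the halving policy is made up):

  #include <linux/frontswap.h>

  /* Hypothetical self-shrink step: ask frontswap to give back half its pages. */
  static void example_selfshrink(void)
  {
          unsigned long cur = frontswap_curr_pages();

          if (cur)
                  frontswap_shrink(cur / 2);
  }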