Commit f116695a500cdd84cbeac68bc373e98ae729c24b

Authored by Sasha Levin
Committed by Konrad Rzeszutek Wilk
1 parent 96253444db

mm: frontswap: split out __frontswap_unuse_pages

An attempt at making frontswap_shrink shorter and more readable. This patch
splits out walking through the swap list to find an entry with enough
pages to unuse.

Also, assert that the internal __frontswap_unuse_pages() is called under the swap
lock, since that part of the code previously ran directly inside the lock.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Showing 1 changed file with 39 additions and 20 deletions Side-by-side Diff

... ... @@ -230,6 +230,41 @@
230 230 return totalpages;
231 231 }
232 232  
  233 +static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
  234 + int *swapid)
  235 +{
  236 + int ret = -EINVAL;
  237 + struct swap_info_struct *si = NULL;
  238 + int si_frontswap_pages;
  239 + unsigned long total_pages_to_unuse = total;
  240 + unsigned long pages = 0, pages_to_unuse = 0;
  241 + int type;
  242 +
  243 + assert_spin_locked(&swap_lock);
  244 + for (type = swap_list.head; type >= 0; type = si->next) {
  245 + si = swap_info[type];
  246 + si_frontswap_pages = atomic_read(&si->frontswap_pages);
  247 + if (total_pages_to_unuse < si_frontswap_pages) {
  248 + pages = pages_to_unuse = total_pages_to_unuse;
  249 + } else {
  250 + pages = si_frontswap_pages;
  251 + pages_to_unuse = 0; /* unuse all */
  252 + }
  253 + /* ensure there is enough RAM to fetch pages from frontswap */
  254 + if (security_vm_enough_memory_mm(current->mm, pages)) {
  255 + ret = -ENOMEM;
  256 + continue;
  257 + }
  258 + vm_unacct_memory(pages);
  259 + *unused = pages_to_unuse;
  260 + *swapid = type;
  261 + ret = 0;
  262 + break;
  263 + }
  264 +
  265 + return ret;
  266 +}
  267 +
233 268 /*
234 269 * Frontswap, like a true swap device, may unnecessarily retain pages
235 270 * under certain circumstances; "shrink" frontswap is essentially a
236 271  
... ... @@ -240,11 +275,9 @@
240 275 */
241 276 void frontswap_shrink(unsigned long target_pages)
242 277 {
243   - struct swap_info_struct *si = NULL;
244   - int si_frontswap_pages;
245 278 unsigned long total_pages = 0, total_pages_to_unuse;
246   - unsigned long pages = 0, pages_to_unuse = 0;
247   - int type;
  279 + unsigned long pages_to_unuse = 0;
  280 + int type, ret;
248 281 bool locked = false;
249 282  
250 283 /*
... ... @@ -258,22 +291,8 @@
258 291 if (total_pages <= target_pages)
259 292 goto out;
260 293 total_pages_to_unuse = total_pages - target_pages;
261   - for (type = swap_list.head; type >= 0; type = si->next) {
262   - si = swap_info[type];
263   - si_frontswap_pages = atomic_read(&si->frontswap_pages);
264   - if (total_pages_to_unuse < si_frontswap_pages) {
265   - pages = pages_to_unuse = total_pages_to_unuse;
266   - } else {
267   - pages = si_frontswap_pages;
268   - pages_to_unuse = 0; /* unuse all */
269   - }
270   - /* ensure there is enough RAM to fetch pages from frontswap */
271   - if (security_vm_enough_memory_mm(current->mm, pages))
272   - continue;
273   - vm_unacct_memory(pages);
274   - break;
275   - }
276   - if (type < 0)
  294 + ret = __frontswap_unuse_pages(total_pages_to_unuse, &pages_to_unuse, &type);
  295 + if (ret < 0)
277 296 goto out;
278 297 locked = false;
279 298 spin_unlock(&swap_lock);