Commit 83896fb5e51594281720d145164f866ba769abd5

Authored by Linus Torvalds
1 parent 7a608572a2

Revert "mm: simplify code of swap.c"

This reverts commit d8505dee1a87b8d41b9c4ee1325cd72258226fbc.

Chris Mason ended up chasing down some page allocation errors and pages
stuck waiting on the IO scheduler, and was able to narrow it down to two
commits: commit 744ed1442757 ("mm: batch activate_page() to reduce lock
contention") and d8505dee1a87 ("mm: simplify code of swap.c").

This reverts the second one.

Reported-and-debugged-by: Chris Mason <chris.mason@oracle.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jens Axboe <jaxboe@fusionio.com>
Cc: linux-mm <linux-mm@kvack.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 47 additions and 54 deletions (side-by-side diff)

... ... @@ -178,13 +178,15 @@
178 178 }
179 179 EXPORT_SYMBOL(put_pages_list);
180 180  
181   -static void pagevec_lru_move_fn(struct pagevec *pvec,
182   - void (*move_fn)(struct page *page, void *arg),
183   - void *arg)
  181 +/*
  182 + * pagevec_move_tail() must be called with IRQ disabled.
  183 + * Otherwise this may cause nasty races.
  184 + */
  185 +static void pagevec_move_tail(struct pagevec *pvec)
184 186 {
185 187 int i;
  188 + int pgmoved = 0;
186 189 struct zone *zone = NULL;
187   - unsigned long flags = 0;
188 190  
189 191 for (i = 0; i < pagevec_count(pvec); i++) {
190 192 struct page *page = pvec->pages[i];
191 193  
192 194  
193 195  
194 196  
195 197  
196 198  
... ... @@ -192,49 +194,29 @@
192 194  
193 195 if (pagezone != zone) {
194 196 if (zone)
195   - spin_unlock_irqrestore(&zone->lru_lock, flags);
  197 + spin_unlock(&zone->lru_lock);
196 198 zone = pagezone;
197   - spin_lock_irqsave(&zone->lru_lock, flags);
  199 + spin_lock(&zone->lru_lock);
198 200 }
199   -
200   - (*move_fn)(page, arg);
  201 + if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
  202 + int lru = page_lru_base_type(page);
  203 + list_move_tail(&page->lru, &zone->lru[lru].list);
  204 + pgmoved++;
  205 + }
201 206 }
202 207 if (zone)
203   - spin_unlock_irqrestore(&zone->lru_lock, flags);
204   - release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
  208 + spin_unlock(&zone->lru_lock);
  209 + __count_vm_events(PGROTATED, pgmoved);
  210 + release_pages(pvec->pages, pvec->nr, pvec->cold);
205 211 pagevec_reinit(pvec);
206 212 }
207 213  
208   -static void pagevec_move_tail_fn(struct page *page, void *arg)
209   -{
210   - int *pgmoved = arg;
211   - struct zone *zone = page_zone(page);
212   -
213   - if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
214   - int lru = page_lru_base_type(page);
215   - list_move_tail(&page->lru, &zone->lru[lru].list);
216   - (*pgmoved)++;
217   - }
218   -}
219   -
220 214 /*
221   - * pagevec_move_tail() must be called with IRQ disabled.
222   - * Otherwise this may cause nasty races.
223   - */
224   -static void pagevec_move_tail(struct pagevec *pvec)
225   -{
226   - int pgmoved = 0;
227   -
228   - pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
229   - __count_vm_events(PGROTATED, pgmoved);
230   -}
231   -
232   -/*
233 215 * Writeback is about to end against a page which has been marked for immediate
234 216 * reclaim. If it still appears to be reclaimable, move it to the tail of the
235 217 * inactive list.
236 218 */
237   -void rotate_reclaimable_page(struct page *page)
  219 +void rotate_reclaimable_page(struct page *page)
238 220 {
239 221 if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
240 222 !PageUnevictable(page) && PageLRU(page)) {
241 223  
242 224  
... ... @@ -534,33 +516,44 @@
534 516 }
535 517 }
536 518  
537   -static void ____pagevec_lru_add_fn(struct page *page, void *arg)
538   -{
539   - enum lru_list lru = (enum lru_list)arg;
540   - struct zone *zone = page_zone(page);
541   - int file = is_file_lru(lru);
542   - int active = is_active_lru(lru);
543   -
544   - VM_BUG_ON(PageActive(page));
545   - VM_BUG_ON(PageUnevictable(page));
546   - VM_BUG_ON(PageLRU(page));
547   -
548   - SetPageLRU(page);
549   - if (active)
550   - SetPageActive(page);
551   - update_page_reclaim_stat(zone, page, file, active);
552   - add_page_to_lru_list(zone, page, lru);
553   -}
554   -
555 519 /*
556 520 * Add the passed pages to the LRU, then drop the caller's refcount
557 521 * on them. Reinitialises the caller's pagevec.
558 522 */
559 523 void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
560 524 {
  525 + int i;
  526 + struct zone *zone = NULL;
  527 +
561 528 VM_BUG_ON(is_unevictable_lru(lru));
562 529  
563   - pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
  530 + for (i = 0; i < pagevec_count(pvec); i++) {
  531 + struct page *page = pvec->pages[i];
  532 + struct zone *pagezone = page_zone(page);
  533 + int file;
  534 + int active;
  535 +
  536 + if (pagezone != zone) {
  537 + if (zone)
  538 + spin_unlock_irq(&zone->lru_lock);
  539 + zone = pagezone;
  540 + spin_lock_irq(&zone->lru_lock);
  541 + }
  542 + VM_BUG_ON(PageActive(page));
  543 + VM_BUG_ON(PageUnevictable(page));
  544 + VM_BUG_ON(PageLRU(page));
  545 + SetPageLRU(page);
  546 + active = is_active_lru(lru);
  547 + file = is_file_lru(lru);
  548 + if (active)
  549 + SetPageActive(page);
  550 + update_page_reclaim_stat(zone, page, file, active);
  551 + add_page_to_lru_list(zone, page, lru);
  552 + }
  553 + if (zone)
  554 + spin_unlock_irq(&zone->lru_lock);
  555 + release_pages(pvec->pages, pvec->nr, pvec->cold);
  556 + pagevec_reinit(pvec);
564 557 }
565 558  
566 559 EXPORT_SYMBOL(____pagevec_lru_add);