Commit 69aa12f2961a0310d58375815fa391e5528a79b3

Authored by Jianyu Zhan
Committed by Jiri Slaby
1 parent 4cd64dcede

mm/swap.c: clean up *lru_cache_add* functions

commit 2329d3751b082b4fd354f334a88662d72abac52d upstream.

In mm/swap.c, __lru_cache_add() is exported, but actually there are no
users outside this file.

This patch unexports __lru_cache_add(), and makes it static.  It also
exports lru_cache_add_file(), as it is used by cifs and fuse, which can
be loaded as modules.

Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Shaohua Li <shli@kernel.org>
Cc: Bob Liu <bob.liu@oracle.com>
Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Rafael Aquini <aquini@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Reviewed-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>

Showing 2 changed files with 25 additions and 25 deletions Side-by-side Diff

include/linux/swap.h
... ... @@ -268,8 +268,9 @@
268 268  
269 269  
270 270 /* linux/mm/swap.c */
271   -extern void __lru_cache_add(struct page *);
272 271 extern void lru_cache_add(struct page *);
  272 +extern void lru_cache_add_anon(struct page *page);
  273 +extern void lru_cache_add_file(struct page *page);
273 274 extern void lru_add_page_tail(struct page *page, struct page *page_tail,
274 275 struct lruvec *lruvec, struct list_head *head);
275 276 extern void activate_page(struct page *);
... ... @@ -282,22 +283,6 @@
282 283 extern void swap_setup(void);
283 284  
284 285 extern void add_page_to_unevictable_list(struct page *page);
285   -
286   -/**
287   - * lru_cache_add: add a page to the page lists
288   - * @page: the page to add
289   - */
290   -static inline void lru_cache_add_anon(struct page *page)
291   -{
292   - ClearPageActive(page);
293   - __lru_cache_add(page);
294   -}
295   -
296   -static inline void lru_cache_add_file(struct page *page)
297   -{
298   - ClearPageActive(page);
299   - __lru_cache_add(page);
300   -}
301 286  
302 287 /* linux/mm/vmscan.c */
303 288 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
... ... @@ -548,13 +548,7 @@
548 548 }
549 549 EXPORT_SYMBOL(mark_page_accessed);
550 550  
551   -/*
552   - * Queue the page for addition to the LRU via pagevec. The decision on whether
553   - * to add the page to the [in]active [file|anon] list is deferred until the
554   - * pagevec is drained. This gives a chance for the caller of __lru_cache_add()
555   - * have the page added to the active list using mark_page_accessed().
556   - */
557   -void __lru_cache_add(struct page *page)
  551 +static void __lru_cache_add(struct page *page)
558 552 {
559 553 struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
560 554  
561 555  
562 556  
... ... @@ -564,11 +558,32 @@
564 558 pagevec_add(pvec, page);
565 559 put_cpu_var(lru_add_pvec);
566 560 }
567   -EXPORT_SYMBOL(__lru_cache_add);
568 561  
569 562 /**
  563 + * lru_cache_add: add a page to the page lists
  564 + * @page: the page to add
  565 + */
  566 +void lru_cache_add_anon(struct page *page)
  567 +{
  568 + ClearPageActive(page);
  569 + __lru_cache_add(page);
  570 +}
  571 +
  572 +void lru_cache_add_file(struct page *page)
  573 +{
  574 + ClearPageActive(page);
  575 + __lru_cache_add(page);
  576 +}
  577 +EXPORT_SYMBOL(lru_cache_add_file);
  578 +
  579 +/**
570 580 * lru_cache_add - add a page to a page list
571 581 * @page: the page to be added to the LRU.
  582 + *
  583 + * Queue the page for addition to the LRU via pagevec. The decision on whether
  584 + * to add the page to the [in]active [file|anon] list is deferred until the
  585 + * pagevec is drained. This gives a chance for the caller of lru_cache_add()
  586 + * have the page added to the active list using mark_page_accessed().
572 587 */
573 588 void lru_cache_add(struct page *page)
574 589 {