commit f45840b5c128445da70e7ec33adc47b4a12bdaf4
Author:    Nick Piggin
Committer: Linus Torvalds
Parent:    9978ad583e

mm: pagecache insertion fewer atomics

Setting and clearing the page lock when inserting a page into the
swapcache / pagecache can use non-atomic page flags operations while the
page has no other references, because no other CPU can be operating on
it at that time.

This saves one atomic operation when inserting a page into pagecache.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
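
For context: set_bit() and clear_bit() are atomic read-modify-write
operations (a lock-prefixed instruction on x86), while the
double-underscore variants compile to a plain load/modify/store. Below
is a minimal user-space sketch of the distinction, using the GCC/Clang
__atomic builtins as stand-ins for the kernel primitives; the helper
names are illustrative, not kernel API.

#include <stdio.h>

#define PG_locked 0

/* Analogue of the kernel's atomic set_bit(): an atomic OR that
 * concurrent updates from other CPUs cannot tear or lose. */
static void set_bit_atomic(int nr, unsigned long *addr)
{
        __atomic_fetch_or(addr, 1UL << nr, __ATOMIC_SEQ_CST);
}

/* Analogue of the non-atomic __set_bit(): a plain OR, safe only
 * while no other CPU can reach *addr. */
static void set_bit_nonatomic(int nr, unsigned long *addr)
{
        *addr |= 1UL << nr;
}

int main(void)
{
        unsigned long flags = 0;

        /* A freshly allocated page is in exactly this situation:
         * the flags word is still private to the allocating thread,
         * so the cheap variant suffices. */
        set_bit_nonatomic(PG_locked, &flags);
        printf("flags = %#lx\n", flags);
        return 0;
}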

Showing 2 changed files with 9 additions and 9 deletions

include/linux/pagemap.h

@@ -299,14 +299,14 @@
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
-static inline void set_page_locked(struct page *page)
+static inline void __set_page_locked(struct page *page)
 {
-        set_bit(PG_locked, &page->flags);
+        __set_bit(PG_locked, &page->flags);
 }
 
-static inline void clear_page_locked(struct page *page)
+static inline void __clear_page_locked(struct page *page)
 {
-        clear_bit(PG_locked, &page->flags);
+        __clear_bit(PG_locked, &page->flags);
 }
 
 static inline int trylock_page(struct page *page)
@@ -438,17 +438,17 @@
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run set_page_locked() against it.
+ * the page is new, so we can just run __set_page_locked() against it.
  */
 static inline int add_to_page_cache(struct page *page,
                 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
 {
         int error;
 
-        set_page_locked(page);
+        __set_page_locked(page);
         error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
         if (unlikely(error))
-                clear_page_locked(page);
+                __clear_page_locked(page);
         return error;
 }
 
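
The comment in the hunk above states the contract that makes this safe:
add_to_page_cache() must only be handed a page the caller alone
references. A hedged sketch of a typical caller in the 2.6-era page
cache API follows; the wrapper name grab_new_cache_page() is invented
for illustration, while page_cache_alloc(), page_cache_release() and
add_to_page_cache() are the real helpers of that period.

static struct page *grab_new_cache_page(struct address_space *mapping,
                                        pgoff_t index, gfp_t gfp)
{
        /* Fresh from the allocator: the refcount is 1 and only we
         * hold it, so add_to_page_cache() may set PG_locked with the
         * cheap non-atomic variant. */
        struct page *page = page_cache_alloc(mapping);

        if (!page)
                return NULL;

        if (add_to_page_cache(page, mapping, index, gfp)) {
                /* Insertion failed; add_to_page_cache() already
                 * cleared the lock bit, again non-atomically. */
                page_cache_release(page);
                return NULL;
        }
        return page;    /* locked and now visible to other CPUs */
}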

mm/swap_state.c

@@ -303,7 +303,7 @@
                  * re-using the just freed swap entry for an existing page.
                  * May fail (-ENOMEM) if radix-tree node allocation failed.
                  */
-                set_page_locked(new_page);
+                __set_page_locked(new_page);
                 SetPageSwapBacked(new_page);
                 err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
                 if (likely(!err)) {
@@ -315,7 +315,7 @@
                         return new_page;
                 }
                 ClearPageSwapBacked(new_page);
-                clear_page_locked(new_page);
+                __clear_page_locked(new_page);
                 swap_free(entry);
         } while (err != -ENOMEM);
 