Commit 529ae9aaa08378cfe2a4350bded76f32cc8ff0ce
Committed by: Linus Torvalds
1 parent: e9ba969818
Exists in: master and 39 other branches
mm: rename page trylock
Converting the page lock to the new locking bitops requires a change of page flag operation naming, so we might as well convert it to something nicer (!TestSetPageLocked_Lock => trylock_page, SetPageLocked => set_page_locked).

This also facilitates lockdep annotation of the page lock.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
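Note the inverted sense at trylock sites: the old TestSetPageLocked() returned the previous value of the PG_locked bit (nonzero meaning the page was already locked), whereas trylock_page() returns nonzero on success. A minimal before/after sketch of a hypothetical caller (not part of this patch):

	/* Before: TestSetPageLocked() returns the old bit value,
	 * so a nonzero result means the trylock FAILED. */
	if (TestSetPageLocked(page))
		goto busy;
	/* ... page is locked here ... */
	unlock_page(page);

	/* After: trylock_page() returns nonzero when the lock was taken,
	 * so failure is now the negated test. */
	if (!trylock_page(page))
		goto busy;
	/* ... page is locked here ... */
	unlock_page(page);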
Showing 20 changed files with 74 additions and 59 deletions
- drivers/scsi/sg.c
- fs/afs/write.c
- fs/cifs/file.c
- fs/jbd/commit.c
- fs/jbd2/commit.c
- fs/reiserfs/journal.c
- fs/splice.c
- fs/xfs/linux-2.6/xfs_aops.c
- include/linux/page-flags.h
- include/linux/pagemap.h
- mm/filemap.c
- mm/memory.c
- mm/migrate.c
- mm/rmap.c
- mm/shmem.c
- mm/swap.c
- mm/swap_state.c
- mm/swapfile.c
- mm/truncate.c
- mm/vmscan.c
fs/jbd/commit.c
@@ -63,7 +63,7 @@
 		goto nope;
 
 	/* OK, it's a truncated page */
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		goto nope;
 
 	page_cache_get(page);
@@ -446,7 +446,7 @@
 			spin_lock(&journal->j_list_lock);
 		}
 		if (unlikely(!buffer_uptodate(bh))) {
-			if (TestSetPageLocked(bh->b_page)) {
+			if (!trylock_page(bh->b_page)) {
 				spin_unlock(&journal->j_list_lock);
 				lock_page(bh->b_page);
 				spin_lock(&journal->j_list_lock);
fs/reiserfs/journal.c
@@ -627,7 +627,7 @@
 static void release_buffer_page(struct buffer_head *bh)
 {
 	struct page *page = bh->b_page;
-	if (!page->mapping && !TestSetPageLocked(page)) {
+	if (!page->mapping && trylock_page(page)) {
 		page_cache_get(page);
 		put_bh(bh);
 		if (!page->mapping)
fs/xfs/linux-2.6/xfs_aops.c
@@ -675,7 +675,7 @@
 		} else
 			pg_offset = PAGE_CACHE_SIZE;
 
-		if (page->index == tindex && !TestSetPageLocked(page)) {
+		if (page->index == tindex && trylock_page(page)) {
 			pg_len = xfs_probe_page(page, pg_offset, mapped);
 			unlock_page(page);
 		}
@@ -759,7 +759,7 @@
 
 		if (page->index != tindex)
 			goto fail;
-		if (TestSetPageLocked(page))
+		if (!trylock_page(page))
 			goto fail;
 		if (PageWriteback(page))
 			goto fail_unlock_page;
include/linux/page-flags.h
@@ -163,7 +163,7 @@
 
 struct page;	/* forward declaration */
 
-PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked)
+TESTPAGEFLAG(Locked, locked)
 PAGEFLAG(Error, error)
 PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
 PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
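After this change PG_locked keeps only its query accessor here; the set/clear and test-and-set operations move to pagemap.h (below), funneling all page-lock manipulation through one place. As a rough guide — assuming the macro definitions earlier in this header — TESTPAGEFLAG(Locked, locked) expands to approximately:

	/* Approximate expansion of TESTPAGEFLAG(Locked, locked): */
	static inline int PageLocked(struct page *page)
	{
		return test_bit(PG_locked, &page->flags);
	}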
include/linux/pagemap.h
@@ -250,30 +250,7 @@
 	return read_cache_page(mapping, index, filler, data);
 }
 
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-extern void remove_from_page_cache(struct page *page);
-extern void __remove_from_page_cache(struct page *page);
-
 /*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
-		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
-	int error;
-
-	SetPageLocked(page);
-	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-	if (unlikely(error))
-		ClearPageLocked(page);
-	return error;
-}
-
-/*
  * Return byte-offset into filesystem object for page.
  */
 static inline loff_t page_offset(struct page *page)
 {
@@ -294,13 +271,28 @@
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
+static inline void set_page_locked(struct page *page)
+{
+	set_bit(PG_locked, &page->flags);
+}
+
+static inline void clear_page_locked(struct page *page)
+{
+	clear_bit(PG_locked, &page->flags);
+}
+
+static inline int trylock_page(struct page *page)
+{
+	return !test_and_set_bit(PG_locked, &page->flags);
+}
+
 /*
  * lock_page may only be called if we have the page's inode pinned.
  */
 static inline void lock_page(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page(page);
 }
 
@@ -312,7 +304,7 @@
 static inline int lock_page_killable(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		return __lock_page_killable(page);
 	return 0;
 }
@@ -324,7 +316,7 @@
 static inline void lock_page_nosync(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page_nosync(page);
 }
 
@@ -407,6 +399,29 @@
 		ret = __get_user(c, end);
 	}
 	return ret;
+}
+
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+extern void remove_from_page_cache(struct page *page);
+extern void __remove_from_page_cache(struct page *page);
+
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run set_page_locked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+	int error;
+
+	set_page_locked(page);
+	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+	if (unlikely(error))
+		clear_page_locked(page);
+	return error;
 }
 
 #endif /* _LINUX_PAGEMAP_H */
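The new set_page_locked()/clear_page_locked() are plain bitops with no trylock semantics, so they are only safe on a page no other CPU can reach — exactly how add_to_page_cache() above uses them on a freshly allocated page. A rough sketch of the same pattern in a hypothetical caller (mapping and index are assumed to be in scope; this is not from the patch):

	/* Hypothetical: lock a page nobody else can see yet, publish it,
	 * then rely on unlock_page() to wake any later waiters. */
	struct page *page = alloc_page(GFP_KERNEL);
	if (page) {
		set_page_locked(page);	/* no contention possible yet */
		if (add_to_page_cache_locked(page, mapping, index, GFP_KERNEL)) {
			clear_page_locked(page);	/* still unseen: plain clear is fine */
			page_cache_release(page);
		} else {
			/* page is now visible and locked: start I/O, etc. */
			unlock_page(page);
		}
	}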
mm/filemap.c
@@ -558,14 +558,14 @@
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
  * The first mb is necessary to safely close the critical section opened by the
- * TestSetPageLocked(), the second mb is necessary to enforce ordering between
- * the clear_bit and the read of the waitqueue (to avoid SMP races with a
- * parallel wait_on_page_locked()).
+ * test_and_set_bit() to lock the page; the second mb is necessary to enforce
+ * ordering between the clear_bit and the read of the waitqueue (to avoid SMP
+ * races with a parallel wait_on_page_locked()).
  */
 void unlock_page(struct page *page)
 {
 	smp_mb__before_clear_bit();
-	if (!TestClearPageLocked(page))
+	if (!test_and_clear_bit(PG_locked, &page->flags))
 		BUG();
 	smp_mb__after_clear_bit();
 	wake_up_page(page, PG_locked);
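For illustration, the two-barrier protocol that comment describes, sketched against a sleeper in wait_on_page_locked() (simplified; assumes wake_up_page() skips the wakeup when the waitqueue looks empty, as __wake_up_bit() does in this era):

	/*
	 * Unlocker (unlock_page):              Sleeper (wait_on_page_locked):
	 *
	 *   smp_mb__before_clear_bit();          add self to page waitqueue
	 *   test_and_clear_bit(PG_locked);       smp_mb();
	 *   smp_mb__after_clear_bit();           if (PageLocked(page))
	 *   if (waitqueue_active(wq))                    schedule();
	 *           wake_up(wq);                 remove self from waitqueue
	 *
	 * Without the second barrier, the unlocker could observe an empty
	 * waitqueue before the sleeper's enqueue became visible, skip the
	 * wakeup, and leave the sleeper stranded.
	 */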
@@ -931,7 +931,7 @@
 	struct page *page = find_get_page(mapping, index);
 
 	if (page) {
-		if (!TestSetPageLocked(page))
+		if (trylock_page(page))
 			return page;
 		page_cache_release(page);
 		return NULL;
@@ -1027,7 +1027,7 @@
 			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
 					!mapping->a_ops->is_partially_uptodate)
 				goto page_not_up_to_date;
-			if (TestSetPageLocked(page))
+			if (!trylock_page(page))
 				goto page_not_up_to_date;
 			if (!mapping->a_ops->is_partially_uptodate(page,
 							desc, offset))
mm/migrate.c
@@ -605,7 +605,7 @@
 	 * establishing additional references. We are the only one
 	 * holding a reference to the new page at this point.
 	 */
-	if (TestSetPageLocked(newpage))
+	if (!trylock_page(newpage))
 		BUG();
 
 	/* Prepare mapping for the new page.*/
@@ -667,7 +667,7 @@
 	BUG_ON(charge);
 
 	rc = -EAGAIN;
-	if (TestSetPageLocked(page)) {
+	if (!trylock_page(page)) {
 		if (!force)
 			goto move_newpage;
 		lock_page(page);
mm/rmap.c
@@ -422,7 +422,7 @@
 		referenced += page_referenced_anon(page, mem_cont);
 	else if (is_locked)
 		referenced += page_referenced_file(page, mem_cont);
-	else if (TestSetPageLocked(page))
+	else if (!trylock_page(page))
 		referenced++;
 	else {
 		if (page->mapping)
mm/shmem.c
@@ -1265,7 +1265,7 @@
 		}
 
 		/* We have to do this with page locked to prevent races */
-		if (TestSetPageLocked(swappage)) {
+		if (!trylock_page(swappage)) {
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
 			wait_on_page_locked(swappage);
@@ -1329,7 +1329,7 @@
 			shmem_swp_unmap(entry);
 			filepage = find_get_page(mapping, idx);
 			if (filepage &&
-			    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
+			    (!PageUptodate(filepage) || !trylock_page(filepage))) {
 				spin_unlock(&info->lock);
 				wait_on_page_locked(filepage);
 				page_cache_release(filepage);
mm/swap.c
@@ -444,7 +444,7 @@
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 
-		if (PagePrivate(page) && !TestSetPageLocked(page)) {
+		if (PagePrivate(page) && trylock_page(page)) {
 			if (PagePrivate(page))
 				try_to_release_page(page, 0);
 			unlock_page(page);
mm/swap_state.c
@@ -201,7 +201,7 @@
  */
 static inline void free_swap_cache(struct page *page)
 {
-	if (PageSwapCache(page) && !TestSetPageLocked(page)) {
+	if (PageSwapCache(page) && trylock_page(page)) {
 		remove_exclusive_swap_page(page);
 		unlock_page(page);
 	}
 
@@ -302,9 +302,9 @@
 		 * re-using the just freed swap entry for an existing page.
 		 * May fail (-ENOMEM) if radix-tree node allocation failed.
 		 */
-		SetPageLocked(new_page);
+		set_page_locked(new_page);
 		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
-		if (!err) {
+		if (likely(!err)) {
 			/*
 			 * Initiate read into locked page and return.
 			 */
@@ -312,7 +312,7 @@
 			swap_readpage(NULL, new_page);
 			return new_page;
 		}
-		ClearPageLocked(new_page);
+		clear_page_locked(new_page);
 		swap_free(entry);
 	} while (err != -ENOMEM);
 
mm/swapfile.c
@@ -403,7 +403,7 @@
 	if (p) {
 		if (swap_entry_free(p, swp_offset(entry)) == 1) {
 			page = find_get_page(&swapper_space, entry.val);
-			if (page && unlikely(TestSetPageLocked(page))) {
+			if (page && unlikely(!trylock_page(page))) {
 				page_cache_release(page);
 				page = NULL;
 			}
mm/truncate.c
@@ -187,7 +187,7 @@
 			if (page_index > next)
 				next = page_index;
 			next++;
-			if (TestSetPageLocked(page))
+			if (!trylock_page(page))
 				continue;
 			if (PageWriteback(page)) {
 				unlock_page(page);
@@ -280,7 +280,7 @@
 		pgoff_t index;
 		int lock_failed;
 
-		lock_failed = TestSetPageLocked(page);
+		lock_failed = !trylock_page(page);
 
 		/*
 		 * We really shouldn't be looking at the ->index of an
mm/vmscan.c
@@ -496,7 +496,7 @@
 		page = lru_to_page(page_list);
 		list_del(&page->lru);
 
-		if (TestSetPageLocked(page))
+		if (!trylock_page(page))
 			goto keep;
 
 		VM_BUG_ON(PageActive(page));
@@ -582,7 +582,7 @@
 			 * A synchronous write - probably a ramdisk.  Go
 			 * ahead and try to reclaim the page.
 			 */
-			if (TestSetPageLocked(page))
+			if (!trylock_page(page))
 				goto keep;
 			if (PageDirty(page) || PageWriteback(page))
 				goto keep_locked;