Commit 513c66820333a2cc7b54eaa6a7b5c34f6ffaf770

Authored by Michal Hocko
Committed by Greg Kroah-Hartman
1 parent 8326fa8ec2

mm: get rid of radix tree gfp mask for pagecache_get_page

commit 45f87de57f8fad59302fd263dd81ffa4843b5b24 upstream.

Commit 2457aec63745 ("mm: non-atomically mark page accessed during page
cache allocation where possible") has added a separate parameter for
specifying gfp mask for radix tree allocations.

Not only is this less than optimal from the API point of view because it
is error prone, it is also currently buggy because
grab_cache_page_write_begin is using GFP_KERNEL for radix tree and if
fgp_flags doesn't contain FGP_NOFS (mostly controlled by fs by
AOP_FLAG_NOFS flag) but the mapping_gfp_mask has __GFP_FS cleared then
the radix tree allocation wouldn't obey the restriction and might
recurse into filesystem and cause deadlocks.  This is the case for most
filesystems unfortunately because only ext4 and gfs2 are using
AOP_FLAG_NOFS.

Let's simply remove the radix_gfp_mask parameter because the allocation
context is the same for both the page cache and the radix tree.  Just make
sure that the radix tree gets only the sane subset of the mask (e.g.  do
not pass __GFP_WRITE).

In the long term it is preferable to convert the remaining users of
AOP_FLAG_NOFS to use mapping_gfp_mask instead and simplify this
interface even further.

Reported-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Showing 2 changed files with 18 additions and 24 deletions Side-by-side Diff

include/linux/pagemap.h
... ... @@ -251,7 +251,7 @@
251 251 #define FGP_NOWAIT 0x00000020
252 252  
253 253 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
254   - int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
  254 + int fgp_flags, gfp_t cache_gfp_mask);
255 255  
256 256 /**
257 257 * find_get_page - find and get a page reference
258 258  
... ... @@ -266,13 +266,13 @@
266 266 static inline struct page *find_get_page(struct address_space *mapping,
267 267 pgoff_t offset)
268 268 {
269   - return pagecache_get_page(mapping, offset, 0, 0, 0);
  269 + return pagecache_get_page(mapping, offset, 0, 0);
270 270 }
271 271  
272 272 static inline struct page *find_get_page_flags(struct address_space *mapping,
273 273 pgoff_t offset, int fgp_flags)
274 274 {
275   - return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
  275 + return pagecache_get_page(mapping, offset, fgp_flags, 0);
276 276 }
277 277  
278 278 /**
... ... @@ -292,7 +292,7 @@
292 292 static inline struct page *find_lock_page(struct address_space *mapping,
293 293 pgoff_t offset)
294 294 {
295   - return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
  295 + return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
296 296 }
297 297  
298 298 /**
... ... @@ -319,7 +319,7 @@
319 319 {
320 320 return pagecache_get_page(mapping, offset,
321 321 FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
322   - gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
  322 + gfp_mask);
323 323 }
324 324  
325 325 /**
... ... @@ -340,8 +340,7 @@
340 340 {
341 341 return pagecache_get_page(mapping, index,
342 342 FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
343   - mapping_gfp_mask(mapping),
344   - GFP_NOFS);
  343 + mapping_gfp_mask(mapping));
345 344 }
346 345  
347 346 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
... ... @@ -1046,8 +1046,7 @@
1046 1046 * @mapping: the address_space to search
1047 1047 * @offset: the page index
1048 1048 * @fgp_flags: PCG flags
1049   - * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
1050   - * @radix_gfp_mask: gfp mask to use for radix tree node allocation
  1049 + * @gfp_mask: gfp mask to use for the page cache data page allocation
1051 1050 *
1052 1051 * Looks up the page cache slot at @mapping & @offset.
1053 1052 *
... ... @@ -1056,11 +1055,9 @@
1056 1055 * FGP_ACCESSED: the page will be marked accessed
1057 1056 * FGP_LOCK: Page is return locked
1058 1057 * FGP_CREAT: If page is not present then a new page is allocated using
1059   - * @cache_gfp_mask and added to the page cache and the VM's LRU
1060   - * list. If radix tree nodes are allocated during page cache
1061   - * insertion then @radix_gfp_mask is used. The page is returned
1062   - * locked and with an increased refcount. Otherwise, %NULL is
1063   - * returned.
  1058 + * @gfp_mask and added to the page cache and the VM's LRU
  1059 + * list. The page is returned locked and with an increased
  1060 + * refcount. Otherwise, %NULL is returned.
1064 1061 *
1065 1062 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
1066 1063 * if the GFP flags specified for FGP_CREAT are atomic.
... ... @@ -1068,7 +1065,7 @@
1068 1065 * If there is a page cache page, it is returned with an increased refcount.
1069 1066 */
1070 1067 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
1071   - int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
  1068 + int fgp_flags, gfp_t gfp_mask)
1072 1069 {
1073 1070 struct page *page;
1074 1071  
1075 1072  
... ... @@ -1105,13 +1102,11 @@
1105 1102 if (!page && (fgp_flags & FGP_CREAT)) {
1106 1103 int err;
1107 1104 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
1108   - cache_gfp_mask |= __GFP_WRITE;
1109   - if (fgp_flags & FGP_NOFS) {
1110   - cache_gfp_mask &= ~__GFP_FS;
1111   - radix_gfp_mask &= ~__GFP_FS;
1112   - }
  1105 + gfp_mask |= __GFP_WRITE;
  1106 + if (fgp_flags & FGP_NOFS)
  1107 + gfp_mask &= ~__GFP_FS;
1113 1108  
1114   - page = __page_cache_alloc(cache_gfp_mask);
  1109 + page = __page_cache_alloc(gfp_mask);
1115 1110 if (!page)
1116 1111 return NULL;
1117 1112  
... ... @@ -1122,7 +1117,8 @@
1122 1117 if (fgp_flags & FGP_ACCESSED)
1123 1118 __SetPageReferenced(page);
1124 1119  
1125   - err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
  1120 + err = add_to_page_cache_lru(page, mapping, offset,
  1121 + gfp_mask & GFP_RECLAIM_MASK);
1126 1122 if (unlikely(err)) {
1127 1123 page_cache_release(page);
1128 1124 page = NULL;
... ... @@ -2443,8 +2439,7 @@
2443 2439 fgp_flags |= FGP_NOFS;
2444 2440  
2445 2441 page = pagecache_get_page(mapping, index, fgp_flags,
2446   - mapping_gfp_mask(mapping),
2447   - GFP_KERNEL);
  2442 + mapping_gfp_mask(mapping));
2448 2443 if (page)
2449 2444 wait_for_stable_page(page);
2450 2445