Commit 5949eac4d9b5bf936c12cb7ec3a09084c1326834
Committed by
Linus Torvalds
1 parent
3142b651ad
Exists in
master
and in
4 other branches
drm/i915: use shmem_read_mapping_page
Soon tmpfs will stop supporting ->readpage and read_cache_page_gfp(): once "tmpfs: add shmem_read_mapping_page_gfp" has been applied, this patch can be applied to ease the transition.

Make i915_gem_object_get_pages_gtt() use shmem_read_mapping_page_gfp() in the one place it's needed; elsewhere use shmem_read_mapping_page(), with the mapping's gfp_mask properly initialized.

Forget about __GFP_COLD: since tmpfs initializes its pages with memset, asking for a cold page is counter-productive.

Include linux/shmem_fs.h also in drm_gem.c: with shmem_file_setup() now declared there too, we shall remove the prototype from linux/mm.h later.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Keith Packard <keithp@keithp.com>
Cc: Dave Airlie <airlied@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 2 changed files with 15 additions and 17 deletions. (Side-by-side Diff)
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/i915/i915_gem.c
... | ... | @@ -31,6 +31,7 @@ |
31 | 31 | #include "i915_drv.h" |
32 | 32 | #include "i915_trace.h" |
33 | 33 | #include "intel_drv.h" |
34 | +#include <linux/shmem_fs.h> | |
34 | 35 | #include <linux/slab.h> |
35 | 36 | #include <linux/swap.h> |
36 | 37 | #include <linux/pci.h> |
... | ... | @@ -359,8 +360,7 @@ |
359 | 360 | if ((page_offset + remain) > PAGE_SIZE) |
360 | 361 | page_length = PAGE_SIZE - page_offset; |
361 | 362 | |
362 | - page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, | |
363 | - GFP_HIGHUSER | __GFP_RECLAIMABLE); | |
363 | + page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); | |
364 | 364 | if (IS_ERR(page)) |
365 | 365 | return PTR_ERR(page); |
366 | 366 | |
... | ... | @@ -463,8 +463,7 @@ |
463 | 463 | if ((data_page_offset + page_length) > PAGE_SIZE) |
464 | 464 | page_length = PAGE_SIZE - data_page_offset; |
465 | 465 | |
466 | - page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, | |
467 | - GFP_HIGHUSER | __GFP_RECLAIMABLE); | |
466 | + page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); | |
468 | 467 | if (IS_ERR(page)) { |
469 | 468 | ret = PTR_ERR(page); |
470 | 469 | goto out; |
... | ... | @@ -797,8 +796,7 @@ |
797 | 796 | if ((page_offset + remain) > PAGE_SIZE) |
798 | 797 | page_length = PAGE_SIZE - page_offset; |
799 | 798 | |
800 | - page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, | |
801 | - GFP_HIGHUSER | __GFP_RECLAIMABLE); | |
799 | + page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); | |
802 | 800 | if (IS_ERR(page)) |
803 | 801 | return PTR_ERR(page); |
804 | 802 | |
... | ... | @@ -907,8 +905,7 @@ |
907 | 905 | if ((data_page_offset + page_length) > PAGE_SIZE) |
908 | 906 | page_length = PAGE_SIZE - data_page_offset; |
909 | 907 | |
910 | - page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, | |
911 | - GFP_HIGHUSER | __GFP_RECLAIMABLE); | |
908 | + page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); | |
912 | 909 | if (IS_ERR(page)) { |
913 | 910 | ret = PTR_ERR(page); |
914 | 911 | goto out; |
915 | 912 | |
... | ... | @@ -1558,12 +1555,10 @@ |
1558 | 1555 | |
1559 | 1556 | inode = obj->base.filp->f_path.dentry->d_inode; |
1560 | 1557 | mapping = inode->i_mapping; |
1558 | + gfpmask |= mapping_gfp_mask(mapping); | |
1559 | + | |
1561 | 1560 | for (i = 0; i < page_count; i++) { |
1562 | - page = read_cache_page_gfp(mapping, i, | |
1563 | - GFP_HIGHUSER | | |
1564 | - __GFP_COLD | | |
1565 | - __GFP_RECLAIMABLE | | |
1566 | - gfpmask); | |
1561 | + page = shmem_read_mapping_page_gfp(mapping, i, gfpmask); | |
1567 | 1562 | if (IS_ERR(page)) |
1568 | 1563 | goto err_pages; |
1569 | 1564 | |
... | ... | @@ -3565,6 +3560,7 @@ |
3565 | 3560 | { |
3566 | 3561 | struct drm_i915_private *dev_priv = dev->dev_private; |
3567 | 3562 | struct drm_i915_gem_object *obj; |
3563 | + struct address_space *mapping; | |
3568 | 3564 | |
3569 | 3565 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
3570 | 3566 | if (obj == NULL) |
... | ... | @@ -3575,6 +3571,9 @@ |
3575 | 3571 | return NULL; |
3576 | 3572 | } |
3577 | 3573 | |
3574 | + mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; | |
3575 | + mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE); | |
3576 | + | |
3578 | 3577 | i915_gem_info_add_obj(dev_priv, size); |
3579 | 3578 | |
3580 | 3579 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
... | ... | @@ -3950,8 +3949,7 @@ |
3950 | 3949 | |
3951 | 3950 | page_count = obj->base.size / PAGE_SIZE; |
3952 | 3951 | for (i = 0; i < page_count; i++) { |
3953 | - struct page *page = read_cache_page_gfp(mapping, i, | |
3954 | - GFP_HIGHUSER | __GFP_RECLAIMABLE); | |
3952 | + struct page *page = shmem_read_mapping_page(mapping, i); | |
3955 | 3953 | if (!IS_ERR(page)) { |
3956 | 3954 | char *dst = kmap_atomic(page); |
3957 | 3955 | memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); |
... | ... | @@ -4012,8 +4010,7 @@ |
4012 | 4010 | struct page *page; |
4013 | 4011 | char *dst, *src; |
4014 | 4012 | |
4015 | - page = read_cache_page_gfp(mapping, i, | |
4016 | - GFP_HIGHUSER | __GFP_RECLAIMABLE); | |
4013 | + page = shmem_read_mapping_page(mapping, i); | |
4017 | 4014 | if (IS_ERR(page)) |
4018 | 4015 | return PTR_ERR(page); |
4019 | 4016 |