Commit 912985dce45ef18fcdd9f5439fef054e0e22302a

Authored by Rusty Russell
1 parent 40c42076eb

mm: Make generic weak get_user_pages_fast and EXPORT_GPL it

Out of line get_user_pages_fast fallback implementation, make it a weak
symbol, get rid of CONFIG_HAVE_GET_USER_PAGES_FAST.

Export the symbol to modules so lguest can use it.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>

Showing 6 changed files with 16 additions and 29 deletions Side-by-side Diff

arch/powerpc/Kconfig
... ... @@ -42,9 +42,6 @@
42 42 bool
43 43 default y
44 44  
45   -config HAVE_GET_USER_PAGES_FAST
46   - def_bool PPC64
47   -
48 45 config HAVE_SETUP_PER_CPU_AREA
49 46 def_bool PPC64
50 47  
arch/x86/Kconfig
... ... @@ -22,7 +22,6 @@
22 22 select HAVE_IDE
23 23 select HAVE_OPROFILE
24 24 select HAVE_IOREMAP_PROT
25   - select HAVE_GET_USER_PAGES_FAST
26 25 select HAVE_KPROBES
27 26 select ARCH_WANT_OPTIONAL_GPIOLIB
28 27 select HAVE_KRETPROBES
arch/x86/mm/Makefile
1 1 obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
2   - pat.o pgtable.o
  2 + pat.o pgtable.o gup.o
3 3  
4   -obj-$(CONFIG_HAVE_GET_USER_PAGES_FAST) += gup.o
5 4 obj-$(CONFIG_X86_32) += pgtable_32.o
6 5  
7 6 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
include/linux/mm.h
... ... @@ -834,7 +834,6 @@
834 834 struct vm_area_struct **pprev, unsigned long start,
835 835 unsigned long end, unsigned long newflags);
836 836  
837   -#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
838 837 /*
839 838 * get_user_pages_fast provides equivalent functionality to get_user_pages,
840 839 * operating on current and current->mm (force=0 and doesn't return any vmas).
... ... @@ -847,25 +846,6 @@
847 846 */
848 847 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
849 848 struct page **pages);
850   -
851   -#else
852   -/*
853   - * Should probably be moved to asm-generic, and architectures can include it if
854   - * they don't implement their own get_user_pages_fast.
855   - */
856   -#define get_user_pages_fast(start, nr_pages, write, pages) \
857   -({ \
858   - struct mm_struct *mm = current->mm; \
859   - int ret; \
860   - \
861   - down_read(&mm->mmap_sem); \
862   - ret = get_user_pages(current, mm, start, nr_pages, \
863   - write, 0, pages, NULL); \
864   - up_read(&mm->mmap_sem); \
865   - \
866   - ret; \
867   -})
868   -#endif
869 849  
870 850 /*
871 851 * A callback you can register to apply pressure to ageable caches.
mm/Kconfig
... ... @@ -77,9 +77,6 @@
77 77 def_bool y
78 78 depends on !SPARSEMEM
79 79  
80   -config HAVE_GET_USER_PAGES_FAST
81   - bool
82   -
83 80 #
84 81 # Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
85 82 # to represent different areas of memory. This variable allows
mm/util.c
... ... @@ -171,4 +171,19 @@
171 171 mm->unmap_area = arch_unmap_area;
172 172 }
173 173 #endif
  174 +
  175 +int __attribute__((weak)) get_user_pages_fast(unsigned long start,
  176 + int nr_pages, int write, struct page **pages)
  177 +{
  178 + struct mm_struct *mm = current->mm;
  179 + int ret;
  180 +
  181 + down_read(&mm->mmap_sem);
  182 + ret = get_user_pages(current, mm, start, nr_pages,
  183 + write, 0, pages, NULL);
  184 + up_read(&mm->mmap_sem);
  185 +
  186 + return ret;
  187 +}
  188 +EXPORT_SYMBOL_GPL(get_user_pages_fast);