Blame view
include/linux/ksm.h
3.98 KB
f8af4da3b ksm: the mm inter... |
1 2 3 4 5 6 7 8 9 10 11 |
#ifndef __LINUX_KSM_H #define __LINUX_KSM_H /* * Memory merging support. * * This code enables dynamic sharing of identical pages found in different * memory areas, even if they are not shared by fork(). */ #include <linux/bitops.h> #include <linux/mm.h> |
5ad646880 ksm: let shared p... |
12 13 |
#include <linux/pagemap.h> #include <linux/rmap.h> |
f8af4da3b ksm: the mm inter... |
14 |
#include <linux/sched.h> |
08beca44d ksm: stable_node ... |
15 |
struct stable_node; |
5ad646880 ksm: let shared p... |
16 |
struct mem_cgroup; |
08beca44d ksm: stable_node ... |
17 |
|
4969c1192 mm: fix swapin ra... |
18 19 |
struct page *ksm_does_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address); |
f8af4da3b ksm: the mm inter... |
20 21 22 23 |
#ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags); int __ksm_enter(struct mm_struct *mm); |
1c2fb7a4c ksm: fix deadlock... |
24 |
void __ksm_exit(struct mm_struct *mm); |
f8af4da3b ksm: the mm inter... |
25 26 27 28 29 30 31 |
/*
 * Called at fork(): if the parent mm has mergeable areas registered
 * (MMF_VM_MERGEABLE set), register the child mm via __ksm_enter() too,
 * so ksmd keeps scanning it.  Returns 0 or __ksm_enter()'s error code.
 */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (!test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return 0;

	return __ksm_enter(mm);
}
1c2fb7a4c ksm: fix deadlock... |
32 |
static inline void ksm_exit(struct mm_struct *mm) |
f8af4da3b ksm: the mm inter... |
33 34 |
{ if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) |
1c2fb7a4c ksm: fix deadlock... |
35 |
__ksm_exit(mm); |
f8af4da3b ksm: the mm inter... |
36 |
} |
9a8408951 ksm: identify Pag... |
37 38 39 40 |
/* * A KSM page is one of those write-protected "shared pages" or "merged pages" * which KSM maps into multiple mms, wherever identical anonymous page content |
08beca44d ksm: stable_node ... |
41 42 |
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any * anon_vma, but to that page's node of the stable tree. |
9a8408951 ksm: identify Pag... |
43 44 45 |
*/ static inline int PageKsm(struct page *page) { |
3ca7b3c5b mm: define PAGE_M... |
46 47 |
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); |
9a8408951 ksm: identify Pag... |
48 |
} |
08beca44d ksm: stable_node ... |
49 50 51 52 53 54 55 56 57 58 59 |
/*
 * page_stable_node - retrieve the stable-tree node encoded in page->mapping,
 * or NULL if this is not a KSM page.  page_rmapping() strips the low tag
 * bits that set_page_stable_node() below adds.
 */
static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

/*
 * set_page_stable_node - point page->mapping at the page's stable-tree node,
 * tagging the pointer with PAGE_MAPPING_ANON | PAGE_MAPPING_KSM in its low
 * bits so that PageKsm() recognizes the page.  Pointer-tagging trick: the
 * node pointer and the page-type flags share the one mapping word.
 */
static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
5ad646880 ksm: let shared p... |
60 61 62 63 64 65 66 67 68 69 70 |
/* * When do_swap_page() first faults in from swap what used to be a KSM page, * no problem, it will be assigned to this vma's anon_vma; but thereafter, * it might be faulted into a different anon_vma (or perhaps to a different * offset in the same anon_vma). do_swap_page() cannot do all the locking * needed to reconstitute a cross-anon_vma KSM page: for now it has to make * a copy, and leave remerging the pages to a later pass of ksmd. * * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, * but what if the vma was unmerged while the page was swapped out? */ |
4969c1192 mm: fix swapin ra... |
71 |
static inline int ksm_might_need_to_copy(struct page *page, |
5ad646880 ksm: let shared p... |
72 |
struct vm_area_struct *vma, unsigned long address) |
9a8408951 ksm: identify Pag... |
73 |
{ |
5ad646880 ksm: let shared p... |
74 |
struct anon_vma *anon_vma = page_anon_vma(page); |
4969c1192 mm: fix swapin ra... |
75 76 77 |
return anon_vma && (anon_vma->root != vma->anon_vma->root || page->index != linear_page_index(vma, address)); |
9a8408951 ksm: identify Pag... |
78 |
} |
5ad646880 ksm: let shared p... |
79 80 81 82 |
int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, unsigned long *vm_flags); int try_to_unmap_ksm(struct page *page, enum ttu_flags flags); |
e9995ef97 ksm: rmap_walk to... |
83 84 85 |
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, struct vm_area_struct *, unsigned long, void *), void *arg); void ksm_migrate_page(struct page *newpage, struct page *oldpage); |
5ad646880 ksm: let shared p... |
86 |
|
f8af4da3b ksm: the mm inter... |
87 |
#else /* !CONFIG_KSM */ |
f8af4da3b ksm: the mm inter... |
88 89 90 91 |
/* CONFIG_KSM=n stub: nothing to propagate at fork(), always succeeds */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
1c2fb7a4c ksm: fix deadlock... |
92 |
/* CONFIG_KSM=n stub: no KSM state to tear down on mm exit */
static inline void ksm_exit(struct mm_struct *mm)
{
}
9a8408951 ksm: identify Pag... |
95 96 97 98 99 |
/* CONFIG_KSM=n stub: no page can ever be a KSM page */
static inline int PageKsm(struct page *page)
{
	return 0;
}
f42647acc fix ksm.h breakag... |
100 101 102 103 104 105 |
#ifdef CONFIG_MMU
/*
 * CONFIG_KSM=n stub: reports success without touching *vm_flags,
 * so the madvise() call is accepted but has no effect.
 */
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}
4969c1192 mm: fix swapin ra... |
106 |
/* CONFIG_KSM=n stub: a swapped-in page never needs the KSM copy treatment */
static inline int ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return 0;
}

/* CONFIG_KSM=n stub: no KSM mappings, so no references to report */
static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}

/* CONFIG_KSM=n stub: no KSM mappings to unmap */
static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
	return 0;
}
e9995ef97 ksm: rmap_walk to... |
122 123 124 125 126 127 128 129 130 131 |
/* CONFIG_KSM=n stub: no KSM pages, so the rmap walk visits nothing */
static inline int rmap_walk_ksm(struct page *page,
		int (*rmap_one)(struct page*, struct vm_area_struct *,
				unsigned long, void *), void *arg)
{
	return 0;
}

/* CONFIG_KSM=n stub: no stable-tree bookkeeping to transfer on migration */
static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
f42647acc fix ksm.h breakag... |
132 |
#endif /* CONFIG_MMU */ |
f8af4da3b ksm: the mm inter... |
133 |
#endif /* !CONFIG_KSM */ |
5ad646880 ksm: let shared p... |
134 |
#endif /* __LINUX_KSM_H */ |