Blame view
include/linux/ksm.h
2.84 KB
f8af4da3b
|
1 2 3 4 5 6 7 8 9 10 11 |
#ifndef __LINUX_KSM_H #define __LINUX_KSM_H /* * Memory merging support. * * This code enables dynamic sharing of identical pages found in different * memory areas, even if they are not shared by fork(). */ #include <linux/bitops.h> #include <linux/mm.h> |
5ad646880
|
12 13 |
#include <linux/pagemap.h> #include <linux/rmap.h> |
f8af4da3b
|
14 |
#include <linux/sched.h> |
08beca44d
|
15 |
struct stable_node; |
5ad646880
|
16 |
struct mem_cgroup; |
08beca44d
|
17 |
|
f8af4da3b
|
18 19 20 21 |
#ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags); int __ksm_enter(struct mm_struct *mm); |
1c2fb7a4c
|
22 |
void __ksm_exit(struct mm_struct *mm); |
f8af4da3b
|
23 24 25 26 27 28 29 |
/*
 * Propagate KSM registration across fork(): if the parent mm was
 * registered with ksmd (MMF_VM_MERGEABLE set in its flags), register
 * the child mm too.  Returns 0 on success or if nothing to do;
 * otherwise whatever error __ksm_enter() reports.
 */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return test_bit(MMF_VM_MERGEABLE, &oldmm->flags) ? __ksm_enter(mm) : 0;
}
1c2fb7a4c
|
30 |
static inline void ksm_exit(struct mm_struct *mm) |
f8af4da3b
|
31 32 |
{ if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) |
1c2fb7a4c
|
33 |
__ksm_exit(mm); |
f8af4da3b
|
34 |
} |
9a8408951
|
35 |
|
08beca44d
|
36 37 38 39 40 41 42 43 44 45 46 |
/*
 * A PageKsm page keeps a pointer to its stable_node in page->mapping,
 * tagged with PAGE_MAPPING_ANON | PAGE_MAPPING_KSM.  For any other
 * page there is no stable_node, so return NULL.
 */
static inline struct stable_node *page_stable_node(struct page *page)
{
	if (!PageKsm(page))
		return NULL;
	return page_rmapping(page);
}

/*
 * Install a stable_node pointer in page->mapping, tagging it so the
 * page is subsequently recognized as a KSM page.
 */
static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	unsigned long tag = PAGE_MAPPING_ANON | PAGE_MAPPING_KSM;

	page->mapping = (void *)stable_node + tag;
}
5ad646880
|
47 48 49 50 51 52 53 54 55 56 57 |
/* * When do_swap_page() first faults in from swap what used to be a KSM page, * no problem, it will be assigned to this vma's anon_vma; but thereafter, * it might be faulted into a different anon_vma (or perhaps to a different * offset in the same anon_vma). do_swap_page() cannot do all the locking * needed to reconstitute a cross-anon_vma KSM page: for now it has to make * a copy, and leave remerging the pages to a later pass of ksmd. * * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, * but what if the vma was unmerged while the page was swapped out? */ |
cbf86cfe0
|
58 59 |
struct page *ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address); |
5ad646880
|
60 |
|
051ac83ad
|
61 |
int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); |
e9995ef97
|
62 |
void ksm_migrate_page(struct page *newpage, struct page *oldpage); |
5ad646880
|
63 |
|
f8af4da3b
|
64 |
#else /* !CONFIG_KSM */ |
f8af4da3b
|
65 66 67 68 |
/* !CONFIG_KSM: no KSM state exists, so there is nothing to propagate
 * across fork(); always succeed. */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
1c2fb7a4c
|
69 |
/* !CONFIG_KSM: no per-mm KSM state, so exit teardown is a no-op. */
static inline void ksm_exit(struct mm_struct *mm)
{
}
9a8408951
|
72 |
|
f42647acc
|
73 74 75 76 77 78 |
#ifdef CONFIG_MMU
/* KSM disabled: accept the advice, report success, leave *vm_flags
 * untouched. */
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}
cbf86cfe0
|
79 |
/* Without KSM there can never be a KSM page, so no copy is needed:
 * hand the page straight back. */
static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

/* KSM pages cannot exist here, hence nothing is referenced via KSM rmap.
 * NOTE(review): no CONFIG_KSM-side declaration of page_referenced_ksm()
 * is visible in this header — this stub may be stale; confirm callers. */
static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}
051ac83ad
|
90 91 |
/* No KSM pages to walk when KSM is disabled: report nothing done. */
static inline int rmap_walk_ksm(struct page *page,
			struct rmap_walk_control *rwc)
{
	return 0;
}

/* Page migration never encounters KSM pages when KSM is disabled. */
static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
f42647acc
|
99 |
#endif /* CONFIG_MMU */ |
f8af4da3b
|
100 |
#endif /* !CONFIG_KSM */ |
5ad646880
|
101 |
#endif /* __LINUX_KSM_H */ |