Commit 895791dac6946d535991edd11341046f8e85ea77
Committed by: Ingo Molnar
Parent: 4bb9c5c021
VM, x86, PAT: add a new vm flag to track full pfnmap at mmap
Impact: cleanup

Add a new vm flag VM_PFN_AT_MMAP to identify a PFNMAP that is fully
mapped with remap_pfn_range. Patch removes the overloading of
VM_INSERTPAGE from the earlier patch.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Nick Piggin <npiggin@suse.de>
LKML-Reference: <20090313233543.GA19909@linux-os.sc.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
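For context, the case the new flag tracks is the common driver pattern of remapping an entire physical range in the ->mmap hook. A minimal sketch follows; the driver name and the FOO_PHYS_BASE constant are illustrative, not from this patch:

#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Illustrative ->mmap handler: maps one contiguous physical range
 * over the whole vma in a single remap_pfn_range() call.  Because the
 * call covers [vma->vm_start, vma->vm_end), remap_pfn_range() stores
 * the starting pfn in vma->vm_pgoff and sets VM_PFN_AT_MMAP (see the
 * mm/memory.c hunk below).
 */
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	return remap_pfn_range(vma, vma->vm_start,
			       FOO_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

x86 PAT can then treat such a vma as one linear pfn range instead of tracking its pages individually.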
Showing 2 changed files with 5 additions and 15 deletions
include/linux/mm.h
@@ -98,12 +98,13 @@
 #define VM_HUGETLB      0x00400000      /* Huge TLB Page VM */
 #define VM_NONLINEAR    0x00800000      /* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY  0x01000000      /* T if mapped copy of data (nommu mmap) */
-#define VM_INSERTPAGE   0x02000000      /* The vma has had "vm_insert_page()" done on it. Refer note in VM_PFNMAP_AT_MMAP below */
+#define VM_INSERTPAGE   0x02000000      /* The vma has had "vm_insert_page()" done on it */
 #define VM_ALWAYSDUMP   0x04000000      /* Always include in core dumps */
 
 #define VM_CAN_NONLINEAR 0x08000000     /* Has ->fault & does nonlinear pages */
 #define VM_MIXEDMAP     0x10000000      /* Can contain "struct page" and pure PFN pages */
 #define VM_SAO          0x20000000      /* Strong Access Ordering (powerpc) */
+#define VM_PFN_AT_MMAP  0x40000000      /* PFNMAP vma that is fully mapped at mmap time */
 
 #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -127,17 +128,6 @@
 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 
 /*
- * pfnmap vmas that are fully mapped at mmap time (not mapped on fault).
- * Used by x86 PAT to identify such PFNMAP mappings and optimize their handling.
- * Note VM_INSERTPAGE flag is overloaded here. i.e,
- * VM_INSERTPAGE && !VM_PFNMAP implies
- *     The vma has had "vm_insert_page()" done on it
- * VM_INSERTPAGE && VM_PFNMAP implies
- *     The vma is PFNMAP with full mapping at mmap time
- */
-#define VM_PFNMAP_AT_MMAP (VM_INSERTPAGE | VM_PFNMAP)
-
-/*
  * mapping from the currently active vm_flags protection bits (the
  * low four bits) to a page protection mask..
  */
@@ -156,7 +146,7 @@
  */
 static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
 {
-       return ((vma->vm_flags & VM_PFNMAP_AT_MMAP) == VM_PFNMAP_AT_MMAP);
+       return (vma->vm_flags & VM_PFN_AT_MMAP);
 }
 
 static inline int is_pfn_mapping(struct vm_area_struct *vma)
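Semantically, the mm.h change replaces a two-bit test with a dedicated bit: the old is_linear_pfn_mapping() had to check that VM_INSERTPAGE and VM_PFNMAP were both set, because VM_INSERTPAGE alone meant only that vm_insert_page() had been used. A standalone sketch of the two checks; the VM_PFNMAP value (0x00000400) is quoted from kernels of this era, not from the hunk above, so treat it as an assumption:

#include <stdio.h>

#define VM_PFNMAP         0x00000400   /* assumed value; not in the hunk above */
#define VM_INSERTPAGE     0x02000000
#define VM_PFN_AT_MMAP    0x40000000
#define VM_PFNMAP_AT_MMAP (VM_INSERTPAGE | VM_PFNMAP)   /* old overloaded form */

int main(void)
{
	/* A vma that only had vm_insert_page() done on it. */
	unsigned long flags = VM_INSERTPAGE;

	/* Old test: correct only because both bits are required at once. */
	printf("old: %d\n", (flags & VM_PFNMAP_AT_MMAP) == VM_PFNMAP_AT_MMAP);

	/* New test: a plain single-bit check with no overloading. */
	printf("new: %d\n", !!(flags & VM_PFN_AT_MMAP));
	return 0;
}

Both tests print 0 here; the point is that the new form cannot be confused by any other combination of VM_INSERTPAGE and VM_PFNMAP.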
mm/memory.c
@@ -1667,7 +1667,7 @@
         */
        if (addr == vma->vm_start && end == vma->vm_end) {
                vma->vm_pgoff = pfn;
-               vma->vm_flags |= VM_PFNMAP_AT_MMAP;
+               vma->vm_flags |= VM_PFN_AT_MMAP;
        } else if (is_cow_mapping(vma->vm_flags))
                return -EINVAL;
 
@@ -1680,7 +1680,7 @@
         * needed from higher level routine calling unmap_vmas
         */
        vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
-       vma->vm_flags &= ~VM_PFNMAP_AT_MMAP;
+       vma->vm_flags &= ~VM_PFN_AT_MMAP;
        return -EINVAL;
 
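Note also the first memory.c hunk: when the mapping covers the whole vma, remap_pfn_range() records the starting pfn in vm_pgoff. A caller that sees is_linear_pfn_mapping() return true can therefore recover the pfn for any address arithmetically, without a page-table walk. A hedged sketch (the helper name is ours, not from the patch):

/*
 * Valid only when is_linear_pfn_mapping(vma) is true: vm_pgoff holds
 * the pfn of vm_start, so any address maps to a linear pfn offset.
 */
static inline unsigned long linear_pfn_of(struct vm_area_struct *vma,
					  unsigned long addr)
{
	return vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
}

This is the shortcut x86 PAT relies on when reserving and freeing the memtype for the whole range at mmap/unmap time, as the comment removed from mm.h described.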