Blame view
mm/interval_tree.c
3.13 KB
8092f73c5 treewide: Replace... |
1 |
// SPDX-License-Identifier: GPL-2.0-only |
6b2dbba8b mm: replace vma p... |
2 3 4 5 |
/* * mm/interval_tree.c - interval tree for mapping->i_mmap * * Copyright (C) 2012, Michel Lespinasse <walken@google.com> |
6b2dbba8b mm: replace vma p... |
6 7 8 9 |
*/ #include <linux/mm.h> #include <linux/fs.h> |
bf181b9f9 mm anon rmap: rep... |
10 |
#include <linux/rmap.h> |
9826a516f mm: interval tree... |
11 |
#include <linux/interval_tree_generic.h> |
6b2dbba8b mm: replace vma p... |
12 |
|
9826a516f mm: interval tree... |
13 14 15 16 17 18 19 |
/* Start of the interval covered by the VMA: its page offset. */
static inline unsigned long vma_start_pgoff(struct vm_area_struct *vma)
{
	return vma->vm_pgoff;
}

/* Last page offset covered by the VMA (inclusive): start + page count - 1. */
static inline unsigned long vma_last_pgoff(struct vm_area_struct *vma)
{
	return vma->vm_pgoff + (vma_pages(vma) - 1);
}
ac51b934f mm: replace vma->... |
22 23 |
/*
 * Generate the vma_interval_tree_{insert,remove,iter_first,iter_next}()
 * family over struct vm_area_struct, keyed by the pgoff helpers above and
 * augmented via shared.rb / shared.rb_subtree_last.  The empty macro
 * argument (",,") leaves the generated functions with external linkage.
 */
INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
		     unsigned long, shared.rb_subtree_last,
		     vma_start_pgoff, vma_last_pgoff,, vma_interval_tree)

/* Insert node immediately after prev in the interval tree */
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root)
{
	struct rb_node **link;
	struct vm_area_struct *parent;
	unsigned long last = vma_last_pgoff(node);

	/* Caller must guarantee node and prev start at the same pgoff. */
	VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);

	if (!prev->shared.rb.rb_right) {
		/* prev has no right child: node becomes that right child. */
		parent = prev;
		link = &prev->shared.rb.rb_right;
	} else {
		/*
		 * Otherwise the in-order successor slot is the leftmost
		 * position in prev's right subtree.  Propagate the augmented
		 * rb_subtree_last value into every node on the descent path,
		 * since the later rb_insert_augmented() call only fixes up
		 * from the insertion point toward the root.
		 */
		parent = rb_entry(prev->shared.rb.rb_right,
				  struct vm_area_struct, shared.rb);
		if (parent->shared.rb_subtree_last < last)
			parent->shared.rb_subtree_last = last;
		while (parent->shared.rb.rb_left) {
			parent = rb_entry(parent->shared.rb.rb_left,
					  struct vm_area_struct, shared.rb);
			if (parent->shared.rb_subtree_last < last)
				parent->shared.rb_subtree_last = last;
		}
		link = &parent->shared.rb.rb_left;
	}

	/* Link node in and rebalance, keeping the augmented data current. */
	node->shared.rb_subtree_last = last;
	rb_link_node(&node->shared.rb, &parent->shared.rb, link);
	rb_insert_augmented(&node->shared.rb, &root->rb_root,
			    &vma_interval_tree_augment);
}
bf181b9f9 mm anon rmap: rep... |
58 59 60 61 62 63 64 65 66 67 68 69 |
/* Interval start for an anon_vma_chain: delegate to its VMA. */
static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
{
	return vma_start_pgoff(avc->vma);
}

/* Interval end (inclusive) for an anon_vma_chain: delegate to its VMA. */
static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
{
	return vma_last_pgoff(avc->vma);
}

/*
 * Generate the __anon_vma_interval_tree_*() functions over
 * struct anon_vma_chain (augmented via rb / rb_subtree_last).  They are
 * emitted "static inline" and wrapped by the public functions below.
 */
INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
		     avc_start_pgoff, avc_last_pgoff,
		     static inline, __anon_vma_interval_tree)

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root)
{
#ifdef CONFIG_DEBUG_VM_RB
	/*
	 * Snapshot the interval endpoints at insertion time so
	 * anon_vma_interval_tree_verify() can later detect a VMA whose
	 * pgoff/size changed while it was still linked in the tree.
	 */
	node->cached_vma_start = avc_start_pgoff(node);
	node->cached_vma_last = avc_last_pgoff(node);
#endif
	__anon_vma_interval_tree_insert(node, root);
}

void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root)
{
	__anon_vma_interval_tree_remove(node, root);
}

/* First chain in root overlapping [first, last], or NULL if none. */
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long first, unsigned long last)
{
	return __anon_vma_interval_tree_iter_first(root, first, last);
}

/* Next chain after node overlapping [first, last], or NULL if none. */
struct anon_vma_chain *
anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
				 unsigned long first, unsigned long last)
{
	return __anon_vma_interval_tree_iter_next(node, first, last);
}

#ifdef CONFIG_DEBUG_VM_RB
/* Warn if node's interval no longer matches what was cached at insert. */
void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
{
	WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
	WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
}
#endif