/* mm/vmacache.c */
  /*
   * Copyright (C) 2014 Davidlohr Bueso.
   */
  #include <linux/sched.h>
  #include <linux/mm.h>
  #include <linux/vmacache.h>
  
  /*
   * Flush vma caches for threads that share a given mm.
   *
   * The operation is safe because the caller holds the mmap_sem
   * exclusively and other threads accessing the vma cache will
   * have mmap_sem held at least for read, so no extra locking
   * is required to maintain the vma cache.
   */
  void vmacache_flush_all(struct mm_struct *mm)
  {
  	struct task_struct *g, *p;
f5f302e21   Davidlohr Bueso   mm,vmacache: coun...
19
  	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
6b4ebc3a9   Davidlohr Bueso   mm,vmacache: opti...
20
21
22
23
24
25
26
27
28
  	/*
  	 * Single threaded tasks need not iterate the entire
  	 * list of process. We can avoid the flushing as well
  	 * since the mm's seqnum was increased and don't have
  	 * to worry about other threads' seqnum. Current's
  	 * flush will occur upon the next lookup.
  	 */
  	if (atomic_read(&mm->mm_users) == 1)
  		return;
615d6e875   Davidlohr Bueso   mm: per-thread vm...
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
  	rcu_read_lock();
  	for_each_process_thread(g, p) {
  		/*
  		 * Only flush the vmacache pointers as the
  		 * mm seqnum is already set and curr's will
  		 * be set upon invalidation when the next
  		 * lookup is done.
  		 */
  		if (mm == p->mm)
  			vmacache_flush(p);
  	}
  	rcu_read_unlock();
  }
  
  /*
   * This task may be accessing a foreign mm via (for example)
   * get_user_pages()->find_vma().  The vmacache is task-local and this
   * task's vmacache pertains to a different mm (ie, its own).  There is
   * nothing we can do here.
   *
   * Also handle the case where a kernel thread has adopted this mm via use_mm().
   * That kernel thread's vmacache is not applicable to this mm.
   */
a2c1aad3b   Davidlohr Bueso   mm/vmacache: inli...
52
  static inline bool vmacache_valid_mm(struct mm_struct *mm)
615d6e875   Davidlohr Bueso   mm: per-thread vm...
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
  {
  	return current->mm == mm && !(current->flags & PF_KTHREAD);
  }
  
  void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
  {
  	if (vmacache_valid_mm(newvma->vm_mm))
  		current->vmacache[VMACACHE_HASH(addr)] = newvma;
  }
  
  static bool vmacache_valid(struct mm_struct *mm)
  {
  	struct task_struct *curr;
  
  	if (!vmacache_valid_mm(mm))
  		return false;
  
  	curr = current;
  	if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
  		/*
  		 * First attempt will always be invalid, initialize
  		 * the new cache for this task here.
  		 */
  		curr->vmacache_seqnum = mm->vmacache_seqnum;
  		vmacache_flush(curr);
  		return false;
  	}
  	return true;
  }
  
  struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
  {
  	int i;
131ddc5c7   Alexey Dobriyan   mm: unrig VMA cac...
86
  	count_vm_vmacache_event(VMACACHE_FIND_CALLS);
615d6e875   Davidlohr Bueso   mm: per-thread vm...
87
88
89
90
91
  	if (!vmacache_valid(mm))
  		return NULL;
  
  	for (i = 0; i < VMACACHE_SIZE; i++) {
  		struct vm_area_struct *vma = current->vmacache[i];
50f5aa8a9   Linus Torvalds   mm: don't pointle...
92
93
94
95
  		if (!vma)
  			continue;
  		if (WARN_ON_ONCE(vma->vm_mm != mm))
  			break;
4f115147f   Davidlohr Bueso   mm,vmacache: add ...
96
97
  		if (vma->vm_start <= addr && vma->vm_end > addr) {
  			count_vm_vmacache_event(VMACACHE_FIND_HITS);
615d6e875   Davidlohr Bueso   mm: per-thread vm...
98
  			return vma;
4f115147f   Davidlohr Bueso   mm,vmacache: add ...
99
  		}
615d6e875   Davidlohr Bueso   mm: per-thread vm...
100
101
102
103
104
105
106
107
108
109
110
  	}
  
  	return NULL;
  }
  
  #ifndef CONFIG_MMU
  struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
  					   unsigned long start,
  					   unsigned long end)
  {
  	int i;
131ddc5c7   Alexey Dobriyan   mm: unrig VMA cac...
111
  	count_vm_vmacache_event(VMACACHE_FIND_CALLS);
615d6e875   Davidlohr Bueso   mm: per-thread vm...
112
113
114
115
116
  	if (!vmacache_valid(mm))
  		return NULL;
  
  	for (i = 0; i < VMACACHE_SIZE; i++) {
  		struct vm_area_struct *vma = current->vmacache[i];
4f115147f   Davidlohr Bueso   mm,vmacache: add ...
117
118
  		if (vma && vma->vm_start == start && vma->vm_end == end) {
  			count_vm_vmacache_event(VMACACHE_FIND_HITS);
615d6e875   Davidlohr Bueso   mm: per-thread vm...
119
  			return vma;
4f115147f   Davidlohr Bueso   mm,vmacache: add ...
120
  		}
615d6e875   Davidlohr Bueso   mm: per-thread vm...
121
122
123
124
125
  	}
  
  	return NULL;
  }
  #endif