commit 0f0a327fa12cd55de5e7f8c05a70ac3d047f405e
Committer: Oded Gabbay
Parent:    34ee645e83
Branches:  ti-lsk-linux-4.1.y and 10 other branches
mmu_notifier: add the callback for mmu_notifier_invalidate_range()
Now that the mmu_notifier_invalidate_range() calls are in place, add the
callback to allow subsystems to register against it.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Jay Cornwall <Jay.Cornwall@amd.com>
Cc: Oded Gabbay <Oded.Gabbay@amd.com>
Cc: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
2 changed files, 57 additions, 5 deletions
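Before the diff, a minimal sketch (not part of this commit) of how a subsystem might register against the new callback; my_dev_ctx and my_dev_flush_tlb_range() are hypothetical stand-ins for driver code:

```c
#include <linux/mmu_notifier.h>
#include <linux/mm_types.h>

/* Hypothetical per-device context embedding the notifier. */
struct my_dev_ctx {
	struct mmu_notifier mn;
};

/* Hypothetical non-sleeping flush of the device's TLB range. */
static void my_dev_flush_tlb_range(struct my_dev_ctx *ctx,
				   unsigned long start, unsigned long end);

/* Runs under the ptl spin-lock, so it must not sleep. */
static void my_dev_invalidate_range(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	struct my_dev_ctx *ctx = container_of(mn, struct my_dev_ctx, mn);

	my_dev_flush_tlb_range(ctx, start, end);
}

/*
 * With shared page-tables, invalidate_range() alone is enough; no
 * invalidate_range_start()/end() implementation is required.
 */
static const struct mmu_notifier_ops my_dev_mn_ops = {
	.invalidate_range = my_dev_invalidate_range,
};

int my_dev_bind_mm(struct my_dev_ctx *ctx, struct mm_struct *mm)
{
	ctx->mn.ops = &my_dev_mn_ops;
	return mmu_notifier_register(&ctx->mn, mm);	/* may sleep */
}
```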
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -98,11 +98,11 @@
 	/*
 	 * invalidate_range_start() and invalidate_range_end() must be
 	 * paired and are called only when the mmap_sem and/or the
-	 * locks protecting the reverse maps are held. The subsystem
-	 * must guarantee that no additional references are taken to
-	 * the pages in the range established between the call to
-	 * invalidate_range_start() and the matching call to
-	 * invalidate_range_end().
+	 * locks protecting the reverse maps are held. If the subsystem
+	 * can't guarantee that no additional references are taken to
+	 * the pages in the range, it has to implement the
+	 * invalidate_range() notifier to remove any references taken
+	 * after invalidate_range_start().
 	 *
 	 * Invalidation of multiple concurrent ranges may be
 	 * optionally permitted by the driver. Either way the
@@ -144,6 +144,29 @@
 	void (*invalidate_range_end)(struct mmu_notifier *mn,
 				     struct mm_struct *mm,
 				     unsigned long start, unsigned long end);
+
+	/*
+	 * invalidate_range() is either called between
+	 * invalidate_range_start() and invalidate_range_end() when the
+	 * VM has to free pages that were unmapped, but before the
+	 * pages are actually freed, or outside of _start()/_end() when
+	 * a (remote) TLB flush is necessary.
+	 *
+	 * If invalidate_range() is used to manage a non-CPU TLB with
+	 * shared page-tables, it is not necessary to implement the
+	 * invalidate_range_start()/end() notifiers, as
+	 * invalidate_range() already catches the points in time when
+	 * an external TLB range needs to be flushed.
+	 *
+	 * The invalidate_range() function is called under the ptl
+	 * spin-lock and is not allowed to sleep.
+	 *
+	 * Note that this function might be called with just a sub-range
+	 * of what was passed to invalidate_range_start()/end(), if
+	 * called between those functions.
+	 */
+	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
+				 unsigned long start, unsigned long end);
 };
 
 /*
@@ -190,6 +213,8 @@
 				  unsigned long start, unsigned long end);
 extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 				  unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+				  unsigned long start, unsigned long end);
 
 static inline void mmu_notifier_release(struct mm_struct *mm)
 {
@@ -245,6 +270,8 @@
 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 				  unsigned long start, unsigned long end)
 {
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_invalidate_range(mm, start, end);
 }
 
 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
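The inline wrapper above keeps the no-notifier case to a single mm_has_notifiers() check. A hedged sketch of a call site, in the style of the mmu_notifier_invalidate_range() calls that the earlier patches in this series placed in core mm code (example_clear_pte() is illustrative, not from the tree):

```c
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/*
 * Illustrative only: clear one PTE, flush the CPU TLB for that page,
 * then tell any registered non-CPU TLBs that mirror this mm.
 */
static pte_t example_clear_pte(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep)
{
	pte_t pte = ptep_clear_flush(vma, addr, ptep);

	mmu_notifier_invalidate_range(vma->vm_mm, addr, addr + PAGE_SIZE);
	return pte;
}
```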
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -193,12 +193,37 @@
 
 	id = srcu_read_lock(&srcu);
 	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
+		/*
+		 * Call invalidate_range() here too, to avoid the need for
+		 * subsystems to register an invalidate_range_end() call-back
+		 * when they already have invalidate_range(). Usually a
+		 * subsystem registers either invalidate_range_start()/end()
+		 * or invalidate_range(), so this adds no overhead (besides
+		 * the pointer check).
+		 */
+		if (mn->ops->invalidate_range)
+			mn->ops->invalidate_range(mn, mm, start, end);
 		if (mn->ops->invalidate_range_end)
 			mn->ops->invalidate_range_end(mn, mm, start, end);
 	}
 	srcu_read_unlock(&srcu, id);
 }
 EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
+
+void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+				  unsigned long start, unsigned long end)
+{
+	struct mmu_notifier *mn;
+	int id;
+
+	id = srcu_read_lock(&srcu);
+	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
+		if (mn->ops->invalidate_range)
+			mn->ops->invalidate_range(mn, mm, start, end);
+	}
+	srcu_read_unlock(&srcu, id);
+}
+EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
 
 static int do_mmu_notifier_register(struct mmu_notifier *mn,
 				    struct mm_struct *mm,
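As a usage note (not part of this commit): a subsystem that registered as in the earlier sketch tears down through the existing mmu_notifier_unregister() path, which this patch leaves untouched.

```c
/* Hypothetical teardown matching my_dev_bind_mm() above. */
void my_dev_unbind_mm(struct my_dev_ctx *ctx, struct mm_struct *mm)
{
	/* May sleep; must not be called from the notifier callbacks. */
	mmu_notifier_unregister(&ctx->mn, mm);
}
```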