Blame view

mm/mmu_notifier.c 12.2 KB
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
1
2
3
4
5
  /*
   *  linux/mm/mmu_notifier.c
   *
   *  Copyright (C) 2008  Qumranet, Inc.
   *  Copyright (C) 2008  SGI
93e205a72   Christoph Lameter   fix Christoph's e...
6
   *             Christoph Lameter <cl@linux.com>
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
7
8
9
10
11
12
13
   *
   *  This work is licensed under the terms of the GNU GPL, version 2. See
   *  the COPYING file in the top-level directory.
   */
  
  #include <linux/rculist.h>
  #include <linux/mmu_notifier.h>
b95f1b31b   Paul Gortmaker   mm: Map most file...
14
  #include <linux/export.h>
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
15
16
  #include <linux/mm.h>
  #include <linux/err.h>
21a92735f   Sagi Grimberg   mm: mmu_notifier:...
17
  #include <linux/srcu.h>
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
18
19
  #include <linux/rcupdate.h>
  #include <linux/sched.h>
6e84f3152   Ingo Molnar   sched/headers: Pr...
20
  #include <linux/sched/mm.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
21
  #include <linux/slab.h>
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
22

21a92735f   Sagi Grimberg   mm: mmu_notifier:...
23
  /* global SRCU for all MMs */
dde8da6cf   Paul E. McKenney   mm: Use static in...
24
  DEFINE_STATIC_SRCU(srcu);
21a92735f   Sagi Grimberg   mm: mmu_notifier:...
25

cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
26
  /*
b972216e2   Peter Zijlstra   mmu_notifier: add...
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
   * This function allows mmu_notifier::release callback to delay a call to
   * a function that will free appropriate resources. The function must be
   * quick and must not block.
   */
  void mmu_notifier_call_srcu(struct rcu_head *rcu,
  			    void (*func)(struct rcu_head *rcu))
  {
  	call_srcu(&srcu, rcu, func);
  }
  EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);
  
  void mmu_notifier_synchronize(void)
  {
  	/* Wait for any running method to finish. */
  	srcu_barrier(&srcu);
  }
  EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
  
/*
 * Called from exit_mmap() when the mm is going away.
 *
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	/* Empty the list so a racing unregister finds nothing left to do. */
	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}
  
  /*
   * If no young bitflag is supported by the hardware, ->clear_flush_young can
   * unmap the address and return 1 or 0 depending if the mapping previously
   * existed or not.
   */
  int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
571284680   Andres Lagar-Cavilla   kvm: Fix page age...
111
112
  					unsigned long start,
  					unsigned long end)
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
113
114
  {
  	struct mmu_notifier *mn;
21a92735f   Sagi Grimberg   mm: mmu_notifier:...
115
  	int young = 0, id;
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
116

21a92735f   Sagi Grimberg   mm: mmu_notifier:...
117
  	id = srcu_read_lock(&srcu);
b67bfe0d4   Sasha Levin   hlist: drop the n...
118
  	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
119
  		if (mn->ops->clear_flush_young)
571284680   Andres Lagar-Cavilla   kvm: Fix page age...
120
  			young |= mn->ops->clear_flush_young(mn, mm, start, end);
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
121
  	}
21a92735f   Sagi Grimberg   mm: mmu_notifier:...
122
  	srcu_read_unlock(&srcu, id);
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
123
124
125
  
  	return young;
  }
1d7715c67   Vladimir Davydov   mmu-notifier: add...
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
  int __mmu_notifier_clear_young(struct mm_struct *mm,
  			       unsigned long start,
  			       unsigned long end)
  {
  	struct mmu_notifier *mn;
  	int young = 0, id;
  
  	id = srcu_read_lock(&srcu);
  	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
  		if (mn->ops->clear_young)
  			young |= mn->ops->clear_young(mn, mm, start, end);
  	}
  	srcu_read_unlock(&srcu, id);
  
  	return young;
  }
8ee53820e   Andrea Arcangeli   thp: mmu_notifier...
142
143
144
145
  int __mmu_notifier_test_young(struct mm_struct *mm,
  			      unsigned long address)
  {
  	struct mmu_notifier *mn;
21a92735f   Sagi Grimberg   mm: mmu_notifier:...
146
  	int young = 0, id;
8ee53820e   Andrea Arcangeli   thp: mmu_notifier...
147

21a92735f   Sagi Grimberg   mm: mmu_notifier:...
148
  	id = srcu_read_lock(&srcu);
b67bfe0d4   Sasha Levin   hlist: drop the n...
149
  	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
8ee53820e   Andrea Arcangeli   thp: mmu_notifier...
150
151
152
153
154
155
  		if (mn->ops->test_young) {
  			young = mn->ops->test_young(mn, mm, address);
  			if (young)
  				break;
  		}
  	}
21a92735f   Sagi Grimberg   mm: mmu_notifier:...
156
  	srcu_read_unlock(&srcu, id);
8ee53820e   Andrea Arcangeli   thp: mmu_notifier...
157
158
159
  
  	return young;
  }
828502d30   Izik Eidus   ksm: add mmu_noti...
160
161
162
163
  void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
  			       pte_t pte)
  {
  	struct mmu_notifier *mn;
21a92735f   Sagi Grimberg   mm: mmu_notifier:...
164
  	int id;
828502d30   Izik Eidus   ksm: add mmu_noti...
165

21a92735f   Sagi Grimberg   mm: mmu_notifier:...
166
  	id = srcu_read_lock(&srcu);
b67bfe0d4   Sasha Levin   hlist: drop the n...
167
  	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
828502d30   Izik Eidus   ksm: add mmu_noti...
168
169
  		if (mn->ops->change_pte)
  			mn->ops->change_pte(mn, mm, address, pte);
828502d30   Izik Eidus   ksm: add mmu_noti...
170
  	}
21a92735f   Sagi Grimberg   mm: mmu_notifier:...
171
  	srcu_read_unlock(&srcu, id);
828502d30   Izik Eidus   ksm: add mmu_noti...
172
  }
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
173
174
175
176
  void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
  				  unsigned long start, unsigned long end)
  {
  	struct mmu_notifier *mn;
21a92735f   Sagi Grimberg   mm: mmu_notifier:...
177
  	int id;
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
178

21a92735f   Sagi Grimberg   mm: mmu_notifier:...
179
  	id = srcu_read_lock(&srcu);
b67bfe0d4   Sasha Levin   hlist: drop the n...
180
  	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
181
182
183
  		if (mn->ops->invalidate_range_start)
  			mn->ops->invalidate_range_start(mn, mm, start, end);
  	}
21a92735f   Sagi Grimberg   mm: mmu_notifier:...
184
  	srcu_read_unlock(&srcu, id);
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
185
  }
fa794199e   Cliff Wickman   mm: export mmu no...
186
  EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);
/*
 * Close an invalidation that was opened with
 * __mmu_notifier_invalidate_range_start().  When @only_end is true the
 * caller guarantees ->invalidate_range() already ran (under the page table
 * lock), so it is skipped here.
 */
void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end,
					 bool only_end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range here too to avoid the need for the
		 * subsystem of having to register an invalidate_range_end
		 * call-back when there is invalidate_range already. Usually a
		 * subsystem registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this will be no additional overhead
		 * (besides the pointer check).
		 *
		 * We skip call to invalidate_range() if we know it is safe ie
		 * call site use mmu_notifier_invalidate_range_only_end() which
		 * is safe to do when we know that a call to invalidate_range()
		 * already happen under page table lock.
		 */
		if (!only_end && mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
219

0f0a327fa   Joerg Roedel   mmu_notifier: add...
220
221
222
223
224
225
226
227
228
229
230
231
232
233
  void __mmu_notifier_invalidate_range(struct mm_struct *mm,
  				  unsigned long start, unsigned long end)
  {
  	struct mmu_notifier *mn;
  	int id;
  
  	id = srcu_read_lock(&srcu);
  	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
  		if (mn->ops->invalidate_range)
  			mn->ops->invalidate_range(mn, mm, start, end);
  	}
  	srcu_read_unlock(&srcu, id);
  }
  EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
5ff7091f5   David Rientjes   mm, mmu_notifier:...
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
  /*
   * Must be called while holding mm->mmap_sem for either read or write.
   * The result is guaranteed to be valid until mm->mmap_sem is dropped.
   */
  bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
  {
  	struct mmu_notifier *mn;
  	int id;
  	bool ret = false;
  
  	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
  
  	if (!mm_has_notifiers(mm))
  		return ret;
  
  	id = srcu_read_lock(&srcu);
  	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
  		if (!mn->ops->invalidate_range &&
  		    !mn->ops->invalidate_range_start &&
  		    !mn->ops->invalidate_range_end)
  				continue;
  
  		if (!(mn->ops->flags & MMU_INVALIDATE_DOES_NOT_BLOCK)) {
  			ret = true;
  			break;
  		}
  	}
  	srcu_read_unlock(&srcu, id);
  	return ret;
  }
/*
 * Common body of mmu_notifier_register() and __mmu_notifier_register().
 * Allocates mm->mmu_notifier_mm on first use, then links @mn into the
 * notifier list while every VM lock is held so no notifier method can run
 * concurrently with the insertion.  @take_mmap_sem selects whether mmap_sem
 * is acquired here or was already taken (write mode) by the caller.
 */
static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	/* Caller must hold an mm_users pin (see mmu_notifier_register()). */
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Allocate up front: GFP_KERNEL must not be used once
	 * mm_take_all_locks() is held.
	 */
	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		/* Ownership transferred to mm; don't free it below. */
		mmu_notifier_mm = NULL;
	}
	/* mm_count pin released by mmu_notifier_unregister(). */
	mmgrab(mm);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	/* No-op when ownership was handed to mm (pointer NULLed above). */
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}
  
  /*
   * Must not hold mmap_sem nor any other VM related lock when calling
   * this registration function. Must also ensure mm_users can't go down
   * to zero while this runs to avoid races with mmu_notifier_release,
   * so mm has to be current->mm or the mm should be pinned safely such
   * as with get_task_mm(). If the mm is not current->mm, the mm_users
   * pin should be released by calling mmput after mmu_notifier_register
   * returns. mmu_notifier_unregister must be always called to
   * unregister the notifier. mm_count is automatically pinned to allow
   * mmu_notifier_unregister to safely run at any time later, before or
   * after exit_mmap. ->release will always be called before exit_mmap
   * frees the pages.
   */
  int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
  {
  	return do_mmu_notifier_register(mn, mm, 1);
  }
  EXPORT_SYMBOL_GPL(mmu_notifier_register);
  
  /*
   * Same as mmu_notifier_register but here the caller must hold the
   * mmap_sem in write mode.
   */
  int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
  {
  	return do_mmu_notifier_register(mn, mm, 0);
  }
  EXPORT_SYMBOL_GPL(__mmu_notifier_register);
  
  /* this is called after the last mmu_notifier_unregister() returned */
  void __mmu_notifier_mm_destroy(struct mm_struct *mm)
  {
  	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
  	kfree(mm->mmu_notifier_mm);
  	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
  }
  
/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	/* Unhashed means __mmu_notifier_release already ran ->release. */
	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	/* Drop the mm_count pin taken at registration time. */
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
21a92735f   Sagi Grimberg   mm: mmu_notifier:...
399

b972216e2   Peter Zijlstra   mmu_notifier: add...
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
  /*
   * Same as mmu_notifier_unregister but no callback and no srcu synchronization.
   */
  void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
  					struct mm_struct *mm)
  {
  	spin_lock(&mm->mmu_notifier_mm->lock);
  	/*
  	 * Can not use list_del_rcu() since __mmu_notifier_release
  	 * can delete it before we hold the lock.
  	 */
  	hlist_del_init_rcu(&mn->hlist);
  	spin_unlock(&mm->mmu_notifier_mm->lock);
  
  	BUG_ON(atomic_read(&mm->mm_count) <= 0);
  	mmdrop(mm);
  }
  EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);