arch/mn10300/mm/tlb-smp.c

  /* SMP TLB support routines.
   *
   * Copyright (C) 2006-2008 Panasonic Corporation
   * All Rights Reserved.
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License
   * version 2 as published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   * GNU General Public License for more details.
   */
  #include <linux/interrupt.h>
  #include <linux/spinlock.h>
  #include <linux/init.h>
  #include <linux/jiffies.h>
  #include <linux/cpumask.h>
  #include <linux/err.h>
  #include <linux/kernel.h>
  #include <linux/delay.h>
  #include <linux/sched.h>
  #include <linux/profile.h>
  #include <linux/smp.h>
  #include <asm/tlbflush.h>
  #include <asm/system.h>
  #include <asm/bitops.h>
  #include <asm/processor.h>
  #include <asm/bug.h>
  #include <asm/exceptions.h>
  #include <asm/hardirq.h>
  #include <asm/fpu.h>
  #include <asm/mmu_context.h>
  #include <asm/thread_info.h>
  #include <asm/cpu-regs.h>
  #include <asm/intctl-regs.h>
  
/*
 * Sentinel for flush_va: flush the entire TLB rather than a single page
 */
  #define FLUSH_ALL	0xffffffff
  
  static cpumask_t flush_cpumask;
  static struct mm_struct *flush_mm;
  static unsigned long flush_va;
  static DEFINE_SPINLOCK(tlbstate_lock);
  
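/* Per-CPU TLB state: which mm each CPU currently has loaded.  Every CPU
 * starts out attached to the kernel's init_mm.
 */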
  DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
  	&init_mm, 0
  };
  
  static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
  			     unsigned long va);
  static void do_flush_tlb_all(void *info);
  
  /**
   * smp_flush_tlb - Callback to invalidate the TLB.
   * @unused: Callback context (ignored).
   */
  void smp_flush_tlb(void *unused)
  {
  	unsigned long cpu_id;
  
  	cpu_id = get_cpu();
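	/* Only act if this CPU is actually named in flush_cpumask; a stray
	 * or repeated IPI is simply ignored (see the comment below).
	 */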
  	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
		/* This was a BUG(), but until someone can quote me the line
		 * from the Intel manual that guarantees an IPI to multiple
		 * CPUs is retried _only_ on the erroring CPUs, it's staying
		 * as a return.
		 *
		 * BUG();
		 */
  		goto out;
  
  	if (flush_va == FLUSH_ALL)
  		local_flush_tlb();
  	else
  		local_flush_tlb_page(flush_mm, flush_va);
  
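	/* Clearing our bit is the completion signal the initiator spins on,
	 * so the barriers order it strictly after the flush above.
	 */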
  	smp_mb__before_clear_bit();
  	cpumask_clear_cpu(cpu_id, &flush_cpumask);
  	smp_mb__after_clear_bit();
  out:
  	put_cpu();
  }
  
  /**
   * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
   * @cpumask: The list of CPUs to target.
 * @mm: The VM context to flush from (if @va != FLUSH_ALL).
   * @va: Virtual address to flush or FLUSH_ALL to flush everything.
   */
  static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
  			     unsigned long va)
  {
  	cpumask_t tmp;
  
	/* A few sanity checks (to be removed):
	 * - mask must not be empty
	 * - current CPU must not be in mask
	 * - we do not send IPIs to as-yet unbooted CPUs.
	 */
  	BUG_ON(!mm);
  	BUG_ON(cpumask_empty(&cpumask));
  	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));

  	cpumask_and(&tmp, &cpumask, cpu_online_mask);
  	BUG_ON(!cpumask_equal(&cpumask, &tmp));
  
	/* I'm not happy about this global shared spinlock in the MM hot path,
	 * but we'll see how contended it is.
	 *
	 * This temporarily turns IRQs off, so that lockups are detected by
	 * the NMI watchdog.
	 */
  	spin_lock(&tlbstate_lock);
  
  	flush_mm = mm;
  	flush_va = va;
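	/* Atomically publish the set of CPUs that must respond; each one
	 * clears its own bit again once it has flushed.
	 */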
  #if NR_CPUS <= BITS_PER_LONG
  	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
  #else
  #error Not supported.
  #endif
  
  	/* FIXME: if NR_CPUS>=3, change send_IPI_mask */
  	smp_call_function(smp_flush_tlb, NULL, 1);
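	/* Spin until every targeted CPU has cleared its bit, i.e. has
	 * completed its flush.
	 */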
  	while (!cpumask_empty(&flush_cpumask))
  		/* Lockup detection does not belong here */
  		smp_mb();
  
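	/* Reset the shared parameters before dropping the lock so the next
	 * initiator starts from a clean state.
	 */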
  	flush_mm = NULL;
  	flush_va = 0;
  	spin_unlock(&tlbstate_lock);
  }
  
  /**
   * flush_tlb_mm - Invalidate TLB of specified VM context
   * @mm: The VM context to invalidate.
   */
  void flush_tlb_mm(struct mm_struct *mm)
  {
  	cpumask_t cpu_mask;
  
  	preempt_disable();
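	/* Snapshot the CPUs that may be using this mm, minus ourselves: the
	 * local TLB is flushed directly and only the others get an IPI.
	 */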
  	cpumask_copy(&cpu_mask, mm_cpumask(mm));
  	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
  
  	local_flush_tlb();
  	if (!cpumask_empty(&cpu_mask))
  		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
  
  	preempt_enable();
  }
  
  /**
   * flush_tlb_current_task - Invalidate TLB of current task
   */
  void flush_tlb_current_task(void)
  {
  	struct mm_struct *mm = current->mm;
  	cpumask_t cpu_mask;
  
  	preempt_disable();
  	cpumask_copy(&cpu_mask, mm_cpumask(mm));
  	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
  
  	local_flush_tlb();
  	if (!cpumask_empty(&cpu_mask))
  		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
  
  	preempt_enable();
  }
  
  /**
   * flush_tlb_page - Invalidate TLB of page
   * @vma: The VM context to invalidate the page for.
   * @va: The virtual address of the page to invalidate.
   */
  void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	cpumask_t cpu_mask;
  
  	preempt_disable();
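	/* Same pattern as flush_tlb_mm(), but only the one page is
	 * invalidated, both locally and on the other CPUs using this mm.
	 */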
  	cpumask_copy(&cpu_mask, mm_cpumask(mm));
  	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
  
  	local_flush_tlb_page(mm, va);
  	if (!cpumask_empty(&cpu_mask))
  		flush_tlb_others(cpu_mask, mm, va);
  
  	preempt_enable();
  }
  
  /**
   * do_flush_tlb_all - Callback to completely invalidate a TLB
   * @unused: Callback context (ignored).
   */
  static void do_flush_tlb_all(void *unused)
  {
  	local_flush_tlb_all();
  }
  
  /**
   * flush_tlb_all - Completely invalidate TLBs on all CPUs
   */
  void flush_tlb_all(void)
  {
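	/* on_each_cpu() runs the callback on every online CPU, including
	 * this one; the final argument of 1 makes it wait for completion.
	 */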
	on_each_cpu(do_flush_tlb_all, NULL, 1);
  }