Commit db6f41063cbdb58b14846e600e6bc3f4e4c2e888

Authored by Will Deacon
Committed by Catalin Marinas
1 parent c783c2815e

arm64: mm: don't treat user cache maintenance faults as writes

On arm64, cache maintenance faults appear as data aborts with the CM
bit set in the ESR. The WnR bit, usually used to distinguish between
faulting loads and stores, always reads as 1 and (slightly confusingly)
the instructions are treated as reads by the architecture.

This patch fixes our fault handling code to treat cache maintenance
faults in the same way as loads.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
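
For context, such faults can be triggered from user space: on a typical Linux arm64 configuration the unprivileged data cache maintenance instructions (e.g. DC CVAU) are enabled at EL0, and running one on a read-only, not-yet-populated mapping raises a data abort with WnR=1 and CM=1. The stand-alone sketch below is not part of the patch; it only illustrates the case the fix is about, under the assumption that the page has not been faulted in yet.

/*
 * Illustrative reproducer sketch (arm64 user space only, not from the
 * patch): issue data cache maintenance on a read-only anonymous page.
 * The abort reports WnR=1 and CM=1; with the old code the fault was
 * classified as a write, failed the VM_WRITE permission check and the
 * process got SIGSEGV.  With this fix it is handled like a load.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* Data cache clean by VA to the point of unification. */
	asm volatile("dc cvau, %0" : : "r" (p) : "memory");

	puts("cache maintenance on a read-only mapping succeeded");
	return EXIT_SUCCESS;
}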

Showing 1 changed file with 20 additions and 26 deletions

arch/arm64/mm/fault.c
@@ -152,25 +152,8 @@
 #define ESR_CM			(1 << 8)
 #define ESR_LNX_EXEC		(1 << 24)
 
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
-{
-	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-
-	if (esr & ESR_WRITE)
-		mask = VM_WRITE;
-	if (esr & ESR_LNX_EXEC)
-		mask = VM_EXEC;
-
-	return vma->vm_flags & mask ? false : true;
-}
-
 static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
-			   unsigned int esr, unsigned int flags,
+			   unsigned int mm_flags, unsigned long vm_flags,
 			   struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
@@ -188,12 +171,17 @@
 	 * it.
 	 */
 good_area:
-	if (access_error(esr, vma)) {
+	/*
+	 * Check that the permissions on the VMA allow for the fault which
+	 * occurred. If we encountered a write or exec fault, we must have
+	 * appropriate permissions, otherwise we allow any permission.
+	 */
+	if (!(vma->vm_flags & vm_flags)) {
 		fault = VM_FAULT_BADACCESS;
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,10 +196,16 @@
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-		(write ? FAULT_FLAG_WRITE : 0);
+	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
+	if (esr & ESR_LNX_EXEC) {
+		vm_flags = VM_EXEC;
+	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+		vm_flags = VM_WRITE;
+		mm_flags |= FAULT_FLAG_WRITE;
+	}
+
 	tsk = current;
 	mm = tsk->mm;
 
@@ -248,7 +242,7 @@
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, esr, flags, tsk);
+	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
 
 	/*
 	 * If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +259,7 @@
 	 */
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (fault & VM_FAULT_MAJOR) {
 			tsk->maj_flt++;
 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,7 +274,7 @@
 		 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
 		 * starvation.
 		 */
-		flags &= ~FAULT_FLAG_ALLOW_RETRY;
+		mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
 		goto retry;
 	}
 }
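
The behavioural difference is easiest to see side by side. The following minimal user-space sketch compares the removed access_error() logic with the new vm_flags selection for a cache maintenance abort on a read-only VMA; the ESR_*/VM_* values are simplified stand-ins for the kernel's definitions and the helper names are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's flag definitions. */
#define ESR_WRITE	(1 << 6)	/* WnR: reads as 1 for DC/IC ops */
#define ESR_CM		(1 << 8)	/* cache maintenance abort */
#define ESR_LNX_EXEC	(1 << 24)	/* software-set: instruction fetch */

#define VM_READ		0x1
#define VM_WRITE	0x2
#define VM_EXEC		0x4

/* Old logic: access_error() ignored ESR_CM, so WnR=1 demanded VM_WRITE. */
static bool old_access_error(unsigned int esr, unsigned int vma_flags)
{
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

	if (esr & ESR_WRITE)
		mask = VM_WRITE;
	if (esr & ESR_LNX_EXEC)
		mask = VM_EXEC;

	return !(vma_flags & mask);
}

/* New logic: WnR is ignored when ESR_CM is set, so the fault is
 * checked like a load and any of read/write/exec permission will do. */
static bool new_access_error(unsigned int esr, unsigned int vma_flags)
{
	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;

	if (esr & ESR_LNX_EXEC)
		vm_flags = VM_EXEC;
	else if ((esr & ESR_WRITE) && !(esr & ESR_CM))
		vm_flags = VM_WRITE;

	return !(vma_flags & vm_flags);
}

int main(void)
{
	/* DC CVAU on a read-only mapping: WnR=1, CM=1, VMA has only VM_READ. */
	unsigned int esr = ESR_WRITE | ESR_CM;
	unsigned int vma_flags = VM_READ;

	printf("old: %s\n", old_access_error(esr, vma_flags) ? "SIGSEGV" : "handled");
	printf("new: %s\n", new_access_error(esr, vma_flags) ? "SIGSEGV" : "handled");
	return 0;
}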