Commit d9d76778927dc953c553b83ab52287dfbd15ac6a

Authored by Linus Torvalds

Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm updates from Ingo Molnar:
 "A handful of changes:

   - two memory encryption related fixes

   - don't display the kernel's virtual memory layout in plaintext on
     32-bit kernels either

   - two simplifications"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Remove the now redundant N_MEMORY check
  dma-mapping: Fix dma_pgprot() for unencrypted coherent pages
  x86: Don't let pgprot_modify() change the page encryption bit
  x86/mm/kmmio: Use this_cpu_ptr() instead get_cpu_var() for kmmio_ctx
  x86/mm/init/32: Stop printing the virtual memory layout

Showing 6 changed files Side-by-side Diff

arch/x86/include/asm/pgtable.h
... ... @@ -621,12 +621,15 @@
621 621 return __pmd(val);
622 622 }
623 623  
624   -/* mprotect needs to preserve PAT bits when updating vm_page_prot */
  624 +/*
  625 + * mprotect needs to preserve PAT and encryption bits when updating
  626 + * vm_page_prot
  627 + */
625 628 #define pgprot_modify pgprot_modify
626 629 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
627 630 {
628 631 pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
629   - pgprotval_t addbits = pgprot_val(newprot);
  632 + pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
630 633 return __pgprot(preservebits | addbits);
631 634 }
632 635  
arch/x86/include/asm/pgtable_types.h
... ... @@ -118,7 +118,7 @@
118 118 */
119 119 #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
120 120 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
121   - _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
  121 + _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC)
122 122 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
123 123  
124 124 /*
arch/x86/mm/init_32.c
... ... @@ -788,44 +788,6 @@
788 788 x86_init.hyper.init_after_bootmem();
789 789  
790 790 mem_init_print_info(NULL);
791   - printk(KERN_INFO "virtual kernel memory layout:\n"
792   - " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
793   - " cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n"
794   -#ifdef CONFIG_HIGHMEM
795   - " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
796   -#endif
797   - " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
798   - " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
799   - " .init : 0x%08lx - 0x%08lx (%4ld kB)\n"
800   - " .data : 0x%08lx - 0x%08lx (%4ld kB)\n"
801   - " .text : 0x%08lx - 0x%08lx (%4ld kB)\n",
802   - FIXADDR_START, FIXADDR_TOP,
803   - (FIXADDR_TOP - FIXADDR_START) >> 10,
804   -
805   - CPU_ENTRY_AREA_BASE,
806   - CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
807   - CPU_ENTRY_AREA_MAP_SIZE >> 10,
808   -
809   -#ifdef CONFIG_HIGHMEM
810   - PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
811   - (LAST_PKMAP*PAGE_SIZE) >> 10,
812   -#endif
813   -
814   - VMALLOC_START, VMALLOC_END,
815   - (VMALLOC_END - VMALLOC_START) >> 20,
816   -
817   - (unsigned long)__va(0), (unsigned long)high_memory,
818   - ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
819   -
820   - (unsigned long)&__init_begin, (unsigned long)&__init_end,
821   - ((unsigned long)&__init_end -
822   - (unsigned long)&__init_begin) >> 10,
823   -
824   - (unsigned long)&_etext, (unsigned long)&_edata,
825   - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
826   -
827   - (unsigned long)&_text, (unsigned long)&_etext,
828   - ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
829 791  
830 792 /*
831 793 * Check boundaries twice: Some fundamental inconsistencies can
arch/x86/mm/init_64.c
... ... @@ -818,8 +818,7 @@
818 818 * will not set it back.
819 819 */
820 820 node_clear_state(0, N_MEMORY);
821   - if (N_MEMORY != N_NORMAL_MEMORY)
822   - node_clear_state(0, N_NORMAL_MEMORY);
  821 + node_clear_state(0, N_NORMAL_MEMORY);
823 822  
824 823 zone_sizes_init();
825 824 }
arch/x86/mm/kmmio.c
... ... @@ -260,7 +260,7 @@
260 260 goto no_kmmio;
261 261 }
262 262  
263   - ctx = &get_cpu_var(kmmio_ctx);
  263 + ctx = this_cpu_ptr(&kmmio_ctx);
264 264 if (ctx->active) {
265 265 if (page_base == ctx->addr) {
266 266 /*
... ... @@ -285,7 +285,7 @@
285 285 pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
286 286 disarm_kmmio_fault_page(faultpage);
287 287 }
288   - goto no_kmmio_ctx;
  288 + goto no_kmmio;
289 289 }
290 290 ctx->active++;
291 291  
292 292  
... ... @@ -314,11 +314,8 @@
314 314 * the user should drop to single cpu before tracing.
315 315 */
316 316  
317   - put_cpu_var(kmmio_ctx);
318 317 return 1; /* fault handled */
319 318  
320   -no_kmmio_ctx:
321   - put_cpu_var(kmmio_ctx);
322 319 no_kmmio:
323 320 rcu_read_unlock();
324 321 preempt_enable_no_resched();
... ... @@ -333,7 +330,7 @@
333 330 static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
334 331 {
335 332 int ret = 0;
336   - struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
  333 + struct kmmio_context *ctx = this_cpu_ptr(&kmmio_ctx);
337 334  
338 335 if (!ctx->active) {
339 336 /*
... ... @@ -371,7 +368,6 @@
371 368 if (!(regs->flags & X86_EFLAGS_TF))
372 369 ret = 1;
373 370 out:
374   - put_cpu_var(kmmio_ctx);
375 371 return ret;
376 372 }
377 373  
kernel/dma/mapping.c
... ... @@ -154,6 +154,8 @@
154 154 */
155 155 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
156 156 {
  157 + if (force_dma_unencrypted(dev))
  158 + prot = pgprot_decrypted(prot);
157 159 if (dev_is_dma_coherent(dev) ||
158 160 (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
159 161 (attrs & DMA_ATTR_NON_CONSISTENT)))