Commit c1821c2e9711adc3cd298a16b7237c92a2cee78d

Authored by Gerald Schaefer
Committed by Martin Schwidefsky
1 parent 86aa9fc245

[S390] noexec protection

This provides noexec protection on s390 hardware. Our hardware does
not have any bits left in the pte for a hw noexec bit, so this takes
a different approach, using shadow page tables and a special
addressing mode that allows separate address spaces for code and data.
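
To illustrate the user-visible effect (this example is not part of the
patch): with CONFIG_S390_EXEC_PROTECT=y and a kernel booted with
noexec=on, executing from a mapping that lacks PROT_EXEC should deliver
a SIGSEGV, because such pages are entered only into the data page
table. A minimal, deliberately crude user-space sketch:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    static int answer(void) { return 42; }

    int main(void)
    {
            /* Data-only mapping: PROT_EXEC is deliberately omitted. */
            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;
            /* Crude: copy the first bytes of a tiny function into the
             * page; good enough for a demonstration. */
            memcpy(p, (void *) answer, 64);
            int (*f)(void) = (int (*)(void)) p;
            /* With noexec=on the instruction fetch faults: SIGSEGV
             * instead of printing 42. */
            printf("%d\n", f());
            return 0;
    }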

As a special feature of our "secondary-space" addressing mode, separate
page tables can be specified for the translation of data addresses
(storage operands) and instruction addresses. The shadow page table is
used for the instruction addresses and the standard page table for the
data addresses.
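
In control register terms this means: on a context switch the origin
of the shadow (instruction) table is loaded into the primary ASCE in
control register 1 and the origin of the standard (data) table into
the secondary ASCE in control register 7. A condensed sketch of the
64-bit path of the reworked switch_mm() in include/asm-s390/
mmu_context.h below, with shadow_pgd = get_shadow_pgd(next->pgd):

    S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) | PGTABLE_BITS;
    S390_lowcore.user_exec_asce = (__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
    asm volatile("lctlg 1,1,%0\n"   /* cr1: shadow table, instruction fetch */
                 "lctlg 7,7,%1"     /* cr7: standard table, data accesses */
                 : : "m" (S390_lowcore.user_exec_asce),
                     "m" (S390_lowcore.user_asce));
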
The shadow page table is linked to the standard page table by a pointer
in page->lru.next of the struct page corresponding to the page that
contains the standard page table (page->private is already taken by the
pte lock, and page->lru is free because page table pages are not on the
LRU list).
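
Finding the shadow table for a given entry is therefore just pointer
arithmetic on the struct page of the standard table; simplified from
the get_shadow_pte() helper added to include/asm-s390/pgtable.h below:

    static inline pte_t *get_shadow_pte(pte_t *ptep)
    {
            unsigned long offset = (unsigned long) ptep & (PAGE_SIZE - 1);
            struct page *page = virt_to_page(ptep);

            if (!s390_noexec || list_empty(&page->lru))
                    return NULL;    /* no shadow table for this page table */
            /* page->lru.next of the page table page points to its shadow. */
            return (pte_t *) ((unsigned long) page->lru.next | offset);
    }
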
Depending on the software bits of a pte, it is either inserted into
both page tables or just into the standard (data) page table. Pages of
a vma that does not have the VM_EXEC bit set get mapped only in the
data address space. Any attempt to execute code on such a page causes a
page translation exception. The standard reaction to this is a SIGSEGV,
with two exceptions: the two system call opcodes 0x0a77 (sys_sigreturn)
and 0x0aad (sys_rt_sigreturn) are allowed. They are stored by the
kernel on the signal stack frame. Unfortunately, the signal return
mechanism cannot be modified to use an SA_RESTORER, because the
exception unwinding code depends on the system call opcode stored
behind the signal stack frame.
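
The check itself is a simple opcode compare at the faulting instruction
address; condensed from the signal_return() function added to
arch/s390/mm/fault.c below (the page table walk that turns
regs->psw.addr into the kernel address 'instruction' is elided):

    if (*instruction == 0x0a77)             /* svc 119: sys_sigreturn */
            do_sigreturn(mm, regs, 0);
    else if (*instruction == 0x0aad)        /* svc 173: sys_rt_sigreturn */
            do_sigreturn(mm, regs, 1);
    else
            do_sigsegv(regs, error_code, SEGV_MAPERR, address);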

This feature requires that user space is executed in secondary-space
mode and the kernel in home-space mode, which means that the addressing
modes need to be switched and that the noexec protection only works
for user space.
After switching the addressing modes, we can no longer use the
mvcp/mvcs instructions to copy between kernel and user space. A new
mvcos instruction has been added to the z9 EC/BC hardware which allows
copying between arbitrary address spaces, but on older hardware the
page tables need to be walked manually.
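
The copy method is chosen once at boot; condensed from
set_amode_and_uaccess() in arch/s390/kernel/setup.c below:

    if (MACHINE_HAS_MVCOS)  /* z9 EC/BC and later */
            memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
    else                    /* older hardware: walk the page tables by hand */
            memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));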

Signed-off-by: Gerald Schaefer <geraldsc@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

28 changed files with 913 additions and 115 deletions

arch/s390/Kconfig
... ... @@ -134,6 +134,31 @@
134 134 bool
135 135 default y
136 136  
  137 +config S390_SWITCH_AMODE
  138 + bool "Switch kernel/user addressing modes"
  139 + help
  140 + This option allows switching the addressing modes of kernel and user
  141 + space. The kernel parameter switch_amode=on enables this feature;
  142 + the default is disabled. Enabling this (via kernel parameter) on machines
  143 + earlier than IBM System z9-109 EC/BC will reduce system performance.
  144 +
  145 + Note that this option is also selected by the execute protection
  146 + option below. Enabling the execute protection via the noexec
  147 + kernel parameter will also switch the addressing modes,
  148 + independent of the switch_amode kernel parameter.
  149 +
  150 +
  151 +config S390_EXEC_PROTECT
  152 + bool "Data execute protection"
  153 + select S390_SWITCH_AMODE
  154 + help
  155 + This option enables a buffer overflow protection for user space
  156 + programs and also selects the addressing mode option above.
  157 + The kernel parameter noexec=on will enable this feature and also
  158 + switch the addressing modes; the default is disabled. Enabling this
  159 + (via kernel parameter) on machines earlier than IBM System z9-109
  160 + EC/BC will reduce system performance.
  161 +
137 162 comment "Code generation options"
138 163  
139 164 choice
arch/s390/defconfig
... ... @@ -108,6 +108,8 @@
108 108 CONFIG_COMPAT=y
109 109 CONFIG_SYSVIPC_COMPAT=y
110 110 CONFIG_AUDIT_ARCH=y
  111 +CONFIG_S390_SWITCH_AMODE=y
  112 +CONFIG_S390_EXEC_PROTECT=y
111 113  
112 114 #
113 115 # Code generation options
arch/s390/kernel/compat_linux.c
... ... @@ -69,6 +69,12 @@
69 69  
70 70 #include "compat_linux.h"
71 71  
  72 +long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
  73 + PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
  74 + PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
  75 +long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME |
  76 + PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
  77 + PSW32_MASK_PSTATE);
72 78  
73 79 /* For this source file, we want overflow handling. */
74 80  
arch/s390/kernel/compat_linux.h
... ... @@ -115,37 +115,6 @@
115 115 __u32 addr;
116 116 } _psw_t32 __attribute__ ((aligned(8)));
117 117  
118   -#define PSW32_MASK_PER 0x40000000UL
119   -#define PSW32_MASK_DAT 0x04000000UL
120   -#define PSW32_MASK_IO 0x02000000UL
121   -#define PSW32_MASK_EXT 0x01000000UL
122   -#define PSW32_MASK_KEY 0x00F00000UL
123   -#define PSW32_MASK_MCHECK 0x00040000UL
124   -#define PSW32_MASK_WAIT 0x00020000UL
125   -#define PSW32_MASK_PSTATE 0x00010000UL
126   -#define PSW32_MASK_ASC 0x0000C000UL
127   -#define PSW32_MASK_CC 0x00003000UL
128   -#define PSW32_MASK_PM 0x00000f00UL
129   -
130   -#define PSW32_ADDR_AMODE31 0x80000000UL
131   -#define PSW32_ADDR_INSN 0x7FFFFFFFUL
132   -
133   -#define PSW32_BASE_BITS 0x00080000UL
134   -
135   -#define PSW32_ASC_PRIMARY 0x00000000UL
136   -#define PSW32_ASC_ACCREG 0x00004000UL
137   -#define PSW32_ASC_SECONDARY 0x00008000UL
138   -#define PSW32_ASC_HOME 0x0000C000UL
139   -
140   -#define PSW32_USER_BITS (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | \
141   - PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | \
142   - PSW32_MASK_PSTATE)
143   -
144   -#define PSW32_MASK_MERGE(CURRENT,NEW) \
145   - (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
146   - ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
147   -
148   -
149 118 typedef struct
150 119 {
151 120 _psw_t32 psw;
arch/s390/kernel/compat_signal.c
... ... @@ -298,7 +298,7 @@
298 298 _s390_regs_common32 regs32;
299 299 int err, i;
300 300  
301   - regs32.psw.mask = PSW32_MASK_MERGE(PSW32_USER_BITS,
  301 + regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits,
302 302 (__u32)(regs->psw.mask >> 32));
303 303 regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr;
304 304 for (i = 0; i < NUM_GPRS; i++)
arch/s390/kernel/ipl.c
... ... @@ -1016,12 +1016,12 @@
1016 1016 __ctl_clear_bit(0,28);
1017 1017  
1018 1018 /* Set new machine check handler */
1019   - S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK;
  1019 + S390_lowcore.mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
1020 1020 S390_lowcore.mcck_new_psw.addr =
1021 1021 PSW_ADDR_AMODE | (unsigned long) &reset_mcck_handler;
1022 1022  
1023 1023 /* Set new program check handler */
1024   - S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK;
  1024 + S390_lowcore.program_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
1025 1025 S390_lowcore.program_new_psw.addr =
1026 1026 PSW_ADDR_AMODE | (unsigned long) &reset_pgm_handler;
1027 1027  
arch/s390/kernel/process.c
... ... @@ -144,7 +144,7 @@
144 144  
145 145 trace_hardirqs_on();
146 146 /* Wait for external, I/O or machine check interrupt. */
147   - __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_WAIT |
  147 + __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
148 148 PSW_MASK_IO | PSW_MASK_EXT);
149 149 }
150 150  
... ... @@ -190,7 +190,7 @@
190 190 struct pt_regs regs;
191 191  
192 192 memset(&regs, 0, sizeof(regs));
193   - regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
  193 + regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
194 194 regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
195 195 regs.gprs[9] = (unsigned long) fn;
196 196 regs.gprs[10] = (unsigned long) arg;
arch/s390/kernel/ptrace.c
... ... @@ -230,9 +230,9 @@
230 230 */
231 231 if (addr == (addr_t) &dummy->regs.psw.mask &&
232 232 #ifdef CONFIG_COMPAT
233   - data != PSW_MASK_MERGE(PSW_USER32_BITS, data) &&
  233 + data != PSW_MASK_MERGE(psw_user32_bits, data) &&
234 234 #endif
235   - data != PSW_MASK_MERGE(PSW_USER_BITS, data))
  235 + data != PSW_MASK_MERGE(psw_user_bits, data))
236 236 /* Invalid psw mask. */
237 237 return -EINVAL;
238 238 #ifndef CONFIG_64BIT
... ... @@ -393,7 +393,7 @@
393 393 if (addr == (addr_t) &dummy32->regs.psw.mask) {
394 394 /* Fake a 31 bit psw mask. */
395 395 tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
396   - tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp);
  396 + tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
397 397 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
398 398 /* Fake a 31 bit psw address. */
399 399 tmp = (__u32) task_pt_regs(child)->psw.addr |
400 400  
... ... @@ -468,11 +468,11 @@
468 468 */
469 469 if (addr == (addr_t) &dummy32->regs.psw.mask) {
470 470 /* Build a 64 bit psw mask from 31 bit mask. */
471   - if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp))
  471 + if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
472 472 /* Invalid psw mask. */
473 473 return -EINVAL;
474 474 task_pt_regs(child)->psw.mask =
475   - PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32);
  475 + PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
476 476 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
477 477 /* Build a 64 bit psw address from 31 bit address. */
478 478 task_pt_regs(child)->psw.addr =
arch/s390/kernel/setup.c
... ... @@ -50,7 +50,14 @@
50 50 #include <asm/page.h>
51 51 #include <asm/ptrace.h>
52 52 #include <asm/sections.h>
  53 +#include <asm/compat.h>
53 54  
  55 +long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
  56 + PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
  57 +long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
  58 + PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
  59 + PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
  60 +
54 61 /*
55 62 * User copy operations.
56 63 */
... ... @@ -383,6 +390,84 @@
383 390 }
384 391 early_param("ipldelay", early_parse_ipldelay);
385 392  
  393 +#ifdef CONFIG_S390_SWITCH_AMODE
  394 +unsigned int switch_amode = 0;
  395 +EXPORT_SYMBOL_GPL(switch_amode);
  396 +
  397 +static inline void set_amode_and_uaccess(unsigned long user_amode,
  398 + unsigned long user32_amode)
  399 +{
  400 + psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
  401 + PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
  402 + PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
  403 +#ifdef CONFIG_COMPAT
  404 + psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
  405 + PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
  406 + PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
  407 + psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
  408 + PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
  409 + PSW32_MASK_PSTATE;
  410 +#endif
  411 + psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
  412 + PSW_MASK_MCHECK | PSW_DEFAULT_KEY;
  413 +
  414 + if (MACHINE_HAS_MVCOS) {
  415 + printk("mvcos available.\n");
  416 + memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
  417 + } else {
  418 + printk("mvcos not available.\n");
  419 + memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
  420 + }
  421 +}
  422 +
  423 +/*
  424 + * Switch kernel/user addressing modes?
  425 + */
  426 +static int __init early_parse_switch_amode(char *p)
  427 +{
  428 + switch_amode = 1;
  429 + return 0;
  430 +}
  431 +early_param("switch_amode", early_parse_switch_amode);
  432 +
  433 +#else /* CONFIG_S390_SWITCH_AMODE */
  434 +static inline void set_amode_and_uaccess(unsigned long user_amode,
  435 + unsigned long user32_amode)
  436 +{
  437 +}
  438 +#endif /* CONFIG_S390_SWITCH_AMODE */
  439 +
  440 +#ifdef CONFIG_S390_EXEC_PROTECT
  441 +unsigned int s390_noexec = 0;
  442 +EXPORT_SYMBOL_GPL(s390_noexec);
  443 +
  444 +/*
  445 + * Enable execute protection?
  446 + */
  447 +static int __init early_parse_noexec(char *p)
  448 +{
  449 + if (!strncmp(p, "off", 3))
  450 + return 0;
  451 + switch_amode = 1;
  452 + s390_noexec = 1;
  453 + return 0;
  454 +}
  455 +early_param("noexec", early_parse_noexec);
  456 +#endif /* CONFIG_S390_EXEC_PROTECT */
  457 +
  458 +static void setup_addressing_mode(void)
  459 +{
  460 + if (s390_noexec) {
  461 + printk("S390 execute protection active, ");
  462 + set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
  463 + return;
  464 + }
  465 + if (switch_amode) {
  466 + printk("S390 address spaces switched, ");
  467 + set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
  468 + }
  469 +}
  470 +
386 471 static void __init
387 472 setup_lowcore(void)
388 473 {
389 474  
390 475  
391 476  
392 477  
... ... @@ -399,19 +484,21 @@
399 484 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
400 485 lc->restart_psw.addr =
401 486 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
402   - lc->external_new_psw.mask = PSW_KERNEL_BITS;
  487 + if (switch_amode)
  488 + lc->restart_psw.mask |= PSW_ASC_HOME;
  489 + lc->external_new_psw.mask = psw_kernel_bits;
403 490 lc->external_new_psw.addr =
404 491 PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
405   - lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
  492 + lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
406 493 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
407   - lc->program_new_psw.mask = PSW_KERNEL_BITS;
  494 + lc->program_new_psw.mask = psw_kernel_bits;
408 495 lc->program_new_psw.addr =
409 496 PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
410 497 lc->mcck_new_psw.mask =
411   - PSW_KERNEL_BITS & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
  498 + psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
412 499 lc->mcck_new_psw.addr =
413 500 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
414   - lc->io_new_psw.mask = PSW_KERNEL_BITS;
  501 + lc->io_new_psw.mask = psw_kernel_bits;
415 502 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
416 503 lc->ipl_device = S390_lowcore.ipl_device;
417 504 lc->jiffy_timer = -1LL;
... ... @@ -645,6 +732,7 @@
645 732 parse_early_param();
646 733  
647 734 setup_memory_end();
  735 + setup_addressing_mode();
648 736 setup_memory();
649 737 setup_resources();
650 738 setup_lowcore();
arch/s390/kernel/signal.c
... ... @@ -119,7 +119,7 @@
119 119  
120 120 /* Copy a 'clean' PSW mask to the user to avoid leaking
121 121 information about whether PER is currently on. */
122   - user_sregs.regs.psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask);
  122 + user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask);
123 123 user_sregs.regs.psw.addr = regs->psw.addr;
124 124 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
125 125 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
arch/s390/kernel/smp.c
... ... @@ -244,7 +244,7 @@
244 244 void smp_send_stop(void)
245 245 {
246 246 /* Disable all interrupts/machine checks */
247   - __load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
  247 + __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
248 248  
249 249 /* write magic number to zero page (absolute 0) */
250 250 lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
arch/s390/lib/uaccess_mvcos.c
... ... @@ -162,6 +162,44 @@
162 162 return size;
163 163 }
164 164  
  165 +static size_t strnlen_user_mvcos(size_t count, const char __user *src)
  166 +{
  167 + char buf[256];
  168 + int rc;
  169 + size_t done, len, len_str;
  170 +
  171 + done = 0;
  172 + do {
  173 + len = min(count - done, (size_t) 256);
  174 + rc = uaccess.copy_from_user(len, src + done, buf);
  175 + if (unlikely(rc == len))
  176 + return 0;
  177 + len -= rc;
  178 + len_str = strnlen(buf, len);
  179 + done += len_str;
  180 + } while ((len_str == len) && (done < count));
  181 + return done + 1;
  182 +}
  183 +
  184 +static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
  185 + char *dst)
  186 +{
  187 + int rc;
  188 + size_t done, len, len_str;
  189 +
  190 + done = 0;
  191 + do {
  192 + len = min(count - done, (size_t) 4096);
  193 + rc = uaccess.copy_from_user(len, src + done, dst + done);
  194 + if (unlikely(rc == len))
  195 + return -EFAULT;
  196 + len -= rc;
  197 + len_str = strnlen(dst + done, len);
  198 + done += len_str;
  199 + } while ((len_str == len) && (done < count));
  200 + return done;
  201 +}
  202 +
165 203 struct uaccess_ops uaccess_mvcos = {
166 204 .copy_from_user = copy_from_user_mvcos_check,
167 205 .copy_from_user_small = copy_from_user_std,
... ... @@ -174,4 +212,19 @@
174 212 .futex_atomic_op = futex_atomic_op_std,
175 213 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
176 214 };
  215 +
  216 +#ifdef CONFIG_S390_SWITCH_AMODE
  217 +struct uaccess_ops uaccess_mvcos_switch = {
  218 + .copy_from_user = copy_from_user_mvcos,
  219 + .copy_from_user_small = copy_from_user_mvcos,
  220 + .copy_to_user = copy_to_user_mvcos,
  221 + .copy_to_user_small = copy_to_user_mvcos,
  222 + .copy_in_user = copy_in_user_mvcos,
  223 + .clear_user = clear_user_mvcos,
  224 + .strnlen_user = strnlen_user_mvcos,
  225 + .strncpy_from_user = strncpy_from_user_mvcos,
  226 + .futex_atomic_op = futex_atomic_op_pt,
  227 + .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
  228 +};
  229 +#endif
arch/s390/lib/uaccess_pt.c
1 1 /*
2 2 * arch/s390/lib/uaccess_pt.c
3 3 *
4   - * User access functions based on page table walks.
  4 + * User access functions based on page table walks for enhanced
  5 + * system layout without hardware support.
5 6 *
6 7 * Copyright IBM Corp. 2006
7 8 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
... ... @@ -134,6 +135,49 @@
134 135 goto retry;
135 136 }
136 137  
  138 +/*
  139 + * Do DAT for user address by page table walk, return kernel address.
  140 + * This function needs to be called with current->mm->page_table_lock held.
  141 + */
  142 +static inline unsigned long __dat_user_addr(unsigned long uaddr)
  143 +{
  144 + struct mm_struct *mm = current->mm;
  145 + unsigned long pfn, ret;
  146 + pgd_t *pgd;
  147 + pmd_t *pmd;
  148 + pte_t *pte;
  149 + int rc;
  150 +
  151 + ret = 0;
  152 +retry:
  153 + pgd = pgd_offset(mm, uaddr);
  154 + if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
  155 + goto fault;
  156 +
  157 + pmd = pmd_offset(pgd, uaddr);
  158 + if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
  159 + goto fault;
  160 +
  161 + pte = pte_offset_map(pmd, uaddr);
  162 + if (!pte || !pte_present(*pte))
  163 + goto fault;
  164 +
  165 + pfn = pte_pfn(*pte);
  166 + if (!pfn_valid(pfn))
  167 + goto out;
  168 +
  169 + ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
  170 +out:
  171 + return ret;
  172 +fault:
  173 + spin_unlock(&mm->page_table_lock);
  174 + rc = __handle_fault(mm, uaddr, 0);
  175 + spin_lock(&mm->page_table_lock);
  176 + if (rc)
  177 + goto out;
  178 + goto retry;
  179 +}
  180 +
137 181 size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
138 182 {
139 183 size_t rc;
... ... @@ -156,4 +200,278 @@
156 200 }
157 201 return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
158 202 }
  203 +
  204 +static size_t clear_user_pt(size_t n, void __user *to)
  205 +{
  206 + long done, size, ret;
  207 +
  208 + if (segment_eq(get_fs(), KERNEL_DS)) {
  209 + memset((void __kernel __force *) to, 0, n);
  210 + return 0;
  211 + }
  212 + done = 0;
  213 + do {
  214 + if (n - done > PAGE_SIZE)
  215 + size = PAGE_SIZE;
  216 + else
  217 + size = n - done;
  218 + ret = __user_copy_pt((unsigned long) to + done,
  219 + &empty_zero_page, size, 1);
  220 + done += size;
  221 + if (ret)
  222 + return ret + n - done;
  223 + } while (done < n);
  224 + return 0;
  225 +}
  226 +
  227 +static size_t strnlen_user_pt(size_t count, const char __user *src)
  228 +{
  229 + char *addr;
  230 + unsigned long uaddr = (unsigned long) src;
  231 + struct mm_struct *mm = current->mm;
  232 + unsigned long offset, pfn, done, len;
  233 + pgd_t *pgd;
  234 + pmd_t *pmd;
  235 + pte_t *pte;
  236 + size_t len_str;
  237 +
  238 + if (segment_eq(get_fs(), KERNEL_DS))
  239 + return strnlen((const char __kernel __force *) src, count) + 1;
  240 + done = 0;
  241 +retry:
  242 + spin_lock(&mm->page_table_lock);
  243 + do {
  244 + pgd = pgd_offset(mm, uaddr);
  245 + if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
  246 + goto fault;
  247 +
  248 + pmd = pmd_offset(pgd, uaddr);
  249 + if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
  250 + goto fault;
  251 +
  252 + pte = pte_offset_map(pmd, uaddr);
  253 + if (!pte || !pte_present(*pte))
  254 + goto fault;
  255 +
  256 + pfn = pte_pfn(*pte);
  257 + if (!pfn_valid(pfn)) {
  258 + done = -1;
  259 + goto out;
  260 + }
  261 +
  262 + offset = uaddr & (PAGE_SIZE-1);
  263 + addr = (char *)(pfn << PAGE_SHIFT) + offset;
  264 + len = min(count - done, PAGE_SIZE - offset);
  265 + len_str = strnlen(addr, len);
  266 + done += len_str;
  267 + uaddr += len_str;
  268 + } while ((len_str == len) && (done < count));
  269 +out:
  270 + spin_unlock(&mm->page_table_lock);
  271 + return done + 1;
  272 +fault:
  273 + spin_unlock(&mm->page_table_lock);
  274 + if (__handle_fault(mm, uaddr, 0)) {
  275 + return 0;
  276 + }
  277 + goto retry;
  278 +}
  279 +
  280 +static size_t strncpy_from_user_pt(size_t count, const char __user *src,
  281 + char *dst)
  282 +{
  283 + size_t n = strnlen_user_pt(count, src);
  284 +
  285 + if (!n)
  286 + return -EFAULT;
  287 + if (n > count)
  288 + n = count;
  289 + if (segment_eq(get_fs(), KERNEL_DS)) {
  290 + memcpy(dst, (const char __kernel __force *) src, n);
  291 + if (dst[n-1] == '\0')
  292 + return n-1;
  293 + else
  294 + return n;
  295 + }
  296 + if (__user_copy_pt((unsigned long) src, dst, n, 0))
  297 + return -EFAULT;
  298 + if (dst[n-1] == '\0')
  299 + return n-1;
  300 + else
  301 + return n;
  302 +}
  303 +
  304 +static size_t copy_in_user_pt(size_t n, void __user *to,
  305 + const void __user *from)
  306 +{
  307 + struct mm_struct *mm = current->mm;
  308 + unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
  309 + uaddr, done, size;
  310 + unsigned long uaddr_from = (unsigned long) from;
  311 + unsigned long uaddr_to = (unsigned long) to;
  312 + pgd_t *pgd_from, *pgd_to;
  313 + pmd_t *pmd_from, *pmd_to;
  314 + pte_t *pte_from, *pte_to;
  315 + int write_user;
  316 +
  317 + done = 0;
  318 +retry:
  319 + spin_lock(&mm->page_table_lock);
  320 + do {
  321 + pgd_from = pgd_offset(mm, uaddr_from);
  322 + if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) {
  323 + uaddr = uaddr_from;
  324 + write_user = 0;
  325 + goto fault;
  326 + }
  327 + pgd_to = pgd_offset(mm, uaddr_to);
  328 + if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) {
  329 + uaddr = uaddr_to;
  330 + write_user = 1;
  331 + goto fault;
  332 + }
  333 +
  334 + pmd_from = pmd_offset(pgd_from, uaddr_from);
  335 + if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) {
  336 + uaddr = uaddr_from;
  337 + write_user = 0;
  338 + goto fault;
  339 + }
  340 + pmd_to = pmd_offset(pgd_to, uaddr_to);
  341 + if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) {
  342 + uaddr = uaddr_to;
  343 + write_user = 1;
  344 + goto fault;
  345 + }
  346 +
  347 + pte_from = pte_offset_map(pmd_from, uaddr_from);
  348 + if (!pte_from || !pte_present(*pte_from)) {
  349 + uaddr = uaddr_from;
  350 + write_user = 0;
  351 + goto fault;
  352 + }
  353 + pte_to = pte_offset_map(pmd_to, uaddr_to);
  354 + if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
  355 + uaddr = uaddr_to;
  356 + write_user = 1;
  357 + goto fault;
  358 + }
  359 +
  360 + pfn_from = pte_pfn(*pte_from);
  361 + if (!pfn_valid(pfn_from))
  362 + goto out;
  363 + pfn_to = pte_pfn(*pte_to);
  364 + if (!pfn_valid(pfn_to))
  365 + goto out;
  366 +
  367 + offset_from = uaddr_from & (PAGE_SIZE-1);
  368 + offset_to = uaddr_to & (PAGE_SIZE-1);
  369 + offset_max = max(offset_from, offset_to);
  370 + size = min(n - done, PAGE_SIZE - offset_max);
  371 +
  372 + memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
  373 + (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
  374 + done += size;
  375 + uaddr_from += size;
  376 + uaddr_to += size;
  377 + } while (done < n);
  378 +out:
  379 + spin_unlock(&mm->page_table_lock);
  380 + return n - done;
  381 +fault:
  382 + spin_unlock(&mm->page_table_lock);
  383 + if (__handle_fault(mm, uaddr, write_user))
  384 + return n - done;
  385 + goto retry;
  386 +}
  387 +
  388 +#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
  389 + asm volatile("0: l %1,0(%6)\n" \
  390 + "1: " insn \
  391 + "2: cs %1,%2,0(%6)\n" \
  392 + "3: jl 1b\n" \
  393 + " lhi %0,0\n" \
  394 + "4:\n" \
  395 + EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
  396 + : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
  397 + "=m" (*uaddr) \
  398 + : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
  399 + "m" (*uaddr) : "cc" );
  400 +
  401 +int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
  402 +{
  403 + int oldval = 0, newval, ret;
  404 +
  405 + spin_lock(&current->mm->page_table_lock);
  406 + uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
  407 + if (!uaddr) {
  408 + spin_unlock(&current->mm->page_table_lock);
  409 + return -EFAULT;
  410 + }
  411 + get_page(virt_to_page(uaddr));
  412 + spin_unlock(&current->mm->page_table_lock);
  413 + switch (op) {
  414 + case FUTEX_OP_SET:
  415 + __futex_atomic_op("lr %2,%5\n",
  416 + ret, oldval, newval, uaddr, oparg);
  417 + break;
  418 + case FUTEX_OP_ADD:
  419 + __futex_atomic_op("lr %2,%1\nar %2,%5\n",
  420 + ret, oldval, newval, uaddr, oparg);
  421 + break;
  422 + case FUTEX_OP_OR:
  423 + __futex_atomic_op("lr %2,%1\nor %2,%5\n",
  424 + ret, oldval, newval, uaddr, oparg);
  425 + break;
  426 + case FUTEX_OP_ANDN:
  427 + __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
  428 + ret, oldval, newval, uaddr, oparg);
  429 + break;
  430 + case FUTEX_OP_XOR:
  431 + __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
  432 + ret, oldval, newval, uaddr, oparg);
  433 + break;
  434 + default:
  435 + ret = -ENOSYS;
  436 + }
  437 + put_page(virt_to_page(uaddr));
  438 + *old = oldval;
  439 + return ret;
  440 +}
  441 +
  442 +int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
  443 +{
  444 + int ret;
  445 +
  446 + spin_lock(&current->mm->page_table_lock);
  447 + uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
  448 + if (!uaddr) {
  449 + spin_unlock(&current->mm->page_table_lock);
  450 + return -EFAULT;
  451 + }
  452 + get_page(virt_to_page(uaddr));
  453 + spin_unlock(&current->mm->page_table_lock);
  454 + asm volatile(" cs %1,%4,0(%5)\n"
  455 + "0: lr %0,%1\n"
  456 + "1:\n"
  457 + EX_TABLE(0b,1b)
  458 + : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
  459 + : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
  460 + : "cc", "memory" );
  461 + put_page(virt_to_page(uaddr));
  462 + return ret;
  463 +}
  464 +
  465 +struct uaccess_ops uaccess_pt = {
  466 + .copy_from_user = copy_from_user_pt,
  467 + .copy_from_user_small = copy_from_user_pt,
  468 + .copy_to_user = copy_to_user_pt,
  469 + .copy_to_user_small = copy_to_user_pt,
  470 + .copy_in_user = copy_in_user_pt,
  471 + .clear_user = clear_user_pt,
  472 + .strnlen_user = strnlen_user_pt,
  473 + .strncpy_from_user = strncpy_from_user_pt,
  474 + .futex_atomic_op = futex_atomic_op_pt,
  475 + .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
  476 +};
arch/s390/mm/fault.c
... ... @@ -137,7 +137,9 @@
137 137  
138 138 /*
139 139 * Check which address space the address belongs to.
140   - * Returns 1 for user space and 0 for kernel space.
  140 + * May return 1 or 2 for user space and 0 for kernel space.
  141 + * Returns 2 for user space in primary addressing mode with
  142 + * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on.
141 143 */
142 144 static inline int check_user_space(struct pt_regs *regs, int error_code)
143 145 {
... ... @@ -154,7 +156,7 @@
154 156 return __check_access_register(regs, error_code);
155 157 if (descriptor == 2)
156 158 return current->thread.mm_segment.ar4;
157   - return descriptor != 0;
  159 + return ((descriptor != 0) ^ (switch_amode)) << s390_noexec;
158 160 }
159 161  
160 162 /*
... ... @@ -183,6 +185,77 @@
183 185 force_sig_info(SIGSEGV, &si, current);
184 186 }
185 187  
  188 +#ifdef CONFIG_S390_EXEC_PROTECT
  189 +extern long sys_sigreturn(struct pt_regs *regs);
  190 +extern long sys_rt_sigreturn(struct pt_regs *regs);
  191 +extern long sys32_sigreturn(struct pt_regs *regs);
  192 +extern long sys32_rt_sigreturn(struct pt_regs *regs);
  193 +
  194 +static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs,
  195 + int rt)
  196 +{
  197 + up_read(&mm->mmap_sem);
  198 + clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
  199 +#ifdef CONFIG_COMPAT
  200 + if (test_tsk_thread_flag(current, TIF_31BIT)) {
  201 + if (rt)
  202 + sys32_rt_sigreturn(regs);
  203 + else
  204 + sys32_sigreturn(regs);
  205 + return;
  206 + }
  207 +#endif /* CONFIG_COMPAT */
  208 + if (rt)
  209 + sys_rt_sigreturn(regs);
  210 + else
  211 + sys_sigreturn(regs);
  212 + return;
  213 +}
  214 +
  215 +static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
  216 + unsigned long address, unsigned long error_code)
  217 +{
  218 + pgd_t *pgd;
  219 + pmd_t *pmd;
  220 + pte_t *pte;
  221 + u16 *instruction;
  222 + unsigned long pfn, uaddr = regs->psw.addr;
  223 +
  224 + spin_lock(&mm->page_table_lock);
  225 + pgd = pgd_offset(mm, uaddr);
  226 + if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
  227 + goto out_fault;
  228 + pmd = pmd_offset(pgd, uaddr);
  229 + if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
  230 + goto out_fault;
  231 + pte = pte_offset_map(pmd, uaddr);
  232 + if (!pte || !pte_present(*pte))
  233 + goto out_fault;
  234 + pfn = pte_pfn(*pte);
  235 + if (!pfn_valid(pfn))
  236 + goto out_fault;
  237 + spin_unlock(&mm->page_table_lock);
  238 +
  239 + instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1)));
  240 + if (*instruction == 0x0a77)
  241 + do_sigreturn(mm, regs, 0);
  242 + else if (*instruction == 0x0aad)
  243 + do_sigreturn(mm, regs, 1);
  244 + else {
  245 + printk("- XXX - do_exception: task = %s, primary, NO EXEC "
  246 + "-> SIGSEGV\n", current->comm);
  247 + up_read(&mm->mmap_sem);
  248 + current->thread.prot_addr = address;
  249 + current->thread.trap_no = error_code;
  250 + do_sigsegv(regs, error_code, SEGV_MAPERR, address);
  251 + }
  252 + return 0;
  253 +out_fault:
  254 + spin_unlock(&mm->page_table_lock);
  255 + return -EFAULT;
  256 +}
  257 +#endif /* CONFIG_S390_EXEC_PROTECT */
  258 +
186 259 /*
187 260 * This routine handles page faults. It determines the address,
188 261 * and the problem, and then passes it off to one of the appropriate
... ... @@ -260,6 +333,17 @@
260 333 vma = find_vma(mm, address);
261 334 if (!vma)
262 335 goto bad_area;
  336 +
  337 +#ifdef CONFIG_S390_EXEC_PROTECT
  338 + if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC)))
  339 + if (!signal_return(mm, regs, address, error_code))
  340 + /*
  341 + * signal_return() has done an up_read(&mm->mmap_sem)
  342 + * if it returns 0.
  343 + */
  344 + return;
  345 +#endif
  346 +
263 347 if (vma->vm_start <= address)
264 348 goto good_area;
265 349 if (!(vma->vm_flags & VM_GROWSDOWN))
arch/s390/mm/init.c
... ... @@ -104,7 +104,7 @@
104 104 pmd = pmd_offset(pgd, address);
105 105 pte = pte_offset_kernel(pmd, address);
106 106 new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
107   - set_pte(pte, new_pte);
  107 + *pte = new_pte;
108 108 }
109 109 }
110 110  
111 111  
... ... @@ -124,11 +124,11 @@
124 124 #ifdef CONFIG_64BIT
125 125 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
126 126 for (i = 0; i < PTRS_PER_PGD; i++)
127   - pgd_clear(pg_dir + i);
  127 + pgd_clear_kernel(pg_dir + i);
128 128 #else
129 129 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
130 130 for (i = 0; i < PTRS_PER_PGD; i++)
131   - pmd_clear((pmd_t *)(pg_dir + i));
  131 + pmd_clear_kernel((pmd_t *)(pg_dir + i));
132 132 #endif
133 133 vmem_map_init();
134 134 setup_ro_region();
arch/s390/mm/vmem.c
... ... @@ -82,7 +82,7 @@
82 82 if (!pmd)
83 83 return NULL;
84 84 for (i = 0; i < PTRS_PER_PMD; i++)
85   - pmd_clear(pmd + i);
  85 + pmd_clear_kernel(pmd + i);
86 86 return pmd;
87 87 }
88 88  
... ... @@ -97,7 +97,7 @@
97 97 return NULL;
98 98 pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
99 99 for (i = 0; i < PTRS_PER_PTE; i++)
100   - set_pte(pte + i, empty_pte);
  100 + pte[i] = empty_pte;
101 101 return pte;
102 102 }
103 103  
... ... @@ -119,7 +119,7 @@
119 119 pm_dir = vmem_pmd_alloc();
120 120 if (!pm_dir)
121 121 goto out;
122   - pgd_populate(&init_mm, pg_dir, pm_dir);
  122 + pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
123 123 }
124 124  
125 125 pm_dir = pmd_offset(pg_dir, address);
... ... @@ -132,7 +132,7 @@
132 132  
133 133 pt_dir = pte_offset_kernel(pm_dir, address);
134 134 pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
135   - set_pte(pt_dir, pte);
  135 + *pt_dir = pte;
136 136 }
137 137 ret = 0;
138 138 out:
... ... @@ -161,7 +161,7 @@
161 161 if (pmd_none(*pm_dir))
162 162 continue;
163 163 pt_dir = pte_offset_kernel(pm_dir, address);
164   - set_pte(pt_dir, pte);
  164 + *pt_dir = pte;
165 165 }
166 166 flush_tlb_kernel_range(start, start + size);
167 167 }
... ... @@ -191,7 +191,7 @@
191 191 pm_dir = vmem_pmd_alloc();
192 192 if (!pm_dir)
193 193 goto out;
194   - pgd_populate(&init_mm, pg_dir, pm_dir);
  194 + pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
195 195 }
196 196  
197 197 pm_dir = pmd_offset(pg_dir, address);
... ... @@ -210,7 +210,7 @@
210 210 if (!new_page)
211 211 goto out;
212 212 pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
213   - set_pte(pt_dir, pte);
  213 + *pt_dir = pte;
214 214 }
215 215 }
216 216 ret = 0;
include/asm-s390/compat.h
... ... @@ -6,6 +6,34 @@
6 6 #include <linux/types.h>
7 7 #include <linux/sched.h>
8 8  
  9 +#define PSW32_MASK_PER 0x40000000UL
  10 +#define PSW32_MASK_DAT 0x04000000UL
  11 +#define PSW32_MASK_IO 0x02000000UL
  12 +#define PSW32_MASK_EXT 0x01000000UL
  13 +#define PSW32_MASK_KEY 0x00F00000UL
  14 +#define PSW32_MASK_MCHECK 0x00040000UL
  15 +#define PSW32_MASK_WAIT 0x00020000UL
  16 +#define PSW32_MASK_PSTATE 0x00010000UL
  17 +#define PSW32_MASK_ASC 0x0000C000UL
  18 +#define PSW32_MASK_CC 0x00003000UL
  19 +#define PSW32_MASK_PM 0x00000f00UL
  20 +
  21 +#define PSW32_ADDR_AMODE31 0x80000000UL
  22 +#define PSW32_ADDR_INSN 0x7FFFFFFFUL
  23 +
  24 +#define PSW32_BASE_BITS 0x00080000UL
  25 +
  26 +#define PSW32_ASC_PRIMARY 0x00000000UL
  27 +#define PSW32_ASC_ACCREG 0x00004000UL
  28 +#define PSW32_ASC_SECONDARY 0x00008000UL
  29 +#define PSW32_ASC_HOME 0x0000C000UL
  30 +
  31 +#define PSW32_MASK_MERGE(CURRENT,NEW) \
  32 + (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
  33 + ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
  34 +
  35 +extern long psw32_user_bits;
  36 +
9 37 #define COMPAT_USER_HZ 100
10 38  
11 39 typedef u32 compat_size_t;
include/asm-s390/lowcore.h
... ... @@ -220,7 +220,8 @@
220 220 __u32 kernel_asce; /* 0xc4c */
221 221 __u32 user_asce; /* 0xc50 */
222 222 __u32 panic_stack; /* 0xc54 */
223   - __u8 pad10[0xc60-0xc58]; /* 0xc58 */
  223 + __u32 user_exec_asce; /* 0xc58 */
  224 + __u8 pad10[0xc60-0xc5c]; /* 0xc5c */
224 225 /* entry.S sensitive area start */
225 226 struct cpuinfo_S390 cpu_data; /* 0xc60 */
226 227 __u32 ipl_device; /* 0xc7c */
... ... @@ -310,7 +311,8 @@
310 311 __u64 kernel_asce; /* 0xd58 */
311 312 __u64 user_asce; /* 0xd60 */
312 313 __u64 panic_stack; /* 0xd68 */
313   - __u8 pad10[0xd80-0xd70]; /* 0xd70 */
  314 + __u64 user_exec_asce; /* 0xd70 */
  315 + __u8 pad10[0xd80-0xd78]; /* 0xd78 */
314 316 /* entry.S sensitive area start */
315 317 struct cpuinfo_S390 cpu_data; /* 0xd80 */
316 318 __u32 ipl_device; /* 0xdb8 */
include/asm-s390/mmu_context.h
... ... @@ -9,6 +9,7 @@
9 9 #ifndef __S390_MMU_CONTEXT_H
10 10 #define __S390_MMU_CONTEXT_H
11 11  
  12 +#include <asm/pgalloc.h>
12 13 /*
13 14 * get a new mmu context.. S390 don't know about contexts.
14 15 */
15 16  
16 17  
... ... @@ -16,29 +17,44 @@
16 17  
17 18 #define destroy_context(mm) do { } while (0)
18 19  
  20 +#ifndef __s390x__
  21 +#define LCTL_OPCODE "lctl"
  22 +#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
  23 +#else
  24 +#define LCTL_OPCODE "lctlg"
  25 +#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
  26 +#endif
  27 +
19 28 static inline void enter_lazy_tlb(struct mm_struct *mm,
20 29 struct task_struct *tsk)
21 30 {
22 31 }
23 32  
24 33 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
25   - struct task_struct *tsk)
  34 + struct task_struct *tsk)
26 35 {
27   - if (prev != next) {
28   -#ifndef __s390x__
29   - S390_lowcore.user_asce = (__pa(next->pgd)&PAGE_MASK) |
30   - (_SEGMENT_TABLE|USER_STD_MASK);
31   - /* Load home space page table origin. */
32   - asm volatile("lctl 13,13,%0"
33   - : : "m" (S390_lowcore.user_asce) );
34   -#else /* __s390x__ */
35   - S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
36   - (_REGION_TABLE|USER_STD_MASK);
37   - /* Load home space page table origin. */
38   - asm volatile("lctlg 13,13,%0"
39   - : : "m" (S390_lowcore.user_asce) );
40   -#endif /* __s390x__ */
41   - }
  36 + pgd_t *shadow_pgd = get_shadow_pgd(next->pgd);
  37 +
  38 + if (prev != next) {
  39 + S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
  40 + PGTABLE_BITS;
  41 + if (shadow_pgd) {
  42 + /* Load primary/secondary space page table origin. */
  43 + S390_lowcore.user_exec_asce =
  44 + (__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
  45 + asm volatile(LCTL_OPCODE" 1,1,%0\n"
  46 + LCTL_OPCODE" 7,7,%1"
  47 + : : "m" (S390_lowcore.user_exec_asce),
  48 + "m" (S390_lowcore.user_asce) );
  49 + } else if (switch_amode) {
  50 + /* Load primary space page table origin. */
  51 + asm volatile(LCTL_OPCODE" 1,1,%0"
  52 + : : "m" (S390_lowcore.user_asce) );
  53 + } else
  54 + /* Load home space page table origin. */
  55 + asm volatile(LCTL_OPCODE" 13,13,%0"
  56 + : : "m" (S390_lowcore.user_asce) );
  57 + }
42 58 cpu_set(smp_processor_id(), next->cpu_vm_mask);
43 59 }
44 60  
... ... @@ -51,5 +67,5 @@
51 67 set_fs(current->thread.mm_segment);
52 68 }
53 69  
54   -#endif
  70 +#endif /* __S390_MMU_CONTEXT_H */
include/asm-s390/pgalloc.h
... ... @@ -47,6 +47,17 @@
47 47  
48 48 if (!pgd)
49 49 return NULL;
  50 + if (s390_noexec) {
  51 + pgd_t *shadow_pgd = (pgd_t *)
  52 + __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
  53 + struct page *page = virt_to_page(pgd);
  54 +
  55 + if (!shadow_pgd) {
  56 + free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
  57 + return NULL;
  58 + }
  59 + page->lru.next = (void *) shadow_pgd;
  60 + }
50 61 for (i = 0; i < PTRS_PER_PGD; i++)
51 62 #ifndef __s390x__
52 63 pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
... ... @@ -58,6 +69,10 @@
58 69  
59 70 static inline void pgd_free(pgd_t *pgd)
60 71 {
  72 + pgd_t *shadow_pgd = get_shadow_pgd(pgd);
  73 +
  74 + if (shadow_pgd)
  75 + free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
61 76 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
62 77 }
63 78  
... ... @@ -71,6 +86,7 @@
71 86 #define pmd_free(x) do { } while (0)
72 87 #define __pmd_free_tlb(tlb,x) do { } while (0)
73 88 #define pgd_populate(mm, pmd, pte) BUG()
  89 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
74 90 #else /* __s390x__ */
75 91 static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
76 92 {
... ... @@ -79,6 +95,17 @@
79 95  
80 96 if (!pmd)
81 97 return NULL;
  98 + if (s390_noexec) {
  99 + pmd_t *shadow_pmd = (pmd_t *)
  100 + __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
  101 + struct page *page = virt_to_page(pmd);
  102 +
  103 + if (!shadow_pmd) {
  104 + free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
  105 + return NULL;
  106 + }
  107 + page->lru.next = (void *) shadow_pmd;
  108 + }
82 109 for (i=0; i < PTRS_PER_PMD; i++)
83 110 pmd_clear(pmd + i);
84 111 return pmd;
... ... @@ -86,6 +113,10 @@
86 113  
87 114 static inline void pmd_free (pmd_t *pmd)
88 115 {
  116 + pmd_t *shadow_pmd = get_shadow_pmd(pmd);
  117 +
  118 + if (shadow_pmd)
  119 + free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
89 120 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
90 121 }
91 122  
92 123  
... ... @@ -95,11 +126,22 @@
95 126 pmd_free(pmd); \
96 127 } while (0)
97 128  
98   -static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
  129 +static inline void
  130 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
99 131 {
100 132 pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
101 133 }
102 134  
  135 +static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
  136 +{
  137 + pgd_t *shadow_pgd = get_shadow_pgd(pgd);
  138 + pmd_t *shadow_pmd = get_shadow_pmd(pmd);
  139 +
  140 + if (shadow_pgd && shadow_pmd)
  141 + pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
  142 + pgd_populate_kernel(mm, pgd, pmd);
  143 +}
  144 +
103 145 #endif /* __s390x__ */
104 146  
105 147 static inline void
... ... @@ -119,7 +161,13 @@
119 161 static inline void
120 162 pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
121 163 {
122   - pmd_populate_kernel(mm, pmd, (pte_t *)page_to_phys(page));
  164 + pte_t *pte = (pte_t *)page_to_phys(page);
  165 + pmd_t *shadow_pmd = get_shadow_pmd(pmd);
  166 + pte_t *shadow_pte = get_shadow_pte(pte);
  167 +
  168 + pmd_populate_kernel(mm, pmd, pte);
  169 + if (shadow_pmd && shadow_pte)
  170 + pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
123 171 }
124 172  
125 173 /*
... ... @@ -133,6 +181,17 @@
133 181  
134 182 if (!pte)
135 183 return NULL;
  184 + if (s390_noexec) {
  185 + pte_t *shadow_pte = (pte_t *)
  186 + __get_free_page(GFP_KERNEL|__GFP_REPEAT);
  187 + struct page *page = virt_to_page(pte);
  188 +
  189 + if (!shadow_pte) {
  190 + free_page((unsigned long) pte);
  191 + return NULL;
  192 + }
  193 + page->lru.next = (void *) shadow_pte;
  194 + }
136 195 for (i=0; i < PTRS_PER_PTE; i++) {
137 196 pte_clear(mm, vmaddr, pte + i);
138 197 vmaddr += PAGE_SIZE;
139 198  
140 199  
... ... @@ -151,15 +210,31 @@
151 210  
152 211 static inline void pte_free_kernel(pte_t *pte)
153 212 {
154   - free_page((unsigned long) pte);
  213 + pte_t *shadow_pte = get_shadow_pte(pte);
  214 +
  215 + if (shadow_pte)
  216 + free_page((unsigned long) shadow_pte);
  217 + free_page((unsigned long) pte);
155 218 }
156 219  
157 220 static inline void pte_free(struct page *pte)
158 221 {
159   - __free_page(pte);
  222 + struct page *shadow_page = get_shadow_page(pte);
  223 +
  224 + if (shadow_page)
  225 + __free_page(shadow_page);
  226 + __free_page(pte);
160 227 }
161 228  
162   -#define __pte_free_tlb(tlb,pte) tlb_remove_page(tlb,pte)
  229 +#define __pte_free_tlb(tlb, pte) \
  230 +({ \
  231 + struct mmu_gather *__tlb = (tlb); \
  232 + struct page *__pte = (pte); \
  233 + struct page *shadow_page = get_shadow_page(__pte); \
  234 + if (shadow_page) \
  235 + tlb_remove_page(__tlb, shadow_page); \
  236 + tlb_remove_page(__tlb, __pte); \
  237 +})
163 238  
164 239 #endif /* _S390_PGALLOC_H */
include/asm-s390/pgtable.h
... ... @@ -224,6 +224,8 @@
224 224 #define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */
225 225 #define _PAGE_TYPE_RO 0x200
226 226 #define _PAGE_TYPE_RW 0x000
  227 +#define _PAGE_TYPE_EX_RO 0x202
  228 +#define _PAGE_TYPE_EX_RW 0x002
227 229  
228 230 /*
229 231 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
230 232  
231 233  
... ... @@ -244,11 +246,13 @@
244 246 * _PAGE_TYPE_FILE 11?1 -> 11?1
245 247 * _PAGE_TYPE_RO 0100 -> 1100
246 248 * _PAGE_TYPE_RW 0000 -> 1000
  249 + * _PAGE_TYPE_EX_RO 0110 -> 1110
  250 + * _PAGE_TYPE_EX_RW 0010 -> 1010
247 251 *
248   - * pte_none is true for bits combinations 1000, 1100
  252 + * pte_none is true for bits combinations 1000, 1010, 1100, 1110
249 253 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
250 254 * pte_file is true for bits combinations 1101, 1111
251   - * swap pte is 1011 and 0001, 0011, 0101, 0111, 1010 and 1110 are invalid.
  255 + * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
252 256 */
253 257  
254 258 #ifndef __s390x__
255 259  
256 260  
257 261  
258 262  
... ... @@ -313,34 +317,101 @@
313 317 #define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
314 318 #define PAGE_RO __pgprot(_PAGE_TYPE_RO)
315 319 #define PAGE_RW __pgprot(_PAGE_TYPE_RW)
  320 +#define PAGE_EX_RO __pgprot(_PAGE_TYPE_EX_RO)
  321 +#define PAGE_EX_RW __pgprot(_PAGE_TYPE_EX_RW)
316 322  
317 323 #define PAGE_KERNEL PAGE_RW
318 324 #define PAGE_COPY PAGE_RO
319 325  
320 326 /*
321   - * The S390 can't do page protection for execute, and considers that the
322   - * same are read. Also, write permissions imply read permissions. This is
323   - * the closest we can get..
  327 + * Depending on the EXEC_PROTECT option, s390 can do execute protection.
  328 + * Write permission always implies read permission. In theory, with a
  329 + * primary/secondary page table, execute-only could be implemented, but
  330 + * it would cost an additional bit in the pte to distinguish all the
  331 + * different pte types. To avoid that, execute permission currently
  332 + * implies read permission as well.
324 333 */
325 334 /*xwr*/
326 335 #define __P000 PAGE_NONE
327 336 #define __P001 PAGE_RO
328 337 #define __P010 PAGE_RO
329 338 #define __P011 PAGE_RO
330   -#define __P100 PAGE_RO
331   -#define __P101 PAGE_RO
332   -#define __P110 PAGE_RO
333   -#define __P111 PAGE_RO
  339 +#define __P100 PAGE_EX_RO
  340 +#define __P101 PAGE_EX_RO
  341 +#define __P110 PAGE_EX_RO
  342 +#define __P111 PAGE_EX_RO
334 343  
335 344 #define __S000 PAGE_NONE
336 345 #define __S001 PAGE_RO
337 346 #define __S010 PAGE_RW
338 347 #define __S011 PAGE_RW
339   -#define __S100 PAGE_RO
340   -#define __S101 PAGE_RO
341   -#define __S110 PAGE_RW
342   -#define __S111 PAGE_RW
  348 +#define __S100 PAGE_EX_RO
  349 +#define __S101 PAGE_EX_RO
  350 +#define __S110 PAGE_EX_RW
  351 +#define __S111 PAGE_EX_RW
343 352  
  353 +#ifndef __s390x__
  354 +# define PMD_SHADOW_SHIFT 1
  355 +# define PGD_SHADOW_SHIFT 1
  356 +#else /* __s390x__ */
  357 +# define PMD_SHADOW_SHIFT 2
  358 +# define PGD_SHADOW_SHIFT 2
  359 +#endif /* __s390x__ */
  360 +
  361 +static inline struct page *get_shadow_page(struct page *page)
  362 +{
  363 + if (s390_noexec && !list_empty(&page->lru))
  364 + return virt_to_page(page->lru.next);
  365 + return NULL;
  366 +}
  367 +
  368 +static inline pte_t *get_shadow_pte(pte_t *ptep)
  369 +{
  370 + unsigned long pteptr = (unsigned long) (ptep);
  371 +
  372 + if (s390_noexec) {
  373 + unsigned long offset = pteptr & (PAGE_SIZE - 1);
  374 + void *addr = (void *) (pteptr ^ offset);
  375 + struct page *page = virt_to_page(addr);
  376 + if (!list_empty(&page->lru))
  377 + return (pte_t *) ((unsigned long) page->lru.next |
  378 + offset);
  379 + }
  380 + return NULL;
  381 +}
  382 +
  383 +static inline pmd_t *get_shadow_pmd(pmd_t *pmdp)
  384 +{
  385 + unsigned long pmdptr = (unsigned long) (pmdp);
  386 +
  387 + if (s390_noexec) {
  388 + unsigned long offset = pmdptr &
  389 + ((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1);
  390 + void *addr = (void *) (pmdptr ^ offset);
  391 + struct page *page = virt_to_page(addr);
  392 + if (!list_empty(&page->lru))
  393 + return (pmd_t *) ((unsigned long) page->lru.next |
  394 + offset);
  395 + }
  396 + return NULL;
  397 +}
  398 +
  399 +static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
  400 +{
  401 + unsigned long pgdptr = (unsigned long) (pgdp);
  402 +
  403 + if (s390_noexec) {
  404 + unsigned long offset = pgdptr &
  405 + ((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1);
  406 + void *addr = (void *) (pgdptr ^ offset);
  407 + struct page *page = virt_to_page(addr);
  408 + if (!list_empty(&page->lru))
  409 + return (pgd_t *) ((unsigned long) page->lru.next |
  410 + offset);
  411 + }
  412 + return NULL;
  413 +}
  414 +
344 415 /*
345 416 * Certain architectures need to do special things when PTEs
346 417 * within a page table are directly modified. Thus, the following
347 418  
... ... @@ -348,7 +419,16 @@
348 419 */
349 420 static inline void set_pte(pte_t *pteptr, pte_t pteval)
350 421 {
  422 + pte_t *shadow_pte = get_shadow_pte(pteptr);
  423 +
351 424 *pteptr = pteval;
  425 + if (shadow_pte) {
  426 + if (!(pte_val(pteval) & _PAGE_INVALID) &&
  427 + (pte_val(pteval) & _PAGE_SWX))
  428 + pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
  429 + else
  430 + pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
  431 + }
352 432 }
353 433 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
354 434  
... ... @@ -466,7 +546,7 @@
466 546  
467 547 static inline void pgd_clear(pgd_t * pgdp) { }
468 548  
469   -static inline void pmd_clear(pmd_t * pmdp)
  549 +static inline void pmd_clear_kernel(pmd_t * pmdp)
470 550 {
471 551 pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
472 552 pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
473 553  
474 554  
475 555  
476 556  
477 557  
478 558  
... ... @@ -474,24 +554,55 @@
474 554 pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
475 555 }
476 556  
  557 +static inline void pmd_clear(pmd_t * pmdp)
  558 +{
  559 + pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
  560 +
  561 + pmd_clear_kernel(pmdp);
  562 + if (shadow_pmd)
  563 + pmd_clear_kernel(shadow_pmd);
  564 +}
  565 +
477 566 #else /* __s390x__ */
478 567  
479   -static inline void pgd_clear(pgd_t * pgdp)
  568 +static inline void pgd_clear_kernel(pgd_t * pgdp)
480 569 {
481 570 pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
482 571 }
483 572  
484   -static inline void pmd_clear(pmd_t * pmdp)
  573 +static inline void pgd_clear(pgd_t * pgdp)
485 574 {
  575 + pgd_t *shadow_pgd = get_shadow_pgd(pgdp);
  576 +
  577 + pgd_clear_kernel(pgdp);
  578 + if (shadow_pgd)
  579 + pgd_clear_kernel(shadow_pgd);
  580 +}
  581 +
  582 +static inline void pmd_clear_kernel(pmd_t * pmdp)
  583 +{
486 584 pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
487 585 pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
488 586 }
489 587  
  588 +static inline void pmd_clear(pmd_t * pmdp)
  589 +{
  590 + pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
  591 +
  592 + pmd_clear_kernel(pmdp);
  593 + if (shadow_pmd)
  594 + pmd_clear_kernel(shadow_pmd);
  595 +}
  596 +
490 597 #endif /* __s390x__ */
491 598  
492 599 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
493 600 {
  601 + pte_t *shadow_pte = get_shadow_pte(ptep);
  602 +
494 603 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
  604 + if (shadow_pte)
  605 + pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
495 606 }
496 607  
497 608 /*
498 609  
... ... @@ -609,8 +720,11 @@
609 720 unsigned long address, pte_t *ptep)
610 721 {
611 722 pte_t pte = *ptep;
  723 + pte_t *shadow_pte = get_shadow_pte(ptep);
612 724  
613 725 __ptep_ipte(address, ptep);
  726 + if (shadow_pte)
  727 + __ptep_ipte(address, shadow_pte);
614 728 return pte;
615 729 }
616 730  
include/asm-s390/processor.h
... ... @@ -145,7 +145,7 @@
145 145  
146 146 #define start_thread(regs, new_psw, new_stackp) do { \
147 147 set_fs(USER_DS); \
148   - regs->psw.mask = PSW_USER_BITS; \
  148 + regs->psw.mask = psw_user_bits; \
149 149 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
150 150 regs->gprs[15] = new_stackp ; \
151 151 } while (0)
152 152  
... ... @@ -154,14 +154,14 @@
154 154  
155 155 #define start_thread(regs, new_psw, new_stackp) do { \
156 156 set_fs(USER_DS); \
157   - regs->psw.mask = PSW_USER_BITS; \
  157 + regs->psw.mask = psw_user_bits; \
158 158 regs->psw.addr = new_psw; \
159 159 regs->gprs[15] = new_stackp; \
160 160 } while (0)
161 161  
162 162 #define start_thread31(regs, new_psw, new_stackp) do { \
163 163 set_fs(USER_DS); \
164   - regs->psw.mask = PSW_USER32_BITS; \
  164 + regs->psw.mask = psw_user32_bits; \
165 165 regs->psw.addr = new_psw; \
166 166 regs->gprs[15] = new_stackp; \
167 167 } while (0)
include/asm-s390/ptrace.h
... ... @@ -266,17 +266,12 @@
266 266 #define PSW_ASC_SECONDARY 0x0000800000000000UL
267 267 #define PSW_ASC_HOME 0x0000C00000000000UL
268 268  
269   -#define PSW_USER32_BITS (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \
270   - PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
271   - PSW_MASK_PSTATE | PSW_DEFAULT_KEY)
  269 +extern long psw_user32_bits;
272 270  
273 271 #endif /* __s390x__ */
274 272  
275   -#define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | \
276   - PSW_MASK_MCHECK | PSW_DEFAULT_KEY)
277   -#define PSW_USER_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \
278   - PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
279   - PSW_MASK_PSTATE | PSW_DEFAULT_KEY)
  273 +extern long psw_kernel_bits;
  274 +extern long psw_user_bits;
280 275  
281 276 /* This macro merges a NEW PSW mask specified by the user into
282 277 the currently active PSW mask CURRENT, modifying only those
include/asm-s390/setup.h
... ... @@ -42,6 +42,18 @@
42 42  
43 43 extern struct mem_chunk memory_chunk[];
44 44  
  45 +#ifdef CONFIG_S390_SWITCH_AMODE
  46 +extern unsigned int switch_amode;
  47 +#else
  48 +#define switch_amode (0)
  49 +#endif
  50 +
  51 +#ifdef CONFIG_S390_EXEC_PROTECT
  52 +extern unsigned int s390_noexec;
  53 +#else
  54 +#define s390_noexec (0)
  55 +#endif
  56 +
45 57 /*
46 58 * Machine features detected in head.S
47 59 */
include/asm-s390/smp.h
... ... @@ -110,7 +110,7 @@
110 110 static inline void smp_send_stop(void)
111 111 {
112 112 /* Disable all interrupts/machine checks */
113   - __load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
  113 + __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
114 114 }
115 115  
116 116 #define smp_cpu_not_running(cpu) 1
include/asm-s390/system.h
... ... @@ -373,8 +373,8 @@
373 373 __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
374 374 }
375 375  
376   -#define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS)
377   -#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK)
  376 +#define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
  377 +#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
378 378  
379 379 #ifdef CONFIG_SMP
380 380  
include/asm-s390/tlbflush.h
... ... @@ -3,6 +3,7 @@
3 3  
4 4 #include <linux/mm.h>
5 5 #include <asm/processor.h>
  6 +#include <asm/pgalloc.h>
6 7  
7 8 /*
8 9 * TLB flushing:
... ... @@ -102,6 +103,14 @@
102 103 if (unlikely(cpus_empty(mm->cpu_vm_mask)))
103 104 return;
104 105 if (MACHINE_HAS_IDTE) {
  106 + pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd);
  107 +
  108 + if (shadow_pgd) {
  109 + asm volatile(
  110 + " .insn rrf,0xb98e0000,0,%0,%1,0"
  111 + : : "a" (2048),
  112 + "a" (__pa(shadow_pgd) & PAGE_MASK) : "cc" );
  113 + }
105 114 asm volatile(
106 115 " .insn rrf,0xb98e0000,0,%0,%1,0"
107 116 : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");
include/asm-s390/uaccess.h
... ... @@ -90,6 +90,8 @@
90 90 extern struct uaccess_ops uaccess;
91 91 extern struct uaccess_ops uaccess_std;
92 92 extern struct uaccess_ops uaccess_mvcos;
  93 +extern struct uaccess_ops uaccess_mvcos_switch;
  94 +extern struct uaccess_ops uaccess_pt;
93 95  
94 96 static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
95 97 {