Commit 19f9a34f87c48bbd270d617d1c986d0c23866a1a

Authored by Paul Mundt
1 parent 8c12b5dc13

sh: Initial vsyscall page support.

This implements initial support for the vsyscall page on SH.
For now it is left configurable, since nommu has to be
supported from the same code base. At present it is hooked up
for the signal trampoline return, with more to be added later
once uClibc catches up.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
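
For context, here is a minimal userspace sketch (not part of this commit) of how a libc could pick up the page mapped by this patch. The kernel advertises the base of the vsyscall page through the new AT_SYSINFO_EHDR auxiliary-vector entry (see include/asm-sh/auxvec.h below), and the page itself is an ordinary ELF DSO whose entry point is __kernel_vsyscall. getauxval() is a later glibc convenience used purely for brevity; a libc of this era would walk the auxv past envp instead, and the Elf32 types assume the 32-bit SH target.

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* Base address of the vsyscall page; 0 when booted with vdso=0
	 * or when the kernel was built without CONFIG_VSYSCALL. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (!vdso) {
		puts("no vDSO; fall back to the on-stack trampoline");
		return 0;
	}

	/* The page begins with a normal ELF header; e_entry is the
	 * link-time offset of __kernel_vsyscall, set by ENTRY() in
	 * vsyscall.lds.S, so base + e_entry is its runtime address. */
	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)vdso;
	printf("vDSO mapped at %#lx, __kernel_vsyscall at %#lx\n",
	       vdso, vdso + ehdr->e_entry);

	return 0;
}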

Showing 19 changed files with 473 additions and 17 deletions

arch/sh/kernel/Makefile
... ... @@ -9,6 +9,7 @@
9 9 io.o io_generic.o sh_ksyms.o syscalls.o
10 10  
11 11 obj-y += cpu/ timers/
  12 +obj-$(CONFIG_VSYSCALL) += vsyscall/
12 13  
13 14 obj-$(CONFIG_SMP) += smp.o
14 15 obj-$(CONFIG_CF_ENABLER) += cf-enabler.o
arch/sh/kernel/process.c
... ... @@ -355,7 +355,7 @@
355 355 else if (next->thread.ubc_pc && next->mm) {
356 356 int asid = 0;
357 357 #ifdef CONFIG_MMU
358   - asid |= next->mm->context & MMU_CONTEXT_ASID_MASK;
  358 + asid |= next->mm->context.id & MMU_CONTEXT_ASID_MASK;
359 359 #endif
360 360 ubc_set_tracing(asid, next->thread.ubc_pc);
361 361 } else {
arch/sh/kernel/signal.c
... ... @@ -8,7 +8,6 @@
8 8 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
9 9 *
10 10 */
11   -
12 11 #include <linux/sched.h>
13 12 #include <linux/mm.h>
14 13 #include <linux/smp.h>
... ... @@ -21,6 +20,7 @@
21 20 #include <linux/unistd.h>
22 21 #include <linux/stddef.h>
23 22 #include <linux/tty.h>
  23 +#include <linux/elf.h>
24 24 #include <linux/personality.h>
25 25 #include <linux/binfmts.h>
26 26  
... ... @@ -29,8 +29,6 @@
29 29 #include <asm/pgtable.h>
30 30 #include <asm/cacheflush.h>
31 31  
32   -#undef DEBUG
33   -
34 32 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
35 33  
36 34 /*
... ... @@ -312,6 +310,11 @@
312 310 return (void __user *)((sp - frame_size) & -8ul);
313 311 }
314 312  
  313 +/* These symbols are defined with the addresses in the vsyscall page.
  314 + See vsyscall-trapa.S. */
  315 +extern void __user __kernel_sigreturn;
  316 +extern void __user __kernel_rt_sigreturn;
  317 +
315 318 static int setup_frame(int sig, struct k_sigaction *ka,
316 319 sigset_t *set, struct pt_regs *regs)
317 320 {
... ... @@ -340,6 +343,10 @@
340 343 already in userspace. */
341 344 if (ka->sa.sa_flags & SA_RESTORER) {
342 345 regs->pr = (unsigned long) ka->sa.sa_restorer;
  346 +#ifdef CONFIG_VSYSCALL
  347 + } else if (likely(current->mm->context.vdso)) {
  348 + regs->pr = VDSO_SYM(&__kernel_sigreturn);
  349 +#endif
343 350 } else {
344 351 /* Generate return code (system call to sigreturn) */
345 352 err |= __put_user(MOVW(7), &frame->retcode[0]);
... ... @@ -416,6 +423,10 @@
416 423 already in userspace. */
417 424 if (ka->sa.sa_flags & SA_RESTORER) {
418 425 regs->pr = (unsigned long) ka->sa.sa_restorer;
  426 +#ifdef CONFIG_VSYSCALL
  427 + } else if (likely(current->mm->context.vdso)) {
  428 + regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
  429 +#endif
419 430 } else {
420 431 /* Generate return code (system call to rt_sigreturn) */
421 432 err |= __put_user(MOVW(7), &frame->retcode[0]);
arch/sh/kernel/vsyscall/Makefile
  1 +obj-y += vsyscall.o vsyscall-syscall.o
  2 +
  3 +$(obj)/vsyscall-syscall.o: \
  4 + $(foreach F,trapa,$(obj)/vsyscall-$F.so)
  5 +
  6 +# Teach kbuild about targets
  7 +targets += $(foreach F,trapa,vsyscall-$F.o vsyscall-$F.so)
  8 +targets += vsyscall-note.o vsyscall.lds
  9 +
  10 +# The DSO images are built using a special linker script
  11 +quiet_cmd_syscall = SYSCALL $@
  12 + cmd_syscall = $(CC) -nostdlib $(SYSCFLAGS_$(@F)) \
  13 + -Wl,-T,$(filter-out FORCE,$^) -o $@
  14 +
  15 +export CPPFLAGS_vsyscall.lds += -P -C -Ush
  16 +
  17 +vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
  18 + $(call ld-option, -Wl$(comma)--hash-style=sysv)
  19 +
  20 +SYSCFLAGS_vsyscall-trapa.so = $(vsyscall-flags)
  21 +
  22 +$(obj)/vsyscall-trapa.so: \
  23 +$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
  24 + $(call if_changed,syscall)
  25 +
  26 +# We also create a special relocatable object that should mirror the symbol
  27 +# table and layout of the linked DSO. With ld -R we can then refer to
  28 +# these symbols in the kernel code rather than hand-coded addresses.
  29 +extra-y += vsyscall-syms.o
  30 +$(obj)/built-in.o: $(obj)/vsyscall-syms.o
  31 +$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
  32 +
  33 +SYSCFLAGS_vsyscall-syms.o = -r
  34 +$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
  35 + $(obj)/vsyscall-trapa.o $(obj)/vsyscall-note.o FORCE
  36 + $(call if_changed,syscall)
arch/sh/kernel/vsyscall/vsyscall-note.S
  1 +/*
  2 + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
  3 + * Here we can supply some information useful to userland.
  4 + */
  5 +
  6 +#include <linux/uts.h>
  7 +#include <linux/version.h>
  8 +
  9 +#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \
  10 + .section name, flags; \
  11 + .balign 4; \
  12 + .long 1f - 0f; /* name length */ \
  13 + .long 3f - 2f; /* data length */ \
  14 + .long type; /* note type */ \
  15 +0: .asciz vendor; /* vendor name */ \
  16 +1: .balign 4; \
  17 +2:
  18 +
  19 +#define ASM_ELF_NOTE_END \
  20 +3: .balign 4; /* pad out section */ \
  21 + .previous
  22 +
  23 + ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0)
  24 + .long LINUX_VERSION_CODE
  25 + ASM_ELF_NOTE_END
arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
  1 +#include <asm/unistd.h>
  2 +
  3 + .text
  4 + .balign 32
  5 + .globl __kernel_sigreturn
  6 + .type __kernel_sigreturn,@function
  7 +__kernel_sigreturn:
  8 +.LSTART_sigreturn:
  9 + mov.w 1f, r3
  10 + trapa #0x10
  11 + or r0, r0
  12 + or r0, r0
  13 + or r0, r0
  14 + or r0, r0
  15 + or r0, r0
  16 +
  17 +1: .short __NR_sigreturn
  18 +.LEND_sigreturn:
  19 + .size __kernel_sigreturn,.-.LSTART_sigreturn
  20 +
  21 + .balign 32
  22 + .globl __kernel_rt_sigreturn
  23 + .type __kernel_rt_sigreturn,@function
  24 +__kernel_rt_sigreturn:
  25 +.LSTART_rt_sigreturn:
  26 + mov.w 1f, r3
  27 + trapa #0x10
  28 + or r0, r0
  29 + or r0, r0
  30 + or r0, r0
  31 + or r0, r0
  32 + or r0, r0
  33 +
  34 +1: .short __NR_rt_sigreturn
  35 +.LEND_rt_sigreturn:
  36 + .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
  37 +
  38 + .section .eh_frame,"a",@progbits
  39 + .previous
arch/sh/kernel/vsyscall/vsyscall-syscall.S
  1 +#include <linux/init.h>
  2 +
  3 +__INITDATA
  4 +
  5 + .globl vsyscall_trapa_start, vsyscall_trapa_end
  6 +vsyscall_trapa_start:
  7 + .incbin "arch/sh/kernel/vsyscall/vsyscall-trapa.so"
  8 +vsyscall_trapa_end:
  9 +
  10 +__FINIT
arch/sh/kernel/vsyscall/vsyscall-trapa.S
  1 + .text
  2 + .globl __kernel_vsyscall
  3 + .type __kernel_vsyscall,@function
  4 +__kernel_vsyscall:
  5 +.LSTART_vsyscall:
  6 + /* XXX: We'll have to do something here once we opt to use the vDSO
  7 + * page for something other than the signal trampoline.. as well as
  8 + * fill out .eh_frame -- PFM. */
  9 +.LEND_vsyscall:
  10 + .size __kernel_vsyscall,.-.LSTART_vsyscall
  11 + .previous
  12 +
  13 + .section .eh_frame,"a",@progbits
  14 +.LCIE:
  15 + .ualong .LCIE_end - .LCIE_start
  16 +.LCIE_start:
  17 + .ualong 0 /* CIE ID */
  18 + .byte 0x1 /* Version number */
  19 + .string "zRS" /* NUL-terminated augmentation string */
  20 + .uleb128 0x1 /* Code alignment factor */
  21 + .sleb128 -4 /* Data alignment factor */
  22 + .byte 0x11 /* Return address register column */
  23 + /* Augmentation length and data (none) */
  24 + .byte 0xc /* DW_CFA_def_cfa */
  25 + .uleb128 0xf /* r15 */
  26 + .uleb128 0x0 /* offset 0 */
  27 +
  28 + .align 2
  29 +.LCIE_end:
  30 +
  31 + .ualong .LFDE_end-.LFDE_start /* Length FDE */
  32 +.LFDE_start:
  33 + .ualong .LCIE /* CIE pointer */
  34 + .ualong .LSTART_vsyscall-. /* start address */
  35 + .ualong .LEND_vsyscall-.LSTART_vsyscall
  36 + .uleb128 0
  37 + .align 2
  38 +.LFDE_end:
  39 + .previous
  40 +
  41 +/* Get the common code for the sigreturn entry points */
  42 +#include "vsyscall-sigreturn.S"
arch/sh/kernel/vsyscall/vsyscall.c
  1 +/*
  2 + * arch/sh/kernel/vsyscall.c
  3 + *
  4 + * Copyright (C) 2006 Paul Mundt
  5 + *
  6 + * vDSO randomization
  7 + * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
  8 + *
  9 + * This file is subject to the terms and conditions of the GNU General Public
  10 + * License. See the file "COPYING" in the main directory of this archive
  11 + * for more details.
  12 + */
  13 +#include <linux/mm.h>
  14 +#include <linux/slab.h>
  15 +#include <linux/kernel.h>
  16 +#include <linux/init.h>
  17 +#include <linux/gfp.h>
  18 +#include <linux/module.h>
  19 +#include <linux/elf.h>
  20 +
  21 +/*
  22 + * Should the kernel map a VDSO page into processes and pass its
  23 + * address down to glibc upon exec()?
  24 + */
  25 +unsigned int __read_mostly vdso_enabled = 1;
  26 +EXPORT_SYMBOL_GPL(vdso_enabled);
  27 +
  28 +static int __init vdso_setup(char *s)
  29 +{
  30 + vdso_enabled = simple_strtoul(s, NULL, 0);
  31 + return 1;
  32 +}
  33 +__setup("vdso=", vdso_setup);
  34 +
  35 +/*
  36 + * These symbols are defined by vsyscall.o to mark the bounds
  37 + * of the ELF DSO images included therein.
  38 + */
  39 +extern const char vsyscall_trapa_start, vsyscall_trapa_end;
  40 +static void *syscall_page;
  41 +
  42 +int __init vsyscall_init(void)
  43 +{
  44 + syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
  45 +
  46 + /*
  47 + * XXX: Map this page to a fixmap entry if we get around
  48 + * to adding the page to ELF core dumps
  49 + */
  50 +
  51 + memcpy(syscall_page,
  52 + &vsyscall_trapa_start,
  53 + &vsyscall_trapa_end - &vsyscall_trapa_start);
  54 +
  55 + return 0;
  56 +}
  57 +
  58 +static struct page *syscall_vma_nopage(struct vm_area_struct *vma,
  59 + unsigned long address, int *type)
  60 +{
  61 + unsigned long offset = address - vma->vm_start;
  62 + struct page *page;
  63 +
  64 + if (address < vma->vm_start || address > vma->vm_end)
  65 + return NOPAGE_SIGBUS;
  66 +
  67 + page = virt_to_page(syscall_page + offset);
  68 +
  69 + get_page(page);
  70 +
  71 + return page;
  72 +}
  73 +
  74 +/* Prevent VMA merging */
  75 +static void syscall_vma_close(struct vm_area_struct *vma)
  76 +{
  77 +}
  78 +
  79 +static struct vm_operations_struct syscall_vm_ops = {
  80 + .nopage = syscall_vma_nopage,
  81 + .close = syscall_vma_close,
  82 +};
  83 +
  84 +/* Setup a VMA at program startup for the vsyscall page */
  85 +int arch_setup_additional_pages(struct linux_binprm *bprm,
  86 + int executable_stack)
  87 +{
  88 + struct vm_area_struct *vma;
  89 + struct mm_struct *mm = current->mm;
  90 + unsigned long addr;
  91 + int ret;
  92 +
  93 + down_write(&mm->mmap_sem);
  94 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
  95 + if (IS_ERR_VALUE(addr)) {
  96 + ret = addr;
  97 + goto up_fail;
  98 + }
  99 +
  100 + vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
  101 + if (!vma) {
  102 + ret = -ENOMEM;
  103 + goto up_fail;
  104 + }
  105 +
  106 + vma->vm_start = addr;
  107 + vma->vm_end = addr + PAGE_SIZE;
  108 + /* MAYWRITE to allow gdb to COW and set breakpoints */
  109 + vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
  110 + vma->vm_flags |= mm->def_flags;
  111 + vma->vm_page_prot = protection_map[vma->vm_flags & 7];
  112 + vma->vm_ops = &syscall_vm_ops;
  113 + vma->vm_mm = mm;
  114 +
  115 + ret = insert_vm_struct(mm, vma);
  116 + if (unlikely(ret)) {
  117 + kmem_cache_free(vm_area_cachep, vma);
  118 + goto up_fail;
  119 + }
  120 +
  121 + current->mm->context.vdso = (void *)addr;
  122 +
  123 + mm->total_vm++;
  124 +up_fail:
  125 + up_write(&mm->mmap_sem);
  126 + return ret;
  127 +}
  128 +
  129 +const char *arch_vma_name(struct vm_area_struct *vma)
  130 +{
  131 + if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
  132 + return "[vdso]";
  133 +
  134 + return NULL;
  135 +}
  136 +
  137 +struct vm_area_struct *get_gate_vma(struct task_struct *task)
  138 +{
  139 + return NULL;
  140 +}
  141 +
  142 +int in_gate_area(struct task_struct *task, unsigned long address)
  143 +{
  144 + return 0;
  145 +}
  146 +
  147 +int in_gate_area_no_task(unsigned long address)
  148 +{
  149 + return 0;
  150 +}
arch/sh/kernel/vsyscall/vsyscall.lds.S
  1 +/*
  2 + * Linker script for vsyscall DSO. The vsyscall page is an ELF shared
  3 + * object prelinked to its virtual address, and with only one read-only
  4 + * segment (that fits in one page). This script controls its layout.
  5 + */
  6 +#include <asm/asm-offsets.h>
  7 +
  8 +#ifdef CONFIG_CPU_LITTLE_ENDIAN
  9 +OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
  10 +#else
  11 +OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
  12 +#endif
  13 +OUTPUT_ARCH(sh)
  14 +
  15 +/* The ELF entry point can be used to set the AT_SYSINFO value. */
  16 +ENTRY(__kernel_vsyscall);
  17 +
  18 +SECTIONS
  19 +{
  20 + . = SIZEOF_HEADERS;
  21 +
  22 + .hash : { *(.hash) } :text
  23 + .gnu.hash : { *(.gnu.hash) }
  24 + .dynsym : { *(.dynsym) }
  25 + .dynstr : { *(.dynstr) }
  26 + .gnu.version : { *(.gnu.version) }
  27 + .gnu.version_d : { *(.gnu.version_d) }
  28 + .gnu.version_r : { *(.gnu.version_r) }
  29 +
  30 + /* This linker script is used both with -r and with -shared.
  31 + For the layouts to match, we need to skip more than enough
  32 + space for the dynamic symbol table et al. If this amount
  33 + is insufficient, ld -shared will barf. Just increase it here. */
  34 + . = 0x400;
  35 +
  36 + .text : { *(.text) } :text =0x90909090
  37 + .note : { *(.note.*) } :text :note
  38 + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
  39 + .eh_frame : { KEEP (*(.eh_frame)) } :text
  40 + .dynamic : { *(.dynamic) } :text :dynamic
  41 + .useless : {
  42 + *(.got.plt) *(.got)
  43 + *(.data .data.* .gnu.linkonce.d.*)
  44 + *(.dynbss)
  45 + *(.bss .bss.* .gnu.linkonce.b.*)
  46 + } :text
  47 +}
  48 +
  49 +/*
  50 + * We must supply the ELF program headers explicitly to get just one
  51 + * PT_LOAD segment, and set the flags explicitly to make segments read-only.
  52 + */
  53 +PHDRS
  54 +{
  55 + text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
  56 + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
  57 + note PT_NOTE FLAGS(4); /* PF_R */
  58 + eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
  59 +}
  60 +
  61 +/*
  62 + * This controls what symbols we export from the DSO.
  63 + */
  64 +VERSION
  65 +{
  66 + LINUX_2.6 {
  67 + global:
  68 + __kernel_vsyscall;
  69 + __kernel_sigreturn;
  70 + __kernel_rt_sigreturn;
  71 +
  72 + local: *;
  73 + };
  74 +}
arch/sh/mm/Kconfig
... ... @@ -223,6 +223,19 @@
223 223 32-bits through the SH-4A PMB. If this is not set, legacy
224 224 29-bit physical addressing will be used.
225 225  
  226 +config VSYSCALL
  227 + bool "Support vsyscall page"
  228 + depends on MMU
  229 + default y
  230 + help
  231 + This will enable support for the kernel mapping a vDSO page
  232 + in process space, and subsequently handing down the entry point
  233 + to the libc through the ELF auxiliary vector.
  234 +
  235 + From the kernel side this is used for the signal trampoline.
  236 + For systems with an MMU that can afford to give up a page,
  237 + (the default value) say Y.
  238 +
226 239 choice
227 240 prompt "HugeTLB page size"
228 241 depends on HUGETLB_PAGE && CPU_SH4 && MMU
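
A quick way to see the effect of this option at runtime (a hedged sketch, not part of this commit): the per-process mapping created by arch_setup_additional_pages() above is named by arch_vma_name(), so it shows up as "[vdso]" in /proc/<pid>/maps:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *maps = fopen("/proc/self/maps", "r");

	if (!maps)
		return 1;

	/* Print the mapping arch_vma_name() labels "[vdso]". */
	while (fgets(line, sizeof(line), maps))
		if (strstr(line, "[vdso]"))
			fputs(line, stdout);

	fclose(maps);
	return 0;
}
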
arch/sh/mm/init.c
... ... @@ -286,6 +286,9 @@
286 286 initsize >> 10);
287 287  
288 288 p3_cache_init();
  289 +
  290 + /* Initialize the vDSO */
  291 + vsyscall_init();
289 292 }
290 293  
291 294 void free_initmem(void)
arch/sh/mm/tlb-flush.c
... ... @@ -14,12 +14,12 @@
14 14  
15 15 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
16 16 {
17   - if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
  17 + if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
18 18 unsigned long flags;
19 19 unsigned long asid;
20 20 unsigned long saved_asid = MMU_NO_ASID;
21 21  
22   - asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
  22 + asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
23 23 page &= PAGE_MASK;
24 24  
25 25 local_irq_save(flags);
... ... @@ -39,20 +39,21 @@
39 39 {
40 40 struct mm_struct *mm = vma->vm_mm;
41 41  
42   - if (mm->context != NO_CONTEXT) {
  42 + if (mm->context.id != NO_CONTEXT) {
43 43 unsigned long flags;
44 44 int size;
45 45  
46 46 local_irq_save(flags);
47 47 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
48 48 if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
49   - mm->context = NO_CONTEXT;
  49 + mm->context.id = NO_CONTEXT;
50 50 if (mm == current->mm)
51 51 activate_context(mm);
52 52 } else {
53   - unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
  53 + unsigned long asid;
54 54 unsigned long saved_asid = MMU_NO_ASID;
55 55  
  56 + asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
56 57 start &= PAGE_MASK;
57 58 end += (PAGE_SIZE - 1);
58 59 end &= PAGE_MASK;
... ... @@ -81,9 +82,10 @@
81 82 if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
82 83 flush_tlb_all();
83 84 } else {
84   - unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
  85 + unsigned long asid;
85 86 unsigned long saved_asid = get_asid();
86 87  
  88 + asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK;
87 89 start &= PAGE_MASK;
88 90 end += (PAGE_SIZE - 1);
89 91 end &= PAGE_MASK;
... ... @@ -101,11 +103,11 @@
101 103 {
102 104 /* Invalidate all TLB of this process. */
103 105 /* Instead of invalidating each TLB, we get new MMU context. */
104   - if (mm->context != NO_CONTEXT) {
  106 + if (mm->context.id != NO_CONTEXT) {
105 107 unsigned long flags;
106 108  
107 109 local_irq_save(flags);
108   - mm->context = NO_CONTEXT;
  110 + mm->context.id = NO_CONTEXT;
109 111 if (mm == current->mm)
110 112 activate_context(mm);
111 113 local_irq_restore(flags);
include/asm-sh/auxvec.h
1 1 #ifndef __ASM_SH_AUXVEC_H
2 2 #define __ASM_SH_AUXVEC_H
3 3  
  4 +/*
  5 + * Architecture-neutral AT_ values in 0-17, leave some room
  6 + * for more of them.
  7 + */
  8 +
  9 +#ifdef CONFIG_VSYSCALL
  10 +/*
  11 + * Only define this in the vsyscall case, the entry point to
  12 + * the vsyscall page gets placed here. The kernel will attempt
  13 + * to build a gate VMA we don't care about otherwise..
  14 + */
  15 +#define AT_SYSINFO_EHDR 33
  16 +#endif
  17 +
4 18 #endif /* __ASM_SH_AUXVEC_H */
include/asm-sh/elf.h
... ... @@ -121,5 +121,25 @@
121 121 #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
122 122 #endif
123 123  
  124 +#ifdef CONFIG_VSYSCALL
  125 +/* vDSO has arch_setup_additional_pages */
  126 +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
  127 +struct linux_binprm;
  128 +extern int arch_setup_additional_pages(struct linux_binprm *bprm,
  129 + int executable_stack);
  130 +
  131 +extern unsigned int vdso_enabled;
  132 +extern void __kernel_vsyscall;
  133 +
  134 +#define VDSO_BASE ((unsigned long)current->mm->context.vdso)
  135 +#define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x))
  136 +
  137 +#define ARCH_DLINFO \
  138 +do { \
  139 + if (vdso_enabled) \
  140 + NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
  141 +} while (0)
  142 +#endif /* CONFIG_VSYSCALL */
  143 +
124 144 #endif /* __ASM_SH_ELF_H */
include/asm-sh/mmu.h
... ... @@ -11,7 +11,12 @@
11 11 #else
12 12  
13 13 /* Default "unsigned long" context */
14   -typedef unsigned long mm_context_t;
  14 +typedef unsigned long mm_context_id_t;
  15 +
  16 +typedef struct {
  17 + mm_context_id_t id;
  18 + void *vdso;
  19 +} mm_context_t;
15 20  
16 21 #endif /* CONFIG_MMU */
17 22  
include/asm-sh/mmu_context.h
... ... @@ -49,7 +49,7 @@
49 49 unsigned long mc = mmu_context_cache;
50 50  
51 51 /* Check if we have old version of context. */
52   - if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
  52 + if (((mm->context.id ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
53 53 /* It's up to date, do nothing */
54 54 return;
55 55  
... ... @@ -68,7 +68,7 @@
68 68 if (!mc)
69 69 mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
70 70 }
71   - mm->context = mc;
  71 + mm->context.id = mc;
72 72 }
73 73  
74 74 /*
... ... @@ -78,7 +78,7 @@
78 78 static __inline__ int init_new_context(struct task_struct *tsk,
79 79 struct mm_struct *mm)
80 80 {
81   - mm->context = NO_CONTEXT;
  81 + mm->context.id = NO_CONTEXT;
82 82  
83 83 return 0;
84 84 }
... ... @@ -123,7 +123,7 @@
123 123 static __inline__ void activate_context(struct mm_struct *mm)
124 124 {
125 125 get_mmu_context(mm);
126   - set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
  126 + set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
127 127 }
128 128  
129 129 /* MMU_TTB can be used for optimizing the fault handling.
include/asm-sh/page.h
... ... @@ -117,6 +117,11 @@
117 117 #include <asm-generic/memory_model.h>
118 118 #include <asm-generic/page.h>
119 119  
  120 +/* vDSO support */
  121 +#ifdef CONFIG_VSYSCALL
  122 +#define __HAVE_ARCH_GATE_AREA
  123 +#endif
  124 +
120 125 #endif /* __KERNEL__ */
121 126 #endif /* __ASM_SH_PAGE_H */
include/asm-sh/processor.h
... ... @@ -276,6 +276,12 @@
276 276 #define prefetchw(x) prefetch(x)
277 277 #endif
278 278  
  279 +#ifdef CONFIG_VSYSCALL
  280 +extern int vsyscall_init(void);
  281 +#else
  282 +#define vsyscall_init() do { } while (0)
  283 +#endif
  284 +
279 285 #endif /* __KERNEL__ */
280 286 #endif /* __ASM_SH_PROCESSOR_H */