Commit e839ca528718e68cad32a307dc9aabf01ef3eb05

Authored by David Howells
1 parent 4eb14db444

Disintegrate asm/system.h for SH

Disintegrate asm/system.h for SH.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: linux-sh@vger.kernel.org

Showing 57 changed files with 634 additions and 527 deletions

arch/sh/boards/mach-microdev/irq.c
... ... @@ -12,7 +12,6 @@
12 12 #include <linux/init.h>
13 13 #include <linux/irq.h>
14 14 #include <linux/interrupt.h>
15   -#include <asm/system.h>
16 15 #include <asm/io.h>
17 16 #include <mach/microdev.h>
18 17  
arch/sh/include/asm/atomic-irq.h
1 1 #ifndef __ASM_SH_ATOMIC_IRQ_H
2 2 #define __ASM_SH_ATOMIC_IRQ_H
3 3  
  4 +#include <linux/irqflags.h>
  5 +
4 6 /*
5 7 * To get proper branch prediction for the main line, we must branch
6 8 * forward to code at the end of this object's .text section, then
arch/sh/include/asm/atomic.h
... ... @@ -9,7 +9,7 @@
9 9  
10 10 #include <linux/compiler.h>
11 11 #include <linux/types.h>
12   -#include <asm/system.h>
  12 +#include <asm/cmpxchg.h>
13 13  
14 14 #define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
15 15  
arch/sh/include/asm/auxvec.h
... ... @@ -33,5 +33,7 @@
33 33 #define AT_L1D_CACHESHAPE 35
34 34 #define AT_L2_CACHESHAPE 36
35 35  
  36 +#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
  37 +
36 38 #endif /* __ASM_SH_AUXVEC_H */
arch/sh/include/asm/barrier.h
  1 +/*
  2 + * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
  3 + * Copyright (C) 2002 Paul Mundt
  4 + */
  5 +#ifndef __ASM_SH_BARRIER_H
  6 +#define __ASM_SH_BARRIER_H
  7 +
  8 +#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
  9 +#include <asm/cache_insns.h>
  10 +#endif
  11 +
  12 +/*
  13 + * A brief note on ctrl_barrier(), the control register write barrier.
  14 + *
  15 + * Legacy SH cores typically require a sequence of 8 nops after
  16 + * modification of a control register in order for the changes to take
  17 + * effect. On newer cores (like the sh4a and sh5) this is accomplished
  18 + * with icbi.
  19 + *
  20 + * Also note that on sh4a in the icbi case we can forego a synco for the
  21 + * write barrier, as it's not necessary for control registers.
  22 + *
  23 + * Historically we have only done this type of barrier for the MMUCR, but
  24 + * it's also necessary for the CCR, so we make it generic here instead.
  25 + */
  26 +#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
  27 +#define mb() __asm__ __volatile__ ("synco": : :"memory")
  28 +#define rmb() mb()
  29 +#define wmb() __asm__ __volatile__ ("synco": : :"memory")
  30 +#define ctrl_barrier() __icbi(PAGE_OFFSET)
  31 +#define read_barrier_depends() do { } while(0)
  32 +#else
  33 +#define mb() __asm__ __volatile__ ("": : :"memory")
  34 +#define rmb() mb()
  35 +#define wmb() __asm__ __volatile__ ("": : :"memory")
  36 +#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
  37 +#define read_barrier_depends() do { } while(0)
  38 +#endif
  39 +
  40 +#ifdef CONFIG_SMP
  41 +#define smp_mb() mb()
  42 +#define smp_rmb() rmb()
  43 +#define smp_wmb() wmb()
  44 +#define smp_read_barrier_depends() read_barrier_depends()
  45 +#else
  46 +#define smp_mb() barrier()
  47 +#define smp_rmb() barrier()
  48 +#define smp_wmb() barrier()
  49 +#define smp_read_barrier_depends() do { } while(0)
  50 +#endif
  51 +
  52 +#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
  53 +
  54 +#endif /* __ASM_SH_BARRIER_H */
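
The comment above spells out ctrl_barrier()'s contract: after a control register is modified, legacy cores need 8 nops and sh4a/sh5 need an icbi before the change is guaranteed to take effect. A minimal sketch of the intended pairing, assuming a hypothetical control-register address (CCR_EXAMPLE_ADDR and example_write_ctrl_reg() below are invented for illustration and are not part of this commit):

    #include <asm/barrier.h>
    #include <asm/io.h>

    #define CCR_EXAMPLE_ADDR 0xff00001c	/* hypothetical; illustration only */

    static inline void example_write_ctrl_reg(unsigned long flags)
    {
    	__raw_writel(flags, (void __iomem *)CCR_EXAMPLE_ADDR);
    	ctrl_barrier();	/* 8 nops on legacy cores, icbi on sh4a/sh5 */
    }
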
arch/sh/include/asm/bitops.h
... ... @@ -7,7 +7,6 @@
7 7 #error only <linux/bitops.h> can be included directly
8 8 #endif
9 9  
10   -#include <asm/system.h>
11 10 /* For __swab32 */
12 11 #include <asm/byteorder.h>
13 12  
arch/sh/include/asm/bl_bit.h
  1 +#ifndef __ASM_SH_BL_BIT_H
  2 +#define __ASM_SH_BL_BIT_H
  3 +
  4 +#ifdef CONFIG_SUPERH32
  5 +# include "bl_bit_32.h"
  6 +#else
  7 +# include "bl_bit_64.h"
  8 +#endif
  9 +
  10 +#endif /* __ASM_SH_BL_BIT_H */
arch/sh/include/asm/bl_bit_32.h
  1 +#ifndef __ASM_SH_BL_BIT_32_H
  2 +#define __ASM_SH_BL_BIT_32_H
  3 +
  4 +static inline void set_bl_bit(void)
  5 +{
  6 + unsigned long __dummy0, __dummy1;
  7 +
  8 + __asm__ __volatile__ (
  9 + "stc sr, %0\n\t"
  10 + "or %2, %0\n\t"
  11 + "and %3, %0\n\t"
  12 + "ldc %0, sr\n\t"
  13 + : "=&r" (__dummy0), "=r" (__dummy1)
  14 + : "r" (0x10000000), "r" (0xffffff0f)
  15 + : "memory"
  16 + );
  17 +}
  18 +
  19 +static inline void clear_bl_bit(void)
  20 +{
  21 + unsigned long __dummy0, __dummy1;
  22 +
  23 + __asm__ __volatile__ (
  24 + "stc sr, %0\n\t"
  25 + "and %2, %0\n\t"
  26 + "ldc %0, sr\n\t"
  27 + : "=&r" (__dummy0), "=r" (__dummy1)
  28 + : "1" (~0x10000000)
  29 + : "memory"
  30 + );
  31 +}
  32 +
  33 +#endif /* __ASM_SH_BL_BIT_32_H */
arch/sh/include/asm/bl_bit_64.h
  1 +/*
  2 + * Copyright (C) 2000, 2001 Paolo Alberelli
  3 + * Copyright (C) 2003 Paul Mundt
  4 + * Copyright (C) 2004 Richard Curnow
  5 + *
  6 + * This file is subject to the terms and conditions of the GNU General Public
  7 + * License. See the file "COPYING" in the main directory of this archive
  8 + * for more details.
  9 + */
  10 +#ifndef __ASM_SH_BL_BIT_64_H
  11 +#define __ASM_SH_BL_BIT_64_H
  12 +
  13 +#include <asm/processor.h>
  14 +
  15 +#define SR_BL_LL 0x0000000010000000LL
  16 +
  17 +static inline void set_bl_bit(void)
  18 +{
  19 + unsigned long long __dummy0, __dummy1 = SR_BL_LL;
  20 +
  21 + __asm__ __volatile__("getcon " __SR ", %0\n\t"
  22 + "or %0, %1, %0\n\t"
  23 + "putcon %0, " __SR "\n\t"
  24 + : "=&r" (__dummy0)
  25 + : "r" (__dummy1));
  26 +
  27 +}
  28 +
  29 +static inline void clear_bl_bit(void)
  30 +{
  31 + unsigned long long __dummy0, __dummy1 = ~SR_BL_LL;
  32 +
  33 + __asm__ __volatile__("getcon " __SR ", %0\n\t"
  34 + "and %0, %1, %0\n\t"
  35 + "putcon %0, " __SR "\n\t"
  36 + : "=&r" (__dummy0)
  37 + : "r" (__dummy1));
  38 +}
  39 +
  40 +#endif /* __ASM_SH_BL_BIT_64_H */
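
Both variants expose the same two entry points: set_bl_bit() raises SR.BL so that exceptions and interrupts are blocked, and clear_bl_bit() lowers it again. A minimal usage sketch, assuming a hypothetical do_critical_work() that must run with exceptions blocked:

    #include <asm/bl_bit.h>

    void do_critical_work(void);	/* hypothetical; defined elsewhere */

    static void example_blocked_section(void)
    {
    	set_bl_bit();		/* SR.BL = 1: exceptions/interrupts blocked */
    	do_critical_work();	/* must not fault while BL is set */
    	clear_bl_bit();		/* SR.BL = 0: normal handling resumes */
    }
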
arch/sh/include/asm/bug.h
1 1 #ifndef __ASM_SH_BUG_H
2 2 #define __ASM_SH_BUG_H
3 3  
  4 +#include <linux/linkage.h>
  5 +
4 6 #define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */
5 7 #define BUGFLAG_UNWINDER (1 << 1)
6 8  
... ... @@ -106,6 +108,9 @@
106 108 #endif /* CONFIG_GENERIC_BUG */
107 109  
108 110 #include <asm-generic/bug.h>
  111 +
  112 +struct pt_regs;
  113 +extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
109 114  
110 115 #endif /* __ASM_SH_BUG_H */
arch/sh/include/asm/cache_insns.h
  1 +#ifndef __ASM_SH_CACHE_INSNS_H
  2 +#define __ASM_SH_CACHE_INSNS_H
  3 +
  4 +
  5 +#ifdef CONFIG_SUPERH32
  6 +# include "cache_insns_32.h"
  7 +#else
  8 +# include "cache_insns_64.h"
  9 +#endif
  10 +
  11 +#endif /* __ASM_SH_CACHE_INSNS_H */
arch/sh/include/asm/cache_insns_32.h
  1 +#ifndef __ASM_SH_CACHE_INSNS_32_H
  2 +#define __ASM_SH_CACHE_INSNS_32_H
  3 +
  4 +#include <linux/types.h>
  5 +
  6 +#if defined(CONFIG_CPU_SH4A)
  7 +#define __icbi(addr) __asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
  8 +#else
  9 +#define __icbi(addr) mb()
  10 +#endif
  11 +
  12 +#define __ocbp(addr) __asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
  13 +#define __ocbi(addr) __asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
  14 +#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))
  15 +
  16 +static inline reg_size_t register_align(void *val)
  17 +{
  18 + return (unsigned long)(signed long)val;
  19 +}
  20 +
  21 +#endif /* __ASM_SH_CACHE_INSNS_32_H */
arch/sh/include/asm/cache_insns_64.h
  1 +/*
  2 + * Copyright (C) 2000, 2001 Paolo Alberelli
  3 + * Copyright (C) 2003 Paul Mundt
  4 + * Copyright (C) 2004 Richard Curnow
  5 + *
  6 + * This file is subject to the terms and conditions of the GNU General Public
  7 + * License. See the file "COPYING" in the main directory of this archive
  8 + * for more details.
  9 + */
  10 +#ifndef __ASM_SH_CACHE_INSNS_64_H
  11 +#define __ASM_SH_CACHE_INSNS_64_H
  12 +
  13 +#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
  14 +#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
  15 +#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
  16 +#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
  17 +
  18 +static inline reg_size_t register_align(void *val)
  19 +{
  20 + return (unsigned long long)(signed long long)(signed long)val;
  21 +}
  22 +
  23 +#endif /* __ASM_SH_CACHE_INSNS_64_H */
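
The __ocb*() macros operate on a single cache line at the given address, so callers walk a buffer at L1_CACHE_BYTES strides. A minimal sketch of a line-by-line writeback loop, assuming <asm/cache.h> provides L1_CACHE_BYTES (example_writeback() is invented for illustration):

    #include <linux/types.h>
    #include <asm/cache.h>
    #include <asm/cache_insns.h>

    static void example_writeback(void *start, size_t len)
    {
    	unsigned long v = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
    	unsigned long end = (unsigned long)start + len;

    	for (; v < end; v += L1_CACHE_BYTES)
    		__ocbwb(v);	/* write back one dirty line, keep it valid */
    }
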
arch/sh/include/asm/cmpxchg-irq.h
1 1 #ifndef __ASM_SH_CMPXCHG_IRQ_H
2 2 #define __ASM_SH_CMPXCHG_IRQ_H
3 3  
  4 +#include <linux/irqflags.h>
  5 +
4 6 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
5 7 {
6 8 unsigned long flags, retval;
arch/sh/include/asm/cmpxchg.h
  1 +#ifndef __ASM_SH_CMPXCHG_H
  2 +#define __ASM_SH_CMPXCHG_H
  3 +
  4 +/*
  5 + * Atomic operations that C can't guarantee us. Useful for
  6 + * resource counting etc..
  7 + */
  8 +
  9 +#include <linux/compiler.h>
  10 +#include <linux/types.h>
  11 +
  12 +#if defined(CONFIG_GUSA_RB)
  13 +#include <asm/cmpxchg-grb.h>
  14 +#elif defined(CONFIG_CPU_SH4A)
  15 +#include <asm/cmpxchg-llsc.h>
  16 +#else
  17 +#include <asm/cmpxchg-irq.h>
  18 +#endif
  19 +
  20 +extern void __xchg_called_with_bad_pointer(void);
  21 +
  22 +#define __xchg(ptr, x, size) \
  23 +({ \
  24 + unsigned long __xchg__res; \
  25 + volatile void *__xchg_ptr = (ptr); \
  26 + switch (size) { \
  27 + case 4: \
  28 + __xchg__res = xchg_u32(__xchg_ptr, x); \
  29 + break; \
  30 + case 1: \
  31 + __xchg__res = xchg_u8(__xchg_ptr, x); \
  32 + break; \
  33 + default: \
  34 + __xchg_called_with_bad_pointer(); \
  35 + __xchg__res = x; \
  36 + break; \
  37 + } \
  38 + \
  39 + __xchg__res; \
  40 +})
  41 +
  42 +#define xchg(ptr,x) \
  43 + ((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
  44 +
  45 +/* This function doesn't exist, so you'll get a linker error
  46 + * if something tries to do an invalid cmpxchg(). */
  47 +extern void __cmpxchg_called_with_bad_pointer(void);
  48 +
  49 +#define __HAVE_ARCH_CMPXCHG 1
  50 +
  51 +static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
  52 + unsigned long new, int size)
  53 +{
  54 + switch (size) {
  55 + case 4:
  56 + return __cmpxchg_u32(ptr, old, new);
  57 + }
  58 + __cmpxchg_called_with_bad_pointer();
  59 + return old;
  60 +}
  61 +
  62 +#define cmpxchg(ptr,o,n) \
  63 + ({ \
  64 + __typeof__(*(ptr)) _o_ = (o); \
  65 + __typeof__(*(ptr)) _n_ = (n); \
  66 + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
  67 + (unsigned long)_n_, sizeof(*(ptr))); \
  68 + })
  69 +
  70 +#endif /* __ASM_SH_CMPXCHG_H */
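
xchg() and cmpxchg() dispatch on sizeof(*(ptr)), and any unsupported width falls through to the deliberately undefined __xchg_called_with_bad_pointer() / __cmpxchg_called_with_bad_pointer(), so a bad use fails at link time rather than silently. A minimal sketch of the usual compare-and-swap retry loop on a 32-bit value (example_atomic_inc() is invented for illustration):

    #include <asm/cmpxchg.h>

    static inline void example_atomic_inc(volatile unsigned int *v)
    {
    	unsigned int old, new;

    	do {
    		old = *v;
    		new = old + 1;
    	} while (cmpxchg(v, old, new) != old);	/* retry if *v changed */
    }
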
arch/sh/include/asm/exec.h
  1 +/*
  2 + * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
  3 + * Copyright (C) 2002 Paul Mundt
  4 + */
  5 +#ifndef __ASM_SH_EXEC_H
  6 +#define __ASM_SH_EXEC_H
  7 +
  8 +#define arch_align_stack(x) (x)
  9 +
  10 +#endif /* __ASM_SH_EXEC_H */
arch/sh/include/asm/futex-irq.h
1 1 #ifndef __ASM_SH_FUTEX_IRQ_H
2 2 #define __ASM_SH_FUTEX_IRQ_H
3 3  
4   -#include <asm/system.h>
5 4  
6 5 static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr,
7 6 int *oldval)
arch/sh/include/asm/io.h
... ... @@ -14,7 +14,6 @@
14 14 */
15 15 #include <linux/errno.h>
16 16 #include <asm/cache.h>
17   -#include <asm/system.h>
18 17 #include <asm/addrspace.h>
19 18 #include <asm/machvec.h>
20 19 #include <asm/pgtable.h>
arch/sh/include/asm/processor.h
... ... @@ -101,6 +101,10 @@
101 101 #define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory")
102 102 #define cpu_relax() barrier()
103 103  
  104 +void default_idle(void);
  105 +void cpu_idle_wait(void);
  106 +void stop_this_cpu(void *);
  107 +
104 108 /* Forward decl */
105 109 struct seq_operations;
106 110 struct task_struct;
... ... @@ -159,6 +163,17 @@
159 163 int vsyscall_init(void);
160 164 #else
161 165 #define vsyscall_init() do { } while (0)
  166 +#endif
  167 +
  168 +/*
  169 + * SH-2A has both 16 and 32-bit opcodes, do lame encoding checks.
  170 + */
  171 +#ifdef CONFIG_CPU_SH2A
  172 +extern unsigned int instruction_size(unsigned int insn);
  173 +#elif defined(CONFIG_SUPERH32)
  174 +#define instruction_size(insn) (2)
  175 +#else
  176 +#define instruction_size(insn) (4)
162 177 #endif
163 178  
164 179 #endif /* __ASSEMBLY__ */
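
instruction_size() lets generic code step over an opcode without knowing whether it runs on SH-2A (mixed 16/32-bit encodings), plain SUPERH32 (always 2 bytes), or sh64 (always 4 bytes). A minimal sketch of advancing the PC past a just-handled opcode; the 16-bit fetch and example_skip_insn() below are illustrative only (real callers use the opcode they trapped on):

    #include <asm/processor.h>
    #include <asm/ptrace.h>

    static void example_skip_insn(struct pt_regs *regs)
    {
    	unsigned int insn = *(unsigned short *)regs->pc;	/* illustrative fetch */

    	regs->pc += instruction_size(insn);
    }
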
arch/sh/include/asm/ptrace.h
... ... @@ -37,7 +37,6 @@
37 37 #include <linux/thread_info.h>
38 38 #include <asm/addrspace.h>
39 39 #include <asm/page.h>
40   -#include <asm/system.h>
41 40  
42 41 #define user_mode(regs) (((regs)->sr & 0x40000000)==0)
43 42 #define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15])
arch/sh/include/asm/setup.h
... ... @@ -20,6 +20,7 @@
20 20  
21 21 void sh_mv_setup(void);
22 22 void check_for_initrd(void);
  23 +void per_cpu_trap_init(void);
23 24  
24 25 #endif /* __KERNEL__ */
25 26  
arch/sh/include/asm/switch_to.h
  1 +/*
  2 + * Copyright (C) 2000, 2001 Paolo Alberelli
  3 + * Copyright (C) 2003 Paul Mundt
  4 + * Copyright (C) 2004 Richard Curnow
  5 + *
  6 + * This file is subject to the terms and conditions of the GNU General Public
  7 + * License. See the file "COPYING" in the main directory of this archive
  8 + * for more details.
  9 + */
  10 +#ifndef __ASM_SH_SWITCH_TO_H
  11 +#define __ASM_SH_SWITCH_TO_H
  12 +
  13 +#ifdef CONFIG_SUPERH32
  14 +# include "switch_to_32.h"
  15 +#else
  16 +# include "switch_to_64.h"
  17 +#endif
  18 +
  19 +#endif /* __ASM_SH_SWITCH_TO_H */
arch/sh/include/asm/switch_to_32.h
  1 +#ifndef __ASM_SH_SWITCH_TO_32_H
  2 +#define __ASM_SH_SWITCH_TO_32_H
  3 +
  4 +#ifdef CONFIG_SH_DSP
  5 +
  6 +#define is_dsp_enabled(tsk) \
  7 + (!!(tsk->thread.dsp_status.status & SR_DSP))
  8 +
  9 +#define __restore_dsp(tsk) \
  10 +do { \
  11 + register u32 *__ts2 __asm__ ("r2") = \
  12 + (u32 *)&tsk->thread.dsp_status; \
  13 + __asm__ __volatile__ ( \
  14 + ".balign 4\n\t" \
  15 + "movs.l @r2+, a0\n\t" \
  16 + "movs.l @r2+, a1\n\t" \
  17 + "movs.l @r2+, a0g\n\t" \
  18 + "movs.l @r2+, a1g\n\t" \
  19 + "movs.l @r2+, m0\n\t" \
  20 + "movs.l @r2+, m1\n\t" \
  21 + "movs.l @r2+, x0\n\t" \
  22 + "movs.l @r2+, x1\n\t" \
  23 + "movs.l @r2+, y0\n\t" \
  24 + "movs.l @r2+, y1\n\t" \
  25 + "lds.l @r2+, dsr\n\t" \
  26 + "ldc.l @r2+, rs\n\t" \
  27 + "ldc.l @r2+, re\n\t" \
  28 + "ldc.l @r2+, mod\n\t" \
  29 + : : "r" (__ts2)); \
  30 +} while (0)
  31 +
  32 +#define __save_dsp(tsk) \
  33 +do { \
  34 + register u32 *__ts2 __asm__ ("r2") = \
  35 + (u32 *)&tsk->thread.dsp_status + 14; \
  36 + \
  37 + __asm__ __volatile__ ( \
  38 + ".balign 4\n\t" \
  39 + "stc.l mod, @-r2\n\t" \
  40 + "stc.l re, @-r2\n\t" \
  41 + "stc.l rs, @-r2\n\t" \
  42 + "sts.l dsr, @-r2\n\t" \
  43 + "movs.l y1, @-r2\n\t" \
  44 + "movs.l y0, @-r2\n\t" \
  45 + "movs.l x1, @-r2\n\t" \
  46 + "movs.l x0, @-r2\n\t" \
  47 + "movs.l m1, @-r2\n\t" \
  48 + "movs.l m0, @-r2\n\t" \
  49 + "movs.l a1g, @-r2\n\t" \
  50 + "movs.l a0g, @-r2\n\t" \
  51 + "movs.l a1, @-r2\n\t" \
  52 + "movs.l a0, @-r2\n\t" \
  53 + : : "r" (__ts2)); \
  54 +} while (0)
  55 +
  56 +#else
  57 +
  58 +#define is_dsp_enabled(tsk) (0)
  59 +#define __save_dsp(tsk) do { } while (0)
  60 +#define __restore_dsp(tsk) do { } while (0)
  61 +#endif
  62 +
  63 +struct task_struct *__switch_to(struct task_struct *prev,
  64 + struct task_struct *next);
  65 +
  66 +/*
  67 + * switch_to() should switch tasks to task nr n, first
  68 + */
  69 +#define switch_to(prev, next, last) \
  70 +do { \
  71 + register u32 *__ts1 __asm__ ("r1"); \
  72 + register u32 *__ts2 __asm__ ("r2"); \
  73 + register u32 *__ts4 __asm__ ("r4"); \
  74 + register u32 *__ts5 __asm__ ("r5"); \
  75 + register u32 *__ts6 __asm__ ("r6"); \
  76 + register u32 __ts7 __asm__ ("r7"); \
  77 + struct task_struct *__last; \
  78 + \
  79 + if (is_dsp_enabled(prev)) \
  80 + __save_dsp(prev); \
  81 + \
  82 + __ts1 = (u32 *)&prev->thread.sp; \
  83 + __ts2 = (u32 *)&prev->thread.pc; \
  84 + __ts4 = (u32 *)prev; \
  85 + __ts5 = (u32 *)next; \
  86 + __ts6 = (u32 *)&next->thread.sp; \
  87 + __ts7 = next->thread.pc; \
  88 + \
  89 + __asm__ __volatile__ ( \
  90 + ".balign 4\n\t" \
  91 + "stc.l gbr, @-r15\n\t" \
  92 + "sts.l pr, @-r15\n\t" \
  93 + "mov.l r8, @-r15\n\t" \
  94 + "mov.l r9, @-r15\n\t" \
  95 + "mov.l r10, @-r15\n\t" \
  96 + "mov.l r11, @-r15\n\t" \
  97 + "mov.l r12, @-r15\n\t" \
  98 + "mov.l r13, @-r15\n\t" \
  99 + "mov.l r14, @-r15\n\t" \
  100 + "mov.l r15, @r1\t! save SP\n\t" \
  101 + "mov.l @r6, r15\t! change to new stack\n\t" \
  102 + "mova 1f, %0\n\t" \
  103 + "mov.l %0, @r2\t! save PC\n\t" \
  104 + "mov.l 2f, %0\n\t" \
  105 + "jmp @%0\t! call __switch_to\n\t" \
  106 + " lds r7, pr\t! with return to new PC\n\t" \
  107 + ".balign 4\n" \
  108 + "2:\n\t" \
  109 + ".long __switch_to\n" \
  110 + "1:\n\t" \
  111 + "mov.l @r15+, r14\n\t" \
  112 + "mov.l @r15+, r13\n\t" \
  113 + "mov.l @r15+, r12\n\t" \
  114 + "mov.l @r15+, r11\n\t" \
  115 + "mov.l @r15+, r10\n\t" \
  116 + "mov.l @r15+, r9\n\t" \
  117 + "mov.l @r15+, r8\n\t" \
  118 + "lds.l @r15+, pr\n\t" \
  119 + "ldc.l @r15+, gbr\n\t" \
  120 + : "=z" (__last) \
  121 + : "r" (__ts1), "r" (__ts2), "r" (__ts4), \
  122 + "r" (__ts5), "r" (__ts6), "r" (__ts7) \
  123 + : "r3", "t"); \
  124 + \
  125 + last = __last; \
  126 +} while (0)
  127 +
  128 +#define finish_arch_switch(prev) \
  129 +do { \
  130 + if (is_dsp_enabled(prev)) \
  131 + __restore_dsp(prev); \
  132 +} while (0)
  133 +
  134 +#endif /* __ASM_SH_SWITCH_TO_32_H */
arch/sh/include/asm/switch_to_64.h
  1 +/*
  2 + * Copyright (C) 2000, 2001 Paolo Alberelli
  3 + * Copyright (C) 2003 Paul Mundt
  4 + * Copyright (C) 2004 Richard Curnow
  5 + *
  6 + * This file is subject to the terms and conditions of the GNU General Public
  7 + * License. See the file "COPYING" in the main directory of this archive
  8 + * for more details.
  9 + */
  10 +#ifndef __ASM_SH_SWITCH_TO_64_H
  11 +#define __ASM_SH_SWITCH_TO_64_H
  12 +
  13 +struct thread_struct;
  14 +struct task_struct;
  15 +
  16 +/*
  17 + * switch_to() should switch tasks to task nr n, first
  18 + */
  19 +struct task_struct *sh64_switch_to(struct task_struct *prev,
  20 + struct thread_struct *prev_thread,
  21 + struct task_struct *next,
  22 + struct thread_struct *next_thread);
  23 +
  24 +#define switch_to(prev,next,last) \
  25 +do { \
  26 + if (last_task_used_math != next) { \
  27 + struct pt_regs *regs = next->thread.uregs; \
  28 + if (regs) regs->sr |= SR_FD; \
  29 + } \
  30 + last = sh64_switch_to(prev, &prev->thread, next, \
  31 + &next->thread); \
  32 +} while (0)
  33 +
  34 +
  35 +#endif /* __ASM_SH_SWITCH_TO_64_H */
arch/sh/include/asm/system.h
1   -#ifndef __ASM_SH_SYSTEM_H
2   -#define __ASM_SH_SYSTEM_H
3   -
4   -/*
5   - * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
6   - * Copyright (C) 2002 Paul Mundt
7   - */
8   -
9   -#include <linux/irqflags.h>
10   -#include <linux/compiler.h>
11   -#include <linux/linkage.h>
12   -#include <asm/types.h>
13   -#include <asm/uncached.h>
14   -
15   -#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
16   -
17   -/*
18   - * A brief note on ctrl_barrier(), the control register write barrier.
19   - *
20   - * Legacy SH cores typically require a sequence of 8 nops after
21   - * modification of a control register in order for the changes to take
22   - * effect. On newer cores (like the sh4a and sh5) this is accomplished
23   - * with icbi.
24   - *
25   - * Also note that on sh4a in the icbi case we can forego a synco for the
26   - * write barrier, as it's not necessary for control registers.
27   - *
28   - * Historically we have only done this type of barrier for the MMUCR, but
29   - * it's also necessary for the CCR, so we make it generic here instead.
30   - */
31   -#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
32   -#define mb() __asm__ __volatile__ ("synco": : :"memory")
33   -#define rmb() mb()
34   -#define wmb() __asm__ __volatile__ ("synco": : :"memory")
35   -#define ctrl_barrier() __icbi(PAGE_OFFSET)
36   -#define read_barrier_depends() do { } while(0)
37   -#else
38   -#define mb() __asm__ __volatile__ ("": : :"memory")
39   -#define rmb() mb()
40   -#define wmb() __asm__ __volatile__ ("": : :"memory")
41   -#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
42   -#define read_barrier_depends() do { } while(0)
43   -#endif
44   -
45   -#ifdef CONFIG_SMP
46   -#define smp_mb() mb()
47   -#define smp_rmb() rmb()
48   -#define smp_wmb() wmb()
49   -#define smp_read_barrier_depends() read_barrier_depends()
50   -#else
51   -#define smp_mb() barrier()
52   -#define smp_rmb() barrier()
53   -#define smp_wmb() barrier()
54   -#define smp_read_barrier_depends() do { } while(0)
55   -#endif
56   -
57   -#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
58   -
59   -#ifdef CONFIG_GUSA_RB
60   -#include <asm/cmpxchg-grb.h>
61   -#elif defined(CONFIG_CPU_SH4A)
62   -#include <asm/cmpxchg-llsc.h>
63   -#else
64   -#include <asm/cmpxchg-irq.h>
65   -#endif
66   -
67   -extern void __xchg_called_with_bad_pointer(void);
68   -
69   -#define __xchg(ptr, x, size) \
70   -({ \
71   - unsigned long __xchg__res; \
72   - volatile void *__xchg_ptr = (ptr); \
73   - switch (size) { \
74   - case 4: \
75   - __xchg__res = xchg_u32(__xchg_ptr, x); \
76   - break; \
77   - case 1: \
78   - __xchg__res = xchg_u8(__xchg_ptr, x); \
79   - break; \
80   - default: \
81   - __xchg_called_with_bad_pointer(); \
82   - __xchg__res = x; \
83   - break; \
84   - } \
85   - \
86   - __xchg__res; \
87   -})
88   -
89   -#define xchg(ptr,x) \
90   - ((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
91   -
92   -/* This function doesn't exist, so you'll get a linker error
93   - * if something tries to do an invalid cmpxchg(). */
94   -extern void __cmpxchg_called_with_bad_pointer(void);
95   -
96   -#define __HAVE_ARCH_CMPXCHG 1
97   -
98   -static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
99   - unsigned long new, int size)
100   -{
101   - switch (size) {
102   - case 4:
103   - return __cmpxchg_u32(ptr, old, new);
104   - }
105   - __cmpxchg_called_with_bad_pointer();
106   - return old;
107   -}
108   -
109   -#define cmpxchg(ptr,o,n) \
110   - ({ \
111   - __typeof__(*(ptr)) _o_ = (o); \
112   - __typeof__(*(ptr)) _n_ = (n); \
113   - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
114   - (unsigned long)_n_, sizeof(*(ptr))); \
115   - })
116   -
117   -struct pt_regs;
118   -
119   -extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
  1 +/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
  2 +#include <asm/barrier.h>
  3 +#include <asm/bl_bit.h>
  4 +#include <asm/cache_insns.h>
  5 +#include <asm/cmpxchg.h>
  6 +#include <asm/exec.h>
  7 +#include <asm/switch_to.h>
  8 +#include <asm/traps.h>
120 9 void free_initmem(void);
121   -void free_initrd_mem(unsigned long start, unsigned long end);
122   -
123   -extern void *set_exception_table_vec(unsigned int vec, void *handler);
124   -
125   -static inline void *set_exception_table_evt(unsigned int evt, void *handler)
126   -{
127   - return set_exception_table_vec(evt >> 5, handler);
128   -}
129   -
130   -/*
131   - * SH-2A has both 16 and 32-bit opcodes, do lame encoding checks.
132   - */
133   -#ifdef CONFIG_CPU_SH2A
134   -extern unsigned int instruction_size(unsigned int insn);
135   -#elif defined(CONFIG_SUPERH32)
136   -#define instruction_size(insn) (2)
137   -#else
138   -#define instruction_size(insn) (4)
139   -#endif
140   -
141   -void per_cpu_trap_init(void);
142   -void default_idle(void);
143   -void cpu_idle_wait(void);
144   -void stop_this_cpu(void *);
145   -
146   -#ifdef CONFIG_SUPERH32
147   -#define BUILD_TRAP_HANDLER(name) \
148   -asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \
149   - unsigned long r6, unsigned long r7, \
150   - struct pt_regs __regs)
151   -
152   -#define TRAP_HANDLER_DECL \
153   - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); \
154   - unsigned int vec = regs->tra; \
155   - (void)vec;
156   -#else
157   -#define BUILD_TRAP_HANDLER(name) \
158   -asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs)
159   -#define TRAP_HANDLER_DECL
160   -#endif
161   -
162   -BUILD_TRAP_HANDLER(address_error);
163   -BUILD_TRAP_HANDLER(debug);
164   -BUILD_TRAP_HANDLER(bug);
165   -BUILD_TRAP_HANDLER(breakpoint);
166   -BUILD_TRAP_HANDLER(singlestep);
167   -BUILD_TRAP_HANDLER(fpu_error);
168   -BUILD_TRAP_HANDLER(fpu_state_restore);
169   -BUILD_TRAP_HANDLER(nmi);
170   -
171   -#define arch_align_stack(x) (x)
172   -
173   -struct mem_access {
174   - unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt);
175   - unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt);
176   -};
177   -
178   -#ifdef CONFIG_SUPERH32
179   -# include "system_32.h"
180   -#else
181   -# include "system_64.h"
182   -#endif
183   -
184   -#endif
arch/sh/include/asm/system_32.h
1   -#ifndef __ASM_SH_SYSTEM_32_H
2   -#define __ASM_SH_SYSTEM_32_H
3   -
4   -#include <linux/types.h>
5   -#include <asm/mmu.h>
6   -
7   -#ifdef CONFIG_SH_DSP
8   -
9   -#define is_dsp_enabled(tsk) \
10   - (!!(tsk->thread.dsp_status.status & SR_DSP))
11   -
12   -#define __restore_dsp(tsk) \
13   -do { \
14   - register u32 *__ts2 __asm__ ("r2") = \
15   - (u32 *)&tsk->thread.dsp_status; \
16   - __asm__ __volatile__ ( \
17   - ".balign 4\n\t" \
18   - "movs.l @r2+, a0\n\t" \
19   - "movs.l @r2+, a1\n\t" \
20   - "movs.l @r2+, a0g\n\t" \
21   - "movs.l @r2+, a1g\n\t" \
22   - "movs.l @r2+, m0\n\t" \
23   - "movs.l @r2+, m1\n\t" \
24   - "movs.l @r2+, x0\n\t" \
25   - "movs.l @r2+, x1\n\t" \
26   - "movs.l @r2+, y0\n\t" \
27   - "movs.l @r2+, y1\n\t" \
28   - "lds.l @r2+, dsr\n\t" \
29   - "ldc.l @r2+, rs\n\t" \
30   - "ldc.l @r2+, re\n\t" \
31   - "ldc.l @r2+, mod\n\t" \
32   - : : "r" (__ts2)); \
33   -} while (0)
34   -
35   -
36   -#define __save_dsp(tsk) \
37   -do { \
38   - register u32 *__ts2 __asm__ ("r2") = \
39   - (u32 *)&tsk->thread.dsp_status + 14; \
40   - \
41   - __asm__ __volatile__ ( \
42   - ".balign 4\n\t" \
43   - "stc.l mod, @-r2\n\t" \
44   - "stc.l re, @-r2\n\t" \
45   - "stc.l rs, @-r2\n\t" \
46   - "sts.l dsr, @-r2\n\t" \
47   - "movs.l y1, @-r2\n\t" \
48   - "movs.l y0, @-r2\n\t" \
49   - "movs.l x1, @-r2\n\t" \
50   - "movs.l x0, @-r2\n\t" \
51   - "movs.l m1, @-r2\n\t" \
52   - "movs.l m0, @-r2\n\t" \
53   - "movs.l a1g, @-r2\n\t" \
54   - "movs.l a0g, @-r2\n\t" \
55   - "movs.l a1, @-r2\n\t" \
56   - "movs.l a0, @-r2\n\t" \
57   - : : "r" (__ts2)); \
58   -} while (0)
59   -
60   -#else
61   -
62   -#define is_dsp_enabled(tsk) (0)
63   -#define __save_dsp(tsk) do { } while (0)
64   -#define __restore_dsp(tsk) do { } while (0)
65   -#endif
66   -
67   -#if defined(CONFIG_CPU_SH4A)
68   -#define __icbi(addr) __asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
69   -#else
70   -#define __icbi(addr) mb()
71   -#endif
72   -
73   -#define __ocbp(addr) __asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
74   -#define __ocbi(addr) __asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
75   -#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))
76   -
77   -struct task_struct *__switch_to(struct task_struct *prev,
78   - struct task_struct *next);
79   -
80   -/*
81   - * switch_to() should switch tasks to task nr n, first
82   - */
83   -#define switch_to(prev, next, last) \
84   -do { \
85   - register u32 *__ts1 __asm__ ("r1"); \
86   - register u32 *__ts2 __asm__ ("r2"); \
87   - register u32 *__ts4 __asm__ ("r4"); \
88   - register u32 *__ts5 __asm__ ("r5"); \
89   - register u32 *__ts6 __asm__ ("r6"); \
90   - register u32 __ts7 __asm__ ("r7"); \
91   - struct task_struct *__last; \
92   - \
93   - if (is_dsp_enabled(prev)) \
94   - __save_dsp(prev); \
95   - \
96   - __ts1 = (u32 *)&prev->thread.sp; \
97   - __ts2 = (u32 *)&prev->thread.pc; \
98   - __ts4 = (u32 *)prev; \
99   - __ts5 = (u32 *)next; \
100   - __ts6 = (u32 *)&next->thread.sp; \
101   - __ts7 = next->thread.pc; \
102   - \
103   - __asm__ __volatile__ ( \
104   - ".balign 4\n\t" \
105   - "stc.l gbr, @-r15\n\t" \
106   - "sts.l pr, @-r15\n\t" \
107   - "mov.l r8, @-r15\n\t" \
108   - "mov.l r9, @-r15\n\t" \
109   - "mov.l r10, @-r15\n\t" \
110   - "mov.l r11, @-r15\n\t" \
111   - "mov.l r12, @-r15\n\t" \
112   - "mov.l r13, @-r15\n\t" \
113   - "mov.l r14, @-r15\n\t" \
114   - "mov.l r15, @r1\t! save SP\n\t" \
115   - "mov.l @r6, r15\t! change to new stack\n\t" \
116   - "mova 1f, %0\n\t" \
117   - "mov.l %0, @r2\t! save PC\n\t" \
118   - "mov.l 2f, %0\n\t" \
119   - "jmp @%0\t! call __switch_to\n\t" \
120   - " lds r7, pr\t! with return to new PC\n\t" \
121   - ".balign 4\n" \
122   - "2:\n\t" \
123   - ".long __switch_to\n" \
124   - "1:\n\t" \
125   - "mov.l @r15+, r14\n\t" \
126   - "mov.l @r15+, r13\n\t" \
127   - "mov.l @r15+, r12\n\t" \
128   - "mov.l @r15+, r11\n\t" \
129   - "mov.l @r15+, r10\n\t" \
130   - "mov.l @r15+, r9\n\t" \
131   - "mov.l @r15+, r8\n\t" \
132   - "lds.l @r15+, pr\n\t" \
133   - "ldc.l @r15+, gbr\n\t" \
134   - : "=z" (__last) \
135   - : "r" (__ts1), "r" (__ts2), "r" (__ts4), \
136   - "r" (__ts5), "r" (__ts6), "r" (__ts7) \
137   - : "r3", "t"); \
138   - \
139   - last = __last; \
140   -} while (0)
141   -
142   -#define finish_arch_switch(prev) \
143   -do { \
144   - if (is_dsp_enabled(prev)) \
145   - __restore_dsp(prev); \
146   -} while (0)
147   -
148   -#ifdef CONFIG_CPU_HAS_SR_RB
149   -#define lookup_exception_vector() \
150   -({ \
151   - unsigned long _vec; \
152   - \
153   - __asm__ __volatile__ ( \
154   - "stc r2_bank, %0\n\t" \
155   - : "=r" (_vec) \
156   - ); \
157   - \
158   - _vec; \
159   -})
160   -#else
161   -#define lookup_exception_vector() \
162   -({ \
163   - unsigned long _vec; \
164   - __asm__ __volatile__ ( \
165   - "mov r4, %0\n\t" \
166   - : "=r" (_vec) \
167   - ); \
168   - \
169   - _vec; \
170   -})
171   -#endif
172   -
173   -static inline reg_size_t register_align(void *val)
174   -{
175   - return (unsigned long)(signed long)val;
176   -}
177   -
178   -int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
179   - struct mem_access *ma, int, unsigned long address);
180   -
181   -static inline void trigger_address_error(void)
182   -{
183   - __asm__ __volatile__ (
184   - "ldc %0, sr\n\t"
185   - "mov.l @%1, %0"
186   - :
187   - : "r" (0x10000000), "r" (0x80000001)
188   - );
189   -}
190   -
191   -asmlinkage void do_address_error(struct pt_regs *regs,
192   - unsigned long writeaccess,
193   - unsigned long address);
194   -asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
195   - unsigned long r6, unsigned long r7,
196   - struct pt_regs __regs);
197   -asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
198   - unsigned long r6, unsigned long r7,
199   - struct pt_regs __regs);
200   -asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
201   - unsigned long r6, unsigned long r7,
202   - struct pt_regs __regs);
203   -asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
204   - unsigned long r6, unsigned long r7,
205   - struct pt_regs __regs);
206   -
207   -static inline void set_bl_bit(void)
208   -{
209   - unsigned long __dummy0, __dummy1;
210   -
211   - __asm__ __volatile__ (
212   - "stc sr, %0\n\t"
213   - "or %2, %0\n\t"
214   - "and %3, %0\n\t"
215   - "ldc %0, sr\n\t"
216   - : "=&r" (__dummy0), "=r" (__dummy1)
217   - : "r" (0x10000000), "r" (0xffffff0f)
218   - : "memory"
219   - );
220   -}
221   -
222   -static inline void clear_bl_bit(void)
223   -{
224   - unsigned long __dummy0, __dummy1;
225   -
226   - __asm__ __volatile__ (
227   - "stc sr, %0\n\t"
228   - "and %2, %0\n\t"
229   - "ldc %0, sr\n\t"
230   - : "=&r" (__dummy0), "=r" (__dummy1)
231   - : "1" (~0x10000000)
232   - : "memory"
233   - );
234   -}
235   -
236   -#endif /* __ASM_SH_SYSTEM_32_H */
arch/sh/include/asm/system_64.h
1   -#ifndef __ASM_SH_SYSTEM_64_H
2   -#define __ASM_SH_SYSTEM_64_H
3   -
4   -/*
5   - * include/asm-sh/system_64.h
6   - *
7   - * Copyright (C) 2000, 2001 Paolo Alberelli
8   - * Copyright (C) 2003 Paul Mundt
9   - * Copyright (C) 2004 Richard Curnow
10   - *
11   - * This file is subject to the terms and conditions of the GNU General Public
12   - * License. See the file "COPYING" in the main directory of this archive
13   - * for more details.
14   - */
15   -#include <cpu/registers.h>
16   -#include <asm/processor.h>
17   -
18   -/*
19   - * switch_to() should switch tasks to task nr n, first
20   - */
21   -struct thread_struct;
22   -struct task_struct *sh64_switch_to(struct task_struct *prev,
23   - struct thread_struct *prev_thread,
24   - struct task_struct *next,
25   - struct thread_struct *next_thread);
26   -
27   -#define switch_to(prev,next,last) \
28   -do { \
29   - if (last_task_used_math != next) { \
30   - struct pt_regs *regs = next->thread.uregs; \
31   - if (regs) regs->sr |= SR_FD; \
32   - } \
33   - last = sh64_switch_to(prev, &prev->thread, next, \
34   - &next->thread); \
35   -} while (0)
36   -
37   -#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
38   -#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
39   -#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
40   -#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
41   -
42   -static inline reg_size_t register_align(void *val)
43   -{
44   - return (unsigned long long)(signed long long)(signed long)val;
45   -}
46   -
47   -extern void phys_stext(void);
48   -
49   -static inline void trigger_address_error(void)
50   -{
51   - phys_stext();
52   -}
53   -
54   -#define SR_BL_LL 0x0000000010000000LL
55   -
56   -static inline void set_bl_bit(void)
57   -{
58   - unsigned long long __dummy0, __dummy1 = SR_BL_LL;
59   -
60   - __asm__ __volatile__("getcon " __SR ", %0\n\t"
61   - "or %0, %1, %0\n\t"
62   - "putcon %0, " __SR "\n\t"
63   - : "=&r" (__dummy0)
64   - : "r" (__dummy1));
65   -
66   -}
67   -
68   -static inline void clear_bl_bit(void)
69   -{
70   - unsigned long long __dummy0, __dummy1 = ~SR_BL_LL;
71   -
72   - __asm__ __volatile__("getcon " __SR ", %0\n\t"
73   - "and %0, %1, %0\n\t"
74   - "putcon %0, " __SR "\n\t"
75   - : "=&r" (__dummy0)
76   - : "r" (__dummy1));
77   -}
78   -
79   -#endif /* __ASM_SH_SYSTEM_64_H */
arch/sh/include/asm/traps.h
  1 +#ifndef __ASM_SH_TRAPS_H
  2 +#define __ASM_SH_TRAPS_H
  3 +
  4 +#include <linux/compiler.h>
  5 +
  6 +#ifdef CONFIG_SUPERH32
  7 +# include "traps_32.h"
  8 +#else
  9 +# include "traps_64.h"
  10 +#endif
  11 +
  12 +BUILD_TRAP_HANDLER(address_error);
  13 +BUILD_TRAP_HANDLER(debug);
  14 +BUILD_TRAP_HANDLER(bug);
  15 +BUILD_TRAP_HANDLER(breakpoint);
  16 +BUILD_TRAP_HANDLER(singlestep);
  17 +BUILD_TRAP_HANDLER(fpu_error);
  18 +BUILD_TRAP_HANDLER(fpu_state_restore);
  19 +BUILD_TRAP_HANDLER(nmi);
  20 +
  21 +#endif /* __ASM_SH_TRAPS_H */
arch/sh/include/asm/traps_32.h
  1 +#ifndef __ASM_SH_TRAPS_32_H
  2 +#define __ASM_SH_TRAPS_32_H
  3 +
  4 +#include <linux/types.h>
  5 +#include <asm/mmu.h>
  6 +
  7 +#ifdef CONFIG_CPU_HAS_SR_RB
  8 +#define lookup_exception_vector() \
  9 +({ \
  10 + unsigned long _vec; \
  11 + \
  12 + __asm__ __volatile__ ( \
  13 + "stc r2_bank, %0\n\t" \
  14 + : "=r" (_vec) \
  15 + ); \
  16 + \
  17 + _vec; \
  18 +})
  19 +#else
  20 +#define lookup_exception_vector() \
  21 +({ \
  22 + unsigned long _vec; \
  23 + __asm__ __volatile__ ( \
  24 + "mov r4, %0\n\t" \
  25 + : "=r" (_vec) \
  26 + ); \
  27 + \
  28 + _vec; \
  29 +})
  30 +#endif
  31 +
  32 +static inline void trigger_address_error(void)
  33 +{
  34 + __asm__ __volatile__ (
  35 + "ldc %0, sr\n\t"
  36 + "mov.l @%1, %0"
  37 + :
  38 + : "r" (0x10000000), "r" (0x80000001)
  39 + );
  40 +}
  41 +
  42 +asmlinkage void do_address_error(struct pt_regs *regs,
  43 + unsigned long writeaccess,
  44 + unsigned long address);
  45 +asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
  46 + unsigned long r6, unsigned long r7,
  47 + struct pt_regs __regs);
  48 +asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
  49 + unsigned long r6, unsigned long r7,
  50 + struct pt_regs __regs);
  51 +asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
  52 + unsigned long r6, unsigned long r7,
  53 + struct pt_regs __regs);
  54 +asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
  55 + unsigned long r6, unsigned long r7,
  56 + struct pt_regs __regs);
  57 +
  58 +#define BUILD_TRAP_HANDLER(name) \
  59 +asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \
  60 + unsigned long r6, unsigned long r7, \
  61 + struct pt_regs __regs)
  62 +
  63 +#define TRAP_HANDLER_DECL \
  64 + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); \
  65 + unsigned int vec = regs->tra; \
  66 + (void)vec;
  67 +
  68 +#endif /* __ASM_SH_TRAPS_32_H */
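
BUILD_TRAP_HANDLER() gives every trap handler the same definition shape on both ABIs, and TRAP_HANDLER_DECL supplies 'regs' and 'vec' on SUPERH32 (on sh64 they are already parameters). A minimal sketch of a handler definition, assuming a hypothetical 'example' trap:

    #include <linux/kernel.h>
    #include <linux/linkage.h>
    #include <asm/traps.h>

    BUILD_TRAP_HANDLER(example)
    {
    	TRAP_HANDLER_DECL;	/* derives regs/vec on SUPERH32; empty on sh64 */

    	printk(KERN_INFO "example trap %u at pc=%08lx\n",
    	       vec, (unsigned long)regs->pc);
    }
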
arch/sh/include/asm/traps_64.h
  1 +/*
  2 + * Copyright (C) 2000, 2001 Paolo Alberelli
  3 + * Copyright (C) 2003 Paul Mundt
  4 + * Copyright (C) 2004 Richard Curnow
  5 + *
  6 + * This file is subject to the terms and conditions of the GNU General Public
  7 + * License. See the file "COPYING" in the main directory of this archive
  8 + * for more details.
  9 + */
  10 +#ifndef __ASM_SH_TRAPS_64_H
  11 +#define __ASM_SH_TRAPS_64_H
  12 +
  13 +extern void phys_stext(void);
  14 +
  15 +static inline void trigger_address_error(void)
  16 +{
  17 + phys_stext();
  18 +}
  19 +
  20 +#define BUILD_TRAP_HANDLER(name) \
  21 +asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs)
  22 +#define TRAP_HANDLER_DECL
  23 +
  24 +#endif /* __ASM_SH_TRAPS_64_H */
arch/sh/include/asm/uaccess.h
... ... @@ -254,6 +254,20 @@
254 254 unsigned long search_exception_table(unsigned long addr);
255 255 const struct exception_table_entry *search_exception_tables(unsigned long addr);
256 256  
  257 +extern void *set_exception_table_vec(unsigned int vec, void *handler);
  258 +
  259 +static inline void *set_exception_table_evt(unsigned int evt, void *handler)
  260 +{
  261 + return set_exception_table_vec(evt >> 5, handler);
  262 +}
  263 +
  264 +struct mem_access {
  265 + unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt);
  266 + unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt);
  267 +};
  268 +
  269 +int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
  270 + struct mem_access *ma, int, unsigned long address);
257 271  
258 272 #endif /* __ASM_SH_UACCESS_H */
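
handle_unaligned_access() takes a struct mem_access so the same fixup code can copy through either user-space or kernel accessors. A minimal sketch of an instance wired to the user-copy routines, in the spirit of what the alignment-fault path does (the variable name is invented for illustration):

    #include <asm/uaccess.h>

    static struct mem_access example_user_access = {
    	.from	= copy_from_user,
    	.to	= copy_to_user,
    };
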
arch/sh/kernel/cpu/init.c
... ... @@ -18,13 +18,13 @@
18 18 #include <asm/processor.h>
19 19 #include <asm/uaccess.h>
20 20 #include <asm/page.h>
21   -#include <asm/system.h>
22 21 #include <asm/cacheflush.h>
23 22 #include <asm/cache.h>
24 23 #include <asm/elf.h>
25 24 #include <asm/io.h>
26 25 #include <asm/smp.h>
27 26 #include <asm/sh_bios.h>
  27 +#include <asm/setup.h>
28 28  
29 29 #ifdef CONFIG_SH_FPU
30 30 #define cpu_has_fpu 1
arch/sh/kernel/cpu/irq/imask.c
... ... @@ -19,7 +19,6 @@
19 19 #include <linux/cache.h>
20 20 #include <linux/irq.h>
21 21 #include <linux/bitmap.h>
22   -#include <asm/system.h>
23 22 #include <asm/irq.h>
24 23  
25 24 /* Bitmap of IRQ masked */
arch/sh/kernel/cpu/sh2a/opcode_helper.c
... ... @@ -10,7 +10,6 @@
10 10 * for more details.
11 11 */
12 12 #include <linux/kernel.h>
13   -#include <asm/system.h>
14 13  
15 14 /*
16 15 * Instructions on SH are generally fixed at 16-bits, however, SH-2A
arch/sh/kernel/cpu/sh4/fpu.c
... ... @@ -15,7 +15,6 @@
15 15 #include <linux/io.h>
16 16 #include <cpu/fpu.h>
17 17 #include <asm/processor.h>
18   -#include <asm/system.h>
19 18 #include <asm/fpu.h>
20 19  
21 20 /* The PR (precision) bit in the FP Status Register must be clear when
arch/sh/kernel/hw_breakpoint.c
... ... @@ -22,6 +22,7 @@
22 22 #include <asm/hw_breakpoint.h>
23 23 #include <asm/mmu_context.h>
24 24 #include <asm/ptrace.h>
  25 +#include <asm/traps.h>
25 26  
26 27 /*
27 28 * Stores the breakpoints currently in use on each breakpoint address
arch/sh/kernel/idle.c
... ... @@ -18,9 +18,9 @@
18 18 #include <linux/smp.h>
19 19 #include <linux/cpuidle.h>
20 20 #include <asm/pgalloc.h>
21   -#include <asm/system.h>
22 21 #include <linux/atomic.h>
23 22 #include <asm/smp.h>
  23 +#include <asm/bl_bit.h>
24 24  
25 25 void (*pm_idle)(void);
26 26  
arch/sh/kernel/io_trapped.c
... ... @@ -15,7 +15,6 @@
15 15 #include <linux/vmalloc.h>
16 16 #include <linux/module.h>
17 17 #include <linux/init.h>
18   -#include <asm/system.h>
19 18 #include <asm/mmu_context.h>
20 19 #include <asm/uaccess.h>
21 20 #include <asm/io.h>
arch/sh/kernel/process_32.c
... ... @@ -24,7 +24,6 @@
24 24 #include <linux/prefetch.h>
25 25 #include <asm/uaccess.h>
26 26 #include <asm/mmu_context.h>
27   -#include <asm/system.h>
28 27 #include <asm/fpu.h>
29 28 #include <asm/syscalls.h>
30 29  
arch/sh/kernel/process_64.c
... ... @@ -30,6 +30,7 @@
30 30 #include <asm/pgtable.h>
31 31 #include <asm/mmu_context.h>
32 32 #include <asm/fpu.h>
  33 +#include <asm/switch_to.h>
33 34  
34 35 struct task_struct *last_task_used_math = NULL;
35 36  
arch/sh/kernel/ptrace_32.c
... ... @@ -28,7 +28,6 @@
28 28 #include <linux/hw_breakpoint.h>
29 29 #include <asm/uaccess.h>
30 30 #include <asm/pgtable.h>
31   -#include <asm/system.h>
32 31 #include <asm/processor.h>
33 32 #include <asm/mmu_context.h>
34 33 #include <asm/syscalls.h>
arch/sh/kernel/ptrace_64.c
... ... @@ -34,11 +34,11 @@
34 34 #include <asm/io.h>
35 35 #include <asm/uaccess.h>
36 36 #include <asm/pgtable.h>
37   -#include <asm/system.h>
38 37 #include <asm/processor.h>
39 38 #include <asm/mmu_context.h>
40 39 #include <asm/syscalls.h>
41 40 #include <asm/fpu.h>
  41 +#include <asm/traps.h>
42 42  
43 43 #define CREATE_TRACE_POINTS
44 44 #include <trace/events/syscalls.h>
arch/sh/kernel/reboot.c
... ... @@ -8,8 +8,8 @@
8 8 #endif
9 9 #include <asm/addrspace.h>
10 10 #include <asm/reboot.h>
11   -#include <asm/system.h>
12 11 #include <asm/tlbflush.h>
  12 +#include <asm/traps.h>
13 13  
14 14 void (*pm_power_off)(void);
15 15 EXPORT_SYMBOL(pm_power_off);
arch/sh/kernel/signal_32.c
... ... @@ -25,7 +25,6 @@
25 25 #include <linux/freezer.h>
26 26 #include <linux/io.h>
27 27 #include <linux/tracehook.h>
28   -#include <asm/system.h>
29 28 #include <asm/ucontext.h>
30 29 #include <asm/uaccess.h>
31 30 #include <asm/pgtable.h>
arch/sh/kernel/smp.c
... ... @@ -23,7 +23,6 @@
23 23 #include <linux/sched.h>
24 24 #include <linux/atomic.h>
25 25 #include <asm/processor.h>
26   -#include <asm/system.h>
27 26 #include <asm/mmu_context.h>
28 27 #include <asm/smp.h>
29 28 #include <asm/cacheflush.h>
arch/sh/kernel/traps.c
... ... @@ -7,7 +7,7 @@
7 7 #include <linux/uaccess.h>
8 8 #include <linux/hardirq.h>
9 9 #include <asm/unwinder.h>
10   -#include <asm/system.h>
  10 +#include <asm/traps.h>
11 11  
12 12 #ifdef CONFIG_GENERIC_BUG
13 13 static void handle_BUG(struct pt_regs *regs)
arch/sh/kernel/traps_32.c
... ... @@ -27,10 +27,11 @@
27 27 #include <linux/sysfs.h>
28 28 #include <linux/uaccess.h>
29 29 #include <linux/perf_event.h>
30   -#include <asm/system.h>
31 30 #include <asm/alignment.h>
32 31 #include <asm/fpu.h>
33 32 #include <asm/kprobes.h>
  33 +#include <asm/traps.h>
  34 +#include <asm/bl_bit.h>
34 35  
35 36 #ifdef CONFIG_CPU_SH2
36 37 # define TRAP_RESERVED_INST 4
arch/sh/kernel/traps_64.c
... ... @@ -25,7 +25,6 @@
25 25 #include <linux/sysctl.h>
26 26 #include <linux/module.h>
27 27 #include <linux/perf_event.h>
28   -#include <asm/system.h>
29 28 #include <asm/uaccess.h>
30 29 #include <asm/io.h>
31 30 #include <linux/atomic.h>
arch/sh/math-emu/math.c
... ... @@ -14,7 +14,6 @@
14 14 #include <linux/signal.h>
15 15 #include <linux/perf_event.h>
16 16  
17   -#include <asm/system.h>
18 17 #include <asm/uaccess.h>
19 18 #include <asm/processor.h>
20 19 #include <asm/io.h>
arch/sh/mm/fault_32.c
... ... @@ -17,9 +17,9 @@
17 17 #include <linux/kprobes.h>
18 18 #include <linux/perf_event.h>
19 19 #include <asm/io_trapped.h>
20   -#include <asm/system.h>
21 20 #include <asm/mmu_context.h>
22 21 #include <asm/tlbflush.h>
  22 +#include <asm/traps.h>
23 23  
24 24 static inline int notify_page_fault(struct pt_regs *regs, int trap)
25 25 {
arch/sh/mm/fault_64.c
... ... @@ -33,7 +33,6 @@
33 33 #include <linux/mm.h>
34 34 #include <linux/smp.h>
35 35 #include <linux/interrupt.h>
36   -#include <asm/system.h>
37 36 #include <asm/tlb.h>
38 37 #include <asm/io.h>
39 38 #include <asm/uaccess.h>
arch/sh/mm/flush-sh4.c
1 1 #include <linux/mm.h>
2 2 #include <asm/mmu_context.h>
3 3 #include <asm/cacheflush.h>
  4 +#include <asm/traps.h>
4 5  
5 6 /*
6 7 * Write back the dirty D-caches, but not invalidate them.
arch/sh/mm/pmb.c
... ... @@ -25,7 +25,6 @@
25 25 #include <linux/vmalloc.h>
26 26 #include <asm/cacheflush.h>
27 27 #include <asm/sizes.h>
28   -#include <asm/system.h>
29 28 #include <asm/uaccess.h>
30 29 #include <asm/pgtable.h>
31 30 #include <asm/page.h>
arch/sh/mm/tlb-pteaex.c
... ... @@ -12,7 +12,6 @@
12 12 #include <linux/kernel.h>
13 13 #include <linux/mm.h>
14 14 #include <linux/io.h>
15   -#include <asm/system.h>
16 15 #include <asm/mmu_context.h>
17 16 #include <asm/cacheflush.h>
18 17  
arch/sh/mm/tlb-sh3.c
... ... @@ -20,7 +20,6 @@
20 20 #include <linux/smp.h>
21 21 #include <linux/interrupt.h>
22 22  
23   -#include <asm/system.h>
24 23 #include <asm/io.h>
25 24 #include <asm/uaccess.h>
26 25 #include <asm/pgalloc.h>
arch/sh/mm/tlb-sh4.c
... ... @@ -11,7 +11,6 @@
11 11 #include <linux/kernel.h>
12 12 #include <linux/mm.h>
13 13 #include <linux/io.h>
14   -#include <asm/system.h>
15 14 #include <asm/mmu_context.h>
16 15 #include <asm/cacheflush.h>
17 16  
arch/sh/mm/tlbflush_64.c
... ... @@ -22,7 +22,6 @@
22 22 #include <linux/smp.h>
23 23 #include <linux/perf_event.h>
24 24 #include <linux/interrupt.h>
25   -#include <asm/system.h>
26 25 #include <asm/io.h>
27 26 #include <asm/tlb.h>
28 27 #include <asm/uaccess.h>