Commit a3a85a763c399c0bf483a30d82d2d613e6f94cd3

Authored by Richard Weinberger
1 parent d824d06328

um: Disintegrate asm/system.h

Signed-off-by: Richard Weinberger <richard@nod.at>
Reported-by: Toralf Förster <toralf.foerster@gmx.de>
CC: dhowells@redhat.com

Showing 5 changed files with 84 additions and 136 deletions

arch/um/drivers/mconsole_kern.c
@@ -22,6 +22,7 @@
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
 #include <asm/uaccess.h>
+#include <asm/switch_to.h>
 
 #include "init.h"
 #include "irq_kern.h"
arch/um/include/asm/Kbuild
@@ -1,3 +1,3 @@
 generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
 generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
-generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h
+generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
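
Note (not part of the patch): a generic-y entry tells Kbuild to emit a generated wrapper header rather than carrying a real file under the UML arch tree. Assuming the usual wrapper form, the generated asm/exec.h would reduce to a one-line include:

/* Illustrative content of the Kbuild-generated asm/exec.h wrapper. */
#include <asm-generic/exec.h>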
arch/x86/um/asm/barrier.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_UM_BARRIER_H_
+#define _ASM_UM_BARRIER_H_
+
+#include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
+#include <asm/nops.h>
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+
+#else /* CONFIG_X86_32 */
+
+#define mb() asm volatile("mfence" : : : "memory")
+#define rmb() asm volatile("lfence" : : : "memory")
+#define wmb() asm volatile("sfence" : : : "memory")
+
+#endif /* CONFIG_X86_32 */
+
+#define read_barrier_depends() do { } while (0)
+
+#ifdef CONFIG_SMP
+
+#define smp_mb() mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+#define smp_rmb() rmb()
+#else /* CONFIG_X86_PPRO_FENCE */
+#define smp_rmb() barrier()
+#endif /* CONFIG_X86_PPRO_FENCE */
+
+#ifdef CONFIG_X86_OOSTORE
+#define smp_wmb() wmb()
+#else /* CONFIG_X86_OOSTORE */
+#define smp_wmb() barrier()
+#endif /* CONFIG_X86_OOSTORE */
+
+#define smp_read_barrier_depends() read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+
+#else /* CONFIG_SMP */
+
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use an alternative three way for this if there was one.)
+ */
+static inline void rdtsc_barrier(void)
+{
+	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif
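
Review note, not part of the patch: the smp_* variants above compile to plain compiler barriers on !CONFIG_SMP and to the hardware fences on SMP. A minimal sketch of the producer/consumer pairing these macros exist for (all identifiers below are illustrative, not from this commit):

static int payload;
static int ready;

/* Publisher: make the payload visible before the flag. */
static void publish(int value)
{
	payload = value;
	smp_wmb();		/* order the payload store before the flag store */
	ready = 1;
}

/* Consumer: observe the flag first, then the payload. */
static int consume(void)
{
	if (!ready)
		return -1;	/* nothing published yet */
	smp_rmb();		/* order the flag load before the payload load */
	return payload;
}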
arch/x86/um/asm/switch_to.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_UM_SWITCH_TO_H_
+#define _ASM_UM_SWITCH_TO_H_
+
+extern void *_switch_to(void *prev, void *next, void *last);
+#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
+
+#endif
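
Review note, not part of the patch: switch_to() is invoked by the core scheduler's context_switch(); on UML it simply wraps the _switch_to() helper. Schematically:

	/* In context_switch(), roughly: */
	switch_to(prev, next, prev);
	/* ...which the macro above expands to: */
	prev = _switch_to(prev, next, prev);
	/* afterwards "prev" names the task actually switched away from ("last") */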
arch/x86/um/asm/system.h
@@ -1,135 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_H_
-#define _ASM_X86_SYSTEM_H_
-
-#include <asm/asm.h>
-#include <asm/segment.h>
-#include <asm/cpufeature.h>
-#include <asm/cmpxchg.h>
-#include <asm/nops.h>
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-
-/* entries in ARCH_DLINFO: */
-#ifdef CONFIG_IA32_EMULATION
-# define AT_VECTOR_SIZE_ARCH 2
-#else
-# define AT_VECTOR_SIZE_ARCH 1
-#endif
-
-extern unsigned long arch_align_stack(unsigned long sp);
-
-void default_idle(void);
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- */
-#ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-#else
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence" ::: "memory")
-#endif
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends() do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb() rmb()
-#else
-# define smp_rmb() barrier()
-#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() wmb()
-#else
-# define smp_wmb() barrier()
-#endif
-#define smp_read_barrier_depends() read_barrier_depends()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
-/*
- * Stop RDTSC speculation. This is needed when you need to use RDTSC
- * (or get_cycles or vread that possibly accesses the TSC) in a defined
- * code region.
- *
- * (Could use an alternative three way for this if there was one.)
- */
-static inline void rdtsc_barrier(void)
-{
-	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
-	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
-}
-
-extern void *_switch_to(void *prev, void *next, void *last);
-#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
-
-#endif
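
Review note, not part of the patch: rdtsc_barrier() moves verbatim from system.h into the new barrier.h above. Its typical use is to keep a TSC read from being speculated across the boundaries of the region being timed; a sketch follows (get_cycles() is the kernel's TSC accessor, the other names are made up for illustration):

	unsigned long long t0, t1;

	rdtsc_barrier();		/* earlier work cannot leak past this point */
	t0 = get_cycles();
	rdtsc_barrier();

	work_under_measurement();	/* hypothetical workload being timed */

	rdtsc_barrier();
	t1 = get_cycles();
	rdtsc_barrier();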