Blame view

include/asm-generic/system.h 3.27 KB
aafe4dbed   Arnd Bergmann   asm-generic: add ...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
  /* Generic system definitions, based on MN10300 definitions.
   *
   * It should be possible to use these on really simple architectures,
   * but it serves more as a starting point for new ports.
   *
   * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
   * Written by David Howells (dhowells@redhat.com)
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public Licence
   * as published by the Free Software Foundation; either version
   * 2 of the Licence, or (at your option) any later version.
   */
  #ifndef __ASM_GENERIC_SYSTEM_H
  #define __ASM_GENERIC_SYSTEM_H
aafe4dbed   Arnd Bergmann   asm-generic: add ...
16
17
18
19
20
21
  #ifndef __ASSEMBLY__
  
  #include <linux/types.h>
  #include <linux/irqflags.h>
  
  #include <asm/cmpxchg-local.h>
c66911266   Mathieu Lacage   asm-generic: cmpx...
22
  #include <asm/cmpxchg.h>
aafe4dbed   Arnd Bergmann   asm-generic: add ...
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
  
  /* Opaque forward declaration: only pointers to task_struct are used
   * here, so the full definition need not be pulled in. */
  struct task_struct;

  /* context switching is now performed out-of-line in switch_to.S */
  extern struct task_struct *__switch_to(struct task_struct *,
  		struct task_struct *);
  /* Switch from @prev to @next; @last is set to the task __switch_to()
   * returns.  Wrapped in do/while(0) so it acts as a single statement. */
  #define switch_to(prev, next, last)					\
  	do {								\
  		((last) = __switch_to((prev), (next)));			\
  	} while (0)

  /* No arch-specific stack alignment/randomization: identity mapping. */
  #define arch_align_stack(x) (x)

  /* Emit a single no-op instruction. */
  #define nop() asm volatile ("nop")
  
  #endif /* !__ASSEMBLY__ */
  
  /*
   * Force strict CPU ordering.
   * And yes, this is required on UP too when we're talking
   * to devices.
   *
   * This implementation only contains a compiler barrier.
   */

  /* Mandatory barriers: the empty asm with a "memory" clobber stops the
   * compiler from reordering memory accesses across it, but emits no
   * hardware barrier instruction.  Architectures that need real ordering
   * instructions must provide their own versions. */
  #define mb()	asm volatile ("": : :"memory")
  #define rmb()	mb()
  #define wmb()	asm volatile ("": : :"memory")

  /* SMP variants: on CONFIG_SMP they alias the mandatory barriers above;
   * on UP they degrade to barrier(), which is also compiler-only. */
  #ifdef CONFIG_SMP
  #define smp_mb()	mb()
  #define smp_rmb()	rmb()
  #define smp_wmb()	wmb()
  #else
  #define smp_mb()	barrier()
  #define smp_rmb()	barrier()
  #define smp_wmb()	barrier()
  #endif

  /* Store @value into @var, then issue a full/write barrier. */
  #define set_mb(var, value)  do { var = value;  mb(); } while (0)
  #define set_wmb(var, value) do { var = value; wmb(); } while (0)

  /* Data-dependency barriers are no-ops in this generic version; an
   * architecture with weaker dependency ordering would override them. */
  #define read_barrier_depends()		do {} while (0)
  #define smp_read_barrier_depends()	do {} while (0)
  
  /*
   * we make sure local_irq_enable() doesn't cause priority inversion
   */
  #ifndef __ASSEMBLY__
  
  /* Deliberately never defined: referencing it from the default case of
   * __xchg() turns an invalid xchg() operand size into a link-time error
   * instead of silently miscompiling. */
  extern void __xchg_called_with_bad_pointer(void);
  
  /*
   * Generic xchg() back end: store @x into *@ptr and return the value
   * that was there before.  @size selects the operand width in bytes.
   * If the architecture supplies an atomic __xchg_uN() helper for that
   * width it is used; otherwise we fall back to a plain load/store pair
   * executed with local interrupts masked.
   */
  static inline
  unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
  {
  	unsigned long old, irq_flags;

  	switch (size) {
  	case 1:
  #ifdef __xchg_u8
  		return __xchg_u8(x, ptr);
  #else
  	{
  		volatile u8 *p = ptr;

  		local_irq_save(irq_flags);
  		old = *p;
  		*p = x;
  		local_irq_restore(irq_flags);
  		return old;
  	}
  #endif /* __xchg_u8 */

  	case 2:
  #ifdef __xchg_u16
  		return __xchg_u16(x, ptr);
  #else
  	{
  		volatile u16 *p = ptr;

  		local_irq_save(irq_flags);
  		old = *p;
  		*p = x;
  		local_irq_restore(irq_flags);
  		return old;
  	}
  #endif /* __xchg_u16 */

  	case 4:
  #ifdef __xchg_u32
  		return __xchg_u32(x, ptr);
  #else
  	{
  		volatile u32 *p = ptr;

  		local_irq_save(irq_flags);
  		old = *p;
  		*p = x;
  		local_irq_restore(irq_flags);
  		return old;
  	}
  #endif /* __xchg_u32 */

  #ifdef CONFIG_64BIT
  	case 8:
  #ifdef __xchg_u64
  		return __xchg_u64(x, ptr);
  #else
  	{
  		volatile u64 *p = ptr;

  		local_irq_save(irq_flags);
  		old = *p;
  		*p = x;
  		local_irq_restore(irq_flags);
  		return old;
  	}
  #endif /* __xchg_u64 */
  #endif /* CONFIG_64BIT */

  	default:
  		/* Unsupported size: force a link-time failure. */
  		__xchg_called_with_bad_pointer();
  		return x;
  	}
  }
  
  /* Exchange @x into *@ptr and return the previous value, cast back to
   * the pointee's type; the access width is taken from sizeof(*(ptr)). */
  #define xchg(ptr, x) \
  	((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
aafe4dbed   Arnd Bergmann   asm-generic: add ...
137
  #endif /* !__ASSEMBLY__ */
aafe4dbed   Arnd Bergmann   asm-generic: add ...
138
  #endif /* __ASM_GENERIC_SYSTEM_H */