Commit 79a69d342d71b2b4eafdf51e2451606cfe380a44

Authored by Linus Torvalds

Merge tag 'arm64-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64

Pull arm64 patches from Catalin Marinas:

 - SMP support for the PSCI booting protocol (Power State Coordination
   Interface).

 - Simple earlyprintk support.

 - Platform devices populated by default from the DT (SoC-agnostic).

 - CONTEXTIDR support (used by external trace tools).

* tag 'arm64-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64:
  arm64: mm: update CONTEXTIDR register to contain PID of current process
  arm64: atomics: fix grossly inconsistent asm constraints for exclusives
  arm64: compat: use compat_uptr_t type for compat_ucontext.uc_link
  arm64: Select ARCH_WANT_FRAME_POINTERS
  arm64: Add kvm_para.h and xor.h generic headers
  arm64: SMP: enable PSCI boot method
  arm64: psci: add support for PSCI invocations from the kernel
  arm64: SMP: rework the SMP code to be enabling method agnostic
  arm64: perf: add guest vs host discrimination
  arm64: add COMPAT_PSR_*_BIT flags
  arm64: Add simple earlyprintk support
  arm64: Populate the platform devices

Showing 29 changed files

Documentation/arm64/memory.txt
... ... @@ -35,6 +35,8 @@
35 35  
36 36 ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmemmap]
37 37  
  38 +ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk device
  39 +
38 40 ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space
39 41  
40 42 ffffffbbffff0000 ffffffbcffffffff ~2MB [guard]
arch/arm64/Kconfig
... ... @@ -2,6 +2,7 @@
2 2 def_bool y
3 3 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
4 4 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
  5 + select ARCH_WANT_FRAME_POINTERS
5 6 select ARM_AMBA
6 7 select CLONE_BACKWARDS
7 8 select COMMON_CLK
arch/arm64/Kconfig.debug
... ... @@ -24,5 +24,22 @@
24 24 Enables the display of the minimum amount of free stack which each
25 25 task has ever had available in the sysrq-T output.
26 26  
  27 +config EARLY_PRINTK
  28 + bool "Early printk support"
  29 + default y
  30 + help
  31 + Say Y here if you want to have an early console using the
  32 + earlyprintk=<name>[,<addr>][,<options>] kernel parameter. It
  33 + is assumed that the early console device has been initialised
  34 + by the boot loader prior to starting the Linux kernel.
  35 +
  36 +config PID_IN_CONTEXTIDR
  37 + bool "Write the current PID to the CONTEXTIDR register"
  38 + help
  39 + Enabling this option causes the kernel to write the current PID to
  40 + the CONTEXTIDR register, at the expense of some additional
  41 + instructions during context switch. Say Y here only if you are
  42 + planning to use hardware trace tools with this kernel.
  43 +
27 44 endmenu
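
For reference, a minimal .config fragment enabling both new debug options introduced above (the symbol names are exactly as defined in this Kconfig.debug):

    CONFIG_EARLY_PRINTK=y
    CONFIG_PID_IN_CONTEXTIDR=y
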
arch/arm64/include/asm/Kbuild
... ... @@ -19,6 +19,7 @@
19 19 generic-y += irq_regs.h
20 20 generic-y += kdebug.h
21 21 generic-y += kmap_types.h
  22 +generic-y += kvm_para.h
22 23 generic-y += local.h
23 24 generic-y += local64.h
24 25 generic-y += mman.h
... ... @@ -48,4 +49,5 @@
48 49 generic-y += types.h
49 50 generic-y += unaligned.h
50 51 generic-y += user.h
  52 +generic-y += xor.h
arch/arm64/include/asm/atomic.h
... ... @@ -49,12 +49,12 @@
49 49 int result;
50 50  
51 51 asm volatile("// atomic_add\n"
52   -"1: ldxr %w0, [%3]\n"
53   -" add %w0, %w0, %w4\n"
54   -" stxr %w1, %w0, [%3]\n"
  52 +"1: ldxr %w0, %2\n"
  53 +" add %w0, %w0, %w3\n"
  54 +" stxr %w1, %w0, %2\n"
55 55 " cbnz %w1, 1b"
56   - : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
57   - : "r" (&v->counter), "Ir" (i)
  56 + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
  57 + : "Ir" (i)
58 58 : "cc");
59 59 }
60 60  
61 61  
... ... @@ -64,13 +64,13 @@
64 64 int result;
65 65  
66 66 asm volatile("// atomic_add_return\n"
67   -"1: ldaxr %w0, [%3]\n"
68   -" add %w0, %w0, %w4\n"
69   -" stlxr %w1, %w0, [%3]\n"
  67 +"1: ldaxr %w0, %2\n"
  68 +" add %w0, %w0, %w3\n"
  69 +" stlxr %w1, %w0, %2\n"
70 70 " cbnz %w1, 1b"
71   - : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
72   - : "r" (&v->counter), "Ir" (i)
73   - : "cc");
  71 + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
  72 + : "Ir" (i)
  73 + : "cc", "memory");
74 74  
75 75 return result;
76 76 }
77 77  
... ... @@ -81,12 +81,12 @@
81 81 int result;
82 82  
83 83 asm volatile("// atomic_sub\n"
84   -"1: ldxr %w0, [%3]\n"
85   -" sub %w0, %w0, %w4\n"
86   -" stxr %w1, %w0, [%3]\n"
  84 +"1: ldxr %w0, %2\n"
  85 +" sub %w0, %w0, %w3\n"
  86 +" stxr %w1, %w0, %2\n"
87 87 " cbnz %w1, 1b"
88   - : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
89   - : "r" (&v->counter), "Ir" (i)
  88 + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
  89 + : "Ir" (i)
90 90 : "cc");
91 91 }
92 92  
93 93  
... ... @@ -96,13 +96,13 @@
96 96 int result;
97 97  
98 98 asm volatile("// atomic_sub_return\n"
99   -"1: ldaxr %w0, [%3]\n"
100   -" sub %w0, %w0, %w4\n"
101   -" stlxr %w1, %w0, [%3]\n"
  99 +"1: ldaxr %w0, %2\n"
  100 +" sub %w0, %w0, %w3\n"
  101 +" stlxr %w1, %w0, %2\n"
102 102 " cbnz %w1, 1b"
103   - : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
104   - : "r" (&v->counter), "Ir" (i)
105   - : "cc");
  103 + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
  104 + : "Ir" (i)
  105 + : "cc", "memory");
106 106  
107 107 return result;
108 108 }
109 109  
110 110  
... ... @@ -113,15 +113,15 @@
113 113 int oldval;
114 114  
115 115 asm volatile("// atomic_cmpxchg\n"
116   -"1: ldaxr %w1, [%3]\n"
117   -" cmp %w1, %w4\n"
  116 +"1: ldaxr %w1, %2\n"
  117 +" cmp %w1, %w3\n"
118 118 " b.ne 2f\n"
119   -" stlxr %w0, %w5, [%3]\n"
  119 +" stlxr %w0, %w4, %2\n"
120 120 " cbnz %w0, 1b\n"
121 121 "2:"
122   - : "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter)
123   - : "r" (&ptr->counter), "Ir" (old), "r" (new)
124   - : "cc");
  122 + : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
  123 + : "Ir" (old), "r" (new)
  124 + : "cc", "memory");
125 125  
126 126 return oldval;
127 127 }
128 128  
... ... @@ -131,12 +131,12 @@
131 131 unsigned long tmp, tmp2;
132 132  
133 133 asm volatile("// atomic_clear_mask\n"
134   -"1: ldxr %0, [%3]\n"
135   -" bic %0, %0, %4\n"
136   -" stxr %w1, %0, [%3]\n"
  134 +"1: ldxr %0, %2\n"
  135 +" bic %0, %0, %3\n"
  136 +" stxr %w1, %0, %2\n"
137 137 " cbnz %w1, 1b"
138   - : "=&r" (tmp), "=&r" (tmp2), "+o" (*addr)
139   - : "r" (addr), "Ir" (mask)
  138 + : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
  139 + : "Ir" (mask)
140 140 : "cc");
141 141 }
142 142  
143 143  
... ... @@ -182,12 +182,12 @@
182 182 unsigned long tmp;
183 183  
184 184 asm volatile("// atomic64_add\n"
185   -"1: ldxr %0, [%3]\n"
186   -" add %0, %0, %4\n"
187   -" stxr %w1, %0, [%3]\n"
  185 +"1: ldxr %0, %2\n"
  186 +" add %0, %0, %3\n"
  187 +" stxr %w1, %0, %2\n"
188 188 " cbnz %w1, 1b"
189   - : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
190   - : "r" (&v->counter), "Ir" (i)
  189 + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
  190 + : "Ir" (i)
191 191 : "cc");
192 192 }
193 193  
194 194  
... ... @@ -197,13 +197,13 @@
197 197 unsigned long tmp;
198 198  
199 199 asm volatile("// atomic64_add_return\n"
200   -"1: ldaxr %0, [%3]\n"
201   -" add %0, %0, %4\n"
202   -" stlxr %w1, %0, [%3]\n"
  200 +"1: ldaxr %0, %2\n"
  201 +" add %0, %0, %3\n"
  202 +" stlxr %w1, %0, %2\n"
203 203 " cbnz %w1, 1b"
204   - : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
205   - : "r" (&v->counter), "Ir" (i)
206   - : "cc");
  204 + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
  205 + : "Ir" (i)
  206 + : "cc", "memory");
207 207  
208 208 return result;
209 209 }
210 210  
... ... @@ -214,12 +214,12 @@
214 214 unsigned long tmp;
215 215  
216 216 asm volatile("// atomic64_sub\n"
217   -"1: ldxr %0, [%3]\n"
218   -" sub %0, %0, %4\n"
219   -" stxr %w1, %0, [%3]\n"
  217 +"1: ldxr %0, %2\n"
  218 +" sub %0, %0, %3\n"
  219 +" stxr %w1, %0, %2\n"
220 220 " cbnz %w1, 1b"
221   - : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
222   - : "r" (&v->counter), "Ir" (i)
  221 + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
  222 + : "Ir" (i)
223 223 : "cc");
224 224 }
225 225  
226 226  
... ... @@ -229,13 +229,13 @@
229 229 unsigned long tmp;
230 230  
231 231 asm volatile("// atomic64_sub_return\n"
232   -"1: ldaxr %0, [%3]\n"
233   -" sub %0, %0, %4\n"
234   -" stlxr %w1, %0, [%3]\n"
  232 +"1: ldaxr %0, %2\n"
  233 +" sub %0, %0, %3\n"
  234 +" stlxr %w1, %0, %2\n"
235 235 " cbnz %w1, 1b"
236   - : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
237   - : "r" (&v->counter), "Ir" (i)
238   - : "cc");
  236 + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
  237 + : "Ir" (i)
  238 + : "cc", "memory");
239 239  
240 240 return result;
241 241 }
242 242  
243 243  
... ... @@ -246,15 +246,15 @@
246 246 unsigned long res;
247 247  
248 248 asm volatile("// atomic64_cmpxchg\n"
249   -"1: ldaxr %1, [%3]\n"
250   -" cmp %1, %4\n"
  249 +"1: ldaxr %1, %2\n"
  250 +" cmp %1, %3\n"
251 251 " b.ne 2f\n"
252   -" stlxr %w0, %5, [%3]\n"
  252 +" stlxr %w0, %4, %2\n"
253 253 " cbnz %w0, 1b\n"
254 254 "2:"
255   - : "=&r" (res), "=&r" (oldval), "+o" (ptr->counter)
256   - : "r" (&ptr->counter), "Ir" (old), "r" (new)
257   - : "cc");
  255 + : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
  256 + : "Ir" (old), "r" (new)
  257 + : "cc", "memory");
258 258  
259 259 return oldval;
260 260 }
261 261  
262 262  
... ... @@ -267,15 +267,15 @@
267 267 unsigned long tmp;
268 268  
269 269 asm volatile("// atomic64_dec_if_positive\n"
270   -"1: ldaxr %0, [%3]\n"
  270 +"1: ldaxr %0, %2\n"
271 271 " subs %0, %0, #1\n"
272 272 " b.mi 2f\n"
273   -" stlxr %w1, %0, [%3]\n"
  273 +" stlxr %w1, %0, %2\n"
274 274 " cbnz %w1, 1b\n"
275 275 "2:"
276   - : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
277   - : "r" (&v->counter)
278   - : "cc");
  276 + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
  277 + :
  278 + : "cc", "memory");
279 279  
280 280 return result;
281 281 }
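
The constraint fix above replaces the old pattern, which named the memory twice (a "+o" output plus a separate "r" pointer input used to hand-write "[%3]"), with a single "+Q" operand. A minimal sketch of the resulting style, using a hypothetical helper not taken from this commit:

    /*
     * Illustrative sketch (hypothetical helper, not part of this commit).
     * "Q" guarantees the operand is printed as a plain [Xn] memory
     * reference, the only addressing mode the exclusive instructions
     * accept, and "+Q" additionally tells the compiler that the location
     * is both read and written.
     */
    static inline void example_atomic_or(unsigned long mask, unsigned long *p)
    {
    	unsigned long tmp, val;

    	asm volatile("// example_atomic_or\n"
    "1:	ldxr	%0, %2\n"
    "	orr	%0, %0, %3\n"
    "	stxr	%w1, %0, %2\n"
    "	cbnz	%w1, 1b"
    	: "=&r" (val), "=&r" (tmp), "+Q" (*p)
    	: "r" (mask)
    	: "cc");
    }
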
arch/arm64/include/asm/cmpxchg.h
... ... @@ -29,39 +29,39 @@
29 29 switch (size) {
30 30 case 1:
31 31 asm volatile("// __xchg1\n"
32   - "1: ldaxrb %w0, [%3]\n"
33   - " stlxrb %w1, %w2, [%3]\n"
  32 + "1: ldaxrb %w0, %2\n"
  33 + " stlxrb %w1, %w3, %2\n"
34 34 " cbnz %w1, 1b\n"
35   - : "=&r" (ret), "=&r" (tmp)
36   - : "r" (x), "r" (ptr)
37   - : "memory", "cc");
  35 + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
  36 + : "r" (x)
  37 + : "cc", "memory");
38 38 break;
39 39 case 2:
40 40 asm volatile("// __xchg2\n"
41   - "1: ldaxrh %w0, [%3]\n"
42   - " stlxrh %w1, %w2, [%3]\n"
  41 + "1: ldaxrh %w0, %2\n"
  42 + " stlxrh %w1, %w3, %2\n"
43 43 " cbnz %w1, 1b\n"
44   - : "=&r" (ret), "=&r" (tmp)
45   - : "r" (x), "r" (ptr)
46   - : "memory", "cc");
  44 + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
  45 + : "r" (x)
  46 + : "cc", "memory");
47 47 break;
48 48 case 4:
49 49 asm volatile("// __xchg4\n"
50   - "1: ldaxr %w0, [%3]\n"
51   - " stlxr %w1, %w2, [%3]\n"
  50 + "1: ldaxr %w0, %2\n"
  51 + " stlxr %w1, %w3, %2\n"
52 52 " cbnz %w1, 1b\n"
53   - : "=&r" (ret), "=&r" (tmp)
54   - : "r" (x), "r" (ptr)
55   - : "memory", "cc");
  53 + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
  54 + : "r" (x)
  55 + : "cc", "memory");
56 56 break;
57 57 case 8:
58 58 asm volatile("// __xchg8\n"
59   - "1: ldaxr %0, [%3]\n"
60   - " stlxr %w1, %2, [%3]\n"
  59 + "1: ldaxr %0, %2\n"
  60 + " stlxr %w1, %3, %2\n"
61 61 " cbnz %w1, 1b\n"
62   - : "=&r" (ret), "=&r" (tmp)
63   - : "r" (x), "r" (ptr)
64   - : "memory", "cc");
  62 + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
  63 + : "r" (x)
  64 + : "cc", "memory");
65 65 break;
66 66 default:
67 67 BUILD_BUG();
68 68  
69 69  
... ... @@ -82,14 +82,14 @@
82 82 case 1:
83 83 do {
84 84 asm volatile("// __cmpxchg1\n"
85   - " ldxrb %w1, [%2]\n"
  85 + " ldxrb %w1, %2\n"
86 86 " mov %w0, #0\n"
87 87 " cmp %w1, %w3\n"
88 88 " b.ne 1f\n"
89   - " stxrb %w0, %w4, [%2]\n"
  89 + " stxrb %w0, %w4, %2\n"
90 90 "1:\n"
91   - : "=&r" (res), "=&r" (oldval)
92   - : "r" (ptr), "Ir" (old), "r" (new)
  91 + : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
  92 + : "Ir" (old), "r" (new)
93 93 : "cc");
94 94 } while (res);
95 95 break;
96 96  
97 97  
98 98  
99 99  
100 100  
... ... @@ -97,29 +97,29 @@
97 97 case 2:
98 98 do {
99 99 asm volatile("// __cmpxchg2\n"
100   - " ldxrh %w1, [%2]\n"
  100 + " ldxrh %w1, %2\n"
101 101 " mov %w0, #0\n"
102 102 " cmp %w1, %w3\n"
103 103 " b.ne 1f\n"
104   - " stxrh %w0, %w4, [%2]\n"
  104 + " stxrh %w0, %w4, %2\n"
105 105 "1:\n"
106   - : "=&r" (res), "=&r" (oldval)
107   - : "r" (ptr), "Ir" (old), "r" (new)
108   - : "memory", "cc");
  106 + : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
  107 + : "Ir" (old), "r" (new)
  108 + : "cc");
109 109 } while (res);
110 110 break;
111 111  
112 112 case 4:
113 113 do {
114 114 asm volatile("// __cmpxchg4\n"
115   - " ldxr %w1, [%2]\n"
  115 + " ldxr %w1, %2\n"
116 116 " mov %w0, #0\n"
117 117 " cmp %w1, %w3\n"
118 118 " b.ne 1f\n"
119   - " stxr %w0, %w4, [%2]\n"
  119 + " stxr %w0, %w4, %2\n"
120 120 "1:\n"
121   - : "=&r" (res), "=&r" (oldval)
122   - : "r" (ptr), "Ir" (old), "r" (new)
  121 + : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
  122 + : "Ir" (old), "r" (new)
123 123 : "cc");
124 124 } while (res);
125 125 break;
126 126  
127 127  
... ... @@ -127,14 +127,14 @@
127 127 case 8:
128 128 do {
129 129 asm volatile("// __cmpxchg8\n"
130   - " ldxr %1, [%2]\n"
  130 + " ldxr %1, %2\n"
131 131 " mov %w0, #0\n"
132 132 " cmp %1, %3\n"
133 133 " b.ne 1f\n"
134   - " stxr %w0, %4, [%2]\n"
  134 + " stxr %w0, %4, %2\n"
135 135 "1:\n"
136   - : "=&r" (res), "=&r" (oldval)
137   - : "r" (ptr), "Ir" (old), "r" (new)
  136 + : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
  137 + : "Ir" (old), "r" (new)
138 138 : "cc");
139 139 } while (res);
140 140 break;
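
On the caller side, these primitives are normally wrapped in a read-retry loop. A hypothetical sketch (not from this commit) of the usual pattern built on atomic_cmpxchg():

    /* Hypothetical caller-side sketch: the standard compare-and-swap
     * retry loop built on the cmpxchg primitives above. */
    static inline int example_add_unless_negative(atomic_t *v, int a)
    {
    	int c = atomic_read(v);

    	while (c >= 0) {
    		int old = atomic_cmpxchg(v, c, c + a);
    		if (old == c)
    			return 1;	/* update succeeded */
    		c = old;		/* lost a race; retry with observed value */
    	}
    	return 0;			/* value was negative, nothing done */
    }
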
arch/arm64/include/asm/futex.h
... ... @@ -39,7 +39,7 @@
39 39 " .popsection\n" \
40 40 : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
41 41 : "r" (oparg), "Ir" (-EFAULT) \
42   - : "cc")
  42 + : "cc", "memory")
43 43  
44 44 static inline int
45 45 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
arch/arm64/include/asm/io.h
... ... @@ -230,6 +230,9 @@
230 230 #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
231 231 #define iounmap __iounmap
232 232  
  233 +#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
  234 +#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
  235 +
233 236 #define ARCH_HAS_IOREMAP_WC
234 237 #include <asm-generic/iomap.h>
235 238  
arch/arm64/include/asm/memory.h
... ... @@ -43,6 +43,7 @@
43 43 #define PAGE_OFFSET UL(0xffffffc000000000)
44 44 #define MODULES_END (PAGE_OFFSET)
45 45 #define MODULES_VADDR (MODULES_END - SZ_64M)
  46 +#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M)
46 47 #define VA_BITS (39)
47 48 #define TASK_SIZE_64 (UL(1) << VA_BITS)
48 49  
arch/arm64/include/asm/mmu.h
... ... @@ -26,6 +26,7 @@
26 26  
27 27 extern void paging_init(void);
28 28 extern void setup_mm_for_reboot(void);
  29 +extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
29 30  
30 31 #endif
arch/arm64/include/asm/mmu_context.h
... ... @@ -35,6 +35,21 @@
35 35 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
36 36 void __new_context(struct mm_struct *mm);
37 37  
  38 +#ifdef CONFIG_PID_IN_CONTEXTIDR
  39 +static inline void contextidr_thread_switch(struct task_struct *next)
  40 +{
  41 + asm(
  42 + " msr contextidr_el1, %0\n"
  43 + " isb"
  44 + :
  45 + : "r" (task_pid_nr(next)));
  46 +}
  47 +#else
  48 +static inline void contextidr_thread_switch(struct task_struct *next)
  49 +{
  50 +}
  51 +#endif
  52 +
38 53 /*
39 54 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
40 55 */
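
External trace tools decode the PID written above from the trace stream; for completeness, a hypothetical sketch (not part of this commit) of reading the value back out of CONTEXTIDR_EL1 from kernel context:

    /* Hypothetical sketch: reading back the value written by
     * contextidr_thread_switch(). */
    static inline unsigned long example_read_contextidr(void)
    {
    	unsigned long pid;

    	asm("mrs %0, contextidr_el1" : "=r" (pid));
    	return pid;
    }
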
arch/arm64/include/asm/perf_event.h
... ... @@ -17,7 +17,12 @@
17 17 #ifndef __ASM_PERF_EVENT_H
18 18 #define __ASM_PERF_EVENT_H
19 19  
20   -/* It's quiet around here... */
  20 +#ifdef CONFIG_HW_PERF_EVENTS
  21 +struct pt_regs;
  22 +extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
  23 +extern unsigned long perf_misc_flags(struct pt_regs *regs);
  24 +#define perf_misc_flags(regs) perf_misc_flags(regs)
  25 +#endif
21 26  
22 27 #endif
arch/arm64/include/asm/psci.h
  1 +/*
  2 + * This program is free software; you can redistribute it and/or modify
  3 + * it under the terms of the GNU General Public License version 2 as
  4 + * published by the Free Software Foundation.
  5 + *
  6 + * This program is distributed in the hope that it will be useful,
  7 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  8 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9 + * GNU General Public License for more details.
  10 + *
  11 + * Copyright (C) 2013 ARM Limited
  12 + */
  13 +
  14 +#ifndef __ASM_PSCI_H
  15 +#define __ASM_PSCI_H
  16 +
  17 +#define PSCI_POWER_STATE_TYPE_STANDBY 0
  18 +#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
  19 +
  20 +struct psci_power_state {
  21 + u16 id;
  22 + u8 type;
  23 + u8 affinity_level;
  24 +};
  25 +
  26 +struct psci_operations {
  27 + int (*cpu_suspend)(struct psci_power_state state,
  28 + unsigned long entry_point);
  29 + int (*cpu_off)(struct psci_power_state state);
  30 + int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
  31 + int (*migrate)(unsigned long cpuid);
  32 +};
  33 +
  34 +extern struct psci_operations psci_ops;
  35 +
  36 +int psci_init(void);
  37 +
  38 +#endif /* __ASM_PSCI_H */
arch/arm64/include/asm/ptrace.h
... ... @@ -42,6 +42,16 @@
42 42 #define COMPAT_PSR_MODE_UND 0x0000001b
43 43 #define COMPAT_PSR_MODE_SYS 0x0000001f
44 44 #define COMPAT_PSR_T_BIT 0x00000020
  45 +#define COMPAT_PSR_F_BIT 0x00000040
  46 +#define COMPAT_PSR_I_BIT 0x00000080
  47 +#define COMPAT_PSR_A_BIT 0x00000100
  48 +#define COMPAT_PSR_E_BIT 0x00000200
  49 +#define COMPAT_PSR_J_BIT 0x01000000
  50 +#define COMPAT_PSR_Q_BIT 0x08000000
  51 +#define COMPAT_PSR_V_BIT 0x10000000
  52 +#define COMPAT_PSR_C_BIT 0x20000000
  53 +#define COMPAT_PSR_Z_BIT 0x40000000
  54 +#define COMPAT_PSR_N_BIT 0x80000000
45 55 #define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
46 56 /*
47 57 * These are 'magic' values for PTRACE_PEEKUSR that return info about where a
arch/arm64/include/asm/smp.h
... ... @@ -66,5 +66,16 @@
66 66 extern void arch_send_call_function_single_ipi(int cpu);
67 67 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
68 68  
  69 +struct device_node;
  70 +
  71 +struct smp_enable_ops {
  72 + const char *name;
  73 + int (*init_cpu)(struct device_node *, int);
  74 + int (*prepare_cpu)(int);
  75 +};
  76 +
  77 +extern const struct smp_enable_ops smp_spin_table_ops;
  78 +extern const struct smp_enable_ops smp_psci_ops;
  79 +
69 80 #endif /* ifndef __ASM_SMP_H */
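
The two ops structures declared above are the built-in enable methods (implemented later in this diff). As an illustrative skeleton only, a hypothetical third method would plug in like this; the name is matched against the DT "enable-method" property in smp.c below:

    /* Hypothetical skeleton of an additional enable method. */
    static int __init smp_example_init_cpu(struct device_node *dn, int cpu)
    {
    	return 0;	/* parse per-cpu DT properties here */
    }

    static int __init smp_example_prepare_cpu(int cpu)
    {
    	return 0;	/* release the secondary CPU here */
    }

    const struct smp_enable_ops smp_example_ops __initconst = {
    	.name		= "example-method",
    	.init_cpu	= smp_example_init_cpu,
    	.prepare_cpu	= smp_example_prepare_cpu,
    };
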
arch/arm64/include/asm/spinlock.h
... ... @@ -45,13 +45,13 @@
45 45 asm volatile(
46 46 " sevl\n"
47 47 "1: wfe\n"
48   - "2: ldaxr %w0, [%1]\n"
  48 + "2: ldaxr %w0, %1\n"
49 49 " cbnz %w0, 1b\n"
50   - " stxr %w0, %w2, [%1]\n"
  50 + " stxr %w0, %w2, %1\n"
51 51 " cbnz %w0, 2b\n"
52   - : "=&r" (tmp)
53   - : "r" (&lock->lock), "r" (1)
54   - : "memory");
  52 + : "=&r" (tmp), "+Q" (lock->lock)
  53 + : "r" (1)
  54 + : "cc", "memory");
55 55 }
56 56  
57 57 static inline int arch_spin_trylock(arch_spinlock_t *lock)
58 58  
59 59  
... ... @@ -59,13 +59,13 @@
59 59 unsigned int tmp;
60 60  
61 61 asm volatile(
62   - " ldaxr %w0, [%1]\n"
  62 + " ldaxr %w0, %1\n"
63 63 " cbnz %w0, 1f\n"
64   - " stxr %w0, %w2, [%1]\n"
  64 + " stxr %w0, %w2, %1\n"
65 65 "1:\n"
66   - : "=&r" (tmp)
67   - : "r" (&lock->lock), "r" (1)
68   - : "memory");
  66 + : "=&r" (tmp), "+Q" (lock->lock)
  67 + : "r" (1)
  68 + : "cc", "memory");
69 69  
70 70 return !tmp;
71 71 }
... ... @@ -73,8 +73,8 @@
73 73 static inline void arch_spin_unlock(arch_spinlock_t *lock)
74 74 {
75 75 asm volatile(
76   - " stlr %w1, [%0]\n"
77   - : : "r" (&lock->lock), "r" (0) : "memory");
  76 + " stlr %w1, %0\n"
  77 + : "=Q" (lock->lock) : "r" (0) : "memory");
78 78 }
79 79  
80 80 /*
81 81  
82 82  
... ... @@ -94,13 +94,13 @@
94 94 asm volatile(
95 95 " sevl\n"
96 96 "1: wfe\n"
97   - "2: ldaxr %w0, [%1]\n"
  97 + "2: ldaxr %w0, %1\n"
98 98 " cbnz %w0, 1b\n"
99   - " stxr %w0, %w2, [%1]\n"
  99 + " stxr %w0, %w2, %1\n"
100 100 " cbnz %w0, 2b\n"
101   - : "=&r" (tmp)
102   - : "r" (&rw->lock), "r" (0x80000000)
103   - : "memory");
  101 + : "=&r" (tmp), "+Q" (rw->lock)
  102 + : "r" (0x80000000)
  103 + : "cc", "memory");
104 104 }
105 105  
106 106 static inline int arch_write_trylock(arch_rwlock_t *rw)
107 107  
108 108  
... ... @@ -108,13 +108,13 @@
108 108 unsigned int tmp;
109 109  
110 110 asm volatile(
111   - " ldaxr %w0, [%1]\n"
  111 + " ldaxr %w0, %1\n"
112 112 " cbnz %w0, 1f\n"
113   - " stxr %w0, %w2, [%1]\n"
  113 + " stxr %w0, %w2, %1\n"
114 114 "1:\n"
115   - : "=&r" (tmp)
116   - : "r" (&rw->lock), "r" (0x80000000)
117   - : "memory");
  115 + : "=&r" (tmp), "+Q" (rw->lock)
  116 + : "r" (0x80000000)
  117 + : "cc", "memory");
118 118  
119 119 return !tmp;
120 120 }
... ... @@ -122,8 +122,8 @@
122 122 static inline void arch_write_unlock(arch_rwlock_t *rw)
123 123 {
124 124 asm volatile(
125   - " stlr %w1, [%0]\n"
126   - : : "r" (&rw->lock), "r" (0) : "memory");
  125 + " stlr %w1, %0\n"
  126 + : "=Q" (rw->lock) : "r" (0) : "memory");
127 127 }
128 128  
129 129 /* write_can_lock - would write_trylock() succeed? */
130 130  
131 131  
... ... @@ -148,14 +148,14 @@
148 148 asm volatile(
149 149 " sevl\n"
150 150 "1: wfe\n"
151   - "2: ldaxr %w0, [%2]\n"
  151 + "2: ldaxr %w0, %2\n"
152 152 " add %w0, %w0, #1\n"
153 153 " tbnz %w0, #31, 1b\n"
154   - " stxr %w1, %w0, [%2]\n"
  154 + " stxr %w1, %w0, %2\n"
155 155 " cbnz %w1, 2b\n"
156   - : "=&r" (tmp), "=&r" (tmp2)
157   - : "r" (&rw->lock)
158   - : "memory");
  156 + : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
  157 + :
  158 + : "cc", "memory");
159 159 }
160 160  
161 161 static inline void arch_read_unlock(arch_rwlock_t *rw)
162 162  
163 163  
... ... @@ -163,13 +163,13 @@
163 163 unsigned int tmp, tmp2;
164 164  
165 165 asm volatile(
166   - "1: ldxr %w0, [%2]\n"
  166 + "1: ldxr %w0, %2\n"
167 167 " sub %w0, %w0, #1\n"
168   - " stlxr %w1, %w0, [%2]\n"
  168 + " stlxr %w1, %w0, %2\n"
169 169 " cbnz %w1, 1b\n"
170   - : "=&r" (tmp), "=&r" (tmp2)
171   - : "r" (&rw->lock)
172   - : "memory");
  170 + : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
  171 + :
  172 + : "cc", "memory");
173 173 }
174 174  
175 175 static inline int arch_read_trylock(arch_rwlock_t *rw)
176 176  
177 177  
... ... @@ -177,14 +177,14 @@
177 177 unsigned int tmp, tmp2 = 1;
178 178  
179 179 asm volatile(
180   - " ldaxr %w0, [%2]\n"
  180 + " ldaxr %w0, %2\n"
181 181 " add %w0, %w0, #1\n"
182 182 " tbnz %w0, #31, 1f\n"
183   - " stxr %w1, %w0, [%2]\n"
  183 + " stxr %w1, %w0, %2\n"
184 184 "1:\n"
185   - : "=&r" (tmp), "+r" (tmp2)
186   - : "r" (&rw->lock)
187   - : "memory");
  185 + : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
  186 + :
  187 + : "cc", "memory");
188 188  
189 189 return !tmp2;
190 190 }
arch/arm64/include/uapi/asm/Kbuild
1 1 # UAPI Header export list
2 2 include include/uapi/asm-generic/Kbuild.asm
3 3  
  4 +generic-y += kvm_para.h
  5 +
4 6 header-y += auxvec.h
5 7 header-y += bitsperlong.h
6 8 header-y += byteorder.h
7 9 header-y += fcntl.h
8 10 header-y += hwcap.h
  11 +header-y += kvm_para.h
9 12 header-y += param.h
10 13 header-y += ptrace.h
11 14 header-y += setup.h
arch/arm64/kernel/Makefile
... ... @@ -9,14 +9,15 @@
9 9 arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
10 10 entry-fpsimd.o process.o ptrace.o setup.o signal.o \
11 11 sys.o stacktrace.o time.o traps.o io.o vdso.o \
12   - hyp-stub.o
  12 + hyp-stub.o psci.o
13 13  
14 14 arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
15 15 sys_compat.o
16 16 arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
17   -arm64-obj-$(CONFIG_SMP) += smp.o
  17 +arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o
18 18 arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
19 19 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
  20 +arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
20 21  
21 22 obj-y += $(arm64-obj-y) vdso/
22 23 obj-m += $(arm64-obj-m)
arch/arm64/kernel/early_printk.c
  1 +/*
  2 + * Earlyprintk support.
  3 + *
  4 + * Copyright (C) 2012 ARM Ltd.
  5 + * Author: Catalin Marinas <catalin.marinas@arm.com>
  6 + *
  7 + * This program is free software: you can redistribute it and/or modify
  8 + * it under the terms of the GNU General Public License version 2 as
  9 + * published by the Free Software Foundation.
  10 + *
  11 + * This program is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14 + * GNU General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU General Public License
  17 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  18 + */
  19 +#include <linux/kernel.h>
  20 +#include <linux/console.h>
  21 +#include <linux/init.h>
  22 +#include <linux/string.h>
  23 +#include <linux/mm.h>
  24 +#include <linux/io.h>
  25 +
  26 +#include <linux/amba/serial.h>
  27 +
  28 +static void __iomem *early_base;
  29 +static void (*printch)(char ch);
  30 +
  31 +/*
  32 + * PL011 single character TX.
  33 + */
  34 +static void pl011_printch(char ch)
  35 +{
  36 + while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_TXFF)
  37 + ;
  38 + writeb_relaxed(ch, early_base + UART01x_DR);
  39 + while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_BUSY)
  40 + ;
  41 +}
  42 +
  43 +struct earlycon_match {
  44 + const char *name;
  45 + void (*printch)(char ch);
  46 +};
  47 +
  48 +static const struct earlycon_match earlycon_match[] __initconst = {
  49 + { .name = "pl011", .printch = pl011_printch, },
  50 + {}
  51 +};
  52 +
  53 +static void early_write(struct console *con, const char *s, unsigned n)
  54 +{
  55 + while (n-- > 0) {
  56 + if (*s == '\n')
  57 + printch('\r');
  58 + printch(*s);
  59 + s++;
  60 + }
  61 +}
  62 +
  63 +static struct console early_console = {
  64 + .name = "earlycon",
  65 + .write = early_write,
  66 + .flags = CON_PRINTBUFFER | CON_BOOT,
  67 + .index = -1,
  68 +};
  69 +
  70 +/*
  71 + * Parse earlyprintk=... parameter in the format:
  72 + *
  73 + * <name>[,<addr>][,<options>]
  74 + *
  75 + * and register the early console. It is assumed that the UART has been
  76 + * initialised by the bootloader already.
  77 + */
  78 +static int __init setup_early_printk(char *buf)
  79 +{
  80 + const struct earlycon_match *match = earlycon_match;
  81 + phys_addr_t paddr = 0;
  82 +
  83 + if (!buf) {
  84 + pr_warning("No earlyprintk arguments passed.\n");
  85 + return 0;
  86 + }
  87 +
  88 + while (match->name) {
  89 + size_t len = strlen(match->name);
  90 + if (!strncmp(buf, match->name, len)) {
  91 + buf += len;
  92 + break;
  93 + }
  94 + match++;
  95 + }
  96 + if (!match->name) {
  97 + pr_warning("Unknown earlyprintk arguments: %s\n", buf);
  98 + return 0;
  99 + }
  100 +
  101 + /* I/O address */
  102 + if (!strncmp(buf, ",0x", 3)) {
  103 + char *e;
  104 + paddr = simple_strtoul(buf + 1, &e, 16);
  105 + buf = e;
  106 + }
  107 + /* no options parsing yet */
  108 +
  109 + if (paddr)
  110 + early_base = early_io_map(paddr, EARLYCON_IOBASE);
  111 +
  112 + printch = match->printch;
  113 + register_console(&early_console);
  114 +
  115 + return 0;
  116 +}
  117 +
  118 +early_param("earlyprintk", setup_early_printk);
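
As a usage illustration of the parameter format parsed above (the UART physical address below is a placeholder, not taken from this commit):

    earlyprintk=pl011,0x1c090000

This maps the device at EARLYCON_IOBASE via early_io_map() and registers the boot console before the regular console becomes available.
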
arch/arm64/kernel/head.S
... ... @@ -82,10 +82,8 @@
82 82  
83 83 #ifdef CONFIG_ARM64_64K_PAGES
84 84 #define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
85   -#define IO_MMUFLAGS PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_XN | PTE_FLAGS
86 85 #else
87 86 #define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS
88   -#define IO_MMUFLAGS PMD_ATTRINDX(MT_DEVICE_nGnRE) | PMD_SECT_XN | PMD_FLAGS
89 87 #endif
90 88  
91 89 /*
... ... @@ -368,6 +366,7 @@
368 366 * - identity mapping to enable the MMU (low address, TTBR0)
369 367 * - first few MB of the kernel linear mapping to jump to once the MMU has
370 368 * been enabled, including the FDT blob (TTBR1)
  369 + * - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1)
371 370 */
372 371 __create_page_tables:
373 372 pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses
... ... @@ -420,6 +419,15 @@
420 419 sub x6, x6, #1 // inclusive range
421 420 create_block_map x0, x7, x3, x5, x6
422 421 1:
  422 +#ifdef CONFIG_EARLY_PRINTK
  423 + /*
  424 + * Create the pgd entry for the UART mapping. The full mapping is done
  425 + * later based on the earlyprintk kernel parameter.
  426 + */
  427 + ldr x5, =EARLYCON_IOBASE // UART virtual address
  428 + add x0, x26, #2 * PAGE_SIZE // section table address
  429 + create_pgd_entry x26, x0, x5, x6, x7
  430 +#endif
423 431 ret
424 432 ENDPROC(__create_page_tables)
425 433 .ltorg
arch/arm64/kernel/perf_event.c
... ... @@ -1331,6 +1331,11 @@
1331 1331 {
1332 1332 struct frame_tail __user *tail;
1333 1333  
  1334 + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
  1335 + /* We don't support guest os callchain now */
  1336 + return;
  1337 + }
  1338 +
1334 1339 tail = (struct frame_tail __user *)regs->regs[29];
1335 1340  
1336 1341 while (entry->nr < PERF_MAX_STACK_DEPTH &&
1337 1342  
... ... @@ -1355,9 +1360,41 @@
1355 1360 {
1356 1361 struct stackframe frame;
1357 1362  
  1363 + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
  1364 + /* We don't support guest os callchain now */
  1365 + return;
  1366 + }
  1367 +
1358 1368 frame.fp = regs->regs[29];
1359 1369 frame.sp = regs->sp;
1360 1370 frame.pc = regs->pc;
1361 1371 walk_stackframe(&frame, callchain_trace, entry);
  1372 +}
  1373 +
  1374 +unsigned long perf_instruction_pointer(struct pt_regs *regs)
  1375 +{
  1376 + if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
  1377 + return perf_guest_cbs->get_guest_ip();
  1378 +
  1379 + return instruction_pointer(regs);
  1380 +}
  1381 +
  1382 +unsigned long perf_misc_flags(struct pt_regs *regs)
  1383 +{
  1384 + int misc = 0;
  1385 +
  1386 + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
  1387 + if (perf_guest_cbs->is_user_mode())
  1388 + misc |= PERF_RECORD_MISC_GUEST_USER;
  1389 + else
  1390 + misc |= PERF_RECORD_MISC_GUEST_KERNEL;
  1391 + } else {
  1392 + if (user_mode(regs))
  1393 + misc |= PERF_RECORD_MISC_USER;
  1394 + else
  1395 + misc |= PERF_RECORD_MISC_KERNEL;
  1396 + }
  1397 +
  1398 + return misc;
1362 1399 }
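
The perf_guest_cbs pointer consulted above is populated by a hypervisor through the generic perf API. A hypothetical hypervisor-side sketch (callback names are placeholders, not from this commit):

    /* Hypothetical sketch: registering the callbacks consulted by
     * perf_misc_flags()/perf_instruction_pointer() above. */
    static struct perf_guest_info_callbacks example_guest_cbs = {
    	.is_in_guest	= example_is_in_guest,
    	.is_user_mode	= example_is_guest_user_mode,
    	.get_guest_ip	= example_get_guest_ip,
    };

    static int __init example_guest_cbs_init(void)
    {
    	return perf_register_guest_info_callbacks(&example_guest_cbs);
    }
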
arch/arm64/kernel/process.c
... ... @@ -45,9 +45,10 @@
45 45  
46 46 #include <asm/compat.h>
47 47 #include <asm/cacheflush.h>
  48 +#include <asm/fpsimd.h>
  49 +#include <asm/mmu_context.h>
48 50 #include <asm/processor.h>
49 51 #include <asm/stacktrace.h>
50   -#include <asm/fpsimd.h>
51 52  
52 53 static void setup_restart(void)
53 54 {
... ... @@ -314,6 +315,7 @@
314 315 /* the actual thread switch */
315 316 last = cpu_switch_to(prev, next);
316 317  
  318 + contextidr_thread_switch(next);
317 319 return last;
318 320 }
319 321  
arch/arm64/kernel/psci.c
  1 +/*
  2 + * This program is free software; you can redistribute it and/or modify
  3 + * it under the terms of the GNU General Public License version 2 as
  4 + * published by the Free Software Foundation.
  5 + *
  6 + * This program is distributed in the hope that it will be useful,
  7 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  8 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9 + * GNU General Public License for more details.
  10 + *
  11 + * Copyright (C) 2013 ARM Limited
  12 + *
  13 + * Author: Will Deacon <will.deacon@arm.com>
  14 + */
  15 +
  16 +#define pr_fmt(fmt) "psci: " fmt
  17 +
  18 +#include <linux/init.h>
  19 +#include <linux/of.h>
  20 +
  21 +#include <asm/compiler.h>
  22 +#include <asm/errno.h>
  23 +#include <asm/psci.h>
  24 +
  25 +struct psci_operations psci_ops;
  26 +
  27 +static int (*invoke_psci_fn)(u64, u64, u64, u64);
  28 +
  29 +enum psci_function {
  30 + PSCI_FN_CPU_SUSPEND,
  31 + PSCI_FN_CPU_ON,
  32 + PSCI_FN_CPU_OFF,
  33 + PSCI_FN_MIGRATE,
  34 + PSCI_FN_MAX,
  35 +};
  36 +
  37 +static u32 psci_function_id[PSCI_FN_MAX];
  38 +
  39 +#define PSCI_RET_SUCCESS 0
  40 +#define PSCI_RET_EOPNOTSUPP -1
  41 +#define PSCI_RET_EINVAL -2
  42 +#define PSCI_RET_EPERM -3
  43 +
  44 +static int psci_to_linux_errno(int errno)
  45 +{
  46 + switch (errno) {
  47 + case PSCI_RET_SUCCESS:
  48 + return 0;
  49 + case PSCI_RET_EOPNOTSUPP:
  50 + return -EOPNOTSUPP;
  51 + case PSCI_RET_EINVAL:
  52 + return -EINVAL;
  53 + case PSCI_RET_EPERM:
  54 + return -EPERM;
  55 + };
  56 +
  57 + return -EINVAL;
  58 +}
  59 +
  60 +#define PSCI_POWER_STATE_ID_MASK 0xffff
  61 +#define PSCI_POWER_STATE_ID_SHIFT 0
  62 +#define PSCI_POWER_STATE_TYPE_MASK 0x1
  63 +#define PSCI_POWER_STATE_TYPE_SHIFT 16
  64 +#define PSCI_POWER_STATE_AFFL_MASK 0x3
  65 +#define PSCI_POWER_STATE_AFFL_SHIFT 24
  66 +
  67 +static u32 psci_power_state_pack(struct psci_power_state state)
  68 +{
  69 + return ((state.id & PSCI_POWER_STATE_ID_MASK)
  70 + << PSCI_POWER_STATE_ID_SHIFT) |
  71 + ((state.type & PSCI_POWER_STATE_TYPE_MASK)
  72 + << PSCI_POWER_STATE_TYPE_SHIFT) |
  73 + ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK)
  74 + << PSCI_POWER_STATE_AFFL_SHIFT);
  75 +}
  76 +
  77 +/*
  78 + * The following two functions are invoked via the invoke_psci_fn pointer
  79 + * and will not be inlined, allowing us to piggyback on the AAPCS.
  80 + */
  81 +static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1,
  82 + u64 arg2)
  83 +{
  84 + asm volatile(
  85 + __asmeq("%0", "x0")
  86 + __asmeq("%1", "x1")
  87 + __asmeq("%2", "x2")
  88 + __asmeq("%3", "x3")
  89 + "hvc #0\n"
  90 + : "+r" (function_id)
  91 + : "r" (arg0), "r" (arg1), "r" (arg2));
  92 +
  93 + return function_id;
  94 +}
  95 +
  96 +static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1,
  97 + u64 arg2)
  98 +{
  99 + asm volatile(
  100 + __asmeq("%0", "x0")
  101 + __asmeq("%1", "x1")
  102 + __asmeq("%2", "x2")
  103 + __asmeq("%3", "x3")
  104 + "smc #0\n"
  105 + : "+r" (function_id)
  106 + : "r" (arg0), "r" (arg1), "r" (arg2));
  107 +
  108 + return function_id;
  109 +}
  110 +
  111 +static int psci_cpu_suspend(struct psci_power_state state,
  112 + unsigned long entry_point)
  113 +{
  114 + int err;
  115 + u32 fn, power_state;
  116 +
  117 + fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
  118 + power_state = psci_power_state_pack(state);
  119 + err = invoke_psci_fn(fn, power_state, entry_point, 0);
  120 + return psci_to_linux_errno(err);
  121 +}
  122 +
  123 +static int psci_cpu_off(struct psci_power_state state)
  124 +{
  125 + int err;
  126 + u32 fn, power_state;
  127 +
  128 + fn = psci_function_id[PSCI_FN_CPU_OFF];
  129 + power_state = psci_power_state_pack(state);
  130 + err = invoke_psci_fn(fn, power_state, 0, 0);
  131 + return psci_to_linux_errno(err);
  132 +}
  133 +
  134 +static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
  135 +{
  136 + int err;
  137 + u32 fn;
  138 +
  139 + fn = psci_function_id[PSCI_FN_CPU_ON];
  140 + err = invoke_psci_fn(fn, cpuid, entry_point, 0);
  141 + return psci_to_linux_errno(err);
  142 +}
  143 +
  144 +static int psci_migrate(unsigned long cpuid)
  145 +{
  146 + int err;
  147 + u32 fn;
  148 +
  149 + fn = psci_function_id[PSCI_FN_MIGRATE];
  150 + err = invoke_psci_fn(fn, cpuid, 0, 0);
  151 + return psci_to_linux_errno(err);
  152 +}
  153 +
  154 +static const struct of_device_id psci_of_match[] __initconst = {
  155 + { .compatible = "arm,psci", },
  156 + {},
  157 +};
  158 +
  159 +int __init psci_init(void)
  160 +{
  161 + struct device_node *np;
  162 + const char *method;
  163 + u32 id;
  164 + int err = 0;
  165 +
  166 + np = of_find_matching_node(NULL, psci_of_match);
  167 + if (!np)
  168 + return -ENODEV;
  169 +
  170 + pr_info("probing function IDs from device-tree\n");
  171 +
  172 + if (of_property_read_string(np, "method", &method)) {
  173 + pr_warning("missing \"method\" property\n");
  174 + err = -ENXIO;
  175 + goto out_put_node;
  176 + }
  177 +
  178 + if (!strcmp("hvc", method)) {
  179 + invoke_psci_fn = __invoke_psci_fn_hvc;
  180 + } else if (!strcmp("smc", method)) {
  181 + invoke_psci_fn = __invoke_psci_fn_smc;
  182 + } else {
  183 + pr_warning("invalid \"method\" property: %s\n", method);
  184 + err = -EINVAL;
  185 + goto out_put_node;
  186 + }
  187 +
  188 + if (!of_property_read_u32(np, "cpu_suspend", &id)) {
  189 + psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
  190 + psci_ops.cpu_suspend = psci_cpu_suspend;
  191 + }
  192 +
  193 + if (!of_property_read_u32(np, "cpu_off", &id)) {
  194 + psci_function_id[PSCI_FN_CPU_OFF] = id;
  195 + psci_ops.cpu_off = psci_cpu_off;
  196 + }
  197 +
  198 + if (!of_property_read_u32(np, "cpu_on", &id)) {
  199 + psci_function_id[PSCI_FN_CPU_ON] = id;
  200 + psci_ops.cpu_on = psci_cpu_on;
  201 + }
  202 +
  203 + if (!of_property_read_u32(np, "migrate", &id)) {
  204 + psci_function_id[PSCI_FN_MIGRATE] = id;
  205 + psci_ops.migrate = psci_migrate;
  206 + }
  207 +
  208 +out_put_node:
  209 + of_node_put(np);
  210 + return err;
  211 +}
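
psci_init() above probes a device-tree node of the following shape. This is an illustrative sketch only; the function ID values are firmware-specific placeholders, not taken from this commit:

    psci {
    	compatible	= "arm,psci";
    	method		= "smc";
    	cpu_suspend	= <0x95c10000>;
    	cpu_off		= <0x95c10001>;
    	cpu_on		= <0x95c10002>;
    	migrate		= <0x95c10003>;
    };
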
arch/arm64/kernel/setup.c
... ... @@ -39,6 +39,7 @@
39 39 #include <linux/proc_fs.h>
40 40 #include <linux/memblock.h>
41 41 #include <linux/of_fdt.h>
  42 +#include <linux/of_platform.h>
42 43  
43 44 #include <asm/cputype.h>
44 45 #include <asm/elf.h>
... ... @@ -49,6 +50,7 @@
49 50 #include <asm/tlbflush.h>
50 51 #include <asm/traps.h>
51 52 #include <asm/memblock.h>
  53 +#include <asm/psci.h>
52 54  
53 55 unsigned int processor_id;
54 56 EXPORT_SYMBOL(processor_id);
... ... @@ -260,6 +262,8 @@
260 262  
261 263 unflatten_device_tree();
262 264  
  265 + psci_init();
  266 +
263 267 #ifdef CONFIG_SMP
264 268 smp_init_cpus();
265 269 #endif
... ... @@ -288,6 +292,13 @@
288 292 return 0;
289 293 }
290 294 subsys_initcall(topology_init);
  295 +
  296 +static int __init arm64_device_probe(void)
  297 +{
  298 + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
  299 + return 0;
  300 +}
  301 +device_initcall(arm64_device_probe);
291 302  
292 303 static const char *hwcap_str[] = {
293 304 "fp",
arch/arm64/kernel/signal32.c
... ... @@ -76,7 +76,7 @@
76 76  
77 77 struct compat_ucontext {
78 78 compat_ulong_t uc_flags;
79   - struct compat_ucontext *uc_link;
  79 + compat_uptr_t uc_link;
80 80 compat_stack_t uc_stack;
81 81 struct compat_sigcontext uc_mcontext;
82 82 compat_sigset_t uc_sigmask;
... ... @@ -703,7 +703,7 @@
703 703 err |= copy_siginfo_to_user32(&frame->info, info);
704 704  
705 705 __put_user_error(0, &frame->sig.uc.uc_flags, err);
706   - __put_user_error(NULL, &frame->sig.uc.uc_link, err);
  706 + __put_user_error(0, &frame->sig.uc.uc_link, err);
707 707  
708 708 memset(&stack, 0, sizeof(stack));
709 709 stack.ss_sp = (compat_uptr_t)current->sas_ss_sp;
arch/arm64/kernel/smp.c
... ... @@ -233,8 +233,29 @@
233 233 }
234 234  
235 235 static void (*smp_cross_call)(const struct cpumask *, unsigned int);
236   -static phys_addr_t cpu_release_addr[NR_CPUS];
237 236  
  237 +static const struct smp_enable_ops *enable_ops[] __initconst = {
  238 + &smp_spin_table_ops,
  239 + &smp_psci_ops,
  240 + NULL,
  241 +};
  242 +
  243 +static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
  244 +
  245 +static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
  246 +{
  247 + const struct smp_enable_ops *ops = enable_ops[0];
  248 +
  249 + while (ops) {
  250 + if (!strcmp(name, ops->name))
  251 + return ops;
  252 +
  253 + ops++;
  254 + }
  255 +
  256 + return NULL;
  257 +}
  258 +
238 259 /*
239 260 * Enumerate the possible CPU set from the device tree.
240 261 */
241 262  
242 263  
... ... @@ -252,22 +273,22 @@
252 273 * We currently support only the "spin-table" enable-method.
253 274 */
254 275 enable_method = of_get_property(dn, "enable-method", NULL);
255   - if (!enable_method || strcmp(enable_method, "spin-table")) {
256   - pr_err("CPU %d: missing or invalid enable-method property: %s\n",
257   - cpu, enable_method);
  276 + if (!enable_method) {
  277 + pr_err("CPU %d: missing enable-method property\n", cpu);
258 278 goto next;
259 279 }
260 280  
261   - /*
262   - * Determine the address from which the CPU is polling.
263   - */
264   - if (of_property_read_u64(dn, "cpu-release-addr",
265   - &cpu_release_addr[cpu])) {
266   - pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
267   - cpu);
  281 + smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
  282 +
  283 + if (!smp_enable_ops[cpu]) {
  284 + pr_err("CPU %d: invalid enable-method property: %s\n",
  285 + cpu, enable_method);
268 286 goto next;
269 287 }
270 288  
  289 + if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
  290 + goto next;
  291 +
271 292 set_cpu_possible(cpu, true);
272 293 next:
273 294 cpu++;
... ... @@ -281,8 +302,7 @@
281 302  
282 303 void __init smp_prepare_cpus(unsigned int max_cpus)
283 304 {
284   - int cpu;
285   - void **release_addr;
  305 + int cpu, err;
286 306 unsigned int ncores = num_possible_cpus();
287 307  
288 308 /*
289 309  
290 310  
291 311  
292 312  
293 313  
294 314  
... ... @@ -291,30 +311,35 @@
291 311 if (max_cpus > ncores)
292 312 max_cpus = ncores;
293 313  
  314 + /* Don't bother if we're effectively UP */
  315 + if (max_cpus <= 1)
  316 + return;
  317 +
294 318 /*
295 319 * Initialise the present map (which describes the set of CPUs
296 320 * actually populated at the present time) and release the
297 321 * secondaries from the bootloader.
  322 + *
  323 + * Make sure we online at most (max_cpus - 1) additional CPUs.
298 324 */
  325 + max_cpus--;
299 326 for_each_possible_cpu(cpu) {
300 327 if (max_cpus == 0)
301 328 break;
302 329  
303   - if (!cpu_release_addr[cpu])
  330 + if (cpu == smp_processor_id())
304 331 continue;
305 332  
306   - release_addr = __va(cpu_release_addr[cpu]);
307   - release_addr[0] = (void *)__pa(secondary_holding_pen);
308   - __flush_dcache_area(release_addr, sizeof(release_addr[0]));
  333 + if (!smp_enable_ops[cpu])
  334 + continue;
309 335  
  336 + err = smp_enable_ops[cpu]->prepare_cpu(cpu);
  337 + if (err)
  338 + continue;
  339 +
310 340 set_cpu_present(cpu, true);
311 341 max_cpus--;
312 342 }
313   -
314   - /*
315   - * Send an event to wake up the secondaries.
316   - */
317   - sev();
318 343 }
319 344  
320 345  
arch/arm64/kernel/smp_psci.c
  1 +/*
  2 + * PSCI SMP initialisation
  3 + *
  4 + * Copyright (C) 2013 ARM Ltd.
  5 + *
  6 + * This program is free software; you can redistribute it and/or modify
  7 + * it under the terms of the GNU General Public License version 2 as
  8 + * published by the Free Software Foundation.
  9 + *
  10 + * This program is distributed in the hope that it will be useful,
  11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13 + * GNU General Public License for more details.
  14 + *
  15 + * You should have received a copy of the GNU General Public License
  16 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17 + */
  18 +
  19 +#include <linux/init.h>
  20 +#include <linux/of.h>
  21 +#include <linux/smp.h>
  22 +
  23 +#include <asm/psci.h>
  24 +
  25 +static int __init smp_psci_init_cpu(struct device_node *dn, int cpu)
  26 +{
  27 + return 0;
  28 +}
  29 +
  30 +static int __init smp_psci_prepare_cpu(int cpu)
  31 +{
  32 + int err;
  33 +
  34 + if (!psci_ops.cpu_on) {
  35 + pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu);
  36 + return -ENODEV;
  37 + }
  38 +
  39 + err = psci_ops.cpu_on(cpu, __pa(secondary_holding_pen));
  40 + if (err) {
  41 + pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
  42 + return err;
  43 + }
  44 +
  45 + return 0;
  46 +}
  47 +
  48 +const struct smp_enable_ops smp_psci_ops __initconst = {
  49 + .name = "psci",
  50 + .init_cpu = smp_psci_init_cpu,
  51 + .prepare_cpu = smp_psci_prepare_cpu,
  52 +};
arch/arm64/kernel/smp_spin_table.c
  1 +/*
  2 + * Spin Table SMP initialisation
  3 + *
  4 + * Copyright (C) 2013 ARM Ltd.
  5 + *
  6 + * This program is free software; you can redistribute it and/or modify
  7 + * it under the terms of the GNU General Public License version 2 as
  8 + * published by the Free Software Foundation.
  9 + *
  10 + * This program is distributed in the hope that it will be useful,
  11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13 + * GNU General Public License for more details.
  14 + *
  15 + * You should have received a copy of the GNU General Public License
  16 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17 + */
  18 +
  19 +#include <linux/init.h>
  20 +#include <linux/of.h>
  21 +#include <linux/smp.h>
  22 +
  23 +#include <asm/cacheflush.h>
  24 +
  25 +static phys_addr_t cpu_release_addr[NR_CPUS];
  26 +
  27 +static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
  28 +{
  29 + /*
  30 + * Determine the address from which the CPU is polling.
  31 + */
  32 + if (of_property_read_u64(dn, "cpu-release-addr",
  33 + &cpu_release_addr[cpu])) {
  34 + pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
  35 + cpu);
  36 +
  37 + return -1;
  38 + }
  39 +
  40 + return 0;
  41 +}
  42 +
  43 +static int __init smp_spin_table_prepare_cpu(int cpu)
  44 +{
  45 + void **release_addr;
  46 +
  47 + if (!cpu_release_addr[cpu])
  48 + return -ENODEV;
  49 +
  50 + release_addr = __va(cpu_release_addr[cpu]);
  51 + release_addr[0] = (void *)__pa(secondary_holding_pen);
  52 + __flush_dcache_area(release_addr, sizeof(release_addr[0]));
  53 +
  54 + /*
  55 + * Send an event to wake up the secondary CPU.
  56 + */
  57 + sev();
  58 +
  59 + return 0;
  60 +}
  61 +
  62 +const struct smp_enable_ops smp_spin_table_ops __initconst = {
  63 + .name = "spin-table",
  64 + .init_cpu = smp_spin_table_init_cpu,
  65 + .prepare_cpu = smp_spin_table_prepare_cpu,
  66 +};
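
For illustration, the per-cpu device-tree properties consumed above ("enable-method" matched in smp.c, "cpu-release-addr" read in smp_spin_table_init_cpu) look like this; all values are placeholders, not taken from this commit:

    cpu@1 {
    	device_type		= "cpu";
    	compatible		= "arm,armv8";
    	reg			= <1>;
    	enable-method		= "spin-table";
    	cpu-release-addr	= <0x0 0x8000fff8>;
    };
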
arch/arm64/mm/mmu.c
... ... @@ -25,6 +25,7 @@
25 25 #include <linux/nodemask.h>
26 26 #include <linux/memblock.h>
27 27 #include <linux/fs.h>
  28 +#include <linux/io.h>
28 29  
29 30 #include <asm/cputype.h>
30 31 #include <asm/sections.h>
... ... @@ -250,6 +251,47 @@
250 251 phys += next - addr;
251 252 } while (pgd++, addr = next, addr != end);
252 253 }
  254 +
  255 +#ifdef CONFIG_EARLY_PRINTK
  256 +/*
  257 + * Create an early I/O mapping using the pgd/pmd entries already populated
  258 + * in head.S as this function is called too early to allocate any memory. The
  259 + * mapping size is 2MB with 4KB pages or 64KB with 64KB pages.
  260 + */
  261 +void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
  262 +{
  263 + unsigned long size, mask;
  264 + bool page64k = IS_ENABLED(ARM64_64K_PAGES);
  265 + pgd_t *pgd;
  266 + pud_t *pud;
  267 + pmd_t *pmd;
  268 + pte_t *pte;
  269 +
  270 + /*
  271 + * No early pte entries with !ARM64_64K_PAGES configuration, so using
  272 + * sections (pmd).
  273 + */
  274 + size = page64k ? PAGE_SIZE : SECTION_SIZE;
  275 + mask = ~(size - 1);
  276 +
  277 + pgd = pgd_offset_k(virt);
  278 + pud = pud_offset(pgd, virt);
  279 + if (pud_none(*pud))
  280 + return NULL;
  281 + pmd = pmd_offset(pud, virt);
  282 +
  283 + if (page64k) {
  284 + if (pmd_none(*pmd))
  285 + return NULL;
  286 + pte = pte_offset_kernel(pmd, virt);
  287 + set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
  288 + } else {
  289 + set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
  290 + }
  291 +
  292 + return (void __iomem *)((virt & mask) + (phys & ~mask));
  293 +}
  294 +#endif
253 295  
254 296 static void __init map_mem(void)
255 297 {