Commit 79a69d342d71b2b4eafdf51e2451606cfe380a44
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
Merge tag 'arm64-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64
Pull arm64 patches from Catalin Marinas:

 - SMP support for the PSCI booting protocol (power state coordination
   interface).
 - Simple earlyprintk support.
 - Platform devices populated by default from the DT (SoC-agnostic).
 - CONTEXTIDR support (used by external trace tools).

* tag 'arm64-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64:
  arm64: mm: update CONTEXTIDR register to contain PID of current process
  arm64: atomics: fix grossly inconsistent asm constraints for exclusives
  arm64: compat: use compat_uptr_t type for compat_ucontext.uc_link
  arm64: Select ARCH_WANT_FRAME_POINTERS
  arm64: Add kvm_para.h and xor.h generic headers
  arm64: SMP: enable PSCI boot method
  arm64: psci: add support for PSCI invocations from the kernel
  arm64: SMP: rework the SMP code to be enabling method agnostic
  arm64: perf: add guest vs host discrimination
  arm64: add COMPAT_PSR_*_BIT flags
  arm64: Add simple earlyprintk support
  arm64: Populate the platform devices
Showing 29 changed files:
- Documentation/arm64/memory.txt
- arch/arm64/Kconfig
- arch/arm64/Kconfig.debug
- arch/arm64/include/asm/Kbuild
- arch/arm64/include/asm/atomic.h
- arch/arm64/include/asm/cmpxchg.h
- arch/arm64/include/asm/futex.h
- arch/arm64/include/asm/io.h
- arch/arm64/include/asm/memory.h
- arch/arm64/include/asm/mmu.h
- arch/arm64/include/asm/mmu_context.h
- arch/arm64/include/asm/perf_event.h
- arch/arm64/include/asm/psci.h
- arch/arm64/include/asm/ptrace.h
- arch/arm64/include/asm/smp.h
- arch/arm64/include/asm/spinlock.h
- arch/arm64/include/uapi/asm/Kbuild
- arch/arm64/kernel/Makefile
- arch/arm64/kernel/early_printk.c
- arch/arm64/kernel/head.S
- arch/arm64/kernel/perf_event.c
- arch/arm64/kernel/process.c
- arch/arm64/kernel/psci.c
- arch/arm64/kernel/setup.c
- arch/arm64/kernel/signal32.c
- arch/arm64/kernel/smp.c
- arch/arm64/kernel/smp_psci.c
- arch/arm64/kernel/smp_spin_table.c
- arch/arm64/mm/mmu.c
Documentation/arm64/memory.txt
1 | Memory Layout on AArch64 Linux | 1 | Memory Layout on AArch64 Linux |
2 | ============================== | 2 | ============================== |
3 | 3 | ||
4 | Author: Catalin Marinas <catalin.marinas@arm.com> | 4 | Author: Catalin Marinas <catalin.marinas@arm.com> |
5 | Date : 20 February 2012 | 5 | Date : 20 February 2012 |
6 | 6 | ||
7 | This document describes the virtual memory layout used by the AArch64 | 7 | This document describes the virtual memory layout used by the AArch64 |
8 | Linux kernel. The architecture allows up to 4 levels of translation | 8 | Linux kernel. The architecture allows up to 4 levels of translation |
9 | tables with a 4KB page size and up to 3 levels with a 64KB page size. | 9 | tables with a 4KB page size and up to 3 levels with a 64KB page size. |
10 | 10 | ||
11 | AArch64 Linux uses 3 levels of translation tables with the 4KB page | 11 | AArch64 Linux uses 3 levels of translation tables with the 4KB page |
12 | configuration, allowing 39-bit (512GB) virtual addresses for both user | 12 | configuration, allowing 39-bit (512GB) virtual addresses for both user |
13 | and kernel. With 64KB pages, only 2 levels of translation tables are | 13 | and kernel. With 64KB pages, only 2 levels of translation tables are |
14 | used but the memory layout is the same. | 14 | used but the memory layout is the same. |
15 | 15 | ||
16 | User addresses have bits 63:39 set to 0 while the kernel addresses have | 16 | User addresses have bits 63:39 set to 0 while the kernel addresses have |
17 | the same bits set to 1. TTBRx selection is given by bit 63 of the | 17 | the same bits set to 1. TTBRx selection is given by bit 63 of the |
18 | virtual address. The swapper_pg_dir contains only kernel (global) | 18 | virtual address. The swapper_pg_dir contains only kernel (global) |
19 | mappings while the user pgd contains only user (non-global) mappings. | 19 | mappings while the user pgd contains only user (non-global) mappings. |
20 | The swapper_pg_dir address is written to TTBR1 and never written to | 20 | The swapper_pg_dir address is written to TTBR1 and never written to |
21 | TTBR0. | 21 | TTBR0. |
22 | 22 | ||
23 | 23 | ||
24 | AArch64 Linux memory layout: | 24 | AArch64 Linux memory layout: |
25 | 25 | ||
26 | Start End Size Use | 26 | Start End Size Use |
27 | ----------------------------------------------------------------------- | 27 | ----------------------------------------------------------------------- |
28 | 0000000000000000 0000007fffffffff 512GB user | 28 | 0000000000000000 0000007fffffffff 512GB user |
29 | 29 | ||
30 | ffffff8000000000 ffffffbbfffeffff ~240GB vmalloc | 30 | ffffff8000000000 ffffffbbfffeffff ~240GB vmalloc |
31 | 31 | ||
32 | ffffffbbffff0000 ffffffbbffffffff 64KB [guard page] | 32 | ffffffbbffff0000 ffffffbbffffffff 64KB [guard page] |
33 | 33 | ||
34 | ffffffbc00000000 ffffffbdffffffff 8GB vmemmap | 34 | ffffffbc00000000 ffffffbdffffffff 8GB vmemmap |
35 | 35 | ||
36 | ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmemmap] | 36 | ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmemmap] |
37 | 37 | ||
38 | ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk device | ||
39 | |||
38 | ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space | 40 | ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space |
39 | 41 | ||
40 | ffffffbffbe10000 ffffffbffbffffff ~2MB [guard] | 42 | ffffffbffbe10000 ffffffbffbffffff ~2MB [guard] |
41 | 43 | ||
42 | ffffffbffc000000 ffffffbfffffffff 64MB modules | 44 | ffffffbffc000000 ffffffbfffffffff 64MB modules |
43 | 45 | ||
44 | ffffffc000000000 ffffffffffffffff 256GB kernel logical memory map | 46 | ffffffc000000000 ffffffffffffffff 256GB kernel logical memory map |
45 | 47 | ||
46 | 48 | ||
47 | Translation table lookup with 4KB pages: | 49 | Translation table lookup with 4KB pages: |
48 | 50 | ||
49 | +--------+--------+--------+--------+--------+--------+--------+--------+ | 51 | +--------+--------+--------+--------+--------+--------+--------+--------+ |
50 | |63 56|55 48|47 40|39 32|31 24|23 16|15 8|7 0| | 52 | |63 56|55 48|47 40|39 32|31 24|23 16|15 8|7 0| |
51 | +--------+--------+--------+--------+--------+--------+--------+--------+ | 53 | +--------+--------+--------+--------+--------+--------+--------+--------+ |
52 | | | | | | | | 54 | | | | | | | |
53 | | | | | | v | 55 | | | | | | v |
54 | | | | | | [11:0] in-page offset | 56 | | | | | | [11:0] in-page offset |
55 | | | | | +-> [20:12] L3 index | 57 | | | | | +-> [20:12] L3 index |
56 | | | | +-----------> [29:21] L2 index | 58 | | | | +-----------> [29:21] L2 index |
57 | | | +---------------------> [38:30] L1 index | 59 | | | +---------------------> [38:30] L1 index |
58 | | +-------------------------------> [47:39] L0 index (not used) | 60 | | +-------------------------------> [47:39] L0 index (not used) |
59 | +-------------------------------------------------> [63] TTBR0/1 | 61 | +-------------------------------------------------> [63] TTBR0/1 |
60 | 62 | ||
61 | 63 | ||
62 | Translation table lookup with 64KB pages: | 64 | Translation table lookup with 64KB pages: |
63 | 65 | ||
64 | +--------+--------+--------+--------+--------+--------+--------+--------+ | 66 | +--------+--------+--------+--------+--------+--------+--------+--------+ |
65 | |63 56|55 48|47 40|39 32|31 24|23 16|15 8|7 0| | 67 | |63 56|55 48|47 40|39 32|31 24|23 16|15 8|7 0| |
66 | +--------+--------+--------+--------+--------+--------+--------+--------+ | 68 | +--------+--------+--------+--------+--------+--------+--------+--------+ |
67 | | | | | | | 69 | | | | | | |
68 | | | | | v | 70 | | | | | v |
69 | | | | | [15:0] in-page offset | 71 | | | | | [15:0] in-page offset |
70 | | | | +----------> [28:16] L3 index | 72 | | | | +----------> [28:16] L3 index |
71 | | | +--------------------------> [41:29] L2 index (only 38:29 used) | 73 | | | +--------------------------> [41:29] L2 index (only 38:29 used) |
72 | | +-------------------------------> [47:42] L1 index (not used) | 74 | | +-------------------------------> [47:42] L1 index (not used) |
73 | +-------------------------------------------------> [63] TTBR0/1 | 75 | +-------------------------------------------------> [63] TTBR0/1 |
74 | 76 |
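
The 4KB-page diagram above maps directly onto a few shifts and masks. As a quick illustration, here is a minimal standalone sketch (plain C, not kernel code; the sample address is just the PCI I/O base from the table above) that decodes a virtual address into the indices shown:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Example kernel VA: start of the PCI I/O space listed above. */
            uint64_t va = 0xffffffbffbe00000ULL;

            uint64_t off = va & 0xfffULL;          /* [11:0]  in-page offset       */
            uint64_t l3  = (va >> 12) & 0x1ffULL;  /* [20:12] L3 index (512 slots) */
            uint64_t l2  = (va >> 21) & 0x1ffULL;  /* [29:21] L2 index             */
            uint64_t l1  = (va >> 30) & 0x1ffULL;  /* [38:30] L1 index             */
            unsigned ttbr = (unsigned)(va >> 63);  /* [63] selects TTBR0 or TTBR1  */

            printf("TTBR%u L1=%llu L2=%llu L3=%llu offset=0x%llx\n", ttbr,
                   (unsigned long long)l1, (unsigned long long)l2,
                   (unsigned long long)l3, (unsigned long long)off);
            return 0;
    }

With 64KB pages the same idea applies, but with a 16-bit in-page offset ([15:0]) and 13-bit index fields, as the second diagram shows.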
arch/arm64/Kconfig
1 | config ARM64 | 1 | config ARM64 |
2 | def_bool y | 2 | def_bool y |
3 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | 3 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
4 | select ARCH_WANT_COMPAT_IPC_PARSE_VERSION | 4 | select ARCH_WANT_COMPAT_IPC_PARSE_VERSION |
5 | select ARCH_WANT_FRAME_POINTERS | ||
5 | select ARM_AMBA | 6 | select ARM_AMBA |
6 | select CLONE_BACKWARDS | 7 | select CLONE_BACKWARDS |
7 | select COMMON_CLK | 8 | select COMMON_CLK |
8 | select GENERIC_CLOCKEVENTS | 9 | select GENERIC_CLOCKEVENTS |
9 | select GENERIC_HARDIRQS_NO_DEPRECATED | 10 | select GENERIC_HARDIRQS_NO_DEPRECATED |
10 | select GENERIC_IOMAP | 11 | select GENERIC_IOMAP |
11 | select GENERIC_IRQ_PROBE | 12 | select GENERIC_IRQ_PROBE |
12 | select GENERIC_IRQ_SHOW | 13 | select GENERIC_IRQ_SHOW |
13 | select GENERIC_SMP_IDLE_THREAD | 14 | select GENERIC_SMP_IDLE_THREAD |
14 | select GENERIC_TIME_VSYSCALL | 15 | select GENERIC_TIME_VSYSCALL |
15 | select HARDIRQS_SW_RESEND | 16 | select HARDIRQS_SW_RESEND |
16 | select HAVE_ARCH_TRACEHOOK | 17 | select HAVE_ARCH_TRACEHOOK |
17 | select HAVE_DEBUG_BUGVERBOSE | 18 | select HAVE_DEBUG_BUGVERBOSE |
18 | select HAVE_DEBUG_KMEMLEAK | 19 | select HAVE_DEBUG_KMEMLEAK |
19 | select HAVE_DMA_API_DEBUG | 20 | select HAVE_DMA_API_DEBUG |
20 | select HAVE_DMA_ATTRS | 21 | select HAVE_DMA_ATTRS |
21 | select HAVE_GENERIC_DMA_COHERENT | 22 | select HAVE_GENERIC_DMA_COHERENT |
22 | select HAVE_GENERIC_HARDIRQS | 23 | select HAVE_GENERIC_HARDIRQS |
23 | select HAVE_HW_BREAKPOINT if PERF_EVENTS | 24 | select HAVE_HW_BREAKPOINT if PERF_EVENTS |
24 | select HAVE_MEMBLOCK | 25 | select HAVE_MEMBLOCK |
25 | select HAVE_PERF_EVENTS | 26 | select HAVE_PERF_EVENTS |
26 | select IRQ_DOMAIN | 27 | select IRQ_DOMAIN |
27 | select MODULES_USE_ELF_RELA | 28 | select MODULES_USE_ELF_RELA |
28 | select NO_BOOTMEM | 29 | select NO_BOOTMEM |
29 | select OF | 30 | select OF |
30 | select OF_EARLY_FLATTREE | 31 | select OF_EARLY_FLATTREE |
31 | select PERF_USE_VMALLOC | 32 | select PERF_USE_VMALLOC |
32 | select RTC_LIB | 33 | select RTC_LIB |
33 | select SPARSE_IRQ | 34 | select SPARSE_IRQ |
34 | select SYSCTL_EXCEPTION_TRACE | 35 | select SYSCTL_EXCEPTION_TRACE |
35 | help | 36 | help |
36 | ARM 64-bit (AArch64) Linux support. | 37 | ARM 64-bit (AArch64) Linux support. |
37 | 38 | ||
38 | config 64BIT | 39 | config 64BIT |
39 | def_bool y | 40 | def_bool y |
40 | 41 | ||
41 | config ARCH_PHYS_ADDR_T_64BIT | 42 | config ARCH_PHYS_ADDR_T_64BIT |
42 | def_bool y | 43 | def_bool y |
43 | 44 | ||
44 | config MMU | 45 | config MMU |
45 | def_bool y | 46 | def_bool y |
46 | 47 | ||
47 | config NO_IOPORT | 48 | config NO_IOPORT |
48 | def_bool y | 49 | def_bool y |
49 | 50 | ||
50 | config STACKTRACE_SUPPORT | 51 | config STACKTRACE_SUPPORT |
51 | def_bool y | 52 | def_bool y |
52 | 53 | ||
53 | config LOCKDEP_SUPPORT | 54 | config LOCKDEP_SUPPORT |
54 | def_bool y | 55 | def_bool y |
55 | 56 | ||
56 | config TRACE_IRQFLAGS_SUPPORT | 57 | config TRACE_IRQFLAGS_SUPPORT |
57 | def_bool y | 58 | def_bool y |
58 | 59 | ||
59 | config GENERIC_LOCKBREAK | 60 | config GENERIC_LOCKBREAK |
60 | def_bool y | 61 | def_bool y |
61 | depends on SMP && PREEMPT | 62 | depends on SMP && PREEMPT |
62 | 63 | ||
63 | config RWSEM_GENERIC_SPINLOCK | 64 | config RWSEM_GENERIC_SPINLOCK |
64 | def_bool y | 65 | def_bool y |
65 | 66 | ||
66 | config GENERIC_HWEIGHT | 67 | config GENERIC_HWEIGHT |
67 | def_bool y | 68 | def_bool y |
68 | 69 | ||
69 | config GENERIC_CSUM | 70 | config GENERIC_CSUM |
70 | def_bool y | 71 | def_bool y |
71 | 72 | ||
72 | config GENERIC_CALIBRATE_DELAY | 73 | config GENERIC_CALIBRATE_DELAY |
73 | def_bool y | 74 | def_bool y |
74 | 75 | ||
75 | config ZONE_DMA32 | 76 | config ZONE_DMA32 |
76 | def_bool y | 77 | def_bool y |
77 | 78 | ||
78 | config ARCH_DMA_ADDR_T_64BIT | 79 | config ARCH_DMA_ADDR_T_64BIT |
79 | def_bool y | 80 | def_bool y |
80 | 81 | ||
81 | config NEED_DMA_MAP_STATE | 82 | config NEED_DMA_MAP_STATE |
82 | def_bool y | 83 | def_bool y |
83 | 84 | ||
84 | config NEED_SG_DMA_LENGTH | 85 | config NEED_SG_DMA_LENGTH |
85 | def_bool y | 86 | def_bool y |
86 | 87 | ||
87 | config SWIOTLB | 88 | config SWIOTLB |
88 | def_bool y | 89 | def_bool y |
89 | 90 | ||
90 | config IOMMU_HELPER | 91 | config IOMMU_HELPER |
91 | def_bool SWIOTLB | 92 | def_bool SWIOTLB |
92 | 93 | ||
93 | config GENERIC_GPIO | 94 | config GENERIC_GPIO |
94 | def_bool y | 95 | def_bool y |
95 | 96 | ||
96 | source "init/Kconfig" | 97 | source "init/Kconfig" |
97 | 98 | ||
98 | source "kernel/Kconfig.freezer" | 99 | source "kernel/Kconfig.freezer" |
99 | 100 | ||
100 | menu "System Type" | 101 | menu "System Type" |
101 | 102 | ||
102 | endmenu | 103 | endmenu |
103 | 104 | ||
104 | menu "Bus support" | 105 | menu "Bus support" |
105 | 106 | ||
106 | config ARM_AMBA | 107 | config ARM_AMBA |
107 | bool | 108 | bool |
108 | 109 | ||
109 | endmenu | 110 | endmenu |
110 | 111 | ||
111 | menu "Kernel Features" | 112 | menu "Kernel Features" |
112 | 113 | ||
113 | source "kernel/time/Kconfig" | 114 | source "kernel/time/Kconfig" |
114 | 115 | ||
115 | config ARM64_64K_PAGES | 116 | config ARM64_64K_PAGES |
116 | bool "Enable 64KB pages support" | 117 | bool "Enable 64KB pages support" |
117 | help | 118 | help |
118 | This feature enables 64KB pages support (4KB by default) | 119 | This feature enables 64KB pages support (4KB by default) |
119 | allowing only two levels of page tables and faster TLB | 120 | allowing only two levels of page tables and faster TLB |
120 | look-up. AArch32 emulation is not available when this feature | 121 | look-up. AArch32 emulation is not available when this feature |
121 | is enabled. | 122 | is enabled. |
122 | 123 | ||
123 | config SMP | 124 | config SMP |
124 | bool "Symmetric Multi-Processing" | 125 | bool "Symmetric Multi-Processing" |
125 | select USE_GENERIC_SMP_HELPERS | 126 | select USE_GENERIC_SMP_HELPERS |
126 | help | 127 | help |
127 | This enables support for systems with more than one CPU. If | 128 | This enables support for systems with more than one CPU. If |
128 | you say N here, the kernel will run on single and | 129 | you say N here, the kernel will run on single and |
129 | multiprocessor machines, but will use only one CPU of a | 130 | multiprocessor machines, but will use only one CPU of a |
130 | multiprocessor machine. If you say Y here, the kernel will run | 131 | multiprocessor machine. If you say Y here, the kernel will run |
131 | on many, but not all, single processor machines. On a single | 132 | on many, but not all, single processor machines. On a single |
132 | processor machine, the kernel will run faster if you say N | 133 | processor machine, the kernel will run faster if you say N |
133 | here. | 134 | here. |
134 | 135 | ||
135 | If you don't know what to do here, say N. | 136 | If you don't know what to do here, say N. |
136 | 137 | ||
137 | config NR_CPUS | 138 | config NR_CPUS |
138 | int "Maximum number of CPUs (2-32)" | 139 | int "Maximum number of CPUs (2-32)" |
139 | range 2 32 | 140 | range 2 32 |
140 | depends on SMP | 141 | depends on SMP |
141 | default "4" | 142 | default "4" |
142 | 143 | ||
143 | source kernel/Kconfig.preempt | 144 | source kernel/Kconfig.preempt |
144 | 145 | ||
145 | config HZ | 146 | config HZ |
146 | int | 147 | int |
147 | default 100 | 148 | default 100 |
148 | 149 | ||
149 | config ARCH_HAS_HOLES_MEMORYMODEL | 150 | config ARCH_HAS_HOLES_MEMORYMODEL |
150 | def_bool y if SPARSEMEM | 151 | def_bool y if SPARSEMEM |
151 | 152 | ||
152 | config ARCH_SPARSEMEM_ENABLE | 153 | config ARCH_SPARSEMEM_ENABLE |
153 | def_bool y | 154 | def_bool y |
154 | select SPARSEMEM_VMEMMAP_ENABLE | 155 | select SPARSEMEM_VMEMMAP_ENABLE |
155 | 156 | ||
156 | config ARCH_SPARSEMEM_DEFAULT | 157 | config ARCH_SPARSEMEM_DEFAULT |
157 | def_bool ARCH_SPARSEMEM_ENABLE | 158 | def_bool ARCH_SPARSEMEM_ENABLE |
158 | 159 | ||
159 | config ARCH_SELECT_MEMORY_MODEL | 160 | config ARCH_SELECT_MEMORY_MODEL |
160 | def_bool ARCH_SPARSEMEM_ENABLE | 161 | def_bool ARCH_SPARSEMEM_ENABLE |
161 | 162 | ||
162 | config HAVE_ARCH_PFN_VALID | 163 | config HAVE_ARCH_PFN_VALID |
163 | def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM | 164 | def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM |
164 | 165 | ||
165 | config HW_PERF_EVENTS | 166 | config HW_PERF_EVENTS |
166 | bool "Enable hardware performance counter support for perf events" | 167 | bool "Enable hardware performance counter support for perf events" |
167 | depends on PERF_EVENTS | 168 | depends on PERF_EVENTS |
168 | default y | 169 | default y |
169 | help | 170 | help |
170 | Enable hardware performance counter support for perf events. If | 171 | Enable hardware performance counter support for perf events. If |
171 | disabled, perf events will use software events only. | 172 | disabled, perf events will use software events only. |
172 | 173 | ||
173 | source "mm/Kconfig" | 174 | source "mm/Kconfig" |
174 | 175 | ||
175 | endmenu | 176 | endmenu |
176 | 177 | ||
177 | menu "Boot options" | 178 | menu "Boot options" |
178 | 179 | ||
179 | config CMDLINE | 180 | config CMDLINE |
180 | string "Default kernel command string" | 181 | string "Default kernel command string" |
181 | default "" | 182 | default "" |
182 | help | 183 | help |
183 | Provide a set of default command-line options at build time by | 184 | Provide a set of default command-line options at build time by |
184 | entering them here. As a minimum, you should specify the | 185 | entering them here. As a minimum, you should specify the |
185 | root device (e.g. root=/dev/nfs). | 186 | root device (e.g. root=/dev/nfs). |
186 | 187 | ||
187 | config CMDLINE_FORCE | 188 | config CMDLINE_FORCE |
188 | bool "Always use the default kernel command string" | 189 | bool "Always use the default kernel command string" |
189 | help | 190 | help |
190 | Always use the default kernel command string, even if the boot | 191 | Always use the default kernel command string, even if the boot |
191 | loader passes other arguments to the kernel. | 192 | loader passes other arguments to the kernel. |
192 | This is useful if you cannot or don't want to change the | 193 | This is useful if you cannot or don't want to change the |
193 | command-line options your boot loader passes to the kernel. | 194 | command-line options your boot loader passes to the kernel. |
194 | 195 | ||
195 | endmenu | 196 | endmenu |
196 | 197 | ||
197 | menu "Userspace binary formats" | 198 | menu "Userspace binary formats" |
198 | 199 | ||
199 | source "fs/Kconfig.binfmt" | 200 | source "fs/Kconfig.binfmt" |
200 | 201 | ||
201 | config COMPAT | 202 | config COMPAT |
202 | bool "Kernel support for 32-bit EL0" | 203 | bool "Kernel support for 32-bit EL0" |
203 | depends on !ARM64_64K_PAGES | 204 | depends on !ARM64_64K_PAGES |
204 | select COMPAT_BINFMT_ELF | 205 | select COMPAT_BINFMT_ELF |
205 | select HAVE_UID16 | 206 | select HAVE_UID16 |
206 | help | 207 | help |
207 | This option enables support for a 32-bit EL0 running under a 64-bit | 208 | This option enables support for a 32-bit EL0 running under a 64-bit |
208 | kernel at EL1. AArch32-specific components such as system calls, | 209 | kernel at EL1. AArch32-specific components such as system calls, |
209 | the user helper functions, VFP support and the ptrace interface are | 210 | the user helper functions, VFP support and the ptrace interface are |
210 | handled appropriately by the kernel. | 211 | handled appropriately by the kernel. |
211 | 212 | ||
212 | If you want to execute 32-bit userspace applications, say Y. | 213 | If you want to execute 32-bit userspace applications, say Y. |
213 | 214 | ||
214 | config SYSVIPC_COMPAT | 215 | config SYSVIPC_COMPAT |
215 | def_bool y | 216 | def_bool y |
216 | depends on COMPAT && SYSVIPC | 217 | depends on COMPAT && SYSVIPC |
217 | 218 | ||
218 | endmenu | 219 | endmenu |
219 | 220 | ||
220 | source "net/Kconfig" | 221 | source "net/Kconfig" |
221 | 222 | ||
222 | source "drivers/Kconfig" | 223 | source "drivers/Kconfig" |
223 | 224 | ||
224 | source "fs/Kconfig" | 225 | source "fs/Kconfig" |
225 | 226 | ||
226 | source "arch/arm64/Kconfig.debug" | 227 | source "arch/arm64/Kconfig.debug" |
227 | 228 | ||
228 | source "security/Kconfig" | 229 | source "security/Kconfig" |
229 | 230 | ||
230 | source "crypto/Kconfig" | 231 | source "crypto/Kconfig" |
231 | 232 | ||
232 | source "lib/Kconfig" | 233 | source "lib/Kconfig" |
233 | 234 |
arch/arm64/Kconfig.debug
1 | menu "Kernel hacking" | 1 | menu "Kernel hacking" |
2 | 2 | ||
3 | source "lib/Kconfig.debug" | 3 | source "lib/Kconfig.debug" |
4 | 4 | ||
5 | config FRAME_POINTER | 5 | config FRAME_POINTER |
6 | bool | 6 | bool |
7 | default y | 7 | default y |
8 | 8 | ||
9 | config DEBUG_ERRORS | 9 | config DEBUG_ERRORS |
10 | bool "Verbose kernel error messages" | 10 | bool "Verbose kernel error messages" |
11 | depends on DEBUG_KERNEL | 11 | depends on DEBUG_KERNEL |
12 | help | 12 | help |
13 | This option controls verbose debugging information which can be | 13 | This option controls verbose debugging information which can be |
14 | printed when the kernel detects an internal error. This debugging | 14 | printed when the kernel detects an internal error. This debugging |
15 | information is useful to kernel hackers when tracking down problems, | 15 | information is useful to kernel hackers when tracking down problems, |
16 | but mostly meaningless to other people. It's safe to say Y unless | 16 | but mostly meaningless to other people. It's safe to say Y unless |
17 | you are concerned with the code size or don't want to see these | 17 | you are concerned with the code size or don't want to see these |
18 | messages. | 18 | messages. |
19 | 19 | ||
20 | config DEBUG_STACK_USAGE | 20 | config DEBUG_STACK_USAGE |
21 | bool "Enable stack utilization instrumentation" | 21 | bool "Enable stack utilization instrumentation" |
22 | depends on DEBUG_KERNEL | 22 | depends on DEBUG_KERNEL |
23 | help | 23 | help |
24 | Enables the display of the minimum amount of free stack which each | 24 | Enables the display of the minimum amount of free stack which each |
25 | task has ever had available in the sysrq-T output. | 25 | task has ever had available in the sysrq-T output. |
26 | 26 | ||
27 | config EARLY_PRINTK | ||
28 | bool "Early printk support" | ||
29 | default y | ||
30 | help | ||
31 | Say Y here if you want to have an early console using the | ||
32 | earlyprintk=<name>[,<addr>][,<options>] kernel parameter. It | ||
33 | is assumed that the early console device has been initialised | ||
34 | by the boot loader prior to starting the Linux kernel. | ||
35 | |||
36 | config PID_IN_CONTEXTIDR | ||
37 | bool "Write the current PID to the CONTEXTIDR register" | ||
38 | help | ||
39 | Enabling this option causes the kernel to write the current PID to | ||
40 | the CONTEXTIDR register, at the expense of some additional | ||
41 | instructions during context switch. Say Y here only if you are | ||
42 | planning to use hardware trace tools with this kernel. | ||
43 | |||
27 | endmenu | 44 | endmenu |
28 | 45 |
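
The EARLY_PRINTK help above names the parameter format earlyprintk=<name>[,<addr>][,<options>]. A hedged usage example (the MMIO address is a made-up placeholder and is platform-specific; pl011 is the UART type handled by the early_printk.c this merge adds):

    earlyprintk=pl011,0x7ff80000

The companion hunk in Documentation/arm64/memory.txt above reserves a dedicated 2MB virtual window (ffffffbffbc00000 - ffffffbffbdfffff) into which this early console device is mapped.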
arch/arm64/include/asm/Kbuild
1 | 1 | ||
2 | 2 | ||
3 | generic-y += bug.h | 3 | generic-y += bug.h |
4 | generic-y += bugs.h | 4 | generic-y += bugs.h |
5 | generic-y += checksum.h | 5 | generic-y += checksum.h |
6 | generic-y += clkdev.h | 6 | generic-y += clkdev.h |
7 | generic-y += cputime.h | 7 | generic-y += cputime.h |
8 | generic-y += current.h | 8 | generic-y += current.h |
9 | generic-y += delay.h | 9 | generic-y += delay.h |
10 | generic-y += div64.h | 10 | generic-y += div64.h |
11 | generic-y += dma.h | 11 | generic-y += dma.h |
12 | generic-y += emergency-restart.h | 12 | generic-y += emergency-restart.h |
13 | generic-y += errno.h | 13 | generic-y += errno.h |
14 | generic-y += ftrace.h | 14 | generic-y += ftrace.h |
15 | generic-y += hw_irq.h | 15 | generic-y += hw_irq.h |
16 | generic-y += ioctl.h | 16 | generic-y += ioctl.h |
17 | generic-y += ioctls.h | 17 | generic-y += ioctls.h |
18 | generic-y += ipcbuf.h | 18 | generic-y += ipcbuf.h |
19 | generic-y += irq_regs.h | 19 | generic-y += irq_regs.h |
20 | generic-y += kdebug.h | 20 | generic-y += kdebug.h |
21 | generic-y += kmap_types.h | 21 | generic-y += kmap_types.h |
22 | generic-y += kvm_para.h | ||
22 | generic-y += local.h | 23 | generic-y += local.h |
23 | generic-y += local64.h | 24 | generic-y += local64.h |
24 | generic-y += mman.h | 25 | generic-y += mman.h |
25 | generic-y += msgbuf.h | 26 | generic-y += msgbuf.h |
26 | generic-y += mutex.h | 27 | generic-y += mutex.h |
27 | generic-y += pci.h | 28 | generic-y += pci.h |
28 | generic-y += percpu.h | 29 | generic-y += percpu.h |
29 | generic-y += poll.h | 30 | generic-y += poll.h |
30 | generic-y += posix_types.h | 31 | generic-y += posix_types.h |
31 | generic-y += resource.h | 32 | generic-y += resource.h |
32 | generic-y += scatterlist.h | 33 | generic-y += scatterlist.h |
33 | generic-y += sections.h | 34 | generic-y += sections.h |
34 | generic-y += segment.h | 35 | generic-y += segment.h |
35 | generic-y += sembuf.h | 36 | generic-y += sembuf.h |
36 | generic-y += serial.h | 37 | generic-y += serial.h |
37 | generic-y += shmbuf.h | 38 | generic-y += shmbuf.h |
38 | generic-y += sizes.h | 39 | generic-y += sizes.h |
39 | generic-y += socket.h | 40 | generic-y += socket.h |
40 | generic-y += sockios.h | 41 | generic-y += sockios.h |
41 | generic-y += string.h | 42 | generic-y += string.h |
42 | generic-y += switch_to.h | 43 | generic-y += switch_to.h |
43 | generic-y += swab.h | 44 | generic-y += swab.h |
44 | generic-y += termbits.h | 45 | generic-y += termbits.h |
45 | generic-y += termios.h | 46 | generic-y += termios.h |
46 | generic-y += topology.h | 47 | generic-y += topology.h |
47 | generic-y += trace_clock.h | 48 | generic-y += trace_clock.h |
48 | generic-y += types.h | 49 | generic-y += types.h |
49 | generic-y += unaligned.h | 50 | generic-y += unaligned.h |
50 | generic-y += user.h | 51 | generic-y += user.h |
52 | generic-y += xor.h | ||
51 | 53 |
arch/arm64/include/asm/atomic.h
1 | /* | 1 | /* |
2 | * Based on arch/arm/include/asm/atomic.h | 2 | * Based on arch/arm/include/asm/atomic.h |
3 | * | 3 | * |
4 | * Copyright (C) 1996 Russell King. | 4 | * Copyright (C) 1996 Russell King. |
5 | * Copyright (C) 2002 Deep Blue Solutions Ltd. | 5 | * Copyright (C) 2002 Deep Blue Solutions Ltd. |
6 | * Copyright (C) 2012 ARM Ltd. | 6 | * Copyright (C) 2012 ARM Ltd. |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | * | 11 | * |
12 | * This program is distributed in the hope that it will be useful, | 12 | * This program is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 | * GNU General Public License for more details. | 15 | * GNU General Public License for more details. |
16 | * | 16 | * |
17 | * You should have received a copy of the GNU General Public License | 17 | * You should have received a copy of the GNU General Public License |
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
19 | */ | 19 | */ |
20 | #ifndef __ASM_ATOMIC_H | 20 | #ifndef __ASM_ATOMIC_H |
21 | #define __ASM_ATOMIC_H | 21 | #define __ASM_ATOMIC_H |
22 | 22 | ||
23 | #include <linux/compiler.h> | 23 | #include <linux/compiler.h> |
24 | #include <linux/types.h> | 24 | #include <linux/types.h> |
25 | 25 | ||
26 | #include <asm/barrier.h> | 26 | #include <asm/barrier.h> |
27 | #include <asm/cmpxchg.h> | 27 | #include <asm/cmpxchg.h> |
28 | 28 | ||
29 | #define ATOMIC_INIT(i) { (i) } | 29 | #define ATOMIC_INIT(i) { (i) } |
30 | 30 | ||
31 | #ifdef __KERNEL__ | 31 | #ifdef __KERNEL__ |
32 | 32 | ||
33 | /* | 33 | /* |
34 | * On ARM, ordinary assignment (str instruction) doesn't clear the local | 34 | * On ARM, ordinary assignment (str instruction) doesn't clear the local |
35 | * strex/ldrex monitor on some implementations. The reason we can use it for | 35 | * strex/ldrex monitor on some implementations. The reason we can use it for |
36 | * atomic_set() is the clrex or dummy strex done on every exception return. | 36 | * atomic_set() is the clrex or dummy strex done on every exception return. |
37 | */ | 37 | */ |
38 | #define atomic_read(v) (*(volatile int *)&(v)->counter) | 38 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
39 | #define atomic_set(v,i) (((v)->counter) = (i)) | 39 | #define atomic_set(v,i) (((v)->counter) = (i)) |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * AArch64 UP and SMP safe atomic ops. We use load exclusive and | 42 | * AArch64 UP and SMP safe atomic ops. We use load exclusive and |
43 | * store exclusive to ensure that these are atomic. We may loop | 43 | * store exclusive to ensure that these are atomic. We may loop |
44 | * to ensure that the update happens. | 44 | * to ensure that the update happens. |
45 | */ | 45 | */ |
46 | static inline void atomic_add(int i, atomic_t *v) | 46 | static inline void atomic_add(int i, atomic_t *v) |
47 | { | 47 | { |
48 | unsigned long tmp; | 48 | unsigned long tmp; |
49 | int result; | 49 | int result; |
50 | 50 | ||
51 | asm volatile("// atomic_add\n" | 51 | asm volatile("// atomic_add\n" |
52 | "1: ldxr %w0, [%3]\n" | 52 | "1: ldxr %w0, %2\n" |
53 | " add %w0, %w0, %w4\n" | 53 | " add %w0, %w0, %w3\n" |
54 | " stxr %w1, %w0, [%3]\n" | 54 | " stxr %w1, %w0, %2\n" |
55 | " cbnz %w1, 1b" | 55 | " cbnz %w1, 1b" |
56 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 56 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
57 | : "r" (&v->counter), "Ir" (i) | 57 | : "Ir" (i) |
58 | : "cc"); | 58 | : "cc"); |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline int atomic_add_return(int i, atomic_t *v) | 61 | static inline int atomic_add_return(int i, atomic_t *v) |
62 | { | 62 | { |
63 | unsigned long tmp; | 63 | unsigned long tmp; |
64 | int result; | 64 | int result; |
65 | 65 | ||
66 | asm volatile("// atomic_add_return\n" | 66 | asm volatile("// atomic_add_return\n" |
67 | "1: ldaxr %w0, [%3]\n" | 67 | "1: ldaxr %w0, %2\n" |
68 | " add %w0, %w0, %w4\n" | 68 | " add %w0, %w0, %w3\n" |
69 | " stlxr %w1, %w0, [%3]\n" | 69 | " stlxr %w1, %w0, %2\n" |
70 | " cbnz %w1, 1b" | 70 | " cbnz %w1, 1b" |
71 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 71 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
72 | : "r" (&v->counter), "Ir" (i) | 72 | : "Ir" (i) |
73 | : "cc"); | 73 | : "cc", "memory"); |
74 | 74 | ||
75 | return result; | 75 | return result; |
76 | } | 76 | } |
77 | 77 | ||
78 | static inline void atomic_sub(int i, atomic_t *v) | 78 | static inline void atomic_sub(int i, atomic_t *v) |
79 | { | 79 | { |
80 | unsigned long tmp; | 80 | unsigned long tmp; |
81 | int result; | 81 | int result; |
82 | 82 | ||
83 | asm volatile("// atomic_sub\n" | 83 | asm volatile("// atomic_sub\n" |
84 | "1: ldxr %w0, [%3]\n" | 84 | "1: ldxr %w0, %2\n" |
85 | " sub %w0, %w0, %w4\n" | 85 | " sub %w0, %w0, %w3\n" |
86 | " stxr %w1, %w0, [%3]\n" | 86 | " stxr %w1, %w0, %2\n" |
87 | " cbnz %w1, 1b" | 87 | " cbnz %w1, 1b" |
88 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 88 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
89 | : "r" (&v->counter), "Ir" (i) | 89 | : "Ir" (i) |
90 | : "cc"); | 90 | : "cc"); |
91 | } | 91 | } |
92 | 92 | ||
93 | static inline int atomic_sub_return(int i, atomic_t *v) | 93 | static inline int atomic_sub_return(int i, atomic_t *v) |
94 | { | 94 | { |
95 | unsigned long tmp; | 95 | unsigned long tmp; |
96 | int result; | 96 | int result; |
97 | 97 | ||
98 | asm volatile("// atomic_sub_return\n" | 98 | asm volatile("// atomic_sub_return\n" |
99 | "1: ldaxr %w0, [%3]\n" | 99 | "1: ldaxr %w0, %2\n" |
100 | " sub %w0, %w0, %w4\n" | 100 | " sub %w0, %w0, %w3\n" |
101 | " stlxr %w1, %w0, [%3]\n" | 101 | " stlxr %w1, %w0, %2\n" |
102 | " cbnz %w1, 1b" | 102 | " cbnz %w1, 1b" |
103 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 103 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
104 | : "r" (&v->counter), "Ir" (i) | 104 | : "Ir" (i) |
105 | : "cc"); | 105 | : "cc", "memory"); |
106 | 106 | ||
107 | return result; | 107 | return result; |
108 | } | 108 | } |
109 | 109 | ||
110 | static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) | 110 | static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) |
111 | { | 111 | { |
112 | unsigned long tmp; | 112 | unsigned long tmp; |
113 | int oldval; | 113 | int oldval; |
114 | 114 | ||
115 | asm volatile("// atomic_cmpxchg\n" | 115 | asm volatile("// atomic_cmpxchg\n" |
116 | "1: ldaxr %w1, [%3]\n" | 116 | "1: ldaxr %w1, %2\n" |
117 | " cmp %w1, %w4\n" | 117 | " cmp %w1, %w3\n" |
118 | " b.ne 2f\n" | 118 | " b.ne 2f\n" |
119 | " stlxr %w0, %w5, [%3]\n" | 119 | " stlxr %w0, %w4, %2\n" |
120 | " cbnz %w0, 1b\n" | 120 | " cbnz %w0, 1b\n" |
121 | "2:" | 121 | "2:" |
122 | : "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter) | 122 | : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) |
123 | : "r" (&ptr->counter), "Ir" (old), "r" (new) | 123 | : "Ir" (old), "r" (new) |
124 | : "cc"); | 124 | : "cc", "memory"); |
125 | 125 | ||
126 | return oldval; | 126 | return oldval; |
127 | } | 127 | } |
128 | 128 | ||
129 | static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) | 129 | static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) |
130 | { | 130 | { |
131 | unsigned long tmp, tmp2; | 131 | unsigned long tmp, tmp2; |
132 | 132 | ||
133 | asm volatile("// atomic_clear_mask\n" | 133 | asm volatile("// atomic_clear_mask\n" |
134 | "1: ldxr %0, [%3]\n" | 134 | "1: ldxr %0, %2\n" |
135 | " bic %0, %0, %4\n" | 135 | " bic %0, %0, %3\n" |
136 | " stxr %w1, %0, [%3]\n" | 136 | " stxr %w1, %0, %2\n" |
137 | " cbnz %w1, 1b" | 137 | " cbnz %w1, 1b" |
138 | : "=&r" (tmp), "=&r" (tmp2), "+o" (*addr) | 138 | : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr) |
139 | : "r" (addr), "Ir" (mask) | 139 | : "Ir" (mask) |
140 | : "cc"); | 140 | : "cc"); |
141 | } | 141 | } |
142 | 142 | ||
143 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | 143 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) |
144 | 144 | ||
145 | static inline int __atomic_add_unless(atomic_t *v, int a, int u) | 145 | static inline int __atomic_add_unless(atomic_t *v, int a, int u) |
146 | { | 146 | { |
147 | int c, old; | 147 | int c, old; |
148 | 148 | ||
149 | c = atomic_read(v); | 149 | c = atomic_read(v); |
150 | while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) | 150 | while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) |
151 | c = old; | 151 | c = old; |
152 | return c; | 152 | return c; |
153 | } | 153 | } |
154 | 154 | ||
155 | #define atomic_inc(v) atomic_add(1, v) | 155 | #define atomic_inc(v) atomic_add(1, v) |
156 | #define atomic_dec(v) atomic_sub(1, v) | 156 | #define atomic_dec(v) atomic_sub(1, v) |
157 | 157 | ||
158 | #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) | 158 | #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) |
159 | #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) | 159 | #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) |
160 | #define atomic_inc_return(v) (atomic_add_return(1, v)) | 160 | #define atomic_inc_return(v) (atomic_add_return(1, v)) |
161 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) | 161 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) |
162 | #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) | 162 | #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) |
163 | 163 | ||
164 | #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) | 164 | #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) |
165 | 165 | ||
166 | #define smp_mb__before_atomic_dec() smp_mb() | 166 | #define smp_mb__before_atomic_dec() smp_mb() |
167 | #define smp_mb__after_atomic_dec() smp_mb() | 167 | #define smp_mb__after_atomic_dec() smp_mb() |
168 | #define smp_mb__before_atomic_inc() smp_mb() | 168 | #define smp_mb__before_atomic_inc() smp_mb() |
169 | #define smp_mb__after_atomic_inc() smp_mb() | 169 | #define smp_mb__after_atomic_inc() smp_mb() |
170 | 170 | ||
171 | /* | 171 | /* |
172 | * 64-bit atomic operations. | 172 | * 64-bit atomic operations. |
173 | */ | 173 | */ |
174 | #define ATOMIC64_INIT(i) { (i) } | 174 | #define ATOMIC64_INIT(i) { (i) } |
175 | 175 | ||
176 | #define atomic64_read(v) (*(volatile long long *)&(v)->counter) | 176 | #define atomic64_read(v) (*(volatile long long *)&(v)->counter) |
177 | #define atomic64_set(v,i) (((v)->counter) = (i)) | 177 | #define atomic64_set(v,i) (((v)->counter) = (i)) |
178 | 178 | ||
179 | static inline void atomic64_add(u64 i, atomic64_t *v) | 179 | static inline void atomic64_add(u64 i, atomic64_t *v) |
180 | { | 180 | { |
181 | long result; | 181 | long result; |
182 | unsigned long tmp; | 182 | unsigned long tmp; |
183 | 183 | ||
184 | asm volatile("// atomic64_add\n" | 184 | asm volatile("// atomic64_add\n" |
185 | "1: ldxr %0, [%3]\n" | 185 | "1: ldxr %0, %2\n" |
186 | " add %0, %0, %4\n" | 186 | " add %0, %0, %3\n" |
187 | " stxr %w1, %0, [%3]\n" | 187 | " stxr %w1, %0, %2\n" |
188 | " cbnz %w1, 1b" | 188 | " cbnz %w1, 1b" |
189 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 189 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
190 | : "r" (&v->counter), "Ir" (i) | 190 | : "Ir" (i) |
191 | : "cc"); | 191 | : "cc"); |
192 | } | 192 | } |
193 | 193 | ||
194 | static inline long atomic64_add_return(long i, atomic64_t *v) | 194 | static inline long atomic64_add_return(long i, atomic64_t *v) |
195 | { | 195 | { |
196 | long result; | 196 | long result; |
197 | unsigned long tmp; | 197 | unsigned long tmp; |
198 | 198 | ||
199 | asm volatile("// atomic64_add_return\n" | 199 | asm volatile("// atomic64_add_return\n" |
200 | "1: ldaxr %0, [%3]\n" | 200 | "1: ldaxr %0, %2\n" |
201 | " add %0, %0, %4\n" | 201 | " add %0, %0, %3\n" |
202 | " stlxr %w1, %0, [%3]\n" | 202 | " stlxr %w1, %0, %2\n" |
203 | " cbnz %w1, 1b" | 203 | " cbnz %w1, 1b" |
204 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 204 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
205 | : "r" (&v->counter), "Ir" (i) | 205 | : "Ir" (i) |
206 | : "cc"); | 206 | : "cc", "memory"); |
207 | 207 | ||
208 | return result; | 208 | return result; |
209 | } | 209 | } |
210 | 210 | ||
211 | static inline void atomic64_sub(u64 i, atomic64_t *v) | 211 | static inline void atomic64_sub(u64 i, atomic64_t *v) |
212 | { | 212 | { |
213 | long result; | 213 | long result; |
214 | unsigned long tmp; | 214 | unsigned long tmp; |
215 | 215 | ||
216 | asm volatile("// atomic64_sub\n" | 216 | asm volatile("// atomic64_sub\n" |
217 | "1: ldxr %0, [%3]\n" | 217 | "1: ldxr %0, %2\n" |
218 | " sub %0, %0, %4\n" | 218 | " sub %0, %0, %3\n" |
219 | " stxr %w1, %0, [%3]\n" | 219 | " stxr %w1, %0, %2\n" |
220 | " cbnz %w1, 1b" | 220 | " cbnz %w1, 1b" |
221 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 221 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
222 | : "r" (&v->counter), "Ir" (i) | 222 | : "Ir" (i) |
223 | : "cc"); | 223 | : "cc"); |
224 | } | 224 | } |
225 | 225 | ||
226 | static inline long atomic64_sub_return(long i, atomic64_t *v) | 226 | static inline long atomic64_sub_return(long i, atomic64_t *v) |
227 | { | 227 | { |
228 | long result; | 228 | long result; |
229 | unsigned long tmp; | 229 | unsigned long tmp; |
230 | 230 | ||
231 | asm volatile("// atomic64_sub_return\n" | 231 | asm volatile("// atomic64_sub_return\n" |
232 | "1: ldaxr %0, [%3]\n" | 232 | "1: ldaxr %0, %2\n" |
233 | " sub %0, %0, %4\n" | 233 | " sub %0, %0, %3\n" |
234 | " stlxr %w1, %0, [%3]\n" | 234 | " stlxr %w1, %0, %2\n" |
235 | " cbnz %w1, 1b" | 235 | " cbnz %w1, 1b" |
236 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 236 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
237 | : "r" (&v->counter), "Ir" (i) | 237 | : "Ir" (i) |
238 | : "cc"); | 238 | : "cc", "memory"); |
239 | 239 | ||
240 | return result; | 240 | return result; |
241 | } | 241 | } |
242 | 242 | ||
243 | static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) | 243 | static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) |
244 | { | 244 | { |
245 | long oldval; | 245 | long oldval; |
246 | unsigned long res; | 246 | unsigned long res; |
247 | 247 | ||
248 | asm volatile("// atomic64_cmpxchg\n" | 248 | asm volatile("// atomic64_cmpxchg\n" |
249 | "1: ldaxr %1, [%3]\n" | 249 | "1: ldaxr %1, %2\n" |
250 | " cmp %1, %4\n" | 250 | " cmp %1, %3\n" |
251 | " b.ne 2f\n" | 251 | " b.ne 2f\n" |
252 | " stlxr %w0, %5, [%3]\n" | 252 | " stlxr %w0, %4, %2\n" |
253 | " cbnz %w0, 1b\n" | 253 | " cbnz %w0, 1b\n" |
254 | "2:" | 254 | "2:" |
255 | : "=&r" (res), "=&r" (oldval), "+o" (ptr->counter) | 255 | : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter) |
256 | : "r" (&ptr->counter), "Ir" (old), "r" (new) | 256 | : "Ir" (old), "r" (new) |
257 | : "cc"); | 257 | : "cc", "memory"); |
258 | 258 | ||
259 | return oldval; | 259 | return oldval; |
260 | } | 260 | } |
261 | 261 | ||
262 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | 262 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) |
263 | 263 | ||
264 | static inline long atomic64_dec_if_positive(atomic64_t *v) | 264 | static inline long atomic64_dec_if_positive(atomic64_t *v) |
265 | { | 265 | { |
266 | long result; | 266 | long result; |
267 | unsigned long tmp; | 267 | unsigned long tmp; |
268 | 268 | ||
269 | asm volatile("// atomic64_dec_if_positive\n" | 269 | asm volatile("// atomic64_dec_if_positive\n" |
270 | "1: ldaxr %0, [%3]\n" | 270 | "1: ldaxr %0, %2\n" |
271 | " subs %0, %0, #1\n" | 271 | " subs %0, %0, #1\n" |
272 | " b.mi 2f\n" | 272 | " b.mi 2f\n" |
273 | " stlxr %w1, %0, [%3]\n" | 273 | " stlxr %w1, %0, %2\n" |
274 | " cbnz %w1, 1b\n" | 274 | " cbnz %w1, 1b\n" |
275 | "2:" | 275 | "2:" |
276 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 276 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
277 | : "r" (&v->counter) | 277 | : |
278 | : "cc"); | 278 | : "cc", "memory"); |
279 | 279 | ||
280 | return result; | 280 | return result; |
281 | } | 281 | } |
282 | 282 | ||
283 | static inline int atomic64_add_unless(atomic64_t *v, long a, long u) | 283 | static inline int atomic64_add_unless(atomic64_t *v, long a, long u) |
284 | { | 284 | { |
285 | long c, old; | 285 | long c, old; |
286 | 286 | ||
287 | c = atomic64_read(v); | 287 | c = atomic64_read(v); |
288 | while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c) | 288 | while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c) |
289 | c = old; | 289 | c = old; |
290 | 290 | ||
291 | return c != u; | 291 | return c != u; |
292 | } | 292 | } |
293 | 293 | ||
294 | #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) | 294 | #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) |
295 | #define atomic64_inc(v) atomic64_add(1LL, (v)) | 295 | #define atomic64_inc(v) atomic64_add(1LL, (v)) |
296 | #define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) | 296 | #define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) |
297 | #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) | 297 | #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) |
298 | #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) | 298 | #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) |
299 | #define atomic64_dec(v) atomic64_sub(1LL, (v)) | 299 | #define atomic64_dec(v) atomic64_sub(1LL, (v)) |
300 | #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) | 300 | #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) |
301 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) | 301 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) |
302 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) | 302 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) |
303 | 303 | ||
304 | #endif | 304 | #endif |
305 | #endif | 305 | #endif |
306 | 306 |
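
The pattern of the fix is visible in every hunk above: the old code passed the counter twice (a "+o" output plus a separate "r" (&v->counter) input addressed as [%3]), while the new code uses a single "+Q" memory operand, and the value-returning acquire/release variants additionally gain a "memory" clobber. A minimal sketch of the fixed idiom, assuming GCC-style AArch64 inline asm (example_atomic_or is a hypothetical helper modelled on atomic_clear_mask above, not part of the patch):

    static inline void example_atomic_or(unsigned long mask, unsigned long *p)
    {
            unsigned long val, tmp;

            asm volatile("// example_atomic_or\n"
            "1:     ldxr    %0, %2\n"              /* exclusive load of *p          */
            "       orr     %0, %0, %3\n"
            "       stxr    %w1, %0, %2\n"         /* %w1 == 0 on success           */
            "       cbnz    %w1, 1b"               /* retry if exclusivity was lost */
            : "=&r" (val), "=&r" (tmp), "+Q" (*p)  /* *p as a true memory operand   */
            : "r" (mask)
            : "cc");
    }

Because the compiler now sees *p as an in/out memory operand, it cannot cache the value across the asm or reorder conflicting accesses around it, which is the inconsistency the patch title refers to.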
arch/arm64/include/asm/cmpxchg.h
1 | /* | 1 | /* |
2 | * Based on arch/arm/include/asm/cmpxchg.h | 2 | * Based on arch/arm/include/asm/cmpxchg.h |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ARM Ltd. | 4 | * Copyright (C) 2012 ARM Ltd. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | * | 9 | * |
10 | * This program is distributed in the hope that it will be useful, | 10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
17 | */ | 17 | */ |
18 | #ifndef __ASM_CMPXCHG_H | 18 | #ifndef __ASM_CMPXCHG_H |
19 | #define __ASM_CMPXCHG_H | 19 | #define __ASM_CMPXCHG_H |
20 | 20 | ||
21 | #include <linux/bug.h> | 21 | #include <linux/bug.h> |
22 | 22 | ||
23 | #include <asm/barrier.h> | 23 | #include <asm/barrier.h> |
24 | 24 | ||
25 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) | 25 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) |
26 | { | 26 | { |
27 | unsigned long ret, tmp; | 27 | unsigned long ret, tmp; |
28 | 28 | ||
29 | switch (size) { | 29 | switch (size) { |
30 | case 1: | 30 | case 1: |
31 | asm volatile("// __xchg1\n" | 31 | asm volatile("// __xchg1\n" |
32 | "1: ldaxrb %w0, [%3]\n" | 32 | "1: ldaxrb %w0, %2\n" |
33 | " stlxrb %w1, %w2, [%3]\n" | 33 | " stlxrb %w1, %w3, %2\n" |
34 | " cbnz %w1, 1b\n" | 34 | " cbnz %w1, 1b\n" |
35 | : "=&r" (ret), "=&r" (tmp) | 35 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) |
36 | : "r" (x), "r" (ptr) | 36 | : "r" (x) |
37 | : "memory", "cc"); | 37 | : "cc", "memory"); |
38 | break; | 38 | break; |
39 | case 2: | 39 | case 2: |
40 | asm volatile("// __xchg2\n" | 40 | asm volatile("// __xchg2\n" |
41 | "1: ldaxrh %w0, [%3]\n" | 41 | "1: ldaxrh %w0, %2\n" |
42 | " stlxrh %w1, %w2, [%3]\n" | 42 | " stlxrh %w1, %w3, %2\n" |
43 | " cbnz %w1, 1b\n" | 43 | " cbnz %w1, 1b\n" |
44 | : "=&r" (ret), "=&r" (tmp) | 44 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) |
45 | : "r" (x), "r" (ptr) | 45 | : "r" (x) |
46 | : "memory", "cc"); | 46 | : "cc", "memory"); |
47 | break; | 47 | break; |
48 | case 4: | 48 | case 4: |
49 | asm volatile("// __xchg4\n" | 49 | asm volatile("// __xchg4\n" |
50 | "1: ldaxr %w0, [%3]\n" | 50 | "1: ldaxr %w0, %2\n" |
51 | " stlxr %w1, %w2, [%3]\n" | 51 | " stlxr %w1, %w3, %2\n" |
52 | " cbnz %w1, 1b\n" | 52 | " cbnz %w1, 1b\n" |
53 | : "=&r" (ret), "=&r" (tmp) | 53 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) |
54 | : "r" (x), "r" (ptr) | 54 | : "r" (x) |
55 | : "memory", "cc"); | 55 | : "cc", "memory"); |
56 | break; | 56 | break; |
57 | case 8: | 57 | case 8: |
58 | asm volatile("// __xchg8\n" | 58 | asm volatile("// __xchg8\n" |
59 | "1: ldaxr %0, [%3]\n" | 59 | "1: ldaxr %0, %2\n" |
60 | " stlxr %w1, %2, [%3]\n" | 60 | " stlxr %w1, %3, %2\n" |
61 | " cbnz %w1, 1b\n" | 61 | " cbnz %w1, 1b\n" |
62 | : "=&r" (ret), "=&r" (tmp) | 62 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) |
63 | : "r" (x), "r" (ptr) | 63 | : "r" (x) |
64 | : "memory", "cc"); | 64 | : "cc", "memory"); |
65 | break; | 65 | break; |
66 | default: | 66 | default: |
67 | BUILD_BUG(); | 67 | BUILD_BUG(); |
68 | } | 68 | } |
69 | 69 | ||
70 | return ret; | 70 | return ret; |
71 | } | 71 | } |
72 | 72 | ||
73 | #define xchg(ptr,x) \ | 73 | #define xchg(ptr,x) \ |
74 | ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) | 74 | ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) |
75 | 75 | ||
76 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | 76 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, |
77 | unsigned long new, int size) | 77 | unsigned long new, int size) |
78 | { | 78 | { |
79 | unsigned long oldval = 0, res; | 79 | unsigned long oldval = 0, res; |
80 | 80 | ||
81 | switch (size) { | 81 | switch (size) { |
82 | case 1: | 82 | case 1: |
83 | do { | 83 | do { |
84 | asm volatile("// __cmpxchg1\n" | 84 | asm volatile("// __cmpxchg1\n" |
85 | " ldxrb %w1, [%2]\n" | 85 | " ldxrb %w1, %2\n" |
86 | " mov %w0, #0\n" | 86 | " mov %w0, #0\n" |
87 | " cmp %w1, %w3\n" | 87 | " cmp %w1, %w3\n" |
88 | " b.ne 1f\n" | 88 | " b.ne 1f\n" |
89 | " stxrb %w0, %w4, [%2]\n" | 89 | " stxrb %w0, %w4, %2\n" |
90 | "1:\n" | 90 | "1:\n" |
91 | : "=&r" (res), "=&r" (oldval) | 91 | : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr) |
92 | : "r" (ptr), "Ir" (old), "r" (new) | 92 | : "Ir" (old), "r" (new) |
93 | : "cc"); | 93 | : "cc"); |
94 | } while (res); | 94 | } while (res); |
95 | break; | 95 | break; |
96 | 96 | ||
97 | case 2: | 97 | case 2: |
98 | do { | 98 | do { |
99 | asm volatile("// __cmpxchg2\n" | 99 | asm volatile("// __cmpxchg2\n" |
100 | " ldxrh %w1, [%2]\n" | 100 | " ldxrh %w1, %2\n" |
101 | " mov %w0, #0\n" | 101 | " mov %w0, #0\n" |
102 | " cmp %w1, %w3\n" | 102 | " cmp %w1, %w3\n" |
103 | " b.ne 1f\n" | 103 | " b.ne 1f\n" |
104 | " stxrh %w0, %w4, [%2]\n" | 104 | " stxrh %w0, %w4, %2\n" |
105 | "1:\n" | 105 | "1:\n" |
106 | : "=&r" (res), "=&r" (oldval) | 106 | : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr) |
107 | : "r" (ptr), "Ir" (old), "r" (new) | 107 | : "Ir" (old), "r" (new) |
108 | : "memory", "cc"); | 108 | : "cc"); |
109 | } while (res); | 109 | } while (res); |
110 | break; | 110 | break; |
111 | 111 | ||
112 | case 4: | 112 | case 4: |
113 | do { | 113 | do { |
114 | asm volatile("// __cmpxchg4\n" | 114 | asm volatile("// __cmpxchg4\n" |
115 | " ldxr %w1, [%2]\n" | 115 | " ldxr %w1, %2\n" |
116 | " mov %w0, #0\n" | 116 | " mov %w0, #0\n" |
117 | " cmp %w1, %w3\n" | 117 | " cmp %w1, %w3\n" |
118 | " b.ne 1f\n" | 118 | " b.ne 1f\n" |
119 | " stxr %w0, %w4, [%2]\n" | 119 | " stxr %w0, %w4, %2\n" |
120 | "1:\n" | 120 | "1:\n" |
121 | : "=&r" (res), "=&r" (oldval) | 121 | : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr) |
122 | : "r" (ptr), "Ir" (old), "r" (new) | 122 | : "Ir" (old), "r" (new) |
123 | : "cc"); | 123 | : "cc"); |
124 | } while (res); | 124 | } while (res); |
125 | break; | 125 | break; |
126 | 126 | ||
127 | case 8: | 127 | case 8: |
128 | do { | 128 | do { |
129 | asm volatile("// __cmpxchg8\n" | 129 | asm volatile("// __cmpxchg8\n" |
130 | " ldxr %1, [%2]\n" | 130 | " ldxr %1, %2\n" |
131 | " mov %w0, #0\n" | 131 | " mov %w0, #0\n" |
132 | " cmp %1, %3\n" | 132 | " cmp %1, %3\n" |
133 | " b.ne 1f\n" | 133 | " b.ne 1f\n" |
134 | " stxr %w0, %4, [%2]\n" | 134 | " stxr %w0, %4, %2\n" |
135 | "1:\n" | 135 | "1:\n" |
136 | : "=&r" (res), "=&r" (oldval) | 136 | : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr) |
137 | : "r" (ptr), "Ir" (old), "r" (new) | 137 | : "Ir" (old), "r" (new) |
138 | : "cc"); | 138 | : "cc"); |
139 | } while (res); | 139 | } while (res); |
140 | break; | 140 | break; |
141 | 141 | ||
142 | default: | 142 | default: |
143 | BUILD_BUG(); | 143 | BUILD_BUG(); |
144 | } | 144 | } |
145 | 145 | ||
146 | return oldval; | 146 | return oldval; |
147 | } | 147 | } |
148 | 148 | ||
149 | static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, | 149 | static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, |
150 | unsigned long new, int size) | 150 | unsigned long new, int size) |
151 | { | 151 | { |
152 | unsigned long ret; | 152 | unsigned long ret; |
153 | 153 | ||
154 | smp_mb(); | 154 | smp_mb(); |
155 | ret = __cmpxchg(ptr, old, new, size); | 155 | ret = __cmpxchg(ptr, old, new, size); |
156 | smp_mb(); | 156 | smp_mb(); |
157 | 157 | ||
158 | return ret; | 158 | return ret; |
159 | } | 159 | } |
160 | 160 | ||
161 | #define cmpxchg(ptr,o,n) \ | 161 | #define cmpxchg(ptr,o,n) \ |
162 | ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ | 162 | ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ |
163 | (unsigned long)(o), \ | 163 | (unsigned long)(o), \ |
164 | (unsigned long)(n), \ | 164 | (unsigned long)(n), \ |
165 | sizeof(*(ptr)))) | 165 | sizeof(*(ptr)))) |
166 | 166 | ||
167 | #define cmpxchg_local(ptr,o,n) \ | 167 | #define cmpxchg_local(ptr,o,n) \ |
168 | ((__typeof__(*(ptr)))__cmpxchg((ptr), \ | 168 | ((__typeof__(*(ptr)))__cmpxchg((ptr), \ |
169 | (unsigned long)(o), \ | 169 | (unsigned long)(o), \ |
170 | (unsigned long)(n), \ | 170 | (unsigned long)(n), \ |
171 | sizeof(*(ptr)))) | 171 | sizeof(*(ptr)))) |
172 | 172 | ||
173 | #endif /* __ASM_CMPXCHG_H */ | 173 | #endif /* __ASM_CMPXCHG_H */ |
174 | 174 |
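
A hypothetical usage sketch of the cmpxchg() macro defined above (the claimed flag and try_claim() are illustrative only, not part of the patch). Note that cmpxchg() goes through __cmpxchg_mb() and is therefore bracketed by smp_mb() barriers, while cmpxchg_local() omits them:

    /* Claim a one-shot flag exactly once across CPUs. */
    static unsigned long claimed;

    static int try_claim(void)
    {
            /* cmpxchg() returns the old value; seeing 0 means we won the race. */
            return cmpxchg(&claimed, 0UL, 1UL) == 0UL;
    }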
arch/arm64/include/asm/futex.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 ARM Ltd. | 2 | * Copyright (C) 2012 ARM Ltd. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * This program is distributed in the hope that it will be useful, | 8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
11 | * GNU General Public License for more details. | 11 | * GNU General Public License for more details. |
12 | * | 12 | * |
13 | * You should have received a copy of the GNU General Public License | 13 | * You should have received a copy of the GNU General Public License |
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
15 | */ | 15 | */ |
16 | #ifndef __ASM_FUTEX_H | 16 | #ifndef __ASM_FUTEX_H |
17 | #define __ASM_FUTEX_H | 17 | #define __ASM_FUTEX_H |
18 | 18 | ||
19 | #ifdef __KERNEL__ | 19 | #ifdef __KERNEL__ |
20 | 20 | ||
21 | #include <linux/futex.h> | 21 | #include <linux/futex.h> |
22 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
23 | #include <asm/errno.h> | 23 | #include <asm/errno.h> |
24 | 24 | ||
25 | #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ | 25 | #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ |
26 | asm volatile( \ | 26 | asm volatile( \ |
27 | "1: ldaxr %w1, %2\n" \ | 27 | "1: ldaxr %w1, %2\n" \ |
28 | insn "\n" \ | 28 | insn "\n" \ |
29 | "2: stlxr %w3, %w0, %2\n" \ | 29 | "2: stlxr %w3, %w0, %2\n" \ |
30 | " cbnz %w3, 1b\n" \ | 30 | " cbnz %w3, 1b\n" \ |
31 | "3:\n" \ | 31 | "3:\n" \ |
32 | " .pushsection .fixup,\"ax\"\n" \ | 32 | " .pushsection .fixup,\"ax\"\n" \ |
33 | "4: mov %w0, %w5\n" \ | 33 | "4: mov %w0, %w5\n" \ |
34 | " b 3b\n" \ | 34 | " b 3b\n" \ |
35 | " .popsection\n" \ | 35 | " .popsection\n" \ |
36 | " .pushsection __ex_table,\"a\"\n" \ | 36 | " .pushsection __ex_table,\"a\"\n" \ |
37 | " .align 3\n" \ | 37 | " .align 3\n" \ |
38 | " .quad 1b, 4b, 2b, 4b\n" \ | 38 | " .quad 1b, 4b, 2b, 4b\n" \ |
39 | " .popsection\n" \ | 39 | " .popsection\n" \ |
40 | : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ | 40 | : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ |
41 | : "r" (oparg), "Ir" (-EFAULT) \ | 41 | : "r" (oparg), "Ir" (-EFAULT) \ |
42 | : "cc") | 42 | : "cc", "memory") |
43 | 43 | ||
44 | static inline int | 44 | static inline int |
45 | futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) | 45 | futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) |
46 | { | 46 | { |
47 | int op = (encoded_op >> 28) & 7; | 47 | int op = (encoded_op >> 28) & 7; |
48 | int cmp = (encoded_op >> 24) & 15; | 48 | int cmp = (encoded_op >> 24) & 15; |
49 | int oparg = (encoded_op << 8) >> 20; | 49 | int oparg = (encoded_op << 8) >> 20; |
50 | int cmparg = (encoded_op << 20) >> 20; | 50 | int cmparg = (encoded_op << 20) >> 20; |
51 | int oldval = 0, ret, tmp; | 51 | int oldval = 0, ret, tmp; |
52 | 52 | ||
53 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 53 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
54 | oparg = 1 << oparg; | 54 | oparg = 1 << oparg; |
55 | 55 | ||
56 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | 56 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
57 | return -EFAULT; | 57 | return -EFAULT; |
58 | 58 | ||
59 | pagefault_disable(); /* implies preempt_disable() */ | 59 | pagefault_disable(); /* implies preempt_disable() */ |
60 | 60 | ||
61 | switch (op) { | 61 | switch (op) { |
62 | case FUTEX_OP_SET: | 62 | case FUTEX_OP_SET: |
63 | __futex_atomic_op("mov %w0, %w4", | 63 | __futex_atomic_op("mov %w0, %w4", |
64 | ret, oldval, uaddr, tmp, oparg); | 64 | ret, oldval, uaddr, tmp, oparg); |
65 | break; | 65 | break; |
66 | case FUTEX_OP_ADD: | 66 | case FUTEX_OP_ADD: |
67 | __futex_atomic_op("add %w0, %w1, %w4", | 67 | __futex_atomic_op("add %w0, %w1, %w4", |
68 | ret, oldval, uaddr, tmp, oparg); | 68 | ret, oldval, uaddr, tmp, oparg); |
69 | break; | 69 | break; |
70 | case FUTEX_OP_OR: | 70 | case FUTEX_OP_OR: |
71 | __futex_atomic_op("orr %w0, %w1, %w4", | 71 | __futex_atomic_op("orr %w0, %w1, %w4", |
72 | ret, oldval, uaddr, tmp, oparg); | 72 | ret, oldval, uaddr, tmp, oparg); |
73 | break; | 73 | break; |
74 | case FUTEX_OP_ANDN: | 74 | case FUTEX_OP_ANDN: |
75 | __futex_atomic_op("and %w0, %w1, %w4", | 75 | __futex_atomic_op("and %w0, %w1, %w4", |
76 | ret, oldval, uaddr, tmp, ~oparg); | 76 | ret, oldval, uaddr, tmp, ~oparg); |
77 | break; | 77 | break; |
78 | case FUTEX_OP_XOR: | 78 | case FUTEX_OP_XOR: |
79 | __futex_atomic_op("eor %w0, %w1, %w4", | 79 | __futex_atomic_op("eor %w0, %w1, %w4", |
80 | ret, oldval, uaddr, tmp, oparg); | 80 | ret, oldval, uaddr, tmp, oparg); |
81 | break; | 81 | break; |
82 | default: | 82 | default: |
83 | ret = -ENOSYS; | 83 | ret = -ENOSYS; |
84 | } | 84 | } |
85 | 85 | ||
86 | pagefault_enable(); /* subsumes preempt_enable() */ | 86 | pagefault_enable(); /* subsumes preempt_enable() */ |
87 | 87 | ||
88 | if (!ret) { | 88 | if (!ret) { |
89 | switch (cmp) { | 89 | switch (cmp) { |
90 | case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; | 90 | case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; |
91 | case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; | 91 | case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; |
92 | case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; | 92 | case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; |
93 | case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; | 93 | case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; |
94 | case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; | 94 | case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; |
95 | case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; | 95 | case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; |
96 | default: ret = -ENOSYS; | 96 | default: ret = -ENOSYS; |
97 | } | 97 | } |
98 | } | 98 | } |
99 | return ret; | 99 | return ret; |
100 | } | 100 | } |
101 | 101 | ||
102 | static inline int | 102 | static inline int |
103 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | 103 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
104 | u32 oldval, u32 newval) | 104 | u32 oldval, u32 newval) |
105 | { | 105 | { |
106 | int ret = 0; | 106 | int ret = 0; |
107 | u32 val, tmp; | 107 | u32 val, tmp; |
108 | 108 | ||
109 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | 109 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
110 | return -EFAULT; | 110 | return -EFAULT; |
111 | 111 | ||
112 | asm volatile("// futex_atomic_cmpxchg_inatomic\n" | 112 | asm volatile("// futex_atomic_cmpxchg_inatomic\n" |
113 | "1: ldaxr %w1, %2\n" | 113 | "1: ldaxr %w1, %2\n" |
114 | " sub %w3, %w1, %w4\n" | 114 | " sub %w3, %w1, %w4\n" |
115 | " cbnz %w3, 3f\n" | 115 | " cbnz %w3, 3f\n" |
116 | "2: stlxr %w3, %w5, %2\n" | 116 | "2: stlxr %w3, %w5, %2\n" |
117 | " cbnz %w3, 1b\n" | 117 | " cbnz %w3, 1b\n" |
118 | "3:\n" | 118 | "3:\n" |
119 | " .pushsection .fixup,\"ax\"\n" | 119 | " .pushsection .fixup,\"ax\"\n" |
120 | "4: mov %w0, %w6\n" | 120 | "4: mov %w0, %w6\n" |
121 | " b 3b\n" | 121 | " b 3b\n" |
122 | " .popsection\n" | 122 | " .popsection\n" |
123 | " .pushsection __ex_table,\"a\"\n" | 123 | " .pushsection __ex_table,\"a\"\n" |
124 | " .align 3\n" | 124 | " .align 3\n" |
125 | " .quad 1b, 4b, 2b, 4b\n" | 125 | " .quad 1b, 4b, 2b, 4b\n" |
126 | " .popsection\n" | 126 | " .popsection\n" |
127 | : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp) | 127 | : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp) |
128 | : "r" (oldval), "r" (newval), "Ir" (-EFAULT) | 128 | : "r" (oldval), "r" (newval), "Ir" (-EFAULT) |
129 | : "cc", "memory"); | 129 | : "cc", "memory"); |
130 | 130 | ||
131 | *uval = val; | 131 | *uval = val; |
132 | return ret; | 132 | return ret; |
133 | } | 133 | } |
134 | 134 | ||
135 | #endif /* __KERNEL__ */ | 135 | #endif /* __KERNEL__ */ |
136 | #endif /* __ASM_FUTEX_H */ | 136 | #endif /* __ASM_FUTEX_H */ |
137 | 137 |
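futex_atomic_op_inuser() above unpacks the FUTEX_WAKE_OP argument with plain shifts: the operation in bits 31:28, the comparison in bits 27:24, and two 12-bit signed fields recovered by shifting left and then arithmetic-shifting right. A standalone decode of one sample encoding (the field values are arbitrary; the signed shifts are implementation-defined in C, which the kernel relies on):

#include <stdio.h>

int main(void)
{
	/* op = 1, cmp = 3, oparg = 16, cmparg = 0x7ff */
	int encoded_op = (1 << 28) | (3 << 24) | (16 << 12) | 0x7ff;

	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;	/* sign-extends bits 23:12 */
	int cmparg = (encoded_op << 20) >> 20;	/* sign-extends bits 11:0 */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}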
arch/arm64/include/asm/io.h
1 | /* | 1 | /* |
2 | * Based on arch/arm/include/asm/io.h | 2 | * Based on arch/arm/include/asm/io.h |
3 | * | 3 | * |
4 | * Copyright (C) 1996-2000 Russell King | 4 | * Copyright (C) 1996-2000 Russell King |
5 | * Copyright (C) 2012 ARM Ltd. | 5 | * Copyright (C) 2012 ARM Ltd. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
18 | */ | 18 | */ |
19 | #ifndef __ASM_IO_H | 19 | #ifndef __ASM_IO_H |
20 | #define __ASM_IO_H | 20 | #define __ASM_IO_H |
21 | 21 | ||
22 | #ifdef __KERNEL__ | 22 | #ifdef __KERNEL__ |
23 | 23 | ||
24 | #include <linux/types.h> | 24 | #include <linux/types.h> |
25 | 25 | ||
26 | #include <asm/byteorder.h> | 26 | #include <asm/byteorder.h> |
27 | #include <asm/barrier.h> | 27 | #include <asm/barrier.h> |
28 | #include <asm/pgtable.h> | 28 | #include <asm/pgtable.h> |
29 | 29 | ||
30 | /* | 30 | /* |
31 | * Generic IO read/write. These perform native-endian accesses. | 31 | * Generic IO read/write. These perform native-endian accesses. |
32 | */ | 32 | */ |
33 | static inline void __raw_writeb(u8 val, volatile void __iomem *addr) | 33 | static inline void __raw_writeb(u8 val, volatile void __iomem *addr) |
34 | { | 34 | { |
35 | asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr)); | 35 | asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr)); |
36 | } | 36 | } |
37 | 37 | ||
38 | static inline void __raw_writew(u16 val, volatile void __iomem *addr) | 38 | static inline void __raw_writew(u16 val, volatile void __iomem *addr) |
39 | { | 39 | { |
40 | asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr)); | 40 | asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr)); |
41 | } | 41 | } |
42 | 42 | ||
43 | static inline void __raw_writel(u32 val, volatile void __iomem *addr) | 43 | static inline void __raw_writel(u32 val, volatile void __iomem *addr) |
44 | { | 44 | { |
45 | asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr)); | 45 | asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr)); |
46 | } | 46 | } |
47 | 47 | ||
48 | static inline void __raw_writeq(u64 val, volatile void __iomem *addr) | 48 | static inline void __raw_writeq(u64 val, volatile void __iomem *addr) |
49 | { | 49 | { |
50 | asm volatile("str %0, [%1]" : : "r" (val), "r" (addr)); | 50 | asm volatile("str %0, [%1]" : : "r" (val), "r" (addr)); |
51 | } | 51 | } |
52 | 52 | ||
53 | static inline u8 __raw_readb(const volatile void __iomem *addr) | 53 | static inline u8 __raw_readb(const volatile void __iomem *addr) |
54 | { | 54 | { |
55 | u8 val; | 55 | u8 val; |
56 | asm volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (addr)); | 56 | asm volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (addr)); |
57 | return val; | 57 | return val; |
58 | } | 58 | } |
59 | 59 | ||
60 | static inline u16 __raw_readw(const volatile void __iomem *addr) | 60 | static inline u16 __raw_readw(const volatile void __iomem *addr) |
61 | { | 61 | { |
62 | u16 val; | 62 | u16 val; |
63 | asm volatile("ldrh %w0, [%1]" : "=r" (val) : "r" (addr)); | 63 | asm volatile("ldrh %w0, [%1]" : "=r" (val) : "r" (addr)); |
64 | return val; | 64 | return val; |
65 | } | 65 | } |
66 | 66 | ||
67 | static inline u32 __raw_readl(const volatile void __iomem *addr) | 67 | static inline u32 __raw_readl(const volatile void __iomem *addr) |
68 | { | 68 | { |
69 | u32 val; | 69 | u32 val; |
70 | asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr)); | 70 | asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr)); |
71 | return val; | 71 | return val; |
72 | } | 72 | } |
73 | 73 | ||
74 | static inline u64 __raw_readq(const volatile void __iomem *addr) | 74 | static inline u64 __raw_readq(const volatile void __iomem *addr) |
75 | { | 75 | { |
76 | u64 val; | 76 | u64 val; |
77 | asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr)); | 77 | asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr)); |
78 | return val; | 78 | return val; |
79 | } | 79 | } |
80 | 80 | ||
81 | /* IO barriers */ | 81 | /* IO barriers */ |
82 | #define __iormb() rmb() | 82 | #define __iormb() rmb() |
83 | #define __iowmb() wmb() | 83 | #define __iowmb() wmb() |
84 | 84 | ||
85 | #define mmiowb() do { } while (0) | 85 | #define mmiowb() do { } while (0) |
86 | 86 | ||
87 | /* | 87 | /* |
88 | * Relaxed I/O memory access primitives. These follow the Device memory | 88 | * Relaxed I/O memory access primitives. These follow the Device memory |
89 | * ordering rules but do not guarantee any ordering relative to Normal memory | 89 | * ordering rules but do not guarantee any ordering relative to Normal memory |
90 | * accesses. | 90 | * accesses. |
91 | */ | 91 | */ |
92 | #define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; }) | 92 | #define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; }) |
93 | #define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; }) | 93 | #define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; }) |
94 | #define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; }) | 94 | #define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; }) |
95 | 95 | ||
96 | #define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c))) | 96 | #define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c))) |
97 | #define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c))) | 97 | #define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c))) |
98 | #define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) | 98 | #define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) |
99 | 99 | ||
100 | /* | 100 | /* |
101 | * I/O memory access primitives. Reads are ordered relative to any | 101 | * I/O memory access primitives. Reads are ordered relative to any |
102 | * following Normal memory access. Writes are ordered relative to any prior | 102 | * following Normal memory access. Writes are ordered relative to any prior |
103 | * Normal memory access. | 103 | * Normal memory access. |
104 | */ | 104 | */ |
105 | #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) | 105 | #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) |
106 | #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) | 106 | #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) |
107 | #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) | 107 | #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) |
108 | 108 | ||
109 | #define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); }) | 109 | #define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); }) |
110 | #define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); }) | 110 | #define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); }) |
111 | #define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); }) | 111 | #define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); }) |
112 | 112 | ||
113 | /* | 113 | /* |
114 | * I/O port access primitives. | 114 | * I/O port access primitives. |
115 | */ | 115 | */ |
116 | #define IO_SPACE_LIMIT 0xffff | 116 | #define IO_SPACE_LIMIT 0xffff |
117 | #define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M)) | 117 | #define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M)) |
118 | 118 | ||
119 | static inline u8 inb(unsigned long addr) | 119 | static inline u8 inb(unsigned long addr) |
120 | { | 120 | { |
121 | return readb(addr + PCI_IOBASE); | 121 | return readb(addr + PCI_IOBASE); |
122 | } | 122 | } |
123 | 123 | ||
124 | static inline u16 inw(unsigned long addr) | 124 | static inline u16 inw(unsigned long addr) |
125 | { | 125 | { |
126 | return readw(addr + PCI_IOBASE); | 126 | return readw(addr + PCI_IOBASE); |
127 | } | 127 | } |
128 | 128 | ||
129 | static inline u32 inl(unsigned long addr) | 129 | static inline u32 inl(unsigned long addr) |
130 | { | 130 | { |
131 | return readl(addr + PCI_IOBASE); | 131 | return readl(addr + PCI_IOBASE); |
132 | } | 132 | } |
133 | 133 | ||
134 | static inline void outb(u8 b, unsigned long addr) | 134 | static inline void outb(u8 b, unsigned long addr) |
135 | { | 135 | { |
136 | writeb(b, addr + PCI_IOBASE); | 136 | writeb(b, addr + PCI_IOBASE); |
137 | } | 137 | } |
138 | 138 | ||
139 | static inline void outw(u16 b, unsigned long addr) | 139 | static inline void outw(u16 b, unsigned long addr) |
140 | { | 140 | { |
141 | writew(b, addr + PCI_IOBASE); | 141 | writew(b, addr + PCI_IOBASE); |
142 | } | 142 | } |
143 | 143 | ||
144 | static inline void outl(u32 b, unsigned long addr) | 144 | static inline void outl(u32 b, unsigned long addr) |
145 | { | 145 | { |
146 | writel(b, addr + PCI_IOBASE); | 146 | writel(b, addr + PCI_IOBASE); |
147 | } | 147 | } |
148 | 148 | ||
149 | #define inb_p(addr) inb(addr) | 149 | #define inb_p(addr) inb(addr) |
150 | #define inw_p(addr) inw(addr) | 150 | #define inw_p(addr) inw(addr) |
151 | #define inl_p(addr) inl(addr) | 151 | #define inl_p(addr) inl(addr) |
152 | 152 | ||
153 | #define outb_p(x, addr) outb((x), (addr)) | 153 | #define outb_p(x, addr) outb((x), (addr)) |
154 | #define outw_p(x, addr) outw((x), (addr)) | 154 | #define outw_p(x, addr) outw((x), (addr)) |
155 | #define outl_p(x, addr) outl((x), (addr)) | 155 | #define outl_p(x, addr) outl((x), (addr)) |
156 | 156 | ||
157 | static inline void insb(unsigned long addr, void *buffer, int count) | 157 | static inline void insb(unsigned long addr, void *buffer, int count) |
158 | { | 158 | { |
159 | u8 *buf = buffer; | 159 | u8 *buf = buffer; |
160 | while (count--) | 160 | while (count--) |
161 | *buf++ = __raw_readb(addr + PCI_IOBASE); | 161 | *buf++ = __raw_readb(addr + PCI_IOBASE); |
162 | } | 162 | } |
163 | 163 | ||
164 | static inline void insw(unsigned long addr, void *buffer, int count) | 164 | static inline void insw(unsigned long addr, void *buffer, int count) |
165 | { | 165 | { |
166 | u16 *buf = buffer; | 166 | u16 *buf = buffer; |
167 | while (count--) | 167 | while (count--) |
168 | *buf++ = __raw_readw(addr + PCI_IOBASE); | 168 | *buf++ = __raw_readw(addr + PCI_IOBASE); |
169 | } | 169 | } |
170 | 170 | ||
171 | static inline void insl(unsigned long addr, void *buffer, int count) | 171 | static inline void insl(unsigned long addr, void *buffer, int count) |
172 | { | 172 | { |
173 | u32 *buf = buffer; | 173 | u32 *buf = buffer; |
174 | while (count--) | 174 | while (count--) |
175 | *buf++ = __raw_readl(addr + PCI_IOBASE); | 175 | *buf++ = __raw_readl(addr + PCI_IOBASE); |
176 | } | 176 | } |
177 | 177 | ||
178 | static inline void outsb(unsigned long addr, const void *buffer, int count) | 178 | static inline void outsb(unsigned long addr, const void *buffer, int count) |
179 | { | 179 | { |
180 | const u8 *buf = buffer; | 180 | const u8 *buf = buffer; |
181 | while (count--) | 181 | while (count--) |
182 | __raw_writeb(*buf++, addr + PCI_IOBASE); | 182 | __raw_writeb(*buf++, addr + PCI_IOBASE); |
183 | } | 183 | } |
184 | 184 | ||
185 | static inline void outsw(unsigned long addr, const void *buffer, int count) | 185 | static inline void outsw(unsigned long addr, const void *buffer, int count) |
186 | { | 186 | { |
187 | const u16 *buf = buffer; | 187 | const u16 *buf = buffer; |
188 | while (count--) | 188 | while (count--) |
189 | __raw_writew(*buf++, addr + PCI_IOBASE); | 189 | __raw_writew(*buf++, addr + PCI_IOBASE); |
190 | } | 190 | } |
191 | 191 | ||
192 | static inline void outsl(unsigned long addr, const void *buffer, int count) | 192 | static inline void outsl(unsigned long addr, const void *buffer, int count) |
193 | { | 193 | { |
194 | const u32 *buf = buffer; | 194 | const u32 *buf = buffer; |
195 | while (count--) | 195 | while (count--) |
196 | __raw_writel(*buf++, addr + PCI_IOBASE); | 196 | __raw_writel(*buf++, addr + PCI_IOBASE); |
197 | } | 197 | } |
198 | 198 | ||
199 | #define insb_p(port,to,len) insb(port,to,len) | 199 | #define insb_p(port,to,len) insb(port,to,len) |
200 | #define insw_p(port,to,len) insw(port,to,len) | 200 | #define insw_p(port,to,len) insw(port,to,len) |
201 | #define insl_p(port,to,len) insl(port,to,len) | 201 | #define insl_p(port,to,len) insl(port,to,len) |
202 | 202 | ||
203 | #define outsb_p(port,from,len) outsb(port,from,len) | 203 | #define outsb_p(port,from,len) outsb(port,from,len) |
204 | #define outsw_p(port,from,len) outsw(port,from,len) | 204 | #define outsw_p(port,from,len) outsw(port,from,len) |
205 | #define outsl_p(port,from,len) outsl(port,from,len) | 205 | #define outsl_p(port,from,len) outsl(port,from,len) |
206 | 206 | ||
207 | /* | 207 | /* |
208 | * String version of I/O memory access operations. | 208 | * String version of I/O memory access operations. |
209 | */ | 209 | */ |
210 | extern void __memcpy_fromio(void *, const volatile void __iomem *, size_t); | 210 | extern void __memcpy_fromio(void *, const volatile void __iomem *, size_t); |
211 | extern void __memcpy_toio(volatile void __iomem *, const void *, size_t); | 211 | extern void __memcpy_toio(volatile void __iomem *, const void *, size_t); |
212 | extern void __memset_io(volatile void __iomem *, int, size_t); | 212 | extern void __memset_io(volatile void __iomem *, int, size_t); |
213 | 213 | ||
214 | #define memset_io(c,v,l) __memset_io((c),(v),(l)) | 214 | #define memset_io(c,v,l) __memset_io((c),(v),(l)) |
215 | #define memcpy_fromio(a,c,l) __memcpy_fromio((a),(c),(l)) | 215 | #define memcpy_fromio(a,c,l) __memcpy_fromio((a),(c),(l)) |
216 | #define memcpy_toio(c,a,l) __memcpy_toio((c),(a),(l)) | 216 | #define memcpy_toio(c,a,l) __memcpy_toio((c),(a),(l)) |
217 | 217 | ||
218 | /* | 218 | /* |
219 | * I/O memory mapping functions. | 219 | * I/O memory mapping functions. |
220 | */ | 220 | */ |
221 | extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot); | 221 | extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot); |
222 | extern void __iounmap(volatile void __iomem *addr); | 222 | extern void __iounmap(volatile void __iomem *addr); |
223 | 223 | ||
224 | #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) | 224 | #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) |
225 | #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) | 225 | #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) |
226 | #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) | 226 | #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) |
227 | 227 | ||
228 | #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) | 228 | #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) |
229 | #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) | 229 | #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) |
230 | #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) | 230 | #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) |
231 | #define iounmap __iounmap | 231 | #define iounmap __iounmap |
232 | 232 | ||
233 | #define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF) | ||
234 | #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) | ||
235 | |||
233 | #define ARCH_HAS_IOREMAP_WC | 236 | #define ARCH_HAS_IOREMAP_WC |
234 | #include <asm-generic/iomap.h> | 237 | #include <asm-generic/iomap.h> |
235 | 238 | ||
236 | /* | 239 | /* |
237 | * More restrictive address range checking than the default implementation | 240 | * More restrictive address range checking than the default implementation |
238 | * (PHYS_OFFSET and PHYS_MASK taken into account). | 241 | * (PHYS_OFFSET and PHYS_MASK taken into account). |
239 | */ | 242 | */ |
240 | #define ARCH_HAS_VALID_PHYS_ADDR_RANGE | 243 | #define ARCH_HAS_VALID_PHYS_ADDR_RANGE |
241 | extern int valid_phys_addr_range(unsigned long addr, size_t size); | 244 | extern int valid_phys_addr_range(unsigned long addr, size_t size); |
242 | extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); | 245 | extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); |
243 | 246 | ||
244 | extern int devmem_is_allowed(unsigned long pfn); | 247 | extern int devmem_is_allowed(unsigned long pfn); |
245 | 248 | ||
246 | /* | 249 | /* |
247 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem | 250 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem |
248 | * access | 251 | * access |
249 | */ | 252 | */ |
250 | #define xlate_dev_mem_ptr(p) __va(p) | 253 | #define xlate_dev_mem_ptr(p) __va(p) |
251 | 254 | ||
252 | /* | 255 | /* |
253 | * Convert a virtual cached pointer to an uncached pointer | 256 | * Convert a virtual cached pointer to an uncached pointer |
254 | */ | 257 | */ |
255 | #define xlate_dev_kmem_ptr(p) p | 258 | #define xlate_dev_kmem_ptr(p) p |
256 | 259 | ||
257 | #endif /* __KERNEL__ */ | 260 | #endif /* __KERNEL__ */ |
258 | #endif /* __ASM_IO_H */ | 261 | #endif /* __ASM_IO_H */ |
259 | 262 |
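The ordered/relaxed split above is what drivers key off: writel() issues __iowmb() first, so a descriptor written to Normal memory is visible to the device before the doorbell write lands, while the *_relaxed() accessors suffice when only Device-memory ordering is needed, e.g. polling a status register. A hedged driver-style sketch (the register offsets, one-slot ring, and kick_device() name are all hypothetical):

#include <asm/io.h>

#define REG_DOORBELL	0x00	/* hypothetical device registers */
#define REG_STATUS	0x04
#define STATUS_BUSY	0x01

static void kick_device(void __iomem *regs, u32 *desc_ring, u32 desc)
{
	desc_ring[0] = desc;	/* Normal memory: fill the descriptor */

	/* writel() = __iowmb() + writel_relaxed(): the store above is
	 * ordered before the device observes the doorbell. */
	writel(1, regs + REG_DOORBELL);

	/* Polling the same device needs only Device ordering, so the
	 * relaxed accessor avoids a barrier on every iteration. */
	while (readl_relaxed(regs + REG_STATUS) & STATUS_BUSY)
		cpu_relax();
}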
arch/arm64/include/asm/memory.h
1 | /* | 1 | /* |
2 | * Based on arch/arm/include/asm/memory.h | 2 | * Based on arch/arm/include/asm/memory.h |
3 | * | 3 | * |
4 | * Copyright (C) 2000-2002 Russell King | 4 | * Copyright (C) 2000-2002 Russell King |
5 | * Copyright (C) 2012 ARM Ltd. | 5 | * Copyright (C) 2012 ARM Ltd. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
18 | * | 18 | * |
19 | * Note: this file should not be included by non-asm/.h files | 19 | * Note: this file should not be included by non-asm/.h files |
20 | */ | 20 | */ |
21 | #ifndef __ASM_MEMORY_H | 21 | #ifndef __ASM_MEMORY_H |
22 | #define __ASM_MEMORY_H | 22 | #define __ASM_MEMORY_H |
23 | 23 | ||
24 | #include <linux/compiler.h> | 24 | #include <linux/compiler.h> |
25 | #include <linux/const.h> | 25 | #include <linux/const.h> |
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <asm/sizes.h> | 27 | #include <asm/sizes.h> |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * Allow for constants defined here to be used from assembly code | 30 | * Allow for constants defined here to be used from assembly code |
31 | * by prepending the UL suffix only with actual C code compilation. | 31 | * by prepending the UL suffix only with actual C code compilation. |
32 | */ | 32 | */ |
33 | #define UL(x) _AC(x, UL) | 33 | #define UL(x) _AC(x, UL) |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * PAGE_OFFSET - the virtual address of the start of the kernel image. | 36 | * PAGE_OFFSET - the virtual address of the start of the kernel image. |
37 | * VA_BITS - the maximum number of bits for virtual addresses. | 37 | * VA_BITS - the maximum number of bits for virtual addresses. |
38 | * TASK_SIZE - the maximum size of a user space task. | 38 | * TASK_SIZE - the maximum size of a user space task. |
39 | * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. | 39 | * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. |
40 | * The module space lives between the addresses given by TASK_SIZE | 40 | * The module space lives between the addresses given by TASK_SIZE |
41 | * and PAGE_OFFSET - it must be within 128MB of the kernel text. | 41 | * and PAGE_OFFSET - it must be within 128MB of the kernel text. |
42 | */ | 42 | */ |
43 | #define PAGE_OFFSET UL(0xffffffc000000000) | 43 | #define PAGE_OFFSET UL(0xffffffc000000000) |
44 | #define MODULES_END (PAGE_OFFSET) | 44 | #define MODULES_END (PAGE_OFFSET) |
45 | #define MODULES_VADDR (MODULES_END - SZ_64M) | 45 | #define MODULES_VADDR (MODULES_END - SZ_64M) |
46 | #define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M) | ||
46 | #define VA_BITS (39) | 47 | #define VA_BITS (39) |
47 | #define TASK_SIZE_64 (UL(1) << VA_BITS) | 48 | #define TASK_SIZE_64 (UL(1) << VA_BITS) |
48 | 49 | ||
49 | #ifdef CONFIG_COMPAT | 50 | #ifdef CONFIG_COMPAT |
50 | #define TASK_SIZE_32 UL(0x100000000) | 51 | #define TASK_SIZE_32 UL(0x100000000) |
51 | #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ | 52 | #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ |
52 | TASK_SIZE_32 : TASK_SIZE_64) | 53 | TASK_SIZE_32 : TASK_SIZE_64) |
53 | #else | 54 | #else |
54 | #define TASK_SIZE TASK_SIZE_64 | 55 | #define TASK_SIZE TASK_SIZE_64 |
55 | #endif /* CONFIG_COMPAT */ | 56 | #endif /* CONFIG_COMPAT */ |
56 | 57 | ||
57 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) | 58 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) |
58 | 59 | ||
59 | #if TASK_SIZE_64 > MODULES_VADDR | 60 | #if TASK_SIZE_64 > MODULES_VADDR |
60 | #error Top of 64-bit user space clashes with start of module space | 61 | #error Top of 64-bit user space clashes with start of module space |
61 | #endif | 62 | #endif |
62 | 63 | ||
63 | /* | 64 | /* |
64 | * Physical vs virtual RAM address space conversion. These are | 65 | * Physical vs virtual RAM address space conversion. These are |
65 | * private definitions which should NOT be used outside memory.h | 66 | * private definitions which should NOT be used outside memory.h |
66 | * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. | 67 | * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. |
67 | */ | 68 | */ |
68 | #define __virt_to_phys(x) (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET)) | 69 | #define __virt_to_phys(x) (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET)) |
69 | #define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET)) | 70 | #define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET)) |
70 | 71 | ||
71 | /* | 72 | /* |
72 | * Convert a physical address to a Page Frame Number and back | 73 | * Convert a physical address to a Page Frame Number and back |
73 | */ | 74 | */ |
74 | #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) | 75 | #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) |
75 | #define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) | 76 | #define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) |
76 | 77 | ||
77 | /* | 78 | /* |
78 | * Convert a page to/from a physical address | 79 | * Convert a page to/from a physical address |
79 | */ | 80 | */ |
80 | #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) | 81 | #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) |
81 | #define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) | 82 | #define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) |
82 | 83 | ||
83 | /* | 84 | /* |
84 | * Memory types available. | 85 | * Memory types available. |
85 | */ | 86 | */ |
86 | #define MT_DEVICE_nGnRnE 0 | 87 | #define MT_DEVICE_nGnRnE 0 |
87 | #define MT_DEVICE_nGnRE 1 | 88 | #define MT_DEVICE_nGnRE 1 |
88 | #define MT_DEVICE_GRE 2 | 89 | #define MT_DEVICE_GRE 2 |
89 | #define MT_NORMAL_NC 3 | 90 | #define MT_NORMAL_NC 3 |
90 | #define MT_NORMAL 4 | 91 | #define MT_NORMAL 4 |
91 | 92 | ||
92 | #ifndef __ASSEMBLY__ | 93 | #ifndef __ASSEMBLY__ |
93 | 94 | ||
94 | extern phys_addr_t memstart_addr; | 95 | extern phys_addr_t memstart_addr; |
95 | /* PHYS_OFFSET - the physical address of the start of memory. */ | 96 | /* PHYS_OFFSET - the physical address of the start of memory. */ |
96 | #define PHYS_OFFSET ({ memstart_addr; }) | 97 | #define PHYS_OFFSET ({ memstart_addr; }) |
97 | 98 | ||
98 | /* | 99 | /* |
99 | * PFNs are used to describe any physical page; this means | 100 | * PFNs are used to describe any physical page; this means |
100 | * PFN 0 == physical address 0. | 101 | * PFN 0 == physical address 0. |
101 | * | 102 | * |
102 | * This is the PFN of the first RAM page in the kernel | 103 | * This is the PFN of the first RAM page in the kernel |
103 | * direct-mapped view. We assume this is the first page | 104 | * direct-mapped view. We assume this is the first page |
104 | * of RAM in the mem_map as well. | 105 | * of RAM in the mem_map as well. |
105 | */ | 106 | */ |
106 | #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) | 107 | #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) |
107 | 108 | ||
108 | /* | 109 | /* |
109 | * Note: Drivers should NOT use these. They are the wrong | 110 | * Note: Drivers should NOT use these. They are the wrong |
110 | * translation for translating DMA addresses. Use the driver | 111 | * translation for translating DMA addresses. Use the driver |
111 | * DMA support - see dma-mapping.h. | 112 | * DMA support - see dma-mapping.h. |
112 | */ | 113 | */ |
113 | static inline phys_addr_t virt_to_phys(const volatile void *x) | 114 | static inline phys_addr_t virt_to_phys(const volatile void *x) |
114 | { | 115 | { |
115 | return __virt_to_phys((unsigned long)(x)); | 116 | return __virt_to_phys((unsigned long)(x)); |
116 | } | 117 | } |
117 | 118 | ||
118 | static inline void *phys_to_virt(phys_addr_t x) | 119 | static inline void *phys_to_virt(phys_addr_t x) |
119 | { | 120 | { |
120 | return (void *)(__phys_to_virt(x)); | 121 | return (void *)(__phys_to_virt(x)); |
121 | } | 122 | } |
122 | 123 | ||
123 | /* | 124 | /* |
124 | * Drivers should NOT use these either. | 125 | * Drivers should NOT use these either. |
125 | */ | 126 | */ |
126 | #define __pa(x) __virt_to_phys((unsigned long)(x)) | 127 | #define __pa(x) __virt_to_phys((unsigned long)(x)) |
127 | #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) | 128 | #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) |
128 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | 129 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
129 | 130 | ||
130 | /* | 131 | /* |
131 | * virt_to_page(k) convert a _valid_ virtual address to struct page * | 132 | * virt_to_page(k) convert a _valid_ virtual address to struct page * |
132 | * virt_addr_valid(k) indicates whether a virtual address is valid | 133 | * virt_addr_valid(k) indicates whether a virtual address is valid |
133 | */ | 134 | */ |
134 | #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET | 135 | #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET |
135 | 136 | ||
136 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 137 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
137 | #define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ | 138 | #define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ |
138 | ((void *)(kaddr) < (void *)high_memory)) | 139 | ((void *)(kaddr) < (void *)high_memory)) |
139 | 140 | ||
140 | #endif | 141 | #endif |
141 | 142 | ||
142 | #include <asm-generic/memory_model.h> | 143 | #include <asm-generic/memory_model.h> |
143 | 144 | ||
144 | #endif | 145 | #endif |
145 | 146 |
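The linear-map conversion above is plain offset arithmetic: subtract PAGE_OFFSET, add PHYS_OFFSET. A standalone check of the sums, assuming a hypothetical 2GB RAM base (the real PHYS_OFFSET comes from memstart_addr at boot):

#include <inttypes.h>
#include <stdio.h>

#define PAGE_OFFSET	0xffffffc000000000ULL

int main(void)
{
	uint64_t phys_offset = 0x80000000ULL;	/* hypothetical RAM base */
	uint64_t va = PAGE_OFFSET + 0x100000;	/* 1MB into the linear map */
	uint64_t pa = va - PAGE_OFFSET + phys_offset;

	/* prints: va 0xffffffc000100000 -> pa 0x80100000 */
	printf("va 0x%" PRIx64 " -> pa 0x%" PRIx64 "\n", va, pa);
	return 0;
}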
arch/arm64/include/asm/mmu.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 ARM Ltd. | 2 | * Copyright (C) 2012 ARM Ltd. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * This program is distributed in the hope that it will be useful, | 8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
11 | * GNU General Public License for more details. | 11 | * GNU General Public License for more details. |
12 | * | 12 | * |
13 | * You should have received a copy of the GNU General Public License | 13 | * You should have received a copy of the GNU General Public License |
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
15 | */ | 15 | */ |
16 | #ifndef __ASM_MMU_H | 16 | #ifndef __ASM_MMU_H |
17 | #define __ASM_MMU_H | 17 | #define __ASM_MMU_H |
18 | 18 | ||
19 | typedef struct { | 19 | typedef struct { |
20 | unsigned int id; | 20 | unsigned int id; |
21 | raw_spinlock_t id_lock; | 21 | raw_spinlock_t id_lock; |
22 | void *vdso; | 22 | void *vdso; |
23 | } mm_context_t; | 23 | } mm_context_t; |
24 | 24 | ||
25 | #define ASID(mm) ((mm)->context.id & 0xffff) | 25 | #define ASID(mm) ((mm)->context.id & 0xffff) |
26 | 26 | ||
27 | extern void paging_init(void); | 27 | extern void paging_init(void); |
28 | extern void setup_mm_for_reboot(void); | 28 | extern void setup_mm_for_reboot(void); |
29 | extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); | ||
29 | 30 | ||
30 | #endif | 31 | #endif |
31 | 32 |
arch/arm64/include/asm/mmu_context.h
1 | /* | 1 | /* |
2 | * Based on arch/arm/include/asm/mmu_context.h | 2 | * Based on arch/arm/include/asm/mmu_context.h |
3 | * | 3 | * |
4 | * Copyright (C) 1996 Russell King. | 4 | * Copyright (C) 1996 Russell King. |
5 | * Copyright (C) 2012 ARM Ltd. | 5 | * Copyright (C) 2012 ARM Ltd. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
18 | */ | 18 | */ |
19 | #ifndef __ASM_MMU_CONTEXT_H | 19 | #ifndef __ASM_MMU_CONTEXT_H |
20 | #define __ASM_MMU_CONTEXT_H | 20 | #define __ASM_MMU_CONTEXT_H |
21 | 21 | ||
22 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
23 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
24 | 24 | ||
25 | #include <asm/cacheflush.h> | 25 | #include <asm/cacheflush.h> |
26 | #include <asm/proc-fns.h> | 26 | #include <asm/proc-fns.h> |
27 | #include <asm-generic/mm_hooks.h> | 27 | #include <asm-generic/mm_hooks.h> |
28 | #include <asm/cputype.h> | 28 | #include <asm/cputype.h> |
29 | #include <asm/pgtable.h> | 29 | #include <asm/pgtable.h> |
30 | 30 | ||
31 | #define MAX_ASID_BITS 16 | 31 | #define MAX_ASID_BITS 16 |
32 | 32 | ||
33 | extern unsigned int cpu_last_asid; | 33 | extern unsigned int cpu_last_asid; |
34 | 34 | ||
35 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); | 35 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); |
36 | void __new_context(struct mm_struct *mm); | 36 | void __new_context(struct mm_struct *mm); |
37 | 37 | ||
38 | #ifdef CONFIG_PID_IN_CONTEXTIDR | ||
39 | static inline void contextidr_thread_switch(struct task_struct *next) | ||
40 | { | ||
41 | asm( | ||
42 | " msr contextidr_el1, %0\n" | ||
43 | " isb" | ||
44 | : | ||
45 | : "r" (task_pid_nr(next))); | ||
46 | } | ||
47 | #else | ||
48 | static inline void contextidr_thread_switch(struct task_struct *next) | ||
49 | { | ||
50 | } | ||
51 | #endif | ||
52 | |||
38 | /* | 53 | /* |
39 | * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0. | 54 | * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0. |
40 | */ | 55 | */ |
41 | static inline void cpu_set_reserved_ttbr0(void) | 56 | static inline void cpu_set_reserved_ttbr0(void) |
42 | { | 57 | { |
43 | unsigned long ttbr = page_to_phys(empty_zero_page); | 58 | unsigned long ttbr = page_to_phys(empty_zero_page); |
44 | 59 | ||
45 | asm( | 60 | asm( |
46 | " msr ttbr0_el1, %0 // set TTBR0\n" | 61 | " msr ttbr0_el1, %0 // set TTBR0\n" |
47 | " isb" | 62 | " isb" |
48 | : | 63 | : |
49 | : "r" (ttbr)); | 64 | : "r" (ttbr)); |
50 | } | 65 | } |
51 | 66 | ||
52 | static inline void switch_new_context(struct mm_struct *mm) | 67 | static inline void switch_new_context(struct mm_struct *mm) |
53 | { | 68 | { |
54 | unsigned long flags; | 69 | unsigned long flags; |
55 | 70 | ||
56 | __new_context(mm); | 71 | __new_context(mm); |
57 | 72 | ||
58 | local_irq_save(flags); | 73 | local_irq_save(flags); |
59 | cpu_switch_mm(mm->pgd, mm); | 74 | cpu_switch_mm(mm->pgd, mm); |
60 | local_irq_restore(flags); | 75 | local_irq_restore(flags); |
61 | } | 76 | } |
62 | 77 | ||
63 | static inline void check_and_switch_context(struct mm_struct *mm, | 78 | static inline void check_and_switch_context(struct mm_struct *mm, |
64 | struct task_struct *tsk) | 79 | struct task_struct *tsk) |
65 | { | 80 | { |
66 | /* | 81 | /* |
67 | * Required during context switch to avoid speculative page table | 82 | * Required during context switch to avoid speculative page table |
68 | * walking with the wrong TTBR. | 83 | * walking with the wrong TTBR. |
69 | */ | 84 | */ |
70 | cpu_set_reserved_ttbr0(); | 85 | cpu_set_reserved_ttbr0(); |
71 | 86 | ||
72 | if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) | 87 | if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) |
73 | /* | 88 | /* |
74 | * The ASID is from the current generation, just switch to the | 89 | * The ASID is from the current generation, just switch to the |
75 | * new pgd. This condition is only true for calls from | 90 | * new pgd. This condition is only true for calls from |
76 | * context_switch() and interrupts are already disabled. | 91 | * context_switch() and interrupts are already disabled. |
77 | */ | 92 | */ |
78 | cpu_switch_mm(mm->pgd, mm); | 93 | cpu_switch_mm(mm->pgd, mm); |
79 | else if (irqs_disabled()) | 94 | else if (irqs_disabled()) |
80 | /* | 95 | /* |
81 | * Defer the new ASID allocation until after the context | 96 | * Defer the new ASID allocation until after the context |
82 | * switch critical region since __new_context() cannot be | 97 | * switch critical region since __new_context() cannot be |
83 | * called with interrupts disabled. | 98 | * called with interrupts disabled. |
84 | */ | 99 | */ |
85 | set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM); | 100 | set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM); |
86 | else | 101 | else |
87 | /* | 102 | /* |
88 | * That is a direct call to switch_mm() or activate_mm() with | 103 | * That is a direct call to switch_mm() or activate_mm() with |
89 | * interrupts enabled and a new context. | 104 | * interrupts enabled and a new context. |
90 | */ | 105 | */ |
91 | switch_new_context(mm); | 106 | switch_new_context(mm); |
92 | } | 107 | } |
93 | 108 | ||
94 | #define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0) | 109 | #define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0) |
95 | #define destroy_context(mm) do { } while(0) | 110 | #define destroy_context(mm) do { } while(0) |
96 | 111 | ||
97 | #define finish_arch_post_lock_switch \ | 112 | #define finish_arch_post_lock_switch \ |
98 | finish_arch_post_lock_switch | 113 | finish_arch_post_lock_switch |
99 | static inline void finish_arch_post_lock_switch(void) | 114 | static inline void finish_arch_post_lock_switch(void) |
100 | { | 115 | { |
101 | if (test_and_clear_thread_flag(TIF_SWITCH_MM)) { | 116 | if (test_and_clear_thread_flag(TIF_SWITCH_MM)) { |
102 | struct mm_struct *mm = current->mm; | 117 | struct mm_struct *mm = current->mm; |
103 | unsigned long flags; | 118 | unsigned long flags; |
104 | 119 | ||
105 | __new_context(mm); | 120 | __new_context(mm); |
106 | 121 | ||
107 | local_irq_save(flags); | 122 | local_irq_save(flags); |
108 | cpu_switch_mm(mm->pgd, mm); | 123 | cpu_switch_mm(mm->pgd, mm); |
109 | local_irq_restore(flags); | 124 | local_irq_restore(flags); |
110 | } | 125 | } |
111 | } | 126 | } |
112 | 127 | ||
113 | /* | 128 | /* |
114 | * This is called when "tsk" is about to enter lazy TLB mode. | 129 | * This is called when "tsk" is about to enter lazy TLB mode. |
115 | * | 130 | * |
116 | * mm: describes the currently active mm context | 131 | * mm: describes the currently active mm context |
117 | * tsk: task which is entering lazy tlb | 132 | * tsk: task which is entering lazy tlb |
118 | * cpu: cpu number which is entering lazy tlb | 133 | * cpu: cpu number which is entering lazy tlb |
119 | * | 134 | * |
120 | * tsk->mm will be NULL | 135 | * tsk->mm will be NULL |
121 | */ | 136 | */ |
122 | static inline void | 137 | static inline void |
123 | enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | 138 | enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) |
124 | { | 139 | { |
125 | } | 140 | } |
126 | 141 | ||
127 | /* | 142 | /* |
128 | * This is the actual mm switch as far as the scheduler | 143 | * This is the actual mm switch as far as the scheduler |
129 | * is concerned. No registers are touched. We avoid | 144 | * is concerned. No registers are touched. We avoid |
130 | * calling the CPU specific function when the mm hasn't | 145 | * calling the CPU specific function when the mm hasn't |
131 | * actually changed. | 146 | * actually changed. |
132 | */ | 147 | */ |
133 | static inline void | 148 | static inline void |
134 | switch_mm(struct mm_struct *prev, struct mm_struct *next, | 149 | switch_mm(struct mm_struct *prev, struct mm_struct *next, |
135 | struct task_struct *tsk) | 150 | struct task_struct *tsk) |
136 | { | 151 | { |
137 | unsigned int cpu = smp_processor_id(); | 152 | unsigned int cpu = smp_processor_id(); |
138 | 153 | ||
139 | #ifdef CONFIG_SMP | 154 | #ifdef CONFIG_SMP |
140 | /* check for possible thread migration */ | 155 | /* check for possible thread migration */ |
141 | if (!cpumask_empty(mm_cpumask(next)) && | 156 | if (!cpumask_empty(mm_cpumask(next)) && |
142 | !cpumask_test_cpu(cpu, mm_cpumask(next))) | 157 | !cpumask_test_cpu(cpu, mm_cpumask(next))) |
143 | __flush_icache_all(); | 158 | __flush_icache_all(); |
144 | #endif | 159 | #endif |
145 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) | 160 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) |
146 | check_and_switch_context(next, tsk); | 161 | check_and_switch_context(next, tsk); |
147 | } | 162 | } |
148 | 163 | ||
149 | #define deactivate_mm(tsk,mm) do { } while (0) | 164 | #define deactivate_mm(tsk,mm) do { } while (0) |
150 | #define activate_mm(prev,next) switch_mm(prev, next, NULL) | 165 | #define activate_mm(prev,next) switch_mm(prev, next, NULL) |
151 | 166 | ||
152 | #endif | 167 | #endif |
153 | 168 |
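check_and_switch_context() above treats context.id as a generation count in the bits above MAX_ASID_BITS plus a 16-bit ASID below them: XORing with cpu_last_asid and shifting right by MAX_ASID_BITS yields zero exactly when the generations match. A standalone sketch of that test (the sample id values are arbitrary):

#include <stdio.h>

#define MAX_ASID_BITS	16

static unsigned int cpu_last_asid = (2u << MAX_ASID_BITS) | 10;

/* Non-zero after the shift means the id is from an older generation
 * and a fresh ASID must be allocated before switching. */
static int asid_is_current(unsigned int context_id)
{
	return !((context_id ^ cpu_last_asid) >> MAX_ASID_BITS);
}

int main(void)
{
	printf("gen2: %d\n", asid_is_current((2u << MAX_ASID_BITS) | 3)); /* 1 */
	printf("gen1: %d\n", asid_is_current((1u << MAX_ASID_BITS) | 3)); /* 0 */
	return 0;
}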
arch/arm64/include/asm/perf_event.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 ARM Ltd. | 2 | * Copyright (C) 2012 ARM Ltd. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * This program is distributed in the hope that it will be useful, | 8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
11 | * GNU General Public License for more details. | 11 | * GNU General Public License for more details. |
12 | * | 12 | * |
13 | * You should have received a copy of the GNU General Public License | 13 | * You should have received a copy of the GNU General Public License |
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #ifndef __ASM_PERF_EVENT_H | 17 | #ifndef __ASM_PERF_EVENT_H |
18 | #define __ASM_PERF_EVENT_H | 18 | #define __ASM_PERF_EVENT_H |
19 | 19 | ||
20 | /* It's quiet around here... */ | 20 | #ifdef CONFIG_HW_PERF_EVENTS |
21 | struct pt_regs; | ||
22 | extern unsigned long perf_instruction_pointer(struct pt_regs *regs); | ||
23 | extern unsigned long perf_misc_flags(struct pt_regs *regs); | ||
24 | #define perf_misc_flags(regs) perf_misc_flags(regs) | ||
25 | #endif | ||
21 | 26 | ||
22 | #endif | 27 | #endif |
23 | 28 |
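The self-referential "#define perf_misc_flags(regs) perf_misc_flags(regs)" above makes the override visible to the preprocessor, so generic perf code can detect that the architecture supplies its own hook. Roughly what the consuming side in include/linux/perf_event.h looks like (sketched from memory, not quoted from this commit):

#ifndef perf_misc_flags
/* No arch override: fall back to a generic classification. */
# define perf_misc_flags(regs) \
	(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif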
arch/arm64/include/asm/psci.h
File was created | 1 | /* | |
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * Copyright (C) 2013 ARM Limited | ||
12 | */ | ||
13 | |||
14 | #ifndef __ASM_PSCI_H | ||
15 | #define __ASM_PSCI_H | ||
16 | |||
17 | #define PSCI_POWER_STATE_TYPE_STANDBY 0 | ||
18 | #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 | ||
19 | |||
20 | struct psci_power_state { | ||
21 | u16 id; | ||
22 | u8 type; | ||
23 | u8 affinity_level; | ||
24 | }; | ||
25 | |||
26 | struct psci_operations { | ||
27 | int (*cpu_suspend)(struct psci_power_state state, | ||
28 | unsigned long entry_point); | ||
29 | int (*cpu_off)(struct psci_power_state state); | ||
30 | int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); | ||
31 | int (*migrate)(unsigned long cpuid); | ||
32 | }; | ||
33 | |||
34 | extern struct psci_operations psci_ops; | ||
35 | |||
36 | int psci_init(void); | ||
37 | |||
38 | #endif /* __ASM_PSCI_H */ | ||
39 |
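psci_init() fills psci_ops from the device tree, and callers go through the struct, so SMP bring-up stays agnostic of the underlying firmware conduit. A hedged sketch of how a secondary-CPU boot path might use the CPU_ON hook (secondary_entry and the error handling are illustrative, not the commit's smp_psci.c):

#include <linux/errno.h>
#include <asm/memory.h>
#include <asm/psci.h>

extern void secondary_entry(void);	/* hypothetical startup stub */

static int boot_cpu_via_psci(unsigned long hwid)
{
	if (!psci_ops.cpu_on)
		return -ENODEV;		/* no CPU_ON method was probed */

	/* The firmware expects the physical address of the entry point. */
	return psci_ops.cpu_on(hwid, __pa(secondary_entry));
}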
arch/arm64/include/asm/ptrace.h
1 | /* | 1 | /* |
2 | * Based on arch/arm/include/asm/ptrace.h | 2 | * Based on arch/arm/include/asm/ptrace.h |
3 | * | 3 | * |
4 | * Copyright (C) 1996-2003 Russell King | 4 | * Copyright (C) 1996-2003 Russell King |
5 | * Copyright (C) 2012 ARM Ltd. | 5 | * Copyright (C) 2012 ARM Ltd. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
18 | */ | 18 | */ |
19 | #ifndef __ASM_PTRACE_H | 19 | #ifndef __ASM_PTRACE_H |
20 | #define __ASM_PTRACE_H | 20 | #define __ASM_PTRACE_H |
21 | 21 | ||
22 | #include <uapi/asm/ptrace.h> | 22 | #include <uapi/asm/ptrace.h> |
23 | 23 | ||
24 | /* AArch32-specific ptrace requests */ | 24 | /* AArch32-specific ptrace requests */ |
25 | #define COMPAT_PTRACE_GETREGS 12 | 25 | #define COMPAT_PTRACE_GETREGS 12 |
26 | #define COMPAT_PTRACE_SETREGS 13 | 26 | #define COMPAT_PTRACE_SETREGS 13 |
27 | #define COMPAT_PTRACE_GET_THREAD_AREA 22 | 27 | #define COMPAT_PTRACE_GET_THREAD_AREA 22 |
28 | #define COMPAT_PTRACE_SET_SYSCALL 23 | 28 | #define COMPAT_PTRACE_SET_SYSCALL 23 |
29 | #define COMPAT_PTRACE_GETVFPREGS 27 | 29 | #define COMPAT_PTRACE_GETVFPREGS 27 |
30 | #define COMPAT_PTRACE_SETVFPREGS 28 | 30 | #define COMPAT_PTRACE_SETVFPREGS 28 |
31 | #define COMPAT_PTRACE_GETHBPREGS 29 | 31 | #define COMPAT_PTRACE_GETHBPREGS 29 |
32 | #define COMPAT_PTRACE_SETHBPREGS 30 | 32 | #define COMPAT_PTRACE_SETHBPREGS 30 |
33 | 33 | ||
34 | /* AArch32 CPSR bits */ | 34 | /* AArch32 CPSR bits */ |
35 | #define COMPAT_PSR_MODE_MASK 0x0000001f | 35 | #define COMPAT_PSR_MODE_MASK 0x0000001f |
36 | #define COMPAT_PSR_MODE_USR 0x00000010 | 36 | #define COMPAT_PSR_MODE_USR 0x00000010 |
37 | #define COMPAT_PSR_MODE_FIQ 0x00000011 | 37 | #define COMPAT_PSR_MODE_FIQ 0x00000011 |
38 | #define COMPAT_PSR_MODE_IRQ 0x00000012 | 38 | #define COMPAT_PSR_MODE_IRQ 0x00000012 |
39 | #define COMPAT_PSR_MODE_SVC 0x00000013 | 39 | #define COMPAT_PSR_MODE_SVC 0x00000013 |
40 | #define COMPAT_PSR_MODE_ABT 0x00000017 | 40 | #define COMPAT_PSR_MODE_ABT 0x00000017 |
41 | #define COMPAT_PSR_MODE_HYP 0x0000001a | 41 | #define COMPAT_PSR_MODE_HYP 0x0000001a |
42 | #define COMPAT_PSR_MODE_UND 0x0000001b | 42 | #define COMPAT_PSR_MODE_UND 0x0000001b |
43 | #define COMPAT_PSR_MODE_SYS 0x0000001f | 43 | #define COMPAT_PSR_MODE_SYS 0x0000001f |
44 | #define COMPAT_PSR_T_BIT 0x00000020 | 44 | #define COMPAT_PSR_T_BIT 0x00000020 |
45 | #define COMPAT_PSR_F_BIT 0x00000040 | ||
46 | #define COMPAT_PSR_I_BIT 0x00000080 | ||
47 | #define COMPAT_PSR_A_BIT 0x00000100 | ||
48 | #define COMPAT_PSR_E_BIT 0x00000200 | ||
49 | #define COMPAT_PSR_J_BIT 0x01000000 | ||
50 | #define COMPAT_PSR_Q_BIT 0x08000000 | ||
51 | #define COMPAT_PSR_V_BIT 0x10000000 | ||
52 | #define COMPAT_PSR_C_BIT 0x20000000 | ||
53 | #define COMPAT_PSR_Z_BIT 0x40000000 | ||
54 | #define COMPAT_PSR_N_BIT 0x80000000 | ||
45 | #define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ | 55 | #define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ |
46 | /* | 56 | /* |
47 | * These are 'magic' values for PTRACE_PEEKUSR that return info about where a | 57 | * These are 'magic' values for PTRACE_PEEKUSR that return info about where a |
48 | * process is located in memory. | 58 | * process is located in memory. |
49 | */ | 59 | */ |
50 | #define COMPAT_PT_TEXT_ADDR 0x10000 | 60 | #define COMPAT_PT_TEXT_ADDR 0x10000 |
51 | #define COMPAT_PT_DATA_ADDR 0x10004 | 61 | #define COMPAT_PT_DATA_ADDR 0x10004 |
52 | #define COMPAT_PT_TEXT_END_ADDR 0x10008 | 62 | #define COMPAT_PT_TEXT_END_ADDR 0x10008 |
53 | #ifndef __ASSEMBLY__ | 63 | #ifndef __ASSEMBLY__ |
54 | 64 | ||
55 | /* sizeof(struct user) for AArch32 */ | 65 | /* sizeof(struct user) for AArch32 */ |
56 | #define COMPAT_USER_SZ 296 | 66 | #define COMPAT_USER_SZ 296 |
57 | 67 | ||
58 | /* Architecturally defined mapping between AArch32 and AArch64 registers */ | 68 | /* Architecturally defined mapping between AArch32 and AArch64 registers */ |
59 | #define compat_usr(x) regs[(x)] | 69 | #define compat_usr(x) regs[(x)] |
60 | #define compat_sp regs[13] | 70 | #define compat_sp regs[13] |
61 | #define compat_lr regs[14] | 71 | #define compat_lr regs[14] |
62 | #define compat_sp_hyp regs[15] | 72 | #define compat_sp_hyp regs[15] |
63 | #define compat_sp_irq regs[16] | 73 | #define compat_sp_irq regs[16] |
64 | #define compat_lr_irq regs[17] | 74 | #define compat_lr_irq regs[17] |
65 | #define compat_sp_svc regs[18] | 75 | #define compat_sp_svc regs[18] |
66 | #define compat_lr_svc regs[19] | 76 | #define compat_lr_svc regs[19] |
67 | #define compat_sp_abt regs[20] | 77 | #define compat_sp_abt regs[20] |
68 | #define compat_lr_abt regs[21] | 78 | #define compat_lr_abt regs[21] |
69 | #define compat_sp_und regs[22] | 79 | #define compat_sp_und regs[22] |
70 | #define compat_lr_und regs[23] | 80 | #define compat_lr_und regs[23] |
71 | #define compat_r8_fiq regs[24] | 81 | #define compat_r8_fiq regs[24] |
72 | #define compat_r9_fiq regs[25] | 82 | #define compat_r9_fiq regs[25] |
73 | #define compat_r10_fiq regs[26] | 83 | #define compat_r10_fiq regs[26] |
74 | #define compat_r11_fiq regs[27] | 84 | #define compat_r11_fiq regs[27] |
75 | #define compat_r12_fiq regs[28] | 85 | #define compat_r12_fiq regs[28] |
76 | #define compat_sp_fiq regs[29] | 86 | #define compat_sp_fiq regs[29] |
77 | #define compat_lr_fiq regs[30] | 87 | #define compat_lr_fiq regs[30] |
78 | 88 | ||
79 | /* | 89 | /* |
80 | * This struct defines the way the registers are stored on the stack during an | 90 | * This struct defines the way the registers are stored on the stack during an |
81 | * exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for | 91 | * exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for |
82 | * stack alignment). struct user_pt_regs must form a prefix of struct pt_regs. | 92 | * stack alignment). struct user_pt_regs must form a prefix of struct pt_regs. |
83 | */ | 93 | */ |
84 | struct pt_regs { | 94 | struct pt_regs { |
85 | union { | 95 | union { |
86 | struct user_pt_regs user_regs; | 96 | struct user_pt_regs user_regs; |
87 | struct { | 97 | struct { |
88 | u64 regs[31]; | 98 | u64 regs[31]; |
89 | u64 sp; | 99 | u64 sp; |
90 | u64 pc; | 100 | u64 pc; |
91 | u64 pstate; | 101 | u64 pstate; |
92 | }; | 102 | }; |
93 | }; | 103 | }; |
94 | u64 orig_x0; | 104 | u64 orig_x0; |
95 | u64 syscallno; | 105 | u64 syscallno; |
96 | }; | 106 | }; |
97 | 107 | ||
98 | #define arch_has_single_step() (1) | 108 | #define arch_has_single_step() (1) |
99 | 109 | ||
100 | #ifdef CONFIG_COMPAT | 110 | #ifdef CONFIG_COMPAT |
101 | #define compat_thumb_mode(regs) \ | 111 | #define compat_thumb_mode(regs) \ |
102 | (((regs)->pstate & COMPAT_PSR_T_BIT)) | 112 | (((regs)->pstate & COMPAT_PSR_T_BIT)) |
103 | #else | 113 | #else |
104 | #define compat_thumb_mode(regs) (0) | 114 | #define compat_thumb_mode(regs) (0) |
105 | #endif | 115 | #endif |
106 | 116 | ||
107 | #define user_mode(regs) \ | 117 | #define user_mode(regs) \ |
108 | (((regs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t) | 118 | (((regs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t) |
109 | 119 | ||
110 | #define compat_user_mode(regs) \ | 120 | #define compat_user_mode(regs) \ |
111 | (((regs)->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) == \ | 121 | (((regs)->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) == \ |
112 | (PSR_MODE32_BIT | PSR_MODE_EL0t)) | 122 | (PSR_MODE32_BIT | PSR_MODE_EL0t)) |
113 | 123 | ||
114 | #define processor_mode(regs) \ | 124 | #define processor_mode(regs) \ |
115 | ((regs)->pstate & PSR_MODE_MASK) | 125 | ((regs)->pstate & PSR_MODE_MASK) |
116 | 126 | ||
117 | #define interrupts_enabled(regs) \ | 127 | #define interrupts_enabled(regs) \ |
118 | (!((regs)->pstate & PSR_I_BIT)) | 128 | (!((regs)->pstate & PSR_I_BIT)) |
119 | 129 | ||
120 | #define fast_interrupts_enabled(regs) \ | 130 | #define fast_interrupts_enabled(regs) \ |
121 | (!((regs)->pstate & PSR_F_BIT)) | 131 | (!((regs)->pstate & PSR_F_BIT)) |
122 | 132 | ||
123 | #define user_stack_pointer(regs) \ | 133 | #define user_stack_pointer(regs) \ |
124 | ((regs)->sp) | 134 | ((regs)->sp) |
125 | 135 | ||
126 | /* | 136 | /* |
127 | * Are the current registers suitable for user mode? (used to maintain | 137 | * Are the current registers suitable for user mode? (used to maintain |
128 | * security in signal handlers) | 138 | * security in signal handlers) |
129 | */ | 139 | */ |
130 | static inline int valid_user_regs(struct user_pt_regs *regs) | 140 | static inline int valid_user_regs(struct user_pt_regs *regs) |
131 | { | 141 | { |
132 | if (user_mode(regs) && (regs->pstate & PSR_I_BIT) == 0) { | 142 | if (user_mode(regs) && (regs->pstate & PSR_I_BIT) == 0) { |
133 | regs->pstate &= ~(PSR_F_BIT | PSR_A_BIT); | 143 | regs->pstate &= ~(PSR_F_BIT | PSR_A_BIT); |
134 | 144 | ||
135 | /* The T bit is reserved for AArch64 */ | 145 | /* The T bit is reserved for AArch64 */ |
136 | if (!(regs->pstate & PSR_MODE32_BIT)) | 146 | if (!(regs->pstate & PSR_MODE32_BIT)) |
137 | regs->pstate &= ~COMPAT_PSR_T_BIT; | 147 | regs->pstate &= ~COMPAT_PSR_T_BIT; |
138 | 148 | ||
139 | return 1; | 149 | return 1; |
140 | } | 150 | } |
141 | 151 | ||
142 | /* | 152 | /* |
143 | * Force PSR to something logical... | 153 | * Force PSR to something logical... |
144 | */ | 154 | */ |
145 | regs->pstate &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | \ | 155 | regs->pstate &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | \ |
146 | COMPAT_PSR_T_BIT | PSR_MODE32_BIT; | 156 | COMPAT_PSR_T_BIT | PSR_MODE32_BIT; |
147 | 157 | ||
148 | if (!(regs->pstate & PSR_MODE32_BIT)) { | 158 | if (!(regs->pstate & PSR_MODE32_BIT)) { |
149 | regs->pstate &= ~COMPAT_PSR_T_BIT; | 159 | regs->pstate &= ~COMPAT_PSR_T_BIT; |
150 | regs->pstate |= PSR_MODE_EL0t; | 160 | regs->pstate |= PSR_MODE_EL0t; |
151 | } | 161 | } |
152 | 162 | ||
153 | return 0; | 163 | return 0; |
154 | } | 164 | } |
155 | 165 | ||
156 | #define instruction_pointer(regs) (regs)->pc | 166 | #define instruction_pointer(regs) (regs)->pc |
157 | 167 | ||
158 | #ifdef CONFIG_SMP | 168 | #ifdef CONFIG_SMP |
159 | extern unsigned long profile_pc(struct pt_regs *regs); | 169 | extern unsigned long profile_pc(struct pt_regs *regs); |
160 | #else | 170 | #else |
161 | #define profile_pc(regs) instruction_pointer(regs) | 171 | #define profile_pc(regs) instruction_pointer(regs) |
162 | #endif | 172 | #endif |
163 | 173 | ||
164 | extern int aarch32_break_trap(struct pt_regs *regs); | 174 | extern int aarch32_break_trap(struct pt_regs *regs); |
165 | 175 | ||
166 | #endif /* __ASSEMBLY__ */ | 176 | #endif /* __ASSEMBLY__ */ |
167 | #endif | 177 | #endif |
168 | 178 |
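The COMPAT_PSR_*_BIT definitions added above mirror the AArch32 PSR_*_BIT flags, so code handling AArch32 (compat) tasks can decode the saved PSR in pt_regs->pstate without reaching into arch/arm headers. A minimal sketch of the idiom, using a hypothetical helper name that is not part of this commit:

	/* Extract the AArch32 condition flags from a compat exception frame. */
	static inline u32 compat_condition_flags(struct pt_regs *regs)
	{
		/* N, Z, C and V occupy the top four bits of the AArch32 PSR. */
		return regs->pstate & (COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
				       COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT);
	}

This is only meaningful when compat_user_mode(regs) is true; for a native AArch64 frame the same bit positions hold the AArch64 NZCV flags.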
arch/arm64/include/asm/smp.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 ARM Ltd. | 2 | * Copyright (C) 2012 ARM Ltd. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * This program is distributed in the hope that it will be useful, | 8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
11 | * GNU General Public License for more details. | 11 | * GNU General Public License for more details. |
12 | * | 12 | * |
13 | * You should have received a copy of the GNU General Public License | 13 | * You should have received a copy of the GNU General Public License |
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
15 | */ | 15 | */ |
16 | #ifndef __ASM_SMP_H | 16 | #ifndef __ASM_SMP_H |
17 | #define __ASM_SMP_H | 17 | #define __ASM_SMP_H |
18 | 18 | ||
19 | #include <linux/threads.h> | 19 | #include <linux/threads.h> |
20 | #include <linux/cpumask.h> | 20 | #include <linux/cpumask.h> |
21 | #include <linux/thread_info.h> | 21 | #include <linux/thread_info.h> |
22 | 22 | ||
23 | #ifndef CONFIG_SMP | 23 | #ifndef CONFIG_SMP |
24 | # error "<asm/smp.h> included in non-SMP build" | 24 | # error "<asm/smp.h> included in non-SMP build" |
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | #define raw_smp_processor_id() (current_thread_info()->cpu) | 27 | #define raw_smp_processor_id() (current_thread_info()->cpu) |
28 | 28 | ||
29 | struct seq_file; | 29 | struct seq_file; |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * generate IPI list text | 32 | * generate IPI list text |
33 | */ | 33 | */ |
34 | extern void show_ipi_list(struct seq_file *p, int prec); | 34 | extern void show_ipi_list(struct seq_file *p, int prec); |
35 | 35 | ||
36 | /* | 36 | /* |
37 | * Called from C code, this handles an IPI. | 37 | * Called from C code, this handles an IPI. |
38 | */ | 38 | */ |
39 | extern void handle_IPI(int ipinr, struct pt_regs *regs); | 39 | extern void handle_IPI(int ipinr, struct pt_regs *regs); |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * Setup the set of possible CPUs (via set_cpu_possible) | 42 | * Setup the set of possible CPUs (via set_cpu_possible) |
43 | */ | 43 | */ |
44 | extern void smp_init_cpus(void); | 44 | extern void smp_init_cpus(void); |
45 | 45 | ||
46 | /* | 46 | /* |
47 | * Provide a function to raise an IPI cross call on CPUs in callmap. | 47 | * Provide a function to raise an IPI cross call on CPUs in callmap. |
48 | */ | 48 | */ |
49 | extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int)); | 49 | extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int)); |
50 | 50 | ||
51 | /* | 51 | /* |
52 | * Called from the secondary holding pen, this is the secondary CPU entry point. | 52 | * Called from the secondary holding pen, this is the secondary CPU entry point. |
53 | */ | 53 | */ |
54 | asmlinkage void secondary_start_kernel(void); | 54 | asmlinkage void secondary_start_kernel(void); |
55 | 55 | ||
56 | /* | 56 | /* |
57 | * Initial data for bringing up a secondary CPU. | 57 | * Initial data for bringing up a secondary CPU. |
58 | */ | 58 | */ |
59 | struct secondary_data { | 59 | struct secondary_data { |
60 | void *stack; | 60 | void *stack; |
61 | }; | 61 | }; |
62 | extern struct secondary_data secondary_data; | 62 | extern struct secondary_data secondary_data; |
63 | extern void secondary_holding_pen(void); | 63 | extern void secondary_holding_pen(void); |
64 | extern volatile unsigned long secondary_holding_pen_release; | 64 | extern volatile unsigned long secondary_holding_pen_release; |
65 | 65 | ||
66 | extern void arch_send_call_function_single_ipi(int cpu); | 66 | extern void arch_send_call_function_single_ipi(int cpu); |
67 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); | 67 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); |
68 | 68 | ||
69 | struct device_node; | ||
70 | |||
71 | struct smp_enable_ops { | ||
72 | const char *name; | ||
73 | int (*init_cpu)(struct device_node *, int); | ||
74 | int (*prepare_cpu)(int); | ||
75 | }; | ||
76 | |||
77 | extern const struct smp_enable_ops smp_spin_table_ops; | ||
78 | extern const struct smp_enable_ops smp_psci_ops; | ||
79 | |||
69 | #endif /* ifndef __ASM_SMP_H */ | 80 | #endif /* ifndef __ASM_SMP_H */ |
70 | 81 |
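struct smp_enable_ops is what makes the SMP code enabling-method agnostic: the boot CPU matches the "enable-method" string from each CPU's device-tree node against ops->name, calls init_cpu() while enumerating CPUs, and calls prepare_cpu() when a CPU is actually brought up. A skeleton of a backend, with made-up identifiers (only the smp_enable_ops shape comes from this header):

	#include <linux/of.h>
	#include <asm/smp.h>

	static int example_init_cpu(struct device_node *dn, int cpu)
	{
		/* Parse per-cpu boot properties (e.g. a release address) from the DT. */
		return 0;
	}

	static int example_prepare_cpu(int cpu)
	{
		/* Release the CPU from its holding pen or call into firmware. */
		return 0;
	}

	const struct smp_enable_ops smp_example_ops = {
		.name		= "example-method",
		.init_cpu	= example_init_cpu,
		.prepare_cpu	= example_prepare_cpu,
	};

The two real backends exported here, smp_spin_table_ops and smp_psci_ops, follow this pattern.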
arch/arm64/include/asm/spinlock.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 ARM Ltd. | 2 | * Copyright (C) 2012 ARM Ltd. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * This program is distributed in the hope that it will be useful, | 8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
11 | * GNU General Public License for more details. | 11 | * GNU General Public License for more details. |
12 | * | 12 | * |
13 | * You should have received a copy of the GNU General Public License | 13 | * You should have received a copy of the GNU General Public License |
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
15 | */ | 15 | */ |
16 | #ifndef __ASM_SPINLOCK_H | 16 | #ifndef __ASM_SPINLOCK_H |
17 | #define __ASM_SPINLOCK_H | 17 | #define __ASM_SPINLOCK_H |
18 | 18 | ||
19 | #include <asm/spinlock_types.h> | 19 | #include <asm/spinlock_types.h> |
20 | #include <asm/processor.h> | 20 | #include <asm/processor.h> |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * Spinlock implementation. | 23 | * Spinlock implementation. |
24 | * | 24 | * |
25 | * The old value is read exclusively and the new one, if unlocked, is written | 25 | * The old value is read exclusively and the new one, if unlocked, is written |
26 | * exclusively. In case of failure, the loop is restarted. | 26 | * exclusively. In case of failure, the loop is restarted. |
27 | * | 27 | * |
28 | * The memory barriers are implicit with the load-acquire and store-release | 28 | * The memory barriers are implicit with the load-acquire and store-release |
29 | * instructions. | 29 | * instructions. |
30 | * | 30 | * |
31 | * Unlocked value: 0 | 31 | * Unlocked value: 0 |
32 | * Locked value: 1 | 32 | * Locked value: 1 |
33 | */ | 33 | */ |
34 | 34 | ||
35 | #define arch_spin_is_locked(x) ((x)->lock != 0) | 35 | #define arch_spin_is_locked(x) ((x)->lock != 0) |
36 | #define arch_spin_unlock_wait(lock) \ | 36 | #define arch_spin_unlock_wait(lock) \ |
37 | do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) | 37 | do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) |
38 | 38 | ||
39 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | 39 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
40 | 40 | ||
41 | static inline void arch_spin_lock(arch_spinlock_t *lock) | 41 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
42 | { | 42 | { |
43 | unsigned int tmp; | 43 | unsigned int tmp; |
44 | 44 | ||
45 | asm volatile( | 45 | asm volatile( |
46 | " sevl\n" | 46 | " sevl\n" |
47 | "1: wfe\n" | 47 | "1: wfe\n" |
48 | "2: ldaxr %w0, [%1]\n" | 48 | "2: ldaxr %w0, %1\n" |
49 | " cbnz %w0, 1b\n" | 49 | " cbnz %w0, 1b\n" |
50 | " stxr %w0, %w2, [%1]\n" | 50 | " stxr %w0, %w2, %1\n" |
51 | " cbnz %w0, 2b\n" | 51 | " cbnz %w0, 2b\n" |
52 | : "=&r" (tmp) | 52 | : "=&r" (tmp), "+Q" (lock->lock) |
53 | : "r" (&lock->lock), "r" (1) | 53 | : "r" (1) |
54 | : "memory"); | 54 | : "cc", "memory"); |
55 | } | 55 | } |
56 | 56 | ||
57 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | 57 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
58 | { | 58 | { |
59 | unsigned int tmp; | 59 | unsigned int tmp; |
60 | 60 | ||
61 | asm volatile( | 61 | asm volatile( |
62 | " ldaxr %w0, [%1]\n" | 62 | " ldaxr %w0, %1\n" |
63 | " cbnz %w0, 1f\n" | 63 | " cbnz %w0, 1f\n" |
64 | " stxr %w0, %w2, [%1]\n" | 64 | " stxr %w0, %w2, %1\n" |
65 | "1:\n" | 65 | "1:\n" |
66 | : "=&r" (tmp) | 66 | : "=&r" (tmp), "+Q" (lock->lock) |
67 | : "r" (&lock->lock), "r" (1) | 67 | : "r" (1) |
68 | : "memory"); | 68 | : "cc", "memory"); |
69 | 69 | ||
70 | return !tmp; | 70 | return !tmp; |
71 | } | 71 | } |
72 | 72 | ||
73 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | 73 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
74 | { | 74 | { |
75 | asm volatile( | 75 | asm volatile( |
76 | " stlr %w1, [%0]\n" | 76 | " stlr %w1, %0\n" |
77 | : : "r" (&lock->lock), "r" (0) : "memory"); | 77 | : "=Q" (lock->lock) : "r" (0) : "memory"); |
78 | } | 78 | } |
79 | 79 | ||
80 | /* | 80 | /* |
81 | * Write lock implementation. | 81 | * Write lock implementation. |
82 | * | 82 | * |
83 | * Write locks set bit 31. Unlocking is done by writing 0 since the lock is | 83 | * Write locks set bit 31. Unlocking is done by writing 0 since the lock is |
84 | * exclusively held. | 84 | * exclusively held. |
85 | * | 85 | * |
86 | * The memory barriers are implicit with the load-acquire and store-release | 86 | * The memory barriers are implicit with the load-acquire and store-release |
87 | * instructions. | 87 | * instructions. |
88 | */ | 88 | */ |
89 | 89 | ||
90 | static inline void arch_write_lock(arch_rwlock_t *rw) | 90 | static inline void arch_write_lock(arch_rwlock_t *rw) |
91 | { | 91 | { |
92 | unsigned int tmp; | 92 | unsigned int tmp; |
93 | 93 | ||
94 | asm volatile( | 94 | asm volatile( |
95 | " sevl\n" | 95 | " sevl\n" |
96 | "1: wfe\n" | 96 | "1: wfe\n" |
97 | "2: ldaxr %w0, [%1]\n" | 97 | "2: ldaxr %w0, %1\n" |
98 | " cbnz %w0, 1b\n" | 98 | " cbnz %w0, 1b\n" |
99 | " stxr %w0, %w2, [%1]\n" | 99 | " stxr %w0, %w2, %1\n" |
100 | " cbnz %w0, 2b\n" | 100 | " cbnz %w0, 2b\n" |
101 | : "=&r" (tmp) | 101 | : "=&r" (tmp), "+Q" (rw->lock) |
102 | : "r" (&rw->lock), "r" (0x80000000) | 102 | : "r" (0x80000000) |
103 | : "memory"); | 103 | : "cc", "memory"); |
104 | } | 104 | } |
105 | 105 | ||
106 | static inline int arch_write_trylock(arch_rwlock_t *rw) | 106 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
107 | { | 107 | { |
108 | unsigned int tmp; | 108 | unsigned int tmp; |
109 | 109 | ||
110 | asm volatile( | 110 | asm volatile( |
111 | " ldaxr %w0, [%1]\n" | 111 | " ldaxr %w0, %1\n" |
112 | " cbnz %w0, 1f\n" | 112 | " cbnz %w0, 1f\n" |
113 | " stxr %w0, %w2, [%1]\n" | 113 | " stxr %w0, %w2, %1\n" |
114 | "1:\n" | 114 | "1:\n" |
115 | : "=&r" (tmp) | 115 | : "=&r" (tmp), "+Q" (rw->lock) |
116 | : "r" (&rw->lock), "r" (0x80000000) | 116 | : "r" (0x80000000) |
117 | : "memory"); | 117 | : "cc", "memory"); |
118 | 118 | ||
119 | return !tmp; | 119 | return !tmp; |
120 | } | 120 | } |
121 | 121 | ||
122 | static inline void arch_write_unlock(arch_rwlock_t *rw) | 122 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
123 | { | 123 | { |
124 | asm volatile( | 124 | asm volatile( |
125 | " stlr %w1, [%0]\n" | 125 | " stlr %w1, %0\n" |
126 | : : "r" (&rw->lock), "r" (0) : "memory"); | 126 | : "=Q" (rw->lock) : "r" (0) : "memory"); |
127 | } | 127 | } |
128 | 128 | ||
129 | /* write_can_lock - would write_trylock() succeed? */ | 129 | /* write_can_lock - would write_trylock() succeed? */ |
130 | #define arch_write_can_lock(x) ((x)->lock == 0) | 130 | #define arch_write_can_lock(x) ((x)->lock == 0) |
131 | 131 | ||
132 | /* | 132 | /* |
133 | * Read lock implementation. | 133 | * Read lock implementation. |
134 | * | 134 | * |
135 | * It exclusively loads the lock value, increments it and stores the new value | 135 | * It exclusively loads the lock value, increments it and stores the new value |
136 | * back if positive and the CPU still exclusively owns the location. If the | 136 | * back if positive and the CPU still exclusively owns the location. If the |
137 | * value is negative, the lock is already held. | 137 | * value is negative, the lock is already held. |
138 | * | 138 | * |
139 | * During unlocking there may be multiple active read locks but no write lock. | 139 | * During unlocking there may be multiple active read locks but no write lock. |
140 | * | 140 | * |
141 | * The memory barriers are implicit with the load-acquire and store-release | 141 | * The memory barriers are implicit with the load-acquire and store-release |
142 | * instructions. | 142 | * instructions. |
143 | */ | 143 | */ |
144 | static inline void arch_read_lock(arch_rwlock_t *rw) | 144 | static inline void arch_read_lock(arch_rwlock_t *rw) |
145 | { | 145 | { |
146 | unsigned int tmp, tmp2; | 146 | unsigned int tmp, tmp2; |
147 | 147 | ||
148 | asm volatile( | 148 | asm volatile( |
149 | " sevl\n" | 149 | " sevl\n" |
150 | "1: wfe\n" | 150 | "1: wfe\n" |
151 | "2: ldaxr %w0, [%2]\n" | 151 | "2: ldaxr %w0, %2\n" |
152 | " add %w0, %w0, #1\n" | 152 | " add %w0, %w0, #1\n" |
153 | " tbnz %w0, #31, 1b\n" | 153 | " tbnz %w0, #31, 1b\n" |
154 | " stxr %w1, %w0, [%2]\n" | 154 | " stxr %w1, %w0, %2\n" |
155 | " cbnz %w1, 2b\n" | 155 | " cbnz %w1, 2b\n" |
156 | : "=&r" (tmp), "=&r" (tmp2) | 156 | : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) |
157 | : "r" (&rw->lock) | 157 | : |
158 | : "memory"); | 158 | : "cc", "memory"); |
159 | } | 159 | } |
160 | 160 | ||
161 | static inline void arch_read_unlock(arch_rwlock_t *rw) | 161 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
162 | { | 162 | { |
163 | unsigned int tmp, tmp2; | 163 | unsigned int tmp, tmp2; |
164 | 164 | ||
165 | asm volatile( | 165 | asm volatile( |
166 | "1: ldxr %w0, [%2]\n" | 166 | "1: ldxr %w0, %2\n" |
167 | " sub %w0, %w0, #1\n" | 167 | " sub %w0, %w0, #1\n" |
168 | " stlxr %w1, %w0, [%2]\n" | 168 | " stlxr %w1, %w0, %2\n" |
169 | " cbnz %w1, 1b\n" | 169 | " cbnz %w1, 1b\n" |
170 | : "=&r" (tmp), "=&r" (tmp2) | 170 | : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) |
171 | : "r" (&rw->lock) | 171 | : |
172 | : "memory"); | 172 | : "cc", "memory"); |
173 | } | 173 | } |
174 | 174 | ||
175 | static inline int arch_read_trylock(arch_rwlock_t *rw) | 175 | static inline int arch_read_trylock(arch_rwlock_t *rw) |
176 | { | 176 | { |
177 | unsigned int tmp, tmp2 = 1; | 177 | unsigned int tmp, tmp2 = 1; |
178 | 178 | ||
179 | asm volatile( | 179 | asm volatile( |
180 | " ldaxr %w0, [%2]\n" | 180 | " ldaxr %w0, %2\n" |
181 | " add %w0, %w0, #1\n" | 181 | " add %w0, %w0, #1\n" |
182 | " tbnz %w0, #31, 1f\n" | 182 | " tbnz %w0, #31, 1f\n" |
183 | " stxr %w1, %w0, [%2]\n" | 183 | " stxr %w1, %w0, %2\n" |
184 | "1:\n" | 184 | "1:\n" |
185 | : "=&r" (tmp), "+r" (tmp2) | 185 | : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock) |
186 | : "r" (&rw->lock) | 186 | : |
187 | : "memory"); | 187 | : "cc", "memory"); |
188 | 188 | ||
189 | return !tmp2; | 189 | return !tmp2; |
190 | } | 190 | } |
191 | 191 | ||
192 | /* read_can_lock - would read_trylock() succeed? */ | 192 | /* read_can_lock - would read_trylock() succeed? */ |
193 | #define arch_read_can_lock(x) ((x)->lock < 0x80000000) | 193 | #define arch_read_can_lock(x) ((x)->lock < 0x80000000) |
194 | 194 | ||
195 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | 195 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
196 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | 196 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
197 | 197 | ||
198 | #define arch_spin_relax(lock) cpu_relax() | 198 | #define arch_spin_relax(lock) cpu_relax() |
199 | #define arch_read_relax(lock) cpu_relax() | 199 | #define arch_read_relax(lock) cpu_relax() |
200 | #define arch_write_relax(lock) cpu_relax() | 200 | #define arch_write_relax(lock) cpu_relax() |
201 | 201 | ||
202 | #endif /* __ASM_SPINLOCK_H */ | 202 | #endif /* __ASM_SPINLOCK_H */ |
203 | 203 |
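The constraint rework above is the substance of the "fix grossly inconsistent asm constraints for exclusives" patch: instead of passing &lock->lock in a general register and dereferencing it with [%1], which hides the actual memory access from the compiler behind the blanket "memory" clobber, the lock word is now a "+Q" (read/write) or "=Q" (write-only) memory operand. A minimal stand-alone sketch of the idiom, AArch64-only and with an illustrative name:

	/* Toy test-and-set lock built on load-acquire/store-exclusive. */
	static inline void toy_lock(unsigned int *lock)
	{
		unsigned int tmp;

		asm volatile(
		"1:	ldaxr	%w0, %1\n"	/* load-acquire exclusive */
		"	cbnz	%w0, 1b\n"	/* spin while the lock is held */
		"	stxr	%w0, %w2, %1\n"	/* try to store 1 */
		"	cbnz	%w0, 1b\n"	/* retry if exclusivity was lost */
		: "=&r" (tmp), "+Q" (*lock)
		: "r" (1)
		: "memory");
	}

"Q" tells GCC the operand is a memory location addressed by a single base register with no offset, which is exactly the addressing mode ldaxr/stxr accept.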
arch/arm64/include/uapi/asm/Kbuild
1 | # UAPI Header export list | 1 | # UAPI Header export list |
2 | include include/uapi/asm-generic/Kbuild.asm | 2 | include include/uapi/asm-generic/Kbuild.asm |
3 | 3 | ||
4 | generic-y += kvm_para.h | ||
5 | |||
4 | header-y += auxvec.h | 6 | header-y += auxvec.h |
5 | header-y += bitsperlong.h | 7 | header-y += bitsperlong.h |
6 | header-y += byteorder.h | 8 | header-y += byteorder.h |
7 | header-y += fcntl.h | 9 | header-y += fcntl.h |
8 | header-y += hwcap.h | 10 | header-y += hwcap.h |
11 | header-y += kvm_para.h | ||
9 | header-y += param.h | 12 | header-y += param.h |
10 | header-y += ptrace.h | 13 | header-y += ptrace.h |
11 | header-y += setup.h | 14 | header-y += setup.h |
12 | header-y += sigcontext.h | 15 | header-y += sigcontext.h |
13 | header-y += siginfo.h | 16 | header-y += siginfo.h |
14 | header-y += signal.h | 17 | header-y += signal.h |
15 | header-y += stat.h | 18 | header-y += stat.h |
16 | header-y += statfs.h | 19 | header-y += statfs.h |
17 | header-y += unistd.h | 20 | header-y += unistd.h |
18 | 21 |
arch/arm64/kernel/Makefile
1 | # | 1 | # |
2 | # Makefile for the linux kernel. | 2 | # Makefile for the linux kernel. |
3 | # | 3 | # |
4 | 4 | ||
5 | CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET) | 5 | CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET) |
6 | AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) | 6 | AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) |
7 | 7 | ||
8 | # Object file lists. | 8 | # Object file lists. |
9 | arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ | 9 | arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ |
10 | entry-fpsimd.o process.o ptrace.o setup.o signal.o \ | 10 | entry-fpsimd.o process.o ptrace.o setup.o signal.o \ |
11 | sys.o stacktrace.o time.o traps.o io.o vdso.o \ | 11 | sys.o stacktrace.o time.o traps.o io.o vdso.o \ |
12 | hyp-stub.o | 12 | hyp-stub.o psci.o |
13 | 13 | ||
14 | arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ | 14 | arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ |
15 | sys_compat.o | 15 | sys_compat.o |
16 | arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o | 16 | arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o |
17 | arm64-obj-$(CONFIG_SMP) += smp.o | 17 | arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o |
18 | arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o | 18 | arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o |
19 | arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o | 19 | arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o |
20 | arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | ||
20 | 21 | ||
21 | obj-y += $(arm64-obj-y) vdso/ | 22 | obj-y += $(arm64-obj-y) vdso/ |
22 | obj-m += $(arm64-obj-m) | 23 | obj-m += $(arm64-obj-m) |
23 | head-y := head.o | 24 | head-y := head.o |
24 | extra-y := $(head-y) vmlinux.lds | 25 | extra-y := $(head-y) vmlinux.lds |
25 | 26 | ||
26 | # vDSO - this must be built first to generate the symbol offsets | 27 | # vDSO - this must be built first to generate the symbol offsets |
27 | $(call objectify,$(arm64-obj-y)): $(obj)/vdso/vdso-offsets.h | 28 | $(call objectify,$(arm64-obj-y)): $(obj)/vdso/vdso-offsets.h |
28 | $(obj)/vdso/vdso-offsets.h: $(obj)/vdso | 29 | $(obj)/vdso/vdso-offsets.h: $(obj)/vdso |
29 | 30 |
arch/arm64/kernel/early_printk.c
File was created | 1 | /* | |
2 | * Earlyprintk support. | ||
3 | * | ||
4 | * Copyright (C) 2012 ARM Ltd. | ||
5 | * Author: Catalin Marinas <catalin.marinas@arm.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/console.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/io.h> | ||
25 | |||
26 | #include <linux/amba/serial.h> | ||
27 | |||
28 | static void __iomem *early_base; | ||
29 | static void (*printch)(char ch); | ||
30 | |||
31 | /* | ||
32 | * PL011 single character TX. | ||
33 | */ | ||
34 | static void pl011_printch(char ch) | ||
35 | { | ||
36 | while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_TXFF) | ||
37 | ; | ||
38 | writeb_relaxed(ch, early_base + UART01x_DR); | ||
39 | while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_BUSY) | ||
40 | ; | ||
41 | } | ||
42 | |||
43 | struct earlycon_match { | ||
44 | const char *name; | ||
45 | void (*printch)(char ch); | ||
46 | }; | ||
47 | |||
48 | static const struct earlycon_match earlycon_match[] __initconst = { | ||
49 | { .name = "pl011", .printch = pl011_printch, }, | ||
50 | {} | ||
51 | }; | ||
52 | |||
53 | static void early_write(struct console *con, const char *s, unsigned n) | ||
54 | { | ||
55 | while (n-- > 0) { | ||
56 | if (*s == '\n') | ||
57 | printch('\r'); | ||
58 | printch(*s); | ||
59 | s++; | ||
60 | } | ||
61 | } | ||
62 | |||
63 | static struct console early_console = { | ||
64 | .name = "earlycon", | ||
65 | .write = early_write, | ||
66 | .flags = CON_PRINTBUFFER | CON_BOOT, | ||
67 | .index = -1, | ||
68 | }; | ||
69 | |||
70 | /* | ||
71 | * Parse earlyprintk=... parameter in the format: | ||
72 | * | ||
73 | * <name>[,<addr>][,<options>] | ||
74 | * | ||
75 | * and register the early console. It is assumed that the UART has been | ||
76 | * initialised by the bootloader already. | ||
77 | */ | ||
78 | static int __init setup_early_printk(char *buf) | ||
79 | { | ||
80 | const struct earlycon_match *match = earlycon_match; | ||
81 | phys_addr_t paddr = 0; | ||
82 | |||
83 | if (!buf) { | ||
84 | pr_warning("No earlyprintk arguments passed.\n"); | ||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | while (match->name) { | ||
89 | size_t len = strlen(match->name); | ||
90 | if (!strncmp(buf, match->name, len)) { | ||
91 | buf += len; | ||
92 | break; | ||
93 | } | ||
94 | match++; | ||
95 | } | ||
96 | if (!match->name) { | ||
97 | pr_warning("Unknown earlyprintk arguments: %s\n", buf); | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | /* I/O address */ | ||
102 | if (!strncmp(buf, ",0x", 3)) { | ||
103 | char *e; | ||
104 | paddr = simple_strtoul(buf + 1, &e, 16); | ||
105 | buf = e; | ||
106 | } | ||
107 | /* no options parsing yet */ | ||
108 | |||
109 | if (paddr) | ||
110 | early_base = early_io_map(paddr, EARLYCON_IOBASE); | ||
111 | |||
112 | printch = match->printch; | ||
113 | register_console(&early_console); | ||
114 | |||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | early_param("earlyprintk", setup_early_printk); | ||
119 |
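The parameter accepted by setup_early_printk() therefore takes the form earlyprintk=<name>[,<addr>], for example earlyprintk=pl011,0x1c090000 on the kernel command line; the UART base address is board-specific and that value is only illustrative. Adding a backend is a matter of extending the match table; a hypothetical second entry (only the earlycon_match shape is real):

	/* Hypothetical extra backend alongside the PL011 one above. */
	static void myuart_printch(char ch)
	{
		/* Poll a TX-ready flag, then write ch to the data register. */
	}

	static const struct earlycon_match earlycon_match[] __initconst = {
		{ .name = "pl011", .printch = pl011_printch, },
		{ .name = "myuart", .printch = myuart_printch, },
		{}
	};

Note that the table is matched with strncmp() on the name, so a name that is a prefix of another should come after the longer one.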
arch/arm64/kernel/head.S
1 | /* | 1 | /* |
2 | * Low-level CPU initialisation | 2 | * Low-level CPU initialisation |
3 | * Based on arch/arm/kernel/head.S | 3 | * Based on arch/arm/kernel/head.S |
4 | * | 4 | * |
5 | * Copyright (C) 1994-2002 Russell King | 5 | * Copyright (C) 1994-2002 Russell King |
6 | * Copyright (C) 2003-2012 ARM Ltd. | 6 | * Copyright (C) 2003-2012 ARM Ltd. |
7 | * Authors: Catalin Marinas <catalin.marinas@arm.com> | 7 | * Authors: Catalin Marinas <catalin.marinas@arm.com> |
8 | * Will Deacon <will.deacon@arm.com> | 8 | * Will Deacon <will.deacon@arm.com> |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | * | 13 | * |
14 | * This program is distributed in the hope that it will be useful, | 14 | * This program is distributed in the hope that it will be useful, |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
17 | * GNU General Public License for more details. | 17 | * GNU General Public License for more details. |
18 | * | 18 | * |
19 | * You should have received a copy of the GNU General Public License | 19 | * You should have received a copy of the GNU General Public License |
20 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 20 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/linkage.h> | 23 | #include <linux/linkage.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | 25 | ||
26 | #include <asm/assembler.h> | 26 | #include <asm/assembler.h> |
27 | #include <asm/ptrace.h> | 27 | #include <asm/ptrace.h> |
28 | #include <asm/asm-offsets.h> | 28 | #include <asm/asm-offsets.h> |
29 | #include <asm/memory.h> | 29 | #include <asm/memory.h> |
30 | #include <asm/thread_info.h> | 30 | #include <asm/thread_info.h> |
31 | #include <asm/pgtable-hwdef.h> | 31 | #include <asm/pgtable-hwdef.h> |
32 | #include <asm/pgtable.h> | 32 | #include <asm/pgtable.h> |
33 | #include <asm/page.h> | 33 | #include <asm/page.h> |
34 | #include <asm/virt.h> | 34 | #include <asm/virt.h> |
35 | 35 | ||
36 | /* | 36 | /* |
37 | * swapper_pg_dir is the virtual address of the initial page table. We place | 37 | * swapper_pg_dir is the virtual address of the initial page table. We place |
38 | * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has | 38 | * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has |
39 | * 2 pages and is placed below swapper_pg_dir. | 39 | * 2 pages and is placed below swapper_pg_dir. |
40 | */ | 40 | */ |
41 | #define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) | 41 | #define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) |
42 | 42 | ||
43 | #if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000 | 43 | #if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000 |
44 | #error KERNEL_RAM_VADDR must start at 0xXXX80000 | 44 | #error KERNEL_RAM_VADDR must start at 0xXXX80000 |
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | #define SWAPPER_DIR_SIZE (3 * PAGE_SIZE) | 47 | #define SWAPPER_DIR_SIZE (3 * PAGE_SIZE) |
48 | #define IDMAP_DIR_SIZE (2 * PAGE_SIZE) | 48 | #define IDMAP_DIR_SIZE (2 * PAGE_SIZE) |
49 | 49 | ||
50 | .globl swapper_pg_dir | 50 | .globl swapper_pg_dir |
51 | .equ swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE | 51 | .equ swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE |
52 | 52 | ||
53 | .globl idmap_pg_dir | 53 | .globl idmap_pg_dir |
54 | .equ idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE | 54 | .equ idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE |
55 | 55 | ||
56 | .macro pgtbl, ttb0, ttb1, phys | 56 | .macro pgtbl, ttb0, ttb1, phys |
57 | add \ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE | 57 | add \ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE |
58 | sub \ttb0, \ttb1, #IDMAP_DIR_SIZE | 58 | sub \ttb0, \ttb1, #IDMAP_DIR_SIZE |
59 | .endm | 59 | .endm |
60 | 60 | ||
61 | #ifdef CONFIG_ARM64_64K_PAGES | 61 | #ifdef CONFIG_ARM64_64K_PAGES |
62 | #define BLOCK_SHIFT PAGE_SHIFT | 62 | #define BLOCK_SHIFT PAGE_SHIFT |
63 | #define BLOCK_SIZE PAGE_SIZE | 63 | #define BLOCK_SIZE PAGE_SIZE |
64 | #else | 64 | #else |
65 | #define BLOCK_SHIFT SECTION_SHIFT | 65 | #define BLOCK_SHIFT SECTION_SHIFT |
66 | #define BLOCK_SIZE SECTION_SIZE | 66 | #define BLOCK_SIZE SECTION_SIZE |
67 | #endif | 67 | #endif |
68 | 68 | ||
69 | #define KERNEL_START KERNEL_RAM_VADDR | 69 | #define KERNEL_START KERNEL_RAM_VADDR |
70 | #define KERNEL_END _end | 70 | #define KERNEL_END _end |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * Initial memory map attributes. | 73 | * Initial memory map attributes. |
74 | */ | 74 | */ |
75 | #ifndef CONFIG_SMP | 75 | #ifndef CONFIG_SMP |
76 | #define PTE_FLAGS PTE_TYPE_PAGE | PTE_AF | 76 | #define PTE_FLAGS PTE_TYPE_PAGE | PTE_AF |
77 | #define PMD_FLAGS PMD_TYPE_SECT | PMD_SECT_AF | 77 | #define PMD_FLAGS PMD_TYPE_SECT | PMD_SECT_AF |
78 | #else | 78 | #else |
79 | #define PTE_FLAGS PTE_TYPE_PAGE | PTE_AF | PTE_SHARED | 79 | #define PTE_FLAGS PTE_TYPE_PAGE | PTE_AF | PTE_SHARED |
80 | #define PMD_FLAGS PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S | 80 | #define PMD_FLAGS PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S |
81 | #endif | 81 | #endif |
82 | 82 | ||
83 | #ifdef CONFIG_ARM64_64K_PAGES | 83 | #ifdef CONFIG_ARM64_64K_PAGES |
84 | #define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS | 84 | #define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS |
85 | #define IO_MMUFLAGS PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_XN | PTE_FLAGS | ||
86 | #else | 85 | #else |
87 | #define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS | 86 | #define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS |
88 | #define IO_MMUFLAGS PMD_ATTRINDX(MT_DEVICE_nGnRE) | PMD_SECT_XN | PMD_FLAGS | ||
89 | #endif | 87 | #endif |
90 | 88 | ||
91 | /* | 89 | /* |
92 | * Kernel startup entry point. | 90 | * Kernel startup entry point. |
93 | * --------------------------- | 91 | * --------------------------- |
94 | * | 92 | * |
95 | * The requirements are: | 93 | * The requirements are: |
96 | * MMU = off, D-cache = off, I-cache = on or off, | 94 | * MMU = off, D-cache = off, I-cache = on or off, |
97 | * x0 = physical address to the FDT blob. | 95 | * x0 = physical address to the FDT blob. |
98 | * | 96 | * |
99 | * This code is mostly position independent, so you can call this at | 97 | * This code is mostly position independent, so you can call this at |
100 | * __pa(PAGE_OFFSET + TEXT_OFFSET). | 98 | * __pa(PAGE_OFFSET + TEXT_OFFSET). |
101 | * | 99 | * |
102 | * Note that the callee-saved registers are used for storing variables | 100 | * Note that the callee-saved registers are used for storing variables |
103 | * that are useful before the MMU is enabled. The allocations are described | 101 | * that are useful before the MMU is enabled. The allocations are described |
104 | * in the entry routines. | 102 | * in the entry routines. |
105 | */ | 103 | */ |
106 | __HEAD | 104 | __HEAD |
107 | 105 | ||
108 | /* | 106 | /* |
109 | * DO NOT MODIFY. Image header expected by Linux boot-loaders. | 107 | * DO NOT MODIFY. Image header expected by Linux boot-loaders. |
110 | */ | 108 | */ |
111 | b stext // branch to kernel start, magic | 109 | b stext // branch to kernel start, magic |
112 | .long 0 // reserved | 110 | .long 0 // reserved |
113 | .quad TEXT_OFFSET // Image load offset from start of RAM | 111 | .quad TEXT_OFFSET // Image load offset from start of RAM |
114 | .quad 0 // reserved | 112 | .quad 0 // reserved |
115 | .quad 0 // reserved | 113 | .quad 0 // reserved |
116 | 114 | ||
117 | ENTRY(stext) | 115 | ENTRY(stext) |
118 | mov x21, x0 // x21=FDT | 116 | mov x21, x0 // x21=FDT |
119 | bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET | 117 | bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET |
120 | bl el2_setup // Drop to EL1 | 118 | bl el2_setup // Drop to EL1 |
121 | mrs x22, midr_el1 // x22=cpuid | 119 | mrs x22, midr_el1 // x22=cpuid |
122 | mov x0, x22 | 120 | mov x0, x22 |
123 | bl lookup_processor_type | 121 | bl lookup_processor_type |
124 | mov x23, x0 // x23=current cpu_table | 122 | mov x23, x0 // x23=current cpu_table |
125 | cbz x23, __error_p // invalid processor (x23=0)? | 123 | cbz x23, __error_p // invalid processor (x23=0)? |
126 | bl __vet_fdt | 124 | bl __vet_fdt |
127 | bl __create_page_tables // x25=TTBR0, x26=TTBR1 | 125 | bl __create_page_tables // x25=TTBR0, x26=TTBR1 |
128 | /* | 126 | /* |
129 | * The following calls CPU specific code in a position independent | 127 | * The following calls CPU specific code in a position independent |
130 | * manner. See arch/arm64/mm/proc.S for details. x23 = base of | 128 | * manner. See arch/arm64/mm/proc.S for details. x23 = base of |
131 | * cpu_info structure selected by lookup_processor_type above. | 129 | * cpu_info structure selected by lookup_processor_type above. |
132 | * On return, the CPU will be ready for the MMU to be turned on and | 130 | * On return, the CPU will be ready for the MMU to be turned on and |
133 | * the TCR will have been set. | 131 | * the TCR will have been set. |
134 | */ | 132 | */ |
135 | ldr x27, __switch_data // address to jump to after | 133 | ldr x27, __switch_data // address to jump to after |
136 | // MMU has been enabled | 134 | // MMU has been enabled |
137 | adr lr, __enable_mmu // return (PIC) address | 135 | adr lr, __enable_mmu // return (PIC) address |
138 | ldr x12, [x23, #CPU_INFO_SETUP] | 136 | ldr x12, [x23, #CPU_INFO_SETUP] |
139 | add x12, x12, x28 // __virt_to_phys | 137 | add x12, x12, x28 // __virt_to_phys |
140 | br x12 // initialise processor | 138 | br x12 // initialise processor |
141 | ENDPROC(stext) | 139 | ENDPROC(stext) |
142 | 140 | ||
143 | /* | 141 | /* |
144 | * If we're fortunate enough to boot at EL2, ensure that the world is | 142 | * If we're fortunate enough to boot at EL2, ensure that the world is |
145 | * sane before dropping to EL1. | 143 | * sane before dropping to EL1. |
146 | */ | 144 | */ |
147 | ENTRY(el2_setup) | 145 | ENTRY(el2_setup) |
148 | mrs x0, CurrentEL | 146 | mrs x0, CurrentEL |
149 | cmp x0, #PSR_MODE_EL2t | 147 | cmp x0, #PSR_MODE_EL2t |
150 | ccmp x0, #PSR_MODE_EL2h, #0x4, ne | 148 | ccmp x0, #PSR_MODE_EL2h, #0x4, ne |
151 | ldr x0, =__boot_cpu_mode // Compute __boot_cpu_mode | 149 | ldr x0, =__boot_cpu_mode // Compute __boot_cpu_mode |
152 | add x0, x0, x28 | 150 | add x0, x0, x28 |
153 | b.eq 1f | 151 | b.eq 1f |
154 | str wzr, [x0] // Remember we don't have EL2... | 152 | str wzr, [x0] // Remember we don't have EL2... |
155 | ret | 153 | ret |
156 | 154 | ||
157 | /* Hyp configuration. */ | 155 | /* Hyp configuration. */ |
158 | 1: ldr w1, =BOOT_CPU_MODE_EL2 | 156 | 1: ldr w1, =BOOT_CPU_MODE_EL2 |
159 | str w1, [x0, #4] // This CPU has EL2 | 157 | str w1, [x0, #4] // This CPU has EL2 |
160 | mov x0, #(1 << 31) // 64-bit EL1 | 158 | mov x0, #(1 << 31) // 64-bit EL1 |
161 | msr hcr_el2, x0 | 159 | msr hcr_el2, x0 |
162 | 160 | ||
163 | /* Generic timers. */ | 161 | /* Generic timers. */ |
164 | mrs x0, cnthctl_el2 | 162 | mrs x0, cnthctl_el2 |
165 | orr x0, x0, #3 // Enable EL1 physical timers | 163 | orr x0, x0, #3 // Enable EL1 physical timers |
166 | msr cnthctl_el2, x0 | 164 | msr cnthctl_el2, x0 |
167 | msr cntvoff_el2, xzr // Clear virtual offset | 165 | msr cntvoff_el2, xzr // Clear virtual offset |
168 | 166 | ||
169 | /* Populate ID registers. */ | 167 | /* Populate ID registers. */ |
170 | mrs x0, midr_el1 | 168 | mrs x0, midr_el1 |
171 | mrs x1, mpidr_el1 | 169 | mrs x1, mpidr_el1 |
172 | msr vpidr_el2, x0 | 170 | msr vpidr_el2, x0 |
173 | msr vmpidr_el2, x1 | 171 | msr vmpidr_el2, x1 |
174 | 172 | ||
175 | /* sctlr_el1 */ | 173 | /* sctlr_el1 */ |
176 | mov x0, #0x0800 // Set/clear RES{1,0} bits | 174 | mov x0, #0x0800 // Set/clear RES{1,0} bits |
177 | movk x0, #0x30d0, lsl #16 | 175 | movk x0, #0x30d0, lsl #16 |
178 | msr sctlr_el1, x0 | 176 | msr sctlr_el1, x0 |
179 | 177 | ||
180 | /* Coprocessor traps. */ | 178 | /* Coprocessor traps. */ |
181 | mov x0, #0x33ff | 179 | mov x0, #0x33ff |
182 | msr cptr_el2, x0 // Disable copro. traps to EL2 | 180 | msr cptr_el2, x0 // Disable copro. traps to EL2 |
183 | 181 | ||
184 | #ifdef CONFIG_COMPAT | 182 | #ifdef CONFIG_COMPAT |
185 | msr hstr_el2, xzr // Disable CP15 traps to EL2 | 183 | msr hstr_el2, xzr // Disable CP15 traps to EL2 |
186 | #endif | 184 | #endif |
187 | 185 | ||
188 | /* Stage-2 translation */ | 186 | /* Stage-2 translation */ |
189 | msr vttbr_el2, xzr | 187 | msr vttbr_el2, xzr |
190 | 188 | ||
191 | /* Hypervisor stub */ | 189 | /* Hypervisor stub */ |
192 | adr x0, __hyp_stub_vectors | 190 | adr x0, __hyp_stub_vectors |
193 | msr vbar_el2, x0 | 191 | msr vbar_el2, x0 |
194 | 192 | ||
195 | /* spsr */ | 193 | /* spsr */ |
196 | mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ | 194 | mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ |
197 | PSR_MODE_EL1h) | 195 | PSR_MODE_EL1h) |
198 | msr spsr_el2, x0 | 196 | msr spsr_el2, x0 |
199 | msr elr_el2, lr | 197 | msr elr_el2, lr |
200 | eret | 198 | eret |
201 | ENDPROC(el2_setup) | 199 | ENDPROC(el2_setup) |
202 | 200 | ||
203 | /* | 201 | /* |
204 | * We need to find out the CPU boot mode long after boot, so we need to | 202 | * We need to find out the CPU boot mode long after boot, so we need to |
205 | * store it in a writable variable. | 203 | * store it in a writable variable. |
206 | * | 204 | * |
207 | * This is not in .bss, because we set it sufficiently early that the boot-time | 205 | * This is not in .bss, because we set it sufficiently early that the boot-time |
208 | * zeroing of .bss would clobber it. | 206 | * zeroing of .bss would clobber it. |
209 | */ | 207 | */ |
210 | .pushsection .data | 208 | .pushsection .data |
211 | ENTRY(__boot_cpu_mode) | 209 | ENTRY(__boot_cpu_mode) |
212 | .long BOOT_CPU_MODE_EL2 | 210 | .long BOOT_CPU_MODE_EL2 |
213 | .long 0 | 211 | .long 0 |
214 | .popsection | 212 | .popsection |
215 | 213 | ||
216 | .align 3 | 214 | .align 3 |
217 | 2: .quad . | 215 | 2: .quad . |
218 | .quad PAGE_OFFSET | 216 | .quad PAGE_OFFSET |
219 | 217 | ||
220 | #ifdef CONFIG_SMP | 218 | #ifdef CONFIG_SMP |
221 | .pushsection .smp.pen.text, "ax" | 219 | .pushsection .smp.pen.text, "ax" |
222 | .align 3 | 220 | .align 3 |
223 | 1: .quad . | 221 | 1: .quad . |
224 | .quad secondary_holding_pen_release | 222 | .quad secondary_holding_pen_release |
225 | 223 | ||
226 | /* | 224 | /* |
227 | * This provides a "holding pen" for platforms where all secondary | 225 | * This provides a "holding pen" for platforms where all secondary |
228 | * cores are held until we're ready for them to initialise. | 226 | * cores are held until we're ready for them to initialise. |
229 | */ | 227 | */ |
230 | ENTRY(secondary_holding_pen) | 228 | ENTRY(secondary_holding_pen) |
231 | bl __calc_phys_offset // x24=phys offset | 229 | bl __calc_phys_offset // x24=phys offset |
232 | bl el2_setup // Drop to EL1 | 230 | bl el2_setup // Drop to EL1 |
233 | mrs x0, mpidr_el1 | 231 | mrs x0, mpidr_el1 |
234 | and x0, x0, #15 // CPU number | 232 | and x0, x0, #15 // CPU number |
235 | adr x1, 1b | 233 | adr x1, 1b |
236 | ldp x2, x3, [x1] | 234 | ldp x2, x3, [x1] |
237 | sub x1, x1, x2 | 235 | sub x1, x1, x2 |
238 | add x3, x3, x1 | 236 | add x3, x3, x1 |
239 | pen: ldr x4, [x3] | 237 | pen: ldr x4, [x3] |
240 | cmp x4, x0 | 238 | cmp x4, x0 |
241 | b.eq secondary_startup | 239 | b.eq secondary_startup |
242 | wfe | 240 | wfe |
243 | b pen | 241 | b pen |
244 | ENDPROC(secondary_holding_pen) | 242 | ENDPROC(secondary_holding_pen) |
245 | .popsection | 243 | .popsection |
246 | 244 | ||
247 | ENTRY(secondary_startup) | 245 | ENTRY(secondary_startup) |
248 | /* | 246 | /* |
249 | * Common entry point for secondary CPUs. | 247 | * Common entry point for secondary CPUs. |
250 | */ | 248 | */ |
251 | mrs x22, midr_el1 // x22=cpuid | 249 | mrs x22, midr_el1 // x22=cpuid |
252 | mov x0, x22 | 250 | mov x0, x22 |
253 | bl lookup_processor_type | 251 | bl lookup_processor_type |
254 | mov x23, x0 // x23=current cpu_table | 252 | mov x23, x0 // x23=current cpu_table |
255 | cbz x23, __error_p // invalid processor (x23=0)? | 253 | cbz x23, __error_p // invalid processor (x23=0)? |
256 | 254 | ||
257 | pgtbl x25, x26, x24 // x25=TTBR0, x26=TTBR1 | 255 | pgtbl x25, x26, x24 // x25=TTBR0, x26=TTBR1 |
258 | ldr x12, [x23, #CPU_INFO_SETUP] | 256 | ldr x12, [x23, #CPU_INFO_SETUP] |
259 | add x12, x12, x28 // __virt_to_phys | 257 | add x12, x12, x28 // __virt_to_phys |
260 | blr x12 // initialise processor | 258 | blr x12 // initialise processor |
261 | 259 | ||
262 | ldr x21, =secondary_data | 260 | ldr x21, =secondary_data |
263 | ldr x27, =__secondary_switched // address to jump to after enabling the MMU | 261 | ldr x27, =__secondary_switched // address to jump to after enabling the MMU |
264 | b __enable_mmu | 262 | b __enable_mmu |
265 | ENDPROC(secondary_startup) | 263 | ENDPROC(secondary_startup) |
266 | 264 | ||
267 | ENTRY(__secondary_switched) | 265 | ENTRY(__secondary_switched) |
268 | ldr x0, [x21] // get secondary_data.stack | 266 | ldr x0, [x21] // get secondary_data.stack |
269 | mov sp, x0 | 267 | mov sp, x0 |
270 | mov x29, #0 | 268 | mov x29, #0 |
271 | b secondary_start_kernel | 269 | b secondary_start_kernel |
272 | ENDPROC(__secondary_switched) | 270 | ENDPROC(__secondary_switched) |
273 | #endif /* CONFIG_SMP */ | 271 | #endif /* CONFIG_SMP */ |
274 | 272 | ||
275 | /* | 273 | /* |
276 | * Setup common bits before finally enabling the MMU. Essentially this is just | 274 | * Setup common bits before finally enabling the MMU. Essentially this is just |
277 | * loading the page table pointer and vector base registers. | 275 | * loading the page table pointer and vector base registers. |
278 | * | 276 | * |
279 | * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on | 277 | * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on |
280 | * the MMU. | 278 | * the MMU. |
281 | */ | 279 | */ |
282 | __enable_mmu: | 280 | __enable_mmu: |
283 | ldr x5, =vectors | 281 | ldr x5, =vectors |
284 | msr vbar_el1, x5 | 282 | msr vbar_el1, x5 |
285 | msr ttbr0_el1, x25 // load TTBR0 | 283 | msr ttbr0_el1, x25 // load TTBR0 |
286 | msr ttbr1_el1, x26 // load TTBR1 | 284 | msr ttbr1_el1, x26 // load TTBR1 |
287 | isb | 285 | isb |
288 | b __turn_mmu_on | 286 | b __turn_mmu_on |
289 | ENDPROC(__enable_mmu) | 287 | ENDPROC(__enable_mmu) |
290 | 288 | ||
291 | /* | 289 | /* |
292 | * Enable the MMU. This completely changes the structure of the visible memory | 290 | * Enable the MMU. This completely changes the structure of the visible memory |
293 | * space. You will not be able to trace execution through this. | 291 | * space. You will not be able to trace execution through this. |
294 | * | 292 | * |
295 | * x0 = system control register | 293 | * x0 = system control register |
296 | * x27 = *virtual* address to jump to upon completion | 294 | * x27 = *virtual* address to jump to upon completion |
297 | * | 295 | * |
298 | * other registers depend on the function called upon completion | 296 | * other registers depend on the function called upon completion |
299 | */ | 297 | */ |
300 | .align 6 | 298 | .align 6 |
301 | __turn_mmu_on: | 299 | __turn_mmu_on: |
302 | msr sctlr_el1, x0 | 300 | msr sctlr_el1, x0 |
303 | isb | 301 | isb |
304 | br x27 | 302 | br x27 |
305 | ENDPROC(__turn_mmu_on) | 303 | ENDPROC(__turn_mmu_on) |
306 | 304 | ||
307 | /* | 305 | /* |
308 | * Calculate the start of physical memory. | 306 | * Calculate the start of physical memory. |
309 | */ | 307 | */ |
310 | __calc_phys_offset: | 308 | __calc_phys_offset: |
311 | adr x0, 1f | 309 | adr x0, 1f |
312 | ldp x1, x2, [x0] | 310 | ldp x1, x2, [x0] |
313 | sub x28, x0, x1 // x28 = PHYS_OFFSET - PAGE_OFFSET | 311 | sub x28, x0, x1 // x28 = PHYS_OFFSET - PAGE_OFFSET |
314 | add x24, x2, x28 // x24 = PHYS_OFFSET | 312 | add x24, x2, x28 // x24 = PHYS_OFFSET |
315 | ret | 313 | ret |
316 | ENDPROC(__calc_phys_offset) | 314 | ENDPROC(__calc_phys_offset) |
317 | 315 | ||
318 | .align 3 | 316 | .align 3 |
319 | 1: .quad . | 317 | 1: .quad . |
320 | .quad PAGE_OFFSET | 318 | .quad PAGE_OFFSET |
321 | 319 | ||
322 | /* | 320 | /* |
323 | * Macro to populate the PGD for the corresponding block entry in the next | 321 | * Macro to populate the PGD for the corresponding block entry in the next |
324 | * level (tbl) for the given virtual address. | 322 | * level (tbl) for the given virtual address. |
325 | * | 323 | * |
326 | * Preserves: pgd, tbl, virt | 324 | * Preserves: pgd, tbl, virt |
327 | * Corrupts: tmp1, tmp2 | 325 | * Corrupts: tmp1, tmp2 |
328 | */ | 326 | */ |
329 | .macro create_pgd_entry, pgd, tbl, virt, tmp1, tmp2 | 327 | .macro create_pgd_entry, pgd, tbl, virt, tmp1, tmp2 |
330 | lsr \tmp1, \virt, #PGDIR_SHIFT | 328 | lsr \tmp1, \virt, #PGDIR_SHIFT |
331 | and \tmp1, \tmp1, #PTRS_PER_PGD - 1 // PGD index | 329 | and \tmp1, \tmp1, #PTRS_PER_PGD - 1 // PGD index |
332 | orr \tmp2, \tbl, #3 // PGD entry table type | 330 | orr \tmp2, \tbl, #3 // PGD entry table type |
333 | str \tmp2, [\pgd, \tmp1, lsl #3] | 331 | str \tmp2, [\pgd, \tmp1, lsl #3] |
334 | .endm | 332 | .endm |
335 | 333 | ||
336 | /* | 334 | /* |
337 | * Macro to populate block entries in the page table for the start..end | 335 | * Macro to populate block entries in the page table for the start..end |
338 | * virtual range (inclusive). | 336 | * virtual range (inclusive). |
339 | * | 337 | * |
340 | * Preserves: tbl, flags | 338 | * Preserves: tbl, flags |
341 | * Corrupts: phys, start, end, pstate | 339 | * Corrupts: phys, start, end, pstate |
342 | */ | 340 | */ |
343 | .macro create_block_map, tbl, flags, phys, start, end, idmap=0 | 341 | .macro create_block_map, tbl, flags, phys, start, end, idmap=0 |
344 | lsr \phys, \phys, #BLOCK_SHIFT | 342 | lsr \phys, \phys, #BLOCK_SHIFT |
345 | .if \idmap | 343 | .if \idmap |
346 | and \start, \phys, #PTRS_PER_PTE - 1 // table index | 344 | and \start, \phys, #PTRS_PER_PTE - 1 // table index |
347 | .else | 345 | .else |
348 | lsr \start, \start, #BLOCK_SHIFT | 346 | lsr \start, \start, #BLOCK_SHIFT |
349 | and \start, \start, #PTRS_PER_PTE - 1 // table index | 347 | and \start, \start, #PTRS_PER_PTE - 1 // table index |
350 | .endif | 348 | .endif |
351 | orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry | 349 | orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry |
352 | .ifnc \start,\end | 350 | .ifnc \start,\end |
353 | lsr \end, \end, #BLOCK_SHIFT | 351 | lsr \end, \end, #BLOCK_SHIFT |
354 | and \end, \end, #PTRS_PER_PTE - 1 // table end index | 352 | and \end, \end, #PTRS_PER_PTE - 1 // table end index |
355 | .endif | 353 | .endif |
356 | 9999: str \phys, [\tbl, \start, lsl #3] // store the entry | 354 | 9999: str \phys, [\tbl, \start, lsl #3] // store the entry |
357 | .ifnc \start,\end | 355 | .ifnc \start,\end |
358 | add \start, \start, #1 // next entry | 356 | add \start, \start, #1 // next entry |
359 | add \phys, \phys, #BLOCK_SIZE // next block | 357 | add \phys, \phys, #BLOCK_SIZE // next block |
360 | cmp \start, \end | 358 | cmp \start, \end |
361 | b.ls 9999b | 359 | b.ls 9999b |
362 | .endif | 360 | .endif |
363 | .endm | 361 | .endm |
364 | 362 | ||
365 | /* | 363 | /* |
366 | * Setup the initial page tables. We only setup the barest amount which is | 364 | * Setup the initial page tables. We only setup the barest amount which is |
367 | * required to get the kernel running. The following sections are required: | 365 | * required to get the kernel running. The following sections are required: |
368 | * - identity mapping to enable the MMU (low address, TTBR0) | 366 | * - identity mapping to enable the MMU (low address, TTBR0) |
369 | * - first few MB of the kernel linear mapping to jump to once the MMU has | 367 | * - first few MB of the kernel linear mapping to jump to once the MMU has |
370 | * been enabled, including the FDT blob (TTBR1) | 368 | * been enabled, including the FDT blob (TTBR1) |
369 | * - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1) | ||
371 | */ | 370 | */ |
372 | __create_page_tables: | 371 | __create_page_tables: |
373 | pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses | 372 | pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses |
374 | 373 | ||
375 | /* | 374 | /* |
376 | * Clear the idmap and swapper page tables. | 375 | * Clear the idmap and swapper page tables. |
377 | */ | 376 | */ |
378 | mov x0, x25 | 377 | mov x0, x25 |
379 | add x6, x26, #SWAPPER_DIR_SIZE | 378 | add x6, x26, #SWAPPER_DIR_SIZE |
380 | 1: stp xzr, xzr, [x0], #16 | 379 | 1: stp xzr, xzr, [x0], #16 |
381 | stp xzr, xzr, [x0], #16 | 380 | stp xzr, xzr, [x0], #16 |
382 | stp xzr, xzr, [x0], #16 | 381 | stp xzr, xzr, [x0], #16 |
383 | stp xzr, xzr, [x0], #16 | 382 | stp xzr, xzr, [x0], #16 |
384 | cmp x0, x6 | 383 | cmp x0, x6 |
385 | b.lo 1b | 384 | b.lo 1b |
386 | 385 | ||
387 | ldr x7, =MM_MMUFLAGS | 386 | ldr x7, =MM_MMUFLAGS |
388 | 387 | ||
389 | /* | 388 | /* |
390 | * Create the identity mapping. | 389 | * Create the identity mapping. |
391 | */ | 390 | */ |
392 | add x0, x25, #PAGE_SIZE // section table address | 391 | add x0, x25, #PAGE_SIZE // section table address |
393 | adr x3, __turn_mmu_on // virtual/physical address | 392 | adr x3, __turn_mmu_on // virtual/physical address |
394 | create_pgd_entry x25, x0, x3, x5, x6 | 393 | create_pgd_entry x25, x0, x3, x5, x6 |
395 | create_block_map x0, x7, x3, x5, x5, idmap=1 | 394 | create_block_map x0, x7, x3, x5, x5, idmap=1 |
396 | 395 | ||
397 | /* | 396 | /* |
398 | * Map the kernel image (starting with PHYS_OFFSET). | 397 | * Map the kernel image (starting with PHYS_OFFSET). |
399 | */ | 398 | */ |
400 | add x0, x26, #PAGE_SIZE // section table address | 399 | add x0, x26, #PAGE_SIZE // section table address |
401 | mov x5, #PAGE_OFFSET | 400 | mov x5, #PAGE_OFFSET |
402 | create_pgd_entry x26, x0, x5, x3, x6 | 401 | create_pgd_entry x26, x0, x5, x3, x6 |
403 | ldr x6, =KERNEL_END - 1 | 402 | ldr x6, =KERNEL_END - 1 |
404 | mov x3, x24 // phys offset | 403 | mov x3, x24 // phys offset |
405 | create_block_map x0, x7, x3, x5, x6 | 404 | create_block_map x0, x7, x3, x5, x6 |
406 | 405 | ||
407 | /* | 406 | /* |
408 | * Map the FDT blob (maximum 2MB; must be within 512MB of | 407 | * Map the FDT blob (maximum 2MB; must be within 512MB of |
409 | * PHYS_OFFSET). | 408 | * PHYS_OFFSET). |
410 | */ | 409 | */ |
411 | mov x3, x21 // FDT phys address | 410 | mov x3, x21 // FDT phys address |
412 | and x3, x3, #~((1 << 21) - 1) // 2MB aligned | 411 | and x3, x3, #~((1 << 21) - 1) // 2MB aligned |
413 | mov x6, #PAGE_OFFSET | 412 | mov x6, #PAGE_OFFSET |
414 | sub x5, x3, x24 // subtract PHYS_OFFSET | 413 | sub x5, x3, x24 // subtract PHYS_OFFSET |
415 | tst x5, #~((1 << 29) - 1) // within 512MB? | 414 | tst x5, #~((1 << 29) - 1) // within 512MB? |
416 | csel x21, xzr, x21, ne // zero the FDT pointer | 415 | csel x21, xzr, x21, ne // zero the FDT pointer |
417 | b.ne 1f | 416 | b.ne 1f |
418 | add x5, x5, x6 // __va(FDT blob) | 417 | add x5, x5, x6 // __va(FDT blob) |
419 | add x6, x5, #1 << 21 // 2MB for the FDT blob | 418 | add x6, x5, #1 << 21 // 2MB for the FDT blob |
420 | sub x6, x6, #1 // inclusive range | 419 | sub x6, x6, #1 // inclusive range |
421 | create_block_map x0, x7, x3, x5, x6 | 420 | create_block_map x0, x7, x3, x5, x6 |
422 | 1: | 421 | 1: |
422 | #ifdef CONFIG_EARLY_PRINTK | ||
423 | /* | ||
424 | * Create the pgd entry for the UART mapping. The full mapping is done | ||
425 | * later based on the earlyprintk kernel parameter. | ||
426 | */ | ||
427 | ldr x5, =EARLYCON_IOBASE // UART virtual address | ||
428 | add x0, x26, #2 * PAGE_SIZE // section table address | ||
429 | create_pgd_entry x26, x0, x5, x6, x7 | ||
430 | #endif | ||
423 | ret | 431 | ret |
424 | ENDPROC(__create_page_tables) | 432 | ENDPROC(__create_page_tables) |
425 | .ltorg | 433 | .ltorg |
426 | 434 | ||
427 | .align 3 | 435 | .align 3 |
428 | .type __switch_data, %object | 436 | .type __switch_data, %object |
429 | __switch_data: | 437 | __switch_data: |
430 | .quad __mmap_switched | 438 | .quad __mmap_switched |
431 | .quad __data_loc // x4 | 439 | .quad __data_loc // x4 |
432 | .quad _data // x5 | 440 | .quad _data // x5 |
433 | .quad __bss_start // x6 | 441 | .quad __bss_start // x6 |
434 | .quad _end // x7 | 442 | .quad _end // x7 |
435 | .quad processor_id // x4 | 443 | .quad processor_id // x4 |
436 | .quad __fdt_pointer // x5 | 444 | .quad __fdt_pointer // x5 |
437 | .quad memstart_addr // x6 | 445 | .quad memstart_addr // x6 |
438 | .quad init_thread_union + THREAD_START_SP // sp | 446 | .quad init_thread_union + THREAD_START_SP // sp |
439 | 447 | ||
440 | /* | 448 | /* |
441 | * The following fragment of code is executed with the MMU enabled, and | 449 | * The following fragment of code is executed with the MMU enabled, and |
442 | * uses absolute addresses; this is not position independent. | 450 | * uses absolute addresses; this is not position independent. |
443 | */ | 451 | */ |
444 | __mmap_switched: | 452 | __mmap_switched: |
445 | adr x3, __switch_data + 8 | 453 | adr x3, __switch_data + 8 |
446 | 454 | ||
447 | ldp x4, x5, [x3], #16 | 455 | ldp x4, x5, [x3], #16 |
448 | ldp x6, x7, [x3], #16 | 456 | ldp x6, x7, [x3], #16 |
449 | cmp x4, x5 // Copy data segment if needed | 457 | cmp x4, x5 // Copy data segment if needed |
450 | 1: ccmp x5, x6, #4, ne | 458 | 1: ccmp x5, x6, #4, ne |
451 | b.eq 2f | 459 | b.eq 2f |
452 | ldr x16, [x4], #8 | 460 | ldr x16, [x4], #8 |
453 | str x16, [x5], #8 | 461 | str x16, [x5], #8 |
454 | b 1b | 462 | b 1b |
455 | 2: | 463 | 2: |
456 | 1: cmp x6, x7 | 464 | 1: cmp x6, x7 |
457 | b.hs 2f | 465 | b.hs 2f |
458 | str xzr, [x6], #8 // Clear BSS | 466 | str xzr, [x6], #8 // Clear BSS |
459 | b 1b | 467 | b 1b |
460 | 2: | 468 | 2: |
461 | ldp x4, x5, [x3], #16 | 469 | ldp x4, x5, [x3], #16 |
462 | ldr x6, [x3], #8 | 470 | ldr x6, [x3], #8 |
463 | ldr x16, [x3] | 471 | ldr x16, [x3] |
464 | mov sp, x16 | 472 | mov sp, x16 |
465 | str x22, [x4] // Save processor ID | 473 | str x22, [x4] // Save processor ID |
466 | str x21, [x5] // Save FDT pointer | 474 | str x21, [x5] // Save FDT pointer |
467 | str x24, [x6] // Save PHYS_OFFSET | 475 | str x24, [x6] // Save PHYS_OFFSET |
468 | mov x29, #0 | 476 | mov x29, #0 |
469 | b start_kernel | 477 | b start_kernel |
470 | ENDPROC(__mmap_switched) | 478 | ENDPROC(__mmap_switched) |
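In C terms, the two numbered loops of __mmap_switched amount to the following sketch (illustrative only; the extern names mirror the linker symbols loaded from __switch_data):

    extern char __data_loc[], _data[], __bss_start[], _end[];

    static void mmap_switched_sketch(void)
    {
            unsigned long *src = (unsigned long *)__data_loc;   /* x4 */
            unsigned long *dst = (unsigned long *)_data;        /* x5 */
            unsigned long *p;

            if (src != dst)         /* copy .data only when it is not already in place */
                    while (dst < (unsigned long *)__bss_start)
                            *dst++ = *src++;

            for (p = (unsigned long *)__bss_start; p < (unsigned long *)_end; p++)
                    *p = 0;         /* clear .bss */
    }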
471 | 479 | ||
472 | /* | 480 | /* |
473 | * Exception handling. Something went wrong and we can't proceed. We ought to | 481 | * Exception handling. Something went wrong and we can't proceed. We ought to |
474 | * tell the user, but since we don't have any guarantee that we're even | 482 | * tell the user, but since we don't have any guarantee that we're even |
475 | * running on the right architecture, we do virtually nothing. | 483 | * running on the right architecture, we do virtually nothing. |
476 | */ | 484 | */ |
477 | __error_p: | 485 | __error_p: |
478 | ENDPROC(__error_p) | 486 | ENDPROC(__error_p) |
479 | 487 | ||
480 | __error: | 488 | __error: |
481 | 1: nop | 489 | 1: nop |
482 | b 1b | 490 | b 1b |
483 | ENDPROC(__error) | 491 | ENDPROC(__error) |
484 | 492 | ||
485 | /* | 493 | /* |
486 | * This function gets the processor ID in w0 and searches the cpu_table[] for | 494 | * This function gets the processor ID in w0 and searches the cpu_table[] for |
487 | * a match. It returns a pointer to the struct cpu_info it found. The | 495 | * a match. It returns a pointer to the struct cpu_info it found. The |
488 | * cpu_table[] must end with an empty (all zeros) structure. | 496 | * cpu_table[] must end with an empty (all zeros) structure. |
489 | * | 497 | * |
490 | * This routine can be called via C code and it needs to work with the MMU | 498 | * This routine can be called via C code and it needs to work with the MMU |
491 | * both disabled and enabled (the offset is calculated automatically). | 499 | * both disabled and enabled (the offset is calculated automatically). |
492 | */ | 500 | */ |
493 | ENTRY(lookup_processor_type) | 501 | ENTRY(lookup_processor_type) |
494 | adr x1, __lookup_processor_type_data | 502 | adr x1, __lookup_processor_type_data |
495 | ldp x2, x3, [x1] | 503 | ldp x2, x3, [x1] |
496 | sub x1, x1, x2 // get offset between VA and PA | 504 | sub x1, x1, x2 // get offset between VA and PA |
497 | add x3, x3, x1 // convert VA to PA | 505 | add x3, x3, x1 // convert VA to PA |
498 | 1: | 506 | 1: |
499 | ldp w5, w6, [x3] // load cpu_id_val and cpu_id_mask | 507 | ldp w5, w6, [x3] // load cpu_id_val and cpu_id_mask |
500 | cbz w5, 2f // end of list? | 508 | cbz w5, 2f // end of list? |
501 | and w6, w6, w0 | 509 | and w6, w6, w0 |
502 | cmp w5, w6 | 510 | cmp w5, w6 |
503 | b.eq 3f | 511 | b.eq 3f |
504 | add x3, x3, #CPU_INFO_SZ | 512 | add x3, x3, #CPU_INFO_SZ |
505 | b 1b | 513 | b 1b |
506 | 2: | 514 | 2: |
507 | mov x3, #0 // unknown processor | 515 | mov x3, #0 // unknown processor |
508 | 3: | 516 | 3: |
509 | mov x0, x3 | 517 | mov x0, x3 |
510 | ret | 518 | ret |
511 | ENDPROC(lookup_processor_type) | 519 | ENDPROC(lookup_processor_type) |
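The search itself is equivalent to this C loop (a sketch; the struct layout is an assumption inferred from the asm comments and CPU_INFO_SZ):

    struct cpu_info_sketch {
            unsigned int cpu_id_val;
            unsigned int cpu_id_mask;
            /* ... rest of struct cpu_info ... */
    };

    static struct cpu_info_sketch *
    lookup_sketch(unsigned int midr, struct cpu_info_sketch *table)
    {
            for (; table->cpu_id_val; table++)      /* all-zeros entry ends the list */
                    if (table->cpu_id_val == (midr & table->cpu_id_mask))
                            return table;
            return NULL;                            /* unknown processor */
    }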
512 | 520 | ||
513 | .align 3 | 521 | .align 3 |
514 | .type __lookup_processor_type_data, %object | 522 | .type __lookup_processor_type_data, %object |
515 | __lookup_processor_type_data: | 523 | __lookup_processor_type_data: |
516 | .quad . | 524 | .quad . |
517 | .quad cpu_table | 525 | .quad cpu_table |
518 | .size __lookup_processor_type_data, . - __lookup_processor_type_data | 526 | .size __lookup_processor_type_data, . - __lookup_processor_type_data |
519 | 527 | ||
520 | /* | 528 | /* |
521 | * Determine validity of the x21 FDT pointer. | 529 | * Determine validity of the x21 FDT pointer. |
522 | * The dtb must be 8-byte aligned and live in the first 512M of memory. | 530 | * The dtb must be 8-byte aligned and live in the first 512M of memory. |
523 | */ | 531 | */ |
524 | __vet_fdt: | 532 | __vet_fdt: |
525 | tst x21, #0x7 | 533 | tst x21, #0x7 |
526 | b.ne 1f | 534 | b.ne 1f |
527 | cmp x21, x24 | 535 | cmp x21, x24 |
528 | b.lt 1f | 536 | b.lt 1f |
529 | mov x0, #(1 << 29) | 537 | mov x0, #(1 << 29) |
530 | add x0, x0, x24 | 538 | add x0, x0, x24 |
531 | cmp x21, x0 | 539 | cmp x21, x0 |
532 | b.ge 1f | 540 | b.ge 1f |
533 | ret | 541 | ret |
534 | 1: | 542 | 1: |
535 | mov x21, #0 | 543 | mov x21, #0 |
536 | ret | 544 | ret |
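As a C predicate, __vet_fdt's checks reduce to the sketch below (x24 holds PHYS_OFFSET; the asm uses signed comparisons, which agree with the unsigned ones here for any sane physical address):

    static int fdt_pointer_ok(unsigned long fdt, unsigned long phys_offset)
    {
            return !(fdt & 0x7) &&                  /* 8-byte aligned */
                   fdt >= phys_offset &&            /* not below the start of RAM */
                   fdt < phys_offset + (1UL << 29); /* within the first 512MB */
    }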
arch/arm64/kernel/perf_event.c
1 | /* | 1 | /* |
2 | * PMU support | 2 | * PMU support |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ARM Limited | 4 | * Copyright (C) 2012 ARM Limited |
5 | * Author: Will Deacon <will.deacon@arm.com> | 5 | * Author: Will Deacon <will.deacon@arm.com> |
6 | * | 6 | * |
7 | * This code is based heavily on the ARMv7 perf event code. | 7 | * This code is based heavily on the ARMv7 perf event code. |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, | 13 | * This program is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License | 18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
20 | */ | 20 | */ |
21 | #define pr_fmt(fmt) "hw perfevents: " fmt | 21 | #define pr_fmt(fmt) "hw perfevents: " fmt |
22 | 22 | ||
23 | #include <linux/bitmap.h> | 23 | #include <linux/bitmap.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/export.h> | 26 | #include <linux/export.h> |
27 | #include <linux/perf_event.h> | 27 | #include <linux/perf_event.h> |
28 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
29 | #include <linux/spinlock.h> | 29 | #include <linux/spinlock.h> |
30 | #include <linux/uaccess.h> | 30 | #include <linux/uaccess.h> |
31 | 31 | ||
32 | #include <asm/cputype.h> | 32 | #include <asm/cputype.h> |
33 | #include <asm/irq.h> | 33 | #include <asm/irq.h> |
34 | #include <asm/irq_regs.h> | 34 | #include <asm/irq_regs.h> |
35 | #include <asm/pmu.h> | 35 | #include <asm/pmu.h> |
36 | #include <asm/stacktrace.h> | 36 | #include <asm/stacktrace.h> |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * ARMv8 supports a maximum of 32 events. | 39 | * ARMv8 supports a maximum of 32 events. |
40 | * The cycle counter is included in this total. | 40 | * The cycle counter is included in this total. |
41 | */ | 41 | */ |
42 | #define ARMPMU_MAX_HWEVENTS 32 | 42 | #define ARMPMU_MAX_HWEVENTS 32 |
43 | 43 | ||
44 | static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); | 44 | static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); |
45 | static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); | 45 | static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); |
46 | static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); | 46 | static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); |
47 | 47 | ||
48 | #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) | 48 | #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) |
49 | 49 | ||
50 | /* Set at runtime when we know what CPU type we are. */ | 50 | /* Set at runtime when we know what CPU type we are. */ |
51 | static struct arm_pmu *cpu_pmu; | 51 | static struct arm_pmu *cpu_pmu; |
52 | 52 | ||
53 | int | 53 | int |
54 | armpmu_get_max_events(void) | 54 | armpmu_get_max_events(void) |
55 | { | 55 | { |
56 | int max_events = 0; | 56 | int max_events = 0; |
57 | 57 | ||
58 | if (cpu_pmu != NULL) | 58 | if (cpu_pmu != NULL) |
59 | max_events = cpu_pmu->num_events; | 59 | max_events = cpu_pmu->num_events; |
60 | 60 | ||
61 | return max_events; | 61 | return max_events; |
62 | } | 62 | } |
63 | EXPORT_SYMBOL_GPL(armpmu_get_max_events); | 63 | EXPORT_SYMBOL_GPL(armpmu_get_max_events); |
64 | 64 | ||
65 | int perf_num_counters(void) | 65 | int perf_num_counters(void) |
66 | { | 66 | { |
67 | return armpmu_get_max_events(); | 67 | return armpmu_get_max_events(); |
68 | } | 68 | } |
69 | EXPORT_SYMBOL_GPL(perf_num_counters); | 69 | EXPORT_SYMBOL_GPL(perf_num_counters); |
70 | 70 | ||
71 | #define HW_OP_UNSUPPORTED 0xFFFF | 71 | #define HW_OP_UNSUPPORTED 0xFFFF |
72 | 72 | ||
73 | #define C(_x) \ | 73 | #define C(_x) \ |
74 | PERF_COUNT_HW_CACHE_##_x | 74 | PERF_COUNT_HW_CACHE_##_x |
75 | 75 | ||
76 | #define CACHE_OP_UNSUPPORTED 0xFFFF | 76 | #define CACHE_OP_UNSUPPORTED 0xFFFF |
77 | 77 | ||
78 | static int | 78 | static int |
79 | armpmu_map_cache_event(const unsigned (*cache_map) | 79 | armpmu_map_cache_event(const unsigned (*cache_map) |
80 | [PERF_COUNT_HW_CACHE_MAX] | 80 | [PERF_COUNT_HW_CACHE_MAX] |
81 | [PERF_COUNT_HW_CACHE_OP_MAX] | 81 | [PERF_COUNT_HW_CACHE_OP_MAX] |
82 | [PERF_COUNT_HW_CACHE_RESULT_MAX], | 82 | [PERF_COUNT_HW_CACHE_RESULT_MAX], |
83 | u64 config) | 83 | u64 config) |
84 | { | 84 | { |
85 | unsigned int cache_type, cache_op, cache_result, ret; | 85 | unsigned int cache_type, cache_op, cache_result, ret; |
86 | 86 | ||
87 | cache_type = (config >> 0) & 0xff; | 87 | cache_type = (config >> 0) & 0xff; |
88 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | 88 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) |
89 | return -EINVAL; | 89 | return -EINVAL; |
90 | 90 | ||
91 | cache_op = (config >> 8) & 0xff; | 91 | cache_op = (config >> 8) & 0xff; |
92 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) | 92 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) |
93 | return -EINVAL; | 93 | return -EINVAL; |
94 | 94 | ||
95 | cache_result = (config >> 16) & 0xff; | 95 | cache_result = (config >> 16) & 0xff; |
96 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | 96 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) |
97 | return -EINVAL; | 97 | return -EINVAL; |
98 | 98 | ||
99 | ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; | 99 | ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; |
100 | 100 | ||
101 | if (ret == CACHE_OP_UNSUPPORTED) | 101 | if (ret == CACHE_OP_UNSUPPORTED) |
102 | return -ENOENT; | 102 | return -ENOENT; |
103 | 103 | ||
104 | return ret; | 104 | return ret; |
105 | } | 105 | } |
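The three byte-wide fields decoded above follow the generic perf ABI packing for PERF_TYPE_HW_CACHE; for example, an L1 data-cache read miss is requested with:

    u64 config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |  /* cache_type   */
                 (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |  /* cache_op     */
                 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);   /* cache_result */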
106 | 106 | ||
107 | static int | 107 | static int |
108 | armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) | 108 | armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) |
109 | { | 109 | { |
110 | int mapping = (*event_map)[config]; | 110 | int mapping = (*event_map)[config]; |
111 | return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; | 111 | return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; |
112 | } | 112 | } |
113 | 113 | ||
114 | static int | 114 | static int |
115 | armpmu_map_raw_event(u32 raw_event_mask, u64 config) | 115 | armpmu_map_raw_event(u32 raw_event_mask, u64 config) |
116 | { | 116 | { |
117 | return (int)(config & raw_event_mask); | 117 | return (int)(config & raw_event_mask); |
118 | } | 118 | } |
119 | 119 | ||
120 | static int map_cpu_event(struct perf_event *event, | 120 | static int map_cpu_event(struct perf_event *event, |
121 | const unsigned (*event_map)[PERF_COUNT_HW_MAX], | 121 | const unsigned (*event_map)[PERF_COUNT_HW_MAX], |
122 | const unsigned (*cache_map) | 122 | const unsigned (*cache_map) |
123 | [PERF_COUNT_HW_CACHE_MAX] | 123 | [PERF_COUNT_HW_CACHE_MAX] |
124 | [PERF_COUNT_HW_CACHE_OP_MAX] | 124 | [PERF_COUNT_HW_CACHE_OP_MAX] |
125 | [PERF_COUNT_HW_CACHE_RESULT_MAX], | 125 | [PERF_COUNT_HW_CACHE_RESULT_MAX], |
126 | u32 raw_event_mask) | 126 | u32 raw_event_mask) |
127 | { | 127 | { |
128 | u64 config = event->attr.config; | 128 | u64 config = event->attr.config; |
129 | 129 | ||
130 | switch (event->attr.type) { | 130 | switch (event->attr.type) { |
131 | case PERF_TYPE_HARDWARE: | 131 | case PERF_TYPE_HARDWARE: |
132 | return armpmu_map_event(event_map, config); | 132 | return armpmu_map_event(event_map, config); |
133 | case PERF_TYPE_HW_CACHE: | 133 | case PERF_TYPE_HW_CACHE: |
134 | return armpmu_map_cache_event(cache_map, config); | 134 | return armpmu_map_cache_event(cache_map, config); |
135 | case PERF_TYPE_RAW: | 135 | case PERF_TYPE_RAW: |
136 | return armpmu_map_raw_event(raw_event_mask, config); | 136 | return armpmu_map_raw_event(raw_event_mask, config); |
137 | } | 137 | } |
138 | 138 | ||
139 | return -ENOENT; | 139 | return -ENOENT; |
140 | } | 140 | } |
141 | 141 | ||
142 | int | 142 | int |
143 | armpmu_event_set_period(struct perf_event *event, | 143 | armpmu_event_set_period(struct perf_event *event, |
144 | struct hw_perf_event *hwc, | 144 | struct hw_perf_event *hwc, |
145 | int idx) | 145 | int idx) |
146 | { | 146 | { |
147 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 147 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
148 | s64 left = local64_read(&hwc->period_left); | 148 | s64 left = local64_read(&hwc->period_left); |
149 | s64 period = hwc->sample_period; | 149 | s64 period = hwc->sample_period; |
150 | int ret = 0; | 150 | int ret = 0; |
151 | 151 | ||
152 | if (unlikely(left <= -period)) { | 152 | if (unlikely(left <= -period)) { |
153 | left = period; | 153 | left = period; |
154 | local64_set(&hwc->period_left, left); | 154 | local64_set(&hwc->period_left, left); |
155 | hwc->last_period = period; | 155 | hwc->last_period = period; |
156 | ret = 1; | 156 | ret = 1; |
157 | } | 157 | } |
158 | 158 | ||
159 | if (unlikely(left <= 0)) { | 159 | if (unlikely(left <= 0)) { |
160 | left += period; | 160 | left += period; |
161 | local64_set(&hwc->period_left, left); | 161 | local64_set(&hwc->period_left, left); |
162 | hwc->last_period = period; | 162 | hwc->last_period = period; |
163 | ret = 1; | 163 | ret = 1; |
164 | } | 164 | } |
165 | 165 | ||
166 | if (left > (s64)armpmu->max_period) | 166 | if (left > (s64)armpmu->max_period) |
167 | left = armpmu->max_period; | 167 | left = armpmu->max_period; |
168 | 168 | ||
169 | local64_set(&hwc->prev_count, (u64)-left); | 169 | local64_set(&hwc->prev_count, (u64)-left); |
170 | 170 | ||
171 | armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); | 171 | armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); |
172 | 172 | ||
173 | perf_event_update_userpage(event); | 173 | perf_event_update_userpage(event); |
174 | 174 | ||
175 | return ret; | 175 | return ret; |
176 | } | 176 | } |
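Writing -left works because the PMU counters count upward and interrupt on overflow, so the counter fires after exactly `left` events. A worked example with left == 1000 on a 32-bit counter:

    u32 programmed = (u32)-1000;    /* 0xfffffc18 */
    /* 0xfffffc18 + 1000 increments wraps past 2^32 and raises the overflow IRQ */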
177 | 177 | ||
178 | u64 | 178 | u64 |
179 | armpmu_event_update(struct perf_event *event, | 179 | armpmu_event_update(struct perf_event *event, |
180 | struct hw_perf_event *hwc, | 180 | struct hw_perf_event *hwc, |
181 | int idx) | 181 | int idx) |
182 | { | 182 | { |
183 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 183 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
184 | u64 delta, prev_raw_count, new_raw_count; | 184 | u64 delta, prev_raw_count, new_raw_count; |
185 | 185 | ||
186 | again: | 186 | again: |
187 | prev_raw_count = local64_read(&hwc->prev_count); | 187 | prev_raw_count = local64_read(&hwc->prev_count); |
188 | new_raw_count = armpmu->read_counter(idx); | 188 | new_raw_count = armpmu->read_counter(idx); |
189 | 189 | ||
190 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | 190 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, |
191 | new_raw_count) != prev_raw_count) | 191 | new_raw_count) != prev_raw_count) |
192 | goto again; | 192 | goto again; |
193 | 193 | ||
194 | delta = (new_raw_count - prev_raw_count) & armpmu->max_period; | 194 | delta = (new_raw_count - prev_raw_count) & armpmu->max_period; |
195 | 195 | ||
196 | local64_add(delta, &event->count); | 196 | local64_add(delta, &event->count); |
197 | local64_sub(delta, &hwc->period_left); | 197 | local64_sub(delta, &hwc->period_left); |
198 | 198 | ||
199 | return new_raw_count; | 199 | return new_raw_count; |
200 | } | 200 | } |
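The local64_cmpxchg() loop keeps the update race-free against a concurrent overflow interrupt, and masking with max_period makes counter wraparound benign. For example, on a 32-bit counter:

    /* prev == 0xfffffff0, new == 0x10:
     * delta = (0x10 - 0xfffffff0) & 0xffffffff == 0x20 (32 events counted)
     */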
201 | 201 | ||
202 | static void | 202 | static void |
203 | armpmu_read(struct perf_event *event) | 203 | armpmu_read(struct perf_event *event) |
204 | { | 204 | { |
205 | struct hw_perf_event *hwc = &event->hw; | 205 | struct hw_perf_event *hwc = &event->hw; |
206 | 206 | ||
207 | /* Don't read disabled counters! */ | 207 | /* Don't read disabled counters! */ |
208 | if (hwc->idx < 0) | 208 | if (hwc->idx < 0) |
209 | return; | 209 | return; |
210 | 210 | ||
211 | armpmu_event_update(event, hwc, hwc->idx); | 211 | armpmu_event_update(event, hwc, hwc->idx); |
212 | } | 212 | } |
213 | 213 | ||
214 | static void | 214 | static void |
215 | armpmu_stop(struct perf_event *event, int flags) | 215 | armpmu_stop(struct perf_event *event, int flags) |
216 | { | 216 | { |
217 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 217 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
218 | struct hw_perf_event *hwc = &event->hw; | 218 | struct hw_perf_event *hwc = &event->hw; |
219 | 219 | ||
220 | /* | 220 | /* |
221 | * ARM PMU always has to update the counter, so ignore | 221 | * ARM PMU always has to update the counter, so ignore |
222 | * PERF_EF_UPDATE, see comments in armpmu_start(). | 222 | * PERF_EF_UPDATE, see comments in armpmu_start(). |
223 | */ | 223 | */ |
224 | if (!(hwc->state & PERF_HES_STOPPED)) { | 224 | if (!(hwc->state & PERF_HES_STOPPED)) { |
225 | armpmu->disable(hwc, hwc->idx); | 225 | armpmu->disable(hwc, hwc->idx); |
226 | barrier(); /* why? */ | 226 | barrier(); /* why? */ |
227 | armpmu_event_update(event, hwc, hwc->idx); | 227 | armpmu_event_update(event, hwc, hwc->idx); |
228 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | 228 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; |
229 | } | 229 | } |
230 | } | 230 | } |
231 | 231 | ||
232 | static void | 232 | static void |
233 | armpmu_start(struct perf_event *event, int flags) | 233 | armpmu_start(struct perf_event *event, int flags) |
234 | { | 234 | { |
235 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 235 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
236 | struct hw_perf_event *hwc = &event->hw; | 236 | struct hw_perf_event *hwc = &event->hw; |
237 | 237 | ||
238 | /* | 238 | /* |
239 | * ARM PMU always has to reprogram the period, so ignore | 239 | * ARM PMU always has to reprogram the period, so ignore |
240 | * PERF_EF_RELOAD, see the comment below. | 240 | * PERF_EF_RELOAD, see the comment below. |
241 | */ | 241 | */ |
242 | if (flags & PERF_EF_RELOAD) | 242 | if (flags & PERF_EF_RELOAD) |
243 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); | 243 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); |
244 | 244 | ||
245 | hwc->state = 0; | 245 | hwc->state = 0; |
246 | /* | 246 | /* |
247 | * Set the period again. Some counters can't be stopped, so when we | 247 | * Set the period again. Some counters can't be stopped, so when we |
248 | * were stopped we simply disabled the IRQ source and the counter | 248 | * were stopped we simply disabled the IRQ source and the counter |
249 | * may have been left counting. If we don't do this step then we may | 249 | * may have been left counting. If we don't do this step then we may |
250 | * get an interrupt too soon or *way* too late if the overflow has | 250 | * get an interrupt too soon or *way* too late if the overflow has |
251 | * happened since disabling. | 251 | * happened since disabling. |
252 | */ | 252 | */ |
253 | armpmu_event_set_period(event, hwc, hwc->idx); | 253 | armpmu_event_set_period(event, hwc, hwc->idx); |
254 | armpmu->enable(hwc, hwc->idx); | 254 | armpmu->enable(hwc, hwc->idx); |
255 | } | 255 | } |
256 | 256 | ||
257 | static void | 257 | static void |
258 | armpmu_del(struct perf_event *event, int flags) | 258 | armpmu_del(struct perf_event *event, int flags) |
259 | { | 259 | { |
260 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 260 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
261 | struct pmu_hw_events *hw_events = armpmu->get_hw_events(); | 261 | struct pmu_hw_events *hw_events = armpmu->get_hw_events(); |
262 | struct hw_perf_event *hwc = &event->hw; | 262 | struct hw_perf_event *hwc = &event->hw; |
263 | int idx = hwc->idx; | 263 | int idx = hwc->idx; |
264 | 264 | ||
265 | WARN_ON(idx < 0); | 265 | WARN_ON(idx < 0); |
266 | 266 | ||
267 | armpmu_stop(event, PERF_EF_UPDATE); | 267 | armpmu_stop(event, PERF_EF_UPDATE); |
268 | hw_events->events[idx] = NULL; | 268 | hw_events->events[idx] = NULL; |
269 | clear_bit(idx, hw_events->used_mask); | 269 | clear_bit(idx, hw_events->used_mask); |
270 | 270 | ||
271 | perf_event_update_userpage(event); | 271 | perf_event_update_userpage(event); |
272 | } | 272 | } |
273 | 273 | ||
274 | static int | 274 | static int |
275 | armpmu_add(struct perf_event *event, int flags) | 275 | armpmu_add(struct perf_event *event, int flags) |
276 | { | 276 | { |
277 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 277 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
278 | struct pmu_hw_events *hw_events = armpmu->get_hw_events(); | 278 | struct pmu_hw_events *hw_events = armpmu->get_hw_events(); |
279 | struct hw_perf_event *hwc = &event->hw; | 279 | struct hw_perf_event *hwc = &event->hw; |
280 | int idx; | 280 | int idx; |
281 | int err = 0; | 281 | int err = 0; |
282 | 282 | ||
283 | perf_pmu_disable(event->pmu); | 283 | perf_pmu_disable(event->pmu); |
284 | 284 | ||
285 | /* If we don't have space for the counter then finish early. */ | 285 | /* If we don't have space for the counter then finish early. */ |
286 | idx = armpmu->get_event_idx(hw_events, hwc); | 286 | idx = armpmu->get_event_idx(hw_events, hwc); |
287 | if (idx < 0) { | 287 | if (idx < 0) { |
288 | err = idx; | 288 | err = idx; |
289 | goto out; | 289 | goto out; |
290 | } | 290 | } |
291 | 291 | ||
292 | /* | 292 | /* |
293 | * If there is an event in the counter we are going to use then make | 293 | * If there is an event in the counter we are going to use then make |
294 | * sure it is disabled. | 294 | * sure it is disabled. |
295 | */ | 295 | */ |
296 | event->hw.idx = idx; | 296 | event->hw.idx = idx; |
297 | armpmu->disable(hwc, idx); | 297 | armpmu->disable(hwc, idx); |
298 | hw_events->events[idx] = event; | 298 | hw_events->events[idx] = event; |
299 | 299 | ||
300 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | 300 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; |
301 | if (flags & PERF_EF_START) | 301 | if (flags & PERF_EF_START) |
302 | armpmu_start(event, PERF_EF_RELOAD); | 302 | armpmu_start(event, PERF_EF_RELOAD); |
303 | 303 | ||
304 | /* Propagate our changes to the userspace mapping. */ | 304 | /* Propagate our changes to the userspace mapping. */ |
305 | perf_event_update_userpage(event); | 305 | perf_event_update_userpage(event); |
306 | 306 | ||
307 | out: | 307 | out: |
308 | perf_pmu_enable(event->pmu); | 308 | perf_pmu_enable(event->pmu); |
309 | return err; | 309 | return err; |
310 | } | 310 | } |
311 | 311 | ||
312 | static int | 312 | static int |
313 | validate_event(struct pmu_hw_events *hw_events, | 313 | validate_event(struct pmu_hw_events *hw_events, |
314 | struct perf_event *event) | 314 | struct perf_event *event) |
315 | { | 315 | { |
316 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 316 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
317 | struct hw_perf_event fake_event = event->hw; | 317 | struct hw_perf_event fake_event = event->hw; |
318 | struct pmu *leader_pmu = event->group_leader->pmu; | 318 | struct pmu *leader_pmu = event->group_leader->pmu; |
319 | 319 | ||
320 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) | 320 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) |
321 | return 1; | 321 | return 1; |
322 | 322 | ||
323 | return armpmu->get_event_idx(hw_events, &fake_event) >= 0; | 323 | return armpmu->get_event_idx(hw_events, &fake_event) >= 0; |
324 | } | 324 | } |
325 | 325 | ||
326 | static int | 326 | static int |
327 | validate_group(struct perf_event *event) | 327 | validate_group(struct perf_event *event) |
328 | { | 328 | { |
329 | struct perf_event *sibling, *leader = event->group_leader; | 329 | struct perf_event *sibling, *leader = event->group_leader; |
330 | struct pmu_hw_events fake_pmu; | 330 | struct pmu_hw_events fake_pmu; |
331 | DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS); | 331 | DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS); |
332 | 332 | ||
333 | /* | 333 | /* |
334 | * Initialise the fake PMU. We only need to populate the | 334 | * Initialise the fake PMU. We only need to populate the |
335 | * used_mask for the purposes of validation. | 335 | * used_mask for the purposes of validation. |
336 | */ | 336 | */ |
337 | memset(fake_used_mask, 0, sizeof(fake_used_mask)); | 337 | memset(fake_used_mask, 0, sizeof(fake_used_mask)); |
338 | fake_pmu.used_mask = fake_used_mask; | 338 | fake_pmu.used_mask = fake_used_mask; |
339 | 339 | ||
340 | if (!validate_event(&fake_pmu, leader)) | 340 | if (!validate_event(&fake_pmu, leader)) |
341 | return -EINVAL; | 341 | return -EINVAL; |
342 | 342 | ||
343 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { | 343 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { |
344 | if (!validate_event(&fake_pmu, sibling)) | 344 | if (!validate_event(&fake_pmu, sibling)) |
345 | return -EINVAL; | 345 | return -EINVAL; |
346 | } | 346 | } |
347 | 347 | ||
348 | if (!validate_event(&fake_pmu, event)) | 348 | if (!validate_event(&fake_pmu, event)) |
349 | return -EINVAL; | 349 | return -EINVAL; |
350 | 350 | ||
351 | return 0; | 351 | return 0; |
352 | } | 352 | } |
353 | 353 | ||
354 | static void | 354 | static void |
355 | armpmu_release_hardware(struct arm_pmu *armpmu) | 355 | armpmu_release_hardware(struct arm_pmu *armpmu) |
356 | { | 356 | { |
357 | int i, irq, irqs; | 357 | int i, irq, irqs; |
358 | struct platform_device *pmu_device = armpmu->plat_device; | 358 | struct platform_device *pmu_device = armpmu->plat_device; |
359 | 359 | ||
360 | irqs = min(pmu_device->num_resources, num_possible_cpus()); | 360 | irqs = min(pmu_device->num_resources, num_possible_cpus()); |
361 | 361 | ||
362 | for (i = 0; i < irqs; ++i) { | 362 | for (i = 0; i < irqs; ++i) { |
363 | if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) | 363 | if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) |
364 | continue; | 364 | continue; |
365 | irq = platform_get_irq(pmu_device, i); | 365 | irq = platform_get_irq(pmu_device, i); |
366 | if (irq >= 0) | 366 | if (irq >= 0) |
367 | free_irq(irq, armpmu); | 367 | free_irq(irq, armpmu); |
368 | } | 368 | } |
369 | } | 369 | } |
370 | 370 | ||
371 | static int | 371 | static int |
372 | armpmu_reserve_hardware(struct arm_pmu *armpmu) | 372 | armpmu_reserve_hardware(struct arm_pmu *armpmu) |
373 | { | 373 | { |
374 | int i, err, irq, irqs; | 374 | int i, err, irq, irqs; |
375 | struct platform_device *pmu_device = armpmu->plat_device; | 375 | struct platform_device *pmu_device = armpmu->plat_device; |
376 | 376 | ||
377 | if (!pmu_device) { | 377 | if (!pmu_device) { |
378 | pr_err("no PMU device registered\n"); | 378 | pr_err("no PMU device registered\n"); |
379 | return -ENODEV; | 379 | return -ENODEV; |
380 | } | 380 | } |
381 | 381 | ||
382 | irqs = min(pmu_device->num_resources, num_possible_cpus()); | 382 | irqs = min(pmu_device->num_resources, num_possible_cpus()); |
383 | if (irqs < 1) { | 383 | if (irqs < 1) { |
384 | pr_err("no irqs for PMUs defined\n"); | 384 | pr_err("no irqs for PMUs defined\n"); |
385 | return -ENODEV; | 385 | return -ENODEV; |
386 | } | 386 | } |
387 | 387 | ||
388 | for (i = 0; i < irqs; ++i) { | 388 | for (i = 0; i < irqs; ++i) { |
389 | err = 0; | 389 | err = 0; |
390 | irq = platform_get_irq(pmu_device, i); | 390 | irq = platform_get_irq(pmu_device, i); |
391 | if (irq < 0) | 391 | if (irq < 0) |
392 | continue; | 392 | continue; |
393 | 393 | ||
394 | /* | 394 | /* |
395 | * If we have a single PMU interrupt that we can't shift, | 395 | * If we have a single PMU interrupt that we can't shift, |
396 | * assume that we're running on a uniprocessor machine and keep | 396 | * assume that we're running on a uniprocessor machine and keep |
397 | * using it. Otherwise, skip this interrupt. | 397 | * using it. Otherwise, skip this interrupt. |
398 | */ | 398 | */ |
399 | if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { | 399 | if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { |
400 | pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", | 400 | pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", |
401 | irq, i); | 401 | irq, i); |
402 | continue; | 402 | continue; |
403 | } | 403 | } |
404 | 404 | ||
405 | err = request_irq(irq, armpmu->handle_irq, | 405 | err = request_irq(irq, armpmu->handle_irq, |
406 | IRQF_NOBALANCING, | 406 | IRQF_NOBALANCING, |
407 | "arm-pmu", armpmu); | 407 | "arm-pmu", armpmu); |
408 | if (err) { | 408 | if (err) { |
409 | pr_err("unable to request IRQ%d for ARM PMU counters\n", | 409 | pr_err("unable to request IRQ%d for ARM PMU counters\n", |
410 | irq); | 410 | irq); |
411 | armpmu_release_hardware(armpmu); | 411 | armpmu_release_hardware(armpmu); |
412 | return err; | 412 | return err; |
413 | } | 413 | } |
414 | 414 | ||
415 | cpumask_set_cpu(i, &armpmu->active_irqs); | 415 | cpumask_set_cpu(i, &armpmu->active_irqs); |
416 | } | 416 | } |
417 | 417 | ||
418 | return 0; | 418 | return 0; |
419 | } | 419 | } |
420 | 420 | ||
421 | static void | 421 | static void |
422 | hw_perf_event_destroy(struct perf_event *event) | 422 | hw_perf_event_destroy(struct perf_event *event) |
423 | { | 423 | { |
424 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 424 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
425 | atomic_t *active_events = &armpmu->active_events; | 425 | atomic_t *active_events = &armpmu->active_events; |
426 | struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; | 426 | struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; |
427 | 427 | ||
428 | if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { | 428 | if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { |
429 | armpmu_release_hardware(armpmu); | 429 | armpmu_release_hardware(armpmu); |
430 | mutex_unlock(pmu_reserve_mutex); | 430 | mutex_unlock(pmu_reserve_mutex); |
431 | } | 431 | } |
432 | } | 432 | } |
433 | 433 | ||
434 | static int | 434 | static int |
435 | event_requires_mode_exclusion(struct perf_event_attr *attr) | 435 | event_requires_mode_exclusion(struct perf_event_attr *attr) |
436 | { | 436 | { |
437 | return attr->exclude_idle || attr->exclude_user || | 437 | return attr->exclude_idle || attr->exclude_user || |
438 | attr->exclude_kernel || attr->exclude_hv; | 438 | attr->exclude_kernel || attr->exclude_hv; |
439 | } | 439 | } |
440 | 440 | ||
441 | static int | 441 | static int |
442 | __hw_perf_event_init(struct perf_event *event) | 442 | __hw_perf_event_init(struct perf_event *event) |
443 | { | 443 | { |
444 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 444 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
445 | struct hw_perf_event *hwc = &event->hw; | 445 | struct hw_perf_event *hwc = &event->hw; |
446 | int mapping, err; | 446 | int mapping, err; |
447 | 447 | ||
448 | mapping = armpmu->map_event(event); | 448 | mapping = armpmu->map_event(event); |
449 | 449 | ||
450 | if (mapping < 0) { | 450 | if (mapping < 0) { |
451 | pr_debug("event %x:%llx not supported\n", event->attr.type, | 451 | pr_debug("event %x:%llx not supported\n", event->attr.type, |
452 | event->attr.config); | 452 | event->attr.config); |
453 | return mapping; | 453 | return mapping; |
454 | } | 454 | } |
455 | 455 | ||
456 | /* | 456 | /* |
457 | * We don't assign an index until we actually place the event onto | 457 | * We don't assign an index until we actually place the event onto |
458 | * hardware. Use -1 to signify that we haven't decided where to put it | 458 | * hardware. Use -1 to signify that we haven't decided where to put it |
459 | * yet. For SMP systems, each core has its own PMU so we can't do any | 459 | * yet. For SMP systems, each core has its own PMU so we can't do any |
460 | * clever allocation or constraints checking at this point. | 460 | * clever allocation or constraints checking at this point. |
461 | */ | 461 | */ |
462 | hwc->idx = -1; | 462 | hwc->idx = -1; |
463 | hwc->config_base = 0; | 463 | hwc->config_base = 0; |
464 | hwc->config = 0; | 464 | hwc->config = 0; |
465 | hwc->event_base = 0; | 465 | hwc->event_base = 0; |
466 | 466 | ||
467 | /* | 467 | /* |
468 | * Check whether we need to exclude the counter from certain modes. | 468 | * Check whether we need to exclude the counter from certain modes. |
469 | */ | 469 | */ |
470 | if ((!armpmu->set_event_filter || | 470 | if ((!armpmu->set_event_filter || |
471 | armpmu->set_event_filter(hwc, &event->attr)) && | 471 | armpmu->set_event_filter(hwc, &event->attr)) && |
472 | event_requires_mode_exclusion(&event->attr)) { | 472 | event_requires_mode_exclusion(&event->attr)) { |
473 | pr_debug("ARM performance counters do not support mode exclusion\n"); | 473 | pr_debug("ARM performance counters do not support mode exclusion\n"); |
474 | return -EPERM; | 474 | return -EPERM; |
475 | } | 475 | } |
476 | 476 | ||
477 | /* | 477 | /* |
478 | * Store the event encoding into the config_base field. | 478 | * Store the event encoding into the config_base field. |
479 | */ | 479 | */ |
480 | hwc->config_base |= (unsigned long)mapping; | 480 | hwc->config_base |= (unsigned long)mapping; |
481 | 481 | ||
482 | if (!hwc->sample_period) { | 482 | if (!hwc->sample_period) { |
483 | /* | 483 | /* |
484 | * For non-sampling runs, limit the sample_period to half | 484 | * For non-sampling runs, limit the sample_period to half |
485 | * of the counter range. That way, the new counter value | 485 | * of the counter range. That way, the new counter value |
486 | * is far less likely to overtake the previous one unless | 486 | * is far less likely to overtake the previous one unless |
487 | * you have some serious IRQ latency issues. | 487 | * you have some serious IRQ latency issues. |
488 | */ | 488 | */ |
489 | hwc->sample_period = armpmu->max_period >> 1; | 489 | hwc->sample_period = armpmu->max_period >> 1; |
490 | hwc->last_period = hwc->sample_period; | 490 | hwc->last_period = hwc->sample_period; |
491 | local64_set(&hwc->period_left, hwc->sample_period); | 491 | local64_set(&hwc->period_left, hwc->sample_period); |
492 | } | 492 | } |
493 | 493 | ||
494 | err = 0; | 494 | err = 0; |
495 | if (event->group_leader != event) { | 495 | if (event->group_leader != event) { |
496 | err = validate_group(event); | 496 | err = validate_group(event); |
497 | if (err) | 497 | if (err) |
498 | return -EINVAL; | 498 | return -EINVAL; |
499 | } | 499 | } |
500 | 500 | ||
501 | return err; | 501 | return err; |
502 | } | 502 | } |
503 | 503 | ||
504 | static int armpmu_event_init(struct perf_event *event) | 504 | static int armpmu_event_init(struct perf_event *event) |
505 | { | 505 | { |
506 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 506 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
507 | int err = 0; | 507 | int err = 0; |
508 | atomic_t *active_events = &armpmu->active_events; | 508 | atomic_t *active_events = &armpmu->active_events; |
509 | 509 | ||
510 | if (armpmu->map_event(event) == -ENOENT) | 510 | if (armpmu->map_event(event) == -ENOENT) |
511 | return -ENOENT; | 511 | return -ENOENT; |
512 | 512 | ||
513 | event->destroy = hw_perf_event_destroy; | 513 | event->destroy = hw_perf_event_destroy; |
514 | 514 | ||
515 | if (!atomic_inc_not_zero(active_events)) { | 515 | if (!atomic_inc_not_zero(active_events)) { |
516 | mutex_lock(&armpmu->reserve_mutex); | 516 | mutex_lock(&armpmu->reserve_mutex); |
517 | if (atomic_read(active_events) == 0) | 517 | if (atomic_read(active_events) == 0) |
518 | err = armpmu_reserve_hardware(armpmu); | 518 | err = armpmu_reserve_hardware(armpmu); |
519 | 519 | ||
520 | if (!err) | 520 | if (!err) |
521 | atomic_inc(active_events); | 521 | atomic_inc(active_events); |
522 | mutex_unlock(&armpmu->reserve_mutex); | 522 | mutex_unlock(&armpmu->reserve_mutex); |
523 | } | 523 | } |
524 | 524 | ||
525 | if (err) | 525 | if (err) |
526 | return err; | 526 | return err; |
527 | 527 | ||
528 | err = __hw_perf_event_init(event); | 528 | err = __hw_perf_event_init(event); |
529 | if (err) | 529 | if (err) |
530 | hw_perf_event_destroy(event); | 530 | hw_perf_event_destroy(event); |
531 | 531 | ||
532 | return err; | 532 | return err; |
533 | } | 533 | } |
534 | 534 | ||
535 | static void armpmu_enable(struct pmu *pmu) | 535 | static void armpmu_enable(struct pmu *pmu) |
536 | { | 536 | { |
537 | struct arm_pmu *armpmu = to_arm_pmu(pmu); | 537 | struct arm_pmu *armpmu = to_arm_pmu(pmu); |
538 | struct pmu_hw_events *hw_events = armpmu->get_hw_events(); | 538 | struct pmu_hw_events *hw_events = armpmu->get_hw_events(); |
539 | int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); | 539 | int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); |
540 | 540 | ||
541 | if (enabled) | 541 | if (enabled) |
542 | armpmu->start(); | 542 | armpmu->start(); |
543 | } | 543 | } |
544 | 544 | ||
545 | static void armpmu_disable(struct pmu *pmu) | 545 | static void armpmu_disable(struct pmu *pmu) |
546 | { | 546 | { |
547 | struct arm_pmu *armpmu = to_arm_pmu(pmu); | 547 | struct arm_pmu *armpmu = to_arm_pmu(pmu); |
548 | armpmu->stop(); | 548 | armpmu->stop(); |
549 | } | 549 | } |
550 | 550 | ||
551 | static void __init armpmu_init(struct arm_pmu *armpmu) | 551 | static void __init armpmu_init(struct arm_pmu *armpmu) |
552 | { | 552 | { |
553 | atomic_set(&armpmu->active_events, 0); | 553 | atomic_set(&armpmu->active_events, 0); |
554 | mutex_init(&armpmu->reserve_mutex); | 554 | mutex_init(&armpmu->reserve_mutex); |
555 | 555 | ||
556 | armpmu->pmu = (struct pmu) { | 556 | armpmu->pmu = (struct pmu) { |
557 | .pmu_enable = armpmu_enable, | 557 | .pmu_enable = armpmu_enable, |
558 | .pmu_disable = armpmu_disable, | 558 | .pmu_disable = armpmu_disable, |
559 | .event_init = armpmu_event_init, | 559 | .event_init = armpmu_event_init, |
560 | .add = armpmu_add, | 560 | .add = armpmu_add, |
561 | .del = armpmu_del, | 561 | .del = armpmu_del, |
562 | .start = armpmu_start, | 562 | .start = armpmu_start, |
563 | .stop = armpmu_stop, | 563 | .stop = armpmu_stop, |
564 | .read = armpmu_read, | 564 | .read = armpmu_read, |
565 | }; | 565 | }; |
566 | } | 566 | } |
567 | 567 | ||
568 | int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type) | 568 | int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type) |
569 | { | 569 | { |
570 | armpmu_init(armpmu); | 570 | armpmu_init(armpmu); |
571 | return perf_pmu_register(&armpmu->pmu, name, type); | 571 | return perf_pmu_register(&armpmu->pmu, name, type); |
572 | } | 572 | } |
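A PMU backend then registers itself along these lines (hypothetical caller; the name string here is an assumption, not taken from this commit):

    armpmu_register(cpu_pmu, "armv8_pmuv3", PERF_TYPE_RAW);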
573 | 573 | ||
574 | /* | 574 | /* |
575 | * ARMv8 PMUv3 Performance Events handling code. | 575 | * ARMv8 PMUv3 Performance Events handling code. |
576 | * Common event types. | 576 | * Common event types. |
577 | */ | 577 | */ |
578 | enum armv8_pmuv3_perf_types { | 578 | enum armv8_pmuv3_perf_types { |
579 | /* Required events. */ | 579 | /* Required events. */ |
580 | ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00, | 580 | ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00, |
581 | ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03, | 581 | ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03, |
582 | ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04, | 582 | ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04, |
583 | ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, | 583 | ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, |
584 | ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11, | 584 | ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11, |
585 | ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12, | 585 | ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12, |
586 | 586 | ||
587 | /* At least one of the following is required. */ | 587 | /* At least one of the following is required. */ |
588 | ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08, | 588 | ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08, |
589 | ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B, | 589 | ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B, |
590 | 590 | ||
591 | /* Common architectural events. */ | 591 | /* Common architectural events. */ |
592 | ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06, | 592 | ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06, |
593 | ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07, | 593 | ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07, |
594 | ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09, | 594 | ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09, |
595 | ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A, | 595 | ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A, |
596 | ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B, | 596 | ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B, |
597 | ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C, | 597 | ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C, |
598 | ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D, | 598 | ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D, |
599 | ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E, | 599 | ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E, |
600 | ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F, | 600 | ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F, |
601 | ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C, | 601 | ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C, |
602 | 602 | ||
603 | /* Common microarchitectural events. */ | 603 | /* Common microarchitectural events. */ |
604 | ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01, | 604 | ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01, |
605 | ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02, | 605 | ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02, |
606 | ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05, | 606 | ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05, |
607 | ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13, | 607 | ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13, |
608 | ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14, | 608 | ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14, |
609 | ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15, | 609 | ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15, |
610 | ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16, | 610 | ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16, |
611 | ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17, | 611 | ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17, |
612 | ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18, | 612 | ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18, |
613 | ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19, | 613 | ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19, |
614 | ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A, | 614 | ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A, |
615 | ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D, | 615 | ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D, |
616 | }; | 616 | }; |
617 | 617 | ||
618 | /* PMUv3 HW events mapping. */ | 618 | /* PMUv3 HW events mapping. */ |
619 | static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { | 619 | static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { |
620 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES, | 620 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES, |
621 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED, | 621 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED, |
622 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, | 622 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, |
623 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, | 623 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, |
624 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED, | 624 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED, |
625 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, | 625 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, |
626 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | 626 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, |
627 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, | 627 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, |
628 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, | 628 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, |
629 | }; | 629 | }; |
630 | 630 | ||
631 | static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | 631 | static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] |
632 | [PERF_COUNT_HW_CACHE_OP_MAX] | 632 | [PERF_COUNT_HW_CACHE_OP_MAX] |
633 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | 633 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { |
634 | [C(L1D)] = { | 634 | [C(L1D)] = { |
635 | [C(OP_READ)] = { | 635 | [C(OP_READ)] = { |
636 | [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, | 636 | [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, |
637 | [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, | 637 | [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, |
638 | }, | 638 | }, |
639 | [C(OP_WRITE)] = { | 639 | [C(OP_WRITE)] = { |
640 | [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, | 640 | [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, |
641 | [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, | 641 | [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, |
642 | }, | 642 | }, |
643 | [C(OP_PREFETCH)] = { | 643 | [C(OP_PREFETCH)] = { |
644 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 644 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
645 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 645 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
646 | }, | 646 | }, |
647 | }, | 647 | }, |
648 | [C(L1I)] = { | 648 | [C(L1I)] = { |
649 | [C(OP_READ)] = { | 649 | [C(OP_READ)] = { |
650 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 650 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
651 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 651 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
652 | }, | 652 | }, |
653 | [C(OP_WRITE)] = { | 653 | [C(OP_WRITE)] = { |
654 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 654 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
655 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 655 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
656 | }, | 656 | }, |
657 | [C(OP_PREFETCH)] = { | 657 | [C(OP_PREFETCH)] = { |
658 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 658 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
659 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 659 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
660 | }, | 660 | }, |
661 | }, | 661 | }, |
662 | [C(LL)] = { | 662 | [C(LL)] = { |
663 | [C(OP_READ)] = { | 663 | [C(OP_READ)] = { |
664 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 664 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
665 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 665 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
666 | }, | 666 | }, |
667 | [C(OP_WRITE)] = { | 667 | [C(OP_WRITE)] = { |
668 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 668 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
669 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 669 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
670 | }, | 670 | }, |
671 | [C(OP_PREFETCH)] = { | 671 | [C(OP_PREFETCH)] = { |
672 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 672 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
673 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 673 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
674 | }, | 674 | }, |
675 | }, | 675 | }, |
676 | [C(DTLB)] = { | 676 | [C(DTLB)] = { |
677 | [C(OP_READ)] = { | 677 | [C(OP_READ)] = { |
678 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 678 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
679 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 679 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
680 | }, | 680 | }, |
681 | [C(OP_WRITE)] = { | 681 | [C(OP_WRITE)] = { |
682 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 682 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
683 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 683 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
684 | }, | 684 | }, |
685 | [C(OP_PREFETCH)] = { | 685 | [C(OP_PREFETCH)] = { |
686 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 686 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
687 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 687 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
688 | }, | 688 | }, |
689 | }, | 689 | }, |
690 | [C(ITLB)] = { | 690 | [C(ITLB)] = { |
691 | [C(OP_READ)] = { | 691 | [C(OP_READ)] = { |
692 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 692 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
693 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 693 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
694 | }, | 694 | }, |
695 | [C(OP_WRITE)] = { | 695 | [C(OP_WRITE)] = { |
696 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 696 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
697 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 697 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
698 | }, | 698 | }, |
699 | [C(OP_PREFETCH)] = { | 699 | [C(OP_PREFETCH)] = { |
700 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 700 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
701 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 701 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
702 | }, | 702 | }, |
703 | }, | 703 | }, |
704 | [C(BPU)] = { | 704 | [C(BPU)] = { |
705 | [C(OP_READ)] = { | 705 | [C(OP_READ)] = { |
706 | [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, | 706 | [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, |
707 | [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, | 707 | [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, |
708 | }, | 708 | }, |
709 | [C(OP_WRITE)] = { | 709 | [C(OP_WRITE)] = { |
710 | [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, | 710 | [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, |
711 | [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, | 711 | [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, |
712 | }, | 712 | }, |
713 | [C(OP_PREFETCH)] = { | 713 | [C(OP_PREFETCH)] = { |
714 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 714 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
715 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 715 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
716 | }, | 716 | }, |
717 | }, | 717 | }, |
718 | [C(NODE)] = { | 718 | [C(NODE)] = { |
719 | [C(OP_READ)] = { | 719 | [C(OP_READ)] = { |
720 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 720 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
721 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 721 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
722 | }, | 722 | }, |
723 | [C(OP_WRITE)] = { | 723 | [C(OP_WRITE)] = { |
724 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 724 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
725 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 725 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
726 | }, | 726 | }, |
727 | [C(OP_PREFETCH)] = { | 727 | [C(OP_PREFETCH)] = { |
728 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 728 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
729 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | 729 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
730 | }, | 730 | }, |
731 | }, | 731 | }, |
732 | }; | 732 | }; |
733 | 733 | ||
734 | /* | 734 | /* |
735 | * Perf Events' indices | 735 | * Perf Events' indices |
736 | */ | 736 | */ |
737 | #define ARMV8_IDX_CYCLE_COUNTER 0 | 737 | #define ARMV8_IDX_CYCLE_COUNTER 0 |
738 | #define ARMV8_IDX_COUNTER0 1 | 738 | #define ARMV8_IDX_COUNTER0 1 |
739 | #define ARMV8_IDX_COUNTER_LAST (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) | 739 | #define ARMV8_IDX_COUNTER_LAST (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) |
740 | 740 | ||
741 | #define ARMV8_MAX_COUNTERS 32 | 741 | #define ARMV8_MAX_COUNTERS 32 |
742 | #define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1) | 742 | #define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1) |
743 | 743 | ||
744 | /* | 744 | /* |
745 | * ARMv8 low level PMU access | 745 | * ARMv8 low level PMU access |
746 | */ | 746 | */ |
747 | 747 | ||
748 | /* | 748 | /* |
749 | * Perf Event to low level counters mapping | 749 | * Perf Event to low level counters mapping |
750 | */ | 750 | */ |
751 | #define ARMV8_IDX_TO_COUNTER(x) \ | 751 | #define ARMV8_IDX_TO_COUNTER(x) \ |
752 | (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK) | 752 | (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK) |
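
The mapping itself is pure arithmetic: perf index 0 is reserved for the cycle counter and indices 1..N land on hardware event counters 0..N-1. A minimal userspace sketch of the macro, reusing the constants above (illustrative only, not kernel code):

    #include <stdio.h>

    #define ARMV8_IDX_CYCLE_COUNTER 0
    #define ARMV8_IDX_COUNTER0      1
    #define ARMV8_MAX_COUNTERS      32
    #define ARMV8_COUNTER_MASK      (ARMV8_MAX_COUNTERS - 1)
    #define ARMV8_IDX_TO_COUNTER(x) (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)

    int main(void)
    {
        /* perf indices 1, 2, 3 select hardware event counters 0, 1, 2 */
        for (int idx = ARMV8_IDX_COUNTER0; idx <= 3; idx++)
            printf("perf idx %d -> hw counter %d\n", idx, ARMV8_IDX_TO_COUNTER(idx));
        return 0;
    }

The mask keeps the result within the 32 counters PMUv3 can expose, so even the cycle counter's special index wraps to a defined value rather than going negative.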
753 | 753 | ||
754 | /* | 754 | /* |
755 | * Per-CPU PMCR: config reg | 755 | * Per-CPU PMCR: config reg |
756 | */ | 756 | */ |
757 | #define ARMV8_PMCR_E (1 << 0) /* Enable all counters */ | 757 | #define ARMV8_PMCR_E (1 << 0) /* Enable all counters */ |
758 | #define ARMV8_PMCR_P (1 << 1) /* Reset all counters */ | 758 | #define ARMV8_PMCR_P (1 << 1) /* Reset all counters */ |
759 | #define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */ | 759 | #define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */ |
760 | #define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ | 760 | #define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ |
761 | #define ARMV8_PMCR_X (1 << 4) /* Export to ETM */ | 761 | #define ARMV8_PMCR_X (1 << 4) /* Export to ETM */ |
762 | #define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug */ | 762 | #define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug */ |
763 | #define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */ | 763 | #define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */ |
764 | #define ARMV8_PMCR_N_MASK 0x1f | 764 | #define ARMV8_PMCR_N_MASK 0x1f |
765 | #define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */ | 765 | #define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */ |
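
Only the low six bits of PMCR_EL0 are writable through this interface, which is why armv8pmu_pmcr_write() below masks with ARMV8_PMCR_MASK before the msr. A hedged sketch of composing a typical reset-and-enable value (userspace model, constants copied from above):

    #include <stdio.h>

    #define ARMV8_PMCR_E    (1 << 0)
    #define ARMV8_PMCR_P    (1 << 1)
    #define ARMV8_PMCR_C    (1 << 2)
    #define ARMV8_PMCR_MASK 0x3f

    int main(void)
    {
        /* reset the event counters and cycle counter, then enable everything */
        unsigned val = (ARMV8_PMCR_P | ARMV8_PMCR_C | ARMV8_PMCR_E) & ARMV8_PMCR_MASK;
        printf("pmcr write value = %#x\n", val); /* 0x7 */
        return 0;
    }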
766 | 766 | ||
767 | /* | 767 | /* |
768 | * PMOVSR: counters overflow flag status reg | 768 | * PMOVSR: counters overflow flag status reg |
769 | */ | 769 | */ |
770 | #define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */ | 770 | #define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */ |
771 | #define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK | 771 | #define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK |
772 | 772 | ||
773 | /* | 773 | /* |
774 | * PMXEVTYPER: Event selection reg | 774 | * PMXEVTYPER: Event selection reg |
775 | */ | 775 | */ |
776 | #define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */ | 776 | #define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */ |
777 | #define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ | 777 | #define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ |
778 | 778 | ||
779 | /* | 779 | /* |
780 | * Event filters for PMUv3 | 780 | * Event filters for PMUv3 |
781 | */ | 781 | */ |
782 | #define ARMV8_EXCLUDE_EL1 (1 << 31) | 782 | #define ARMV8_EXCLUDE_EL1 (1 << 31) |
783 | #define ARMV8_EXCLUDE_EL0 (1 << 30) | 783 | #define ARMV8_EXCLUDE_EL0 (1 << 30) |
784 | #define ARMV8_INCLUDE_EL2 (1 << 27) | 784 | #define ARMV8_INCLUDE_EL2 (1 << 27) |
785 | 785 | ||
786 | static inline u32 armv8pmu_pmcr_read(void) | 786 | static inline u32 armv8pmu_pmcr_read(void) |
787 | { | 787 | { |
788 | u32 val; | 788 | u32 val; |
789 | asm volatile("mrs %0, pmcr_el0" : "=r" (val)); | 789 | asm volatile("mrs %0, pmcr_el0" : "=r" (val)); |
790 | return val; | 790 | return val; |
791 | } | 791 | } |
792 | 792 | ||
793 | static inline void armv8pmu_pmcr_write(u32 val) | 793 | static inline void armv8pmu_pmcr_write(u32 val) |
794 | { | 794 | { |
795 | val &= ARMV8_PMCR_MASK; | 795 | val &= ARMV8_PMCR_MASK; |
796 | isb(); | 796 | isb(); |
797 | asm volatile("msr pmcr_el0, %0" :: "r" (val)); | 797 | asm volatile("msr pmcr_el0, %0" :: "r" (val)); |
798 | } | 798 | } |
799 | 799 | ||
800 | static inline int armv8pmu_has_overflowed(u32 pmovsr) | 800 | static inline int armv8pmu_has_overflowed(u32 pmovsr) |
801 | { | 801 | { |
802 | return pmovsr & ARMV8_OVERFLOWED_MASK; | 802 | return pmovsr & ARMV8_OVERFLOWED_MASK; |
803 | } | 803 | } |
804 | 804 | ||
805 | static inline int armv8pmu_counter_valid(int idx) | 805 | static inline int armv8pmu_counter_valid(int idx) |
806 | { | 806 | { |
807 | return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST; | 807 | return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST; |
808 | } | 808 | } |
809 | 809 | ||
810 | static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) | 810 | static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) |
811 | { | 811 | { |
812 | int ret = 0; | 812 | int ret = 0; |
813 | u32 counter; | 813 | u32 counter; |
814 | 814 | ||
815 | if (!armv8pmu_counter_valid(idx)) { | 815 | if (!armv8pmu_counter_valid(idx)) { |
816 | pr_err("CPU%u checking wrong counter %d overflow status\n", | 816 | pr_err("CPU%u checking wrong counter %d overflow status\n", |
817 | smp_processor_id(), idx); | 817 | smp_processor_id(), idx); |
818 | } else { | 818 | } else { |
819 | counter = ARMV8_IDX_TO_COUNTER(idx); | 819 | counter = ARMV8_IDX_TO_COUNTER(idx); |
820 | ret = pmnc & BIT(counter); | 820 | ret = pmnc & BIT(counter); |
821 | } | 821 | } |
822 | 822 | ||
823 | return ret; | 823 | return ret; |
824 | } | 824 | } |
825 | 825 | ||
826 | static inline int armv8pmu_select_counter(int idx) | 826 | static inline int armv8pmu_select_counter(int idx) |
827 | { | 827 | { |
828 | u32 counter; | 828 | u32 counter; |
829 | 829 | ||
830 | if (!armv8pmu_counter_valid(idx)) { | 830 | if (!armv8pmu_counter_valid(idx)) { |
831 | pr_err("CPU%u selecting wrong PMNC counter %d\n", | 831 | pr_err("CPU%u selecting wrong PMNC counter %d\n", |
832 | smp_processor_id(), idx); | 832 | smp_processor_id(), idx); |
833 | return -EINVAL; | 833 | return -EINVAL; |
834 | } | 834 | } |
835 | 835 | ||
836 | counter = ARMV8_IDX_TO_COUNTER(idx); | 836 | counter = ARMV8_IDX_TO_COUNTER(idx); |
837 | asm volatile("msr pmselr_el0, %0" :: "r" (counter)); | 837 | asm volatile("msr pmselr_el0, %0" :: "r" (counter)); |
838 | isb(); | 838 | isb(); |
839 | 839 | ||
840 | return idx; | 840 | return idx; |
841 | } | 841 | } |
842 | 842 | ||
843 | static inline u32 armv8pmu_read_counter(int idx) | 843 | static inline u32 armv8pmu_read_counter(int idx) |
844 | { | 844 | { |
845 | u32 value = 0; | 845 | u32 value = 0; |
846 | 846 | ||
847 | if (!armv8pmu_counter_valid(idx)) | 847 | if (!armv8pmu_counter_valid(idx)) |
848 | pr_err("CPU%u reading wrong counter %d\n", | 848 | pr_err("CPU%u reading wrong counter %d\n", |
849 | smp_processor_id(), idx); | 849 | smp_processor_id(), idx); |
850 | else if (idx == ARMV8_IDX_CYCLE_COUNTER) | 850 | else if (idx == ARMV8_IDX_CYCLE_COUNTER) |
851 | asm volatile("mrs %0, pmccntr_el0" : "=r" (value)); | 851 | asm volatile("mrs %0, pmccntr_el0" : "=r" (value)); |
852 | else if (armv8pmu_select_counter(idx) == idx) | 852 | else if (armv8pmu_select_counter(idx) == idx) |
853 | asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value)); | 853 | asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value)); |
854 | 854 | ||
855 | return value; | 855 | return value; |
856 | } | 856 | } |
857 | 857 | ||
858 | static inline void armv8pmu_write_counter(int idx, u32 value) | 858 | static inline void armv8pmu_write_counter(int idx, u32 value) |
859 | { | 859 | { |
860 | if (!armv8pmu_counter_valid(idx)) | 860 | if (!armv8pmu_counter_valid(idx)) |
861 | pr_err("CPU%u writing wrong counter %d\n", | 861 | pr_err("CPU%u writing wrong counter %d\n", |
862 | smp_processor_id(), idx); | 862 | smp_processor_id(), idx); |
863 | else if (idx == ARMV8_IDX_CYCLE_COUNTER) | 863 | else if (idx == ARMV8_IDX_CYCLE_COUNTER) |
864 | asm volatile("msr pmccntr_el0, %0" :: "r" (value)); | 864 | asm volatile("msr pmccntr_el0, %0" :: "r" (value)); |
865 | else if (armv8pmu_select_counter(idx) == idx) | 865 | else if (armv8pmu_select_counter(idx) == idx) |
866 | asm volatile("msr pmxevcntr_el0, %0" :: "r" (value)); | 866 | asm volatile("msr pmxevcntr_el0, %0" :: "r" (value)); |
867 | } | 867 | } |
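
pmxevcntr_el0 is a window onto whichever event counter pmselr_el0 currently selects, which is why the read/write helpers above always go through armv8pmu_select_counter() first. A minimal userspace model of that select-then-access indirection, with plain variables standing in for the banked registers (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t evcntr[32];   /* stands in for the banked PMEVCNTR<n> registers */
    static uint32_t pmselr;       /* stands in for pmselr_el0 */

    static void select_counter(uint32_t n) { pmselr = n; }
    static uint32_t read_selected(void)    { return evcntr[pmselr]; } /* pmxevcntr_el0 read */
    static void write_selected(uint32_t v) { evcntr[pmselr] = v; }    /* pmxevcntr_el0 write */

    int main(void)
    {
        select_counter(2);
        write_selected(100);
        printf("counter 2 = %u\n", (unsigned)read_selected()); /* 100 */
        return 0;
    }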
868 | 868 | ||
869 | static inline void armv8pmu_write_evtype(int idx, u32 val) | 869 | static inline void armv8pmu_write_evtype(int idx, u32 val) |
870 | { | 870 | { |
871 | if (armv8pmu_select_counter(idx) == idx) { | 871 | if (armv8pmu_select_counter(idx) == idx) { |
872 | val &= ARMV8_EVTYPE_MASK; | 872 | val &= ARMV8_EVTYPE_MASK; |
873 | asm volatile("msr pmxevtyper_el0, %0" :: "r" (val)); | 873 | asm volatile("msr pmxevtyper_el0, %0" :: "r" (val)); |
874 | } | 874 | } |
875 | } | 875 | } |
876 | 876 | ||
877 | static inline int armv8pmu_enable_counter(int idx) | 877 | static inline int armv8pmu_enable_counter(int idx) |
878 | { | 878 | { |
879 | u32 counter; | 879 | u32 counter; |
880 | 880 | ||
881 | if (!armv8pmu_counter_valid(idx)) { | 881 | if (!armv8pmu_counter_valid(idx)) { |
882 | pr_err("CPU%u enabling wrong PMNC counter %d\n", | 882 | pr_err("CPU%u enabling wrong PMNC counter %d\n", |
883 | smp_processor_id(), idx); | 883 | smp_processor_id(), idx); |
884 | return -EINVAL; | 884 | return -EINVAL; |
885 | } | 885 | } |
886 | 886 | ||
887 | counter = ARMV8_IDX_TO_COUNTER(idx); | 887 | counter = ARMV8_IDX_TO_COUNTER(idx); |
888 | asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter))); | 888 | asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter))); |
889 | return idx; | 889 | return idx; |
890 | } | 890 | } |
891 | 891 | ||
892 | static inline int armv8pmu_disable_counter(int idx) | 892 | static inline int armv8pmu_disable_counter(int idx) |
893 | { | 893 | { |
894 | u32 counter; | 894 | u32 counter; |
895 | 895 | ||
896 | if (!armv8pmu_counter_valid(idx)) { | 896 | if (!armv8pmu_counter_valid(idx)) { |
897 | pr_err("CPU%u disabling wrong PMNC counter %d\n", | 897 | pr_err("CPU%u disabling wrong PMNC counter %d\n", |
898 | smp_processor_id(), idx); | 898 | smp_processor_id(), idx); |
899 | return -EINVAL; | 899 | return -EINVAL; |
900 | } | 900 | } |
901 | 901 | ||
902 | counter = ARMV8_IDX_TO_COUNTER(idx); | 902 | counter = ARMV8_IDX_TO_COUNTER(idx); |
903 | asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter))); | 903 | asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter))); |
904 | return idx; | 904 | return idx; |
905 | } | 905 | } |
906 | 906 | ||
907 | static inline int armv8pmu_enable_intens(int idx) | 907 | static inline int armv8pmu_enable_intens(int idx) |
908 | { | 908 | { |
909 | u32 counter; | 909 | u32 counter; |
910 | 910 | ||
911 | if (!armv8pmu_counter_valid(idx)) { | 911 | if (!armv8pmu_counter_valid(idx)) { |
912 | pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", | 912 | pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", |
913 | smp_processor_id(), idx); | 913 | smp_processor_id(), idx); |
914 | return -EINVAL; | 914 | return -EINVAL; |
915 | } | 915 | } |
916 | 916 | ||
917 | counter = ARMV8_IDX_TO_COUNTER(idx); | 917 | counter = ARMV8_IDX_TO_COUNTER(idx); |
918 | asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter))); | 918 | asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter))); |
919 | return idx; | 919 | return idx; |
920 | } | 920 | } |
921 | 921 | ||
922 | static inline int armv8pmu_disable_intens(int idx) | 922 | static inline int armv8pmu_disable_intens(int idx) |
923 | { | 923 | { |
924 | u32 counter; | 924 | u32 counter; |
925 | 925 | ||
926 | if (!armv8pmu_counter_valid(idx)) { | 926 | if (!armv8pmu_counter_valid(idx)) { |
927 | pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", | 927 | pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", |
928 | smp_processor_id(), idx); | 928 | smp_processor_id(), idx); |
929 | return -EINVAL; | 929 | return -EINVAL; |
930 | } | 930 | } |
931 | 931 | ||
932 | counter = ARMV8_IDX_TO_COUNTER(idx); | 932 | counter = ARMV8_IDX_TO_COUNTER(idx); |
933 | asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter))); | 933 | asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter))); |
934 | isb(); | 934 | isb(); |
935 | /* Clear the overflow flag in case an interrupt is pending. */ | 935 | /* Clear the overflow flag in case an interrupt is pending. */ |
936 | asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter))); | 936 | asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter))); |
937 | isb(); | 937 | isb(); |
938 | return idx; | 938 | return idx; |
939 | } | 939 | } |
940 | 940 | ||
941 | static inline u32 armv8pmu_getreset_flags(void) | 941 | static inline u32 armv8pmu_getreset_flags(void) |
942 | { | 942 | { |
943 | u32 value; | 943 | u32 value; |
944 | 944 | ||
945 | /* Read */ | 945 | /* Read */ |
946 | asm volatile("mrs %0, pmovsclr_el0" : "=r" (value)); | 946 | asm volatile("mrs %0, pmovsclr_el0" : "=r" (value)); |
947 | 947 | ||
948 | /* Write to clear flags */ | 948 | /* Write to clear flags */ |
949 | value &= ARMV8_OVSR_MASK; | 949 | value &= ARMV8_OVSR_MASK; |
950 | asm volatile("msr pmovsclr_el0, %0" :: "r" (value)); | 950 | asm volatile("msr pmovsclr_el0, %0" :: "r" (value)); |
951 | 951 | ||
952 | return value; | 952 | return value; |
953 | } | 953 | } |
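
The overflow status register is write-one-to-clear: reading PMOVSCLR reports which counters overflowed, and writing the same value back acknowledges exactly those flags. A small model of the read-then-clear step, assuming a 32-bit status word:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pmovs = 0x5;  /* pretend counters 0 and 2 overflowed */

    static uint32_t getreset_flags(void)
    {
        uint32_t value = pmovs;   /* read the pending overflow flags */
        pmovs &= ~value;          /* writing them back clears them (W1C) */
        return value;
    }

    int main(void)
    {
        uint32_t flags = getreset_flags();
        printf("flags %#x, now pending %#x\n", (unsigned)flags, (unsigned)pmovs); /* 0x5, 0 */
        return 0;
    }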
954 | 954 | ||
955 | static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx) | 955 | static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx) |
956 | { | 956 | { |
957 | unsigned long flags; | 957 | unsigned long flags; |
958 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 958 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
959 | 959 | ||
960 | /* | 960 | /* |
961 | * Enable counter and interrupt, and set the counter to count | 961 | * Enable counter and interrupt, and set the counter to count |
962 | * the event that we're interested in. | 962 | * the event that we're interested in. |
963 | */ | 963 | */ |
964 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | 964 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
965 | 965 | ||
966 | /* | 966 | /* |
967 | * Disable counter | 967 | * Disable counter |
968 | */ | 968 | */ |
969 | armv8pmu_disable_counter(idx); | 969 | armv8pmu_disable_counter(idx); |
970 | 970 | ||
971 | /* | 971 | /* |
972 | * Set event (if destined for PMNx counters). | 972 | * Set event (if destined for PMNx counters). |
973 | */ | 973 | */ |
974 | armv8pmu_write_evtype(idx, hwc->config_base); | 974 | armv8pmu_write_evtype(idx, hwc->config_base); |
975 | 975 | ||
976 | /* | 976 | /* |
977 | * Enable interrupt for this counter | 977 | * Enable interrupt for this counter |
978 | */ | 978 | */ |
979 | armv8pmu_enable_intens(idx); | 979 | armv8pmu_enable_intens(idx); |
980 | 980 | ||
981 | /* | 981 | /* |
982 | * Enable counter | 982 | * Enable counter |
983 | */ | 983 | */ |
984 | armv8pmu_enable_counter(idx); | 984 | armv8pmu_enable_counter(idx); |
985 | 985 | ||
986 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 986 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
987 | } | 987 | } |
988 | 988 | ||
989 | static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx) | 989 | static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx) |
990 | { | 990 | { |
991 | unsigned long flags; | 991 | unsigned long flags; |
992 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 992 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
993 | 993 | ||
994 | /* | 994 | /* |
995 | * Disable counter and interrupt | 995 | * Disable counter and interrupt |
996 | */ | 996 | */ |
997 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | 997 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
998 | 998 | ||
999 | /* | 999 | /* |
1000 | * Disable counter | 1000 | * Disable counter |
1001 | */ | 1001 | */ |
1002 | armv8pmu_disable_counter(idx); | 1002 | armv8pmu_disable_counter(idx); |
1003 | 1003 | ||
1004 | /* | 1004 | /* |
1005 | * Disable interrupt for this counter | 1005 | * Disable interrupt for this counter |
1006 | */ | 1006 | */ |
1007 | armv8pmu_disable_intens(idx); | 1007 | armv8pmu_disable_intens(idx); |
1008 | 1008 | ||
1009 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 1009 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
1010 | } | 1010 | } |
1011 | 1011 | ||
1012 | static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) | 1012 | static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) |
1013 | { | 1013 | { |
1014 | u32 pmovsr; | 1014 | u32 pmovsr; |
1015 | struct perf_sample_data data; | 1015 | struct perf_sample_data data; |
1016 | struct pmu_hw_events *cpuc; | 1016 | struct pmu_hw_events *cpuc; |
1017 | struct pt_regs *regs; | 1017 | struct pt_regs *regs; |
1018 | int idx; | 1018 | int idx; |
1019 | 1019 | ||
1020 | /* | 1020 | /* |
1021 | * Get and reset the IRQ flags | 1021 | * Get and reset the IRQ flags |
1022 | */ | 1022 | */ |
1023 | pmovsr = armv8pmu_getreset_flags(); | 1023 | pmovsr = armv8pmu_getreset_flags(); |
1024 | 1024 | ||
1025 | /* | 1025 | /* |
1026 | * Did an overflow occur? | 1026 | * Did an overflow occur? |
1027 | */ | 1027 | */ |
1028 | if (!armv8pmu_has_overflowed(pmovsr)) | 1028 | if (!armv8pmu_has_overflowed(pmovsr)) |
1029 | return IRQ_NONE; | 1029 | return IRQ_NONE; |
1030 | 1030 | ||
1031 | /* | 1031 | /* |
1032 | * Handle the counter(s) overflow(s) | 1032 | * Handle the counter(s) overflow(s) |
1033 | */ | 1033 | */ |
1034 | regs = get_irq_regs(); | 1034 | regs = get_irq_regs(); |
1035 | 1035 | ||
1036 | cpuc = &__get_cpu_var(cpu_hw_events); | 1036 | cpuc = &__get_cpu_var(cpu_hw_events); |
1037 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { | 1037 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
1038 | struct perf_event *event = cpuc->events[idx]; | 1038 | struct perf_event *event = cpuc->events[idx]; |
1039 | struct hw_perf_event *hwc; | 1039 | struct hw_perf_event *hwc; |
1040 | 1040 | ||
1041 | /* Ignore if we don't have an event. */ | 1041 | /* Ignore if we don't have an event. */ |
1042 | if (!event) | 1042 | if (!event) |
1043 | continue; | 1043 | continue; |
1044 | 1044 | ||
1045 | /* | 1045 | /* |
1046 | * We have a single interrupt for all counters. Check that | 1046 | * We have a single interrupt for all counters. Check that |
1047 | * each counter has overflowed before we process it. | 1047 | * each counter has overflowed before we process it. |
1048 | */ | 1048 | */ |
1049 | if (!armv8pmu_counter_has_overflowed(pmovsr, idx)) | 1049 | if (!armv8pmu_counter_has_overflowed(pmovsr, idx)) |
1050 | continue; | 1050 | continue; |
1051 | 1051 | ||
1052 | hwc = &event->hw; | 1052 | hwc = &event->hw; |
1053 | armpmu_event_update(event, hwc, idx); | 1053 | armpmu_event_update(event, hwc, idx); |
1054 | perf_sample_data_init(&data, 0, hwc->last_period); | 1054 | perf_sample_data_init(&data, 0, hwc->last_period); |
1055 | if (!armpmu_event_set_period(event, hwc, idx)) | 1055 | if (!armpmu_event_set_period(event, hwc, idx)) |
1056 | continue; | 1056 | continue; |
1057 | 1057 | ||
1058 | if (perf_event_overflow(event, &data, regs)) | 1058 | if (perf_event_overflow(event, &data, regs)) |
1059 | cpu_pmu->disable(hwc, idx); | 1059 | cpu_pmu->disable(hwc, idx); |
1060 | } | 1060 | } |
1061 | 1061 | ||
1062 | /* | 1062 | /* |
1063 | * Handle the pending perf events. | 1063 | * Handle the pending perf events. |
1064 | * | 1064 | * |
1065 | * Note: this call *must* be run with interrupts disabled. For | 1065 | * Note: this call *must* be run with interrupts disabled. For |
1066 | * platforms that can have the PMU interrupts raised as an NMI, this | 1066 | * platforms that can have the PMU interrupts raised as an NMI, this |
1067 | * will not work. | 1067 | * will not work. |
1068 | */ | 1068 | */ |
1069 | irq_work_run(); | 1069 | irq_work_run(); |
1070 | 1070 | ||
1071 | return IRQ_HANDLED; | 1071 | return IRQ_HANDLED; |
1072 | } | 1072 | } |
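
All counters share one interrupt line, so the handler snapshots the overflow flags once and then walks every index, servicing only the counters whose bit is set. A stripped-down model of that dispatch loop (the real code first remaps perf indices to hardware bit positions via ARMV8_IDX_TO_COUNTER; that detail is omitted here):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t pmovsr = (1u << 1) | (1u << 3); /* counters 1 and 3 overflowed */
        int num_events = 5;

        for (int idx = 0; idx < num_events; idx++) {
            if (!(pmovsr & (1u << idx)))
                continue;               /* this counter did not overflow */
            printf("servicing counter %d\n", idx);
        }
        return 0;
    }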
1073 | 1073 | ||
1074 | static void armv8pmu_start(void) | 1074 | static void armv8pmu_start(void) |
1075 | { | 1075 | { |
1076 | unsigned long flags; | 1076 | unsigned long flags; |
1077 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 1077 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
1078 | 1078 | ||
1079 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | 1079 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
1080 | /* Enable all counters */ | 1080 | /* Enable all counters */ |
1081 | armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E); | 1081 | armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E); |
1082 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 1082 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
1083 | } | 1083 | } |
1084 | 1084 | ||
1085 | static void armv8pmu_stop(void) | 1085 | static void armv8pmu_stop(void) |
1086 | { | 1086 | { |
1087 | unsigned long flags; | 1087 | unsigned long flags; |
1088 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 1088 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
1089 | 1089 | ||
1090 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | 1090 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
1091 | /* Disable all counters */ | 1091 | /* Disable all counters */ |
1092 | armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E); | 1092 | armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E); |
1093 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 1093 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
1094 | } | 1094 | } |
1095 | 1095 | ||
1096 | static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, | 1096 | static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, |
1097 | struct hw_perf_event *event) | 1097 | struct hw_perf_event *event) |
1098 | { | 1098 | { |
1099 | int idx; | 1099 | int idx; |
1100 | unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT; | 1100 | unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT; |
1101 | 1101 | ||
1102 | /* Always place a cycle-counting event into the cycle counter. */ | 1102 | /* Always place a cycle-counting event into the cycle counter. */ |
1103 | if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) { | 1103 | if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) { |
1104 | if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask)) | 1104 | if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask)) |
1105 | return -EAGAIN; | 1105 | return -EAGAIN; |
1106 | 1106 | ||
1107 | return ARMV8_IDX_CYCLE_COUNTER; | 1107 | return ARMV8_IDX_CYCLE_COUNTER; |
1108 | } | 1108 | } |
1109 | 1109 | ||
1110 | /* | 1110 | /* |
1111 | * For anything other than a cycle counter, try to use | 1111 | * For anything other than a cycle counter, try to use |
1112 | * the event counters | 1112 | * the event counters |
1113 | */ | 1113 | */ |
1114 | for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) { | 1114 | for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) { |
1115 | if (!test_and_set_bit(idx, cpuc->used_mask)) | 1115 | if (!test_and_set_bit(idx, cpuc->used_mask)) |
1116 | return idx; | 1116 | return idx; |
1117 | } | 1117 | } |
1118 | 1118 | ||
1119 | /* The counters are all in use. */ | 1119 | /* The counters are all in use. */ |
1120 | return -EAGAIN; | 1120 | return -EAGAIN; |
1121 | } | 1121 | } |
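
Counter allocation is a first-fit scan over a bitmap: the cycle counter has a dedicated slot, and every other event takes the first free event counter. A userspace sketch with a plain word standing in for the kernel's used_mask and an inline stand-in for test_and_set_bit() (illustrative only):

    #include <stdio.h>

    #define IDX_CYCLE    0
    #define IDX_COUNTER0 1

    static unsigned long used_mask;

    /* crude stand-in for the kernel's atomic test_and_set_bit() */
    static int test_and_set(int bit)
    {
        int was = (used_mask >> bit) & 1;
        used_mask |= 1ul << bit;
        return was;
    }

    static int get_event_idx(int num_events, int is_cycles)
    {
        if (is_cycles)
            return test_and_set(IDX_CYCLE) ? -1 : IDX_CYCLE;
        for (int idx = IDX_COUNTER0; idx < num_events; idx++)
            if (!test_and_set(idx))
                return idx;
        return -1; /* all counters in use (-EAGAIN in the kernel) */
    }

    int main(void)
    {
        printf("%d\n", get_event_idx(3, 1)); /* 0: dedicated cycle counter slot */
        printf("%d\n", get_event_idx(3, 0)); /* 1: first free event counter */
        printf("%d\n", get_event_idx(3, 0)); /* 2 */
        return 0;
    }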
1122 | 1122 | ||
1123 | /* | 1123 | /* |
1124 | * Add an event filter to a given event. This will only work for PMUv3 PMUs. | 1124 | * Add an event filter to a given event. This will only work for PMUv3 PMUs. |
1125 | */ | 1125 | */ |
1126 | static int armv8pmu_set_event_filter(struct hw_perf_event *event, | 1126 | static int armv8pmu_set_event_filter(struct hw_perf_event *event, |
1127 | struct perf_event_attr *attr) | 1127 | struct perf_event_attr *attr) |
1128 | { | 1128 | { |
1129 | unsigned long config_base = 0; | 1129 | unsigned long config_base = 0; |
1130 | 1130 | ||
1131 | if (attr->exclude_idle) | 1131 | if (attr->exclude_idle) |
1132 | return -EPERM; | 1132 | return -EPERM; |
1133 | if (attr->exclude_user) | 1133 | if (attr->exclude_user) |
1134 | config_base |= ARMV8_EXCLUDE_EL0; | 1134 | config_base |= ARMV8_EXCLUDE_EL0; |
1135 | if (attr->exclude_kernel) | 1135 | if (attr->exclude_kernel) |
1136 | config_base |= ARMV8_EXCLUDE_EL1; | 1136 | config_base |= ARMV8_EXCLUDE_EL1; |
1137 | if (!attr->exclude_hv) | 1137 | if (!attr->exclude_hv) |
1138 | config_base |= ARMV8_INCLUDE_EL2; | 1138 | config_base |= ARMV8_INCLUDE_EL2; |
1139 | 1139 | ||
1140 | /* | 1140 | /* |
1141 | * Install the filter into config_base as this is used to | 1141 | * Install the filter into config_base as this is used to |
1142 | * construct the event type. | 1142 | * construct the event type. |
1143 | */ | 1143 | */ |
1144 | event->config_base = config_base; | 1144 | event->config_base = config_base; |
1145 | 1145 | ||
1146 | return 0; | 1146 | return 0; |
1147 | } | 1147 | } |
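
As a worked example of the encoding: a request that counts kernel only (exclude_user set, exclude_kernel and exclude_hv clear) yields ARMV8_EXCLUDE_EL0 | ARMV8_INCLUDE_EL2:

    #include <stdio.h>

    #define ARMV8_EXCLUDE_EL1 (1u << 31)
    #define ARMV8_EXCLUDE_EL0 (1u << 30)
    #define ARMV8_INCLUDE_EL2 (1u << 27)

    int main(void)
    {
        /* kernel-only counting: exclude_user set, exclude_kernel/exclude_hv clear */
        unsigned config_base = ARMV8_EXCLUDE_EL0 | ARMV8_INCLUDE_EL2;
        printf("config_base = %#x\n", config_base); /* 0x48000000 */
        return 0;
    }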
1148 | 1148 | ||
1149 | static void armv8pmu_reset(void *info) | 1149 | static void armv8pmu_reset(void *info) |
1150 | { | 1150 | { |
1151 | u32 idx, nb_cnt = cpu_pmu->num_events; | 1151 | u32 idx, nb_cnt = cpu_pmu->num_events; |
1152 | 1152 | ||
1153 | /* The counter and interrupt enable registers are unknown at reset. */ | 1153 | /* The counter and interrupt enable registers are unknown at reset. */ |
1154 | for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) | 1154 | for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) |
1155 | armv8pmu_disable_event(NULL, idx); | 1155 | armv8pmu_disable_event(NULL, idx); |
1156 | 1156 | ||
1157 | /* Initialize & Reset PMNC: C and P bits. */ | 1157 | /* Initialize & Reset PMNC: C and P bits. */ |
1158 | armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C); | 1158 | armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C); |
1159 | 1159 | ||
1160 | /* Disable access from userspace. */ | 1160 | /* Disable access from userspace. */ |
1161 | asm volatile("msr pmuserenr_el0, %0" :: "r" (0)); | 1161 | asm volatile("msr pmuserenr_el0, %0" :: "r" (0)); |
1162 | } | 1162 | } |
1163 | 1163 | ||
1164 | static int armv8_pmuv3_map_event(struct perf_event *event) | 1164 | static int armv8_pmuv3_map_event(struct perf_event *event) |
1165 | { | 1165 | { |
1166 | return map_cpu_event(event, &armv8_pmuv3_perf_map, | 1166 | return map_cpu_event(event, &armv8_pmuv3_perf_map, |
1167 | &armv8_pmuv3_perf_cache_map, 0xFF); | 1167 | &armv8_pmuv3_perf_cache_map, 0xFF); |
1168 | } | 1168 | } |
1169 | 1169 | ||
1170 | static struct arm_pmu armv8pmu = { | 1170 | static struct arm_pmu armv8pmu = { |
1171 | .handle_irq = armv8pmu_handle_irq, | 1171 | .handle_irq = armv8pmu_handle_irq, |
1172 | .enable = armv8pmu_enable_event, | 1172 | .enable = armv8pmu_enable_event, |
1173 | .disable = armv8pmu_disable_event, | 1173 | .disable = armv8pmu_disable_event, |
1174 | .read_counter = armv8pmu_read_counter, | 1174 | .read_counter = armv8pmu_read_counter, |
1175 | .write_counter = armv8pmu_write_counter, | 1175 | .write_counter = armv8pmu_write_counter, |
1176 | .get_event_idx = armv8pmu_get_event_idx, | 1176 | .get_event_idx = armv8pmu_get_event_idx, |
1177 | .start = armv8pmu_start, | 1177 | .start = armv8pmu_start, |
1178 | .stop = armv8pmu_stop, | 1178 | .stop = armv8pmu_stop, |
1179 | .reset = armv8pmu_reset, | 1179 | .reset = armv8pmu_reset, |
1180 | .max_period = (1LLU << 32) - 1, | 1180 | .max_period = (1LLU << 32) - 1, |
1181 | }; | 1181 | }; |
1182 | 1182 | ||
1183 | static u32 __init armv8pmu_read_num_pmnc_events(void) | 1183 | static u32 __init armv8pmu_read_num_pmnc_events(void) |
1184 | { | 1184 | { |
1185 | u32 nb_cnt; | 1185 | u32 nb_cnt; |
1186 | 1186 | ||
1187 | /* Read the number of CNTx counters supported from PMNC */ | 1187 | /* Read the number of CNTx counters supported from PMNC */ |
1188 | nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK; | 1188 | nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK; |
1189 | 1189 | ||
1190 | /* Add the CPU cycles counter and return */ | 1190 | /* Add the CPU cycles counter and return */ |
1191 | return nb_cnt + 1; | 1191 | return nb_cnt + 1; |
1192 | } | 1192 | } |
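
Worked decode, with a made-up read-back value: if PMCR_EL0 returned 0x3041, bits 15:11 give N = 6 event counters, so the function reports 7 once the cycle counter is added:

    #include <stdio.h>

    #define ARMV8_PMCR_N_SHIFT 11
    #define ARMV8_PMCR_N_MASK  0x1f

    int main(void)
    {
        unsigned pmcr = 0x3041; /* hypothetical PMCR_EL0 read-back */
        unsigned nb_cnt = (pmcr >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
        printf("%u event counters + 1 cycle counter = %u\n", nb_cnt, nb_cnt + 1); /* 6 + 1 = 7 */
        return 0;
    }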
1193 | 1193 | ||
1194 | static struct arm_pmu *__init armv8_pmuv3_pmu_init(void) | 1194 | static struct arm_pmu *__init armv8_pmuv3_pmu_init(void) |
1195 | { | 1195 | { |
1196 | armv8pmu.name = "arm/armv8-pmuv3"; | 1196 | armv8pmu.name = "arm/armv8-pmuv3"; |
1197 | armv8pmu.map_event = armv8_pmuv3_map_event; | 1197 | armv8pmu.map_event = armv8_pmuv3_map_event; |
1198 | armv8pmu.num_events = armv8pmu_read_num_pmnc_events(); | 1198 | armv8pmu.num_events = armv8pmu_read_num_pmnc_events(); |
1199 | armv8pmu.set_event_filter = armv8pmu_set_event_filter; | 1199 | armv8pmu.set_event_filter = armv8pmu_set_event_filter; |
1200 | return &armv8pmu; | 1200 | return &armv8pmu; |
1201 | } | 1201 | } |
1202 | 1202 | ||
1203 | /* | 1203 | /* |
1204 | * Ensure the PMU has sane values out of reset. | 1204 | * Ensure the PMU has sane values out of reset. |
1205 | * This requires SMP to be available, so exists as a separate initcall. | 1205 | * This requires SMP to be available, so exists as a separate initcall. |
1206 | */ | 1206 | */ |
1207 | static int __init | 1207 | static int __init |
1208 | cpu_pmu_reset(void) | 1208 | cpu_pmu_reset(void) |
1209 | { | 1209 | { |
1210 | if (cpu_pmu && cpu_pmu->reset) | 1210 | if (cpu_pmu && cpu_pmu->reset) |
1211 | return on_each_cpu(cpu_pmu->reset, NULL, 1); | 1211 | return on_each_cpu(cpu_pmu->reset, NULL, 1); |
1212 | return 0; | 1212 | return 0; |
1213 | } | 1213 | } |
1214 | arch_initcall(cpu_pmu_reset); | 1214 | arch_initcall(cpu_pmu_reset); |
1215 | 1215 | ||
1216 | /* | 1216 | /* |
1217 | * PMU platform driver and devicetree bindings. | 1217 | * PMU platform driver and devicetree bindings. |
1218 | */ | 1218 | */ |
1219 | static struct of_device_id armpmu_of_device_ids[] = { | 1219 | static struct of_device_id armpmu_of_device_ids[] = { |
1220 | {.compatible = "arm,armv8-pmuv3"}, | 1220 | {.compatible = "arm,armv8-pmuv3"}, |
1221 | {}, | 1221 | {}, |
1222 | }; | 1222 | }; |
1223 | 1223 | ||
1224 | static int armpmu_device_probe(struct platform_device *pdev) | 1224 | static int armpmu_device_probe(struct platform_device *pdev) |
1225 | { | 1225 | { |
1226 | if (!cpu_pmu) | 1226 | if (!cpu_pmu) |
1227 | return -ENODEV; | 1227 | return -ENODEV; |
1228 | 1228 | ||
1229 | cpu_pmu->plat_device = pdev; | 1229 | cpu_pmu->plat_device = pdev; |
1230 | return 0; | 1230 | return 0; |
1231 | } | 1231 | } |
1232 | 1232 | ||
1233 | static struct platform_driver armpmu_driver = { | 1233 | static struct platform_driver armpmu_driver = { |
1234 | .driver = { | 1234 | .driver = { |
1235 | .name = "arm-pmu", | 1235 | .name = "arm-pmu", |
1236 | .of_match_table = armpmu_of_device_ids, | 1236 | .of_match_table = armpmu_of_device_ids, |
1237 | }, | 1237 | }, |
1238 | .probe = armpmu_device_probe, | 1238 | .probe = armpmu_device_probe, |
1239 | }; | 1239 | }; |
1240 | 1240 | ||
1241 | static int __init register_pmu_driver(void) | 1241 | static int __init register_pmu_driver(void) |
1242 | { | 1242 | { |
1243 | return platform_driver_register(&armpmu_driver); | 1243 | return platform_driver_register(&armpmu_driver); |
1244 | } | 1244 | } |
1245 | device_initcall(register_pmu_driver); | 1245 | device_initcall(register_pmu_driver); |
1246 | 1246 | ||
1247 | static struct pmu_hw_events *armpmu_get_cpu_events(void) | 1247 | static struct pmu_hw_events *armpmu_get_cpu_events(void) |
1248 | { | 1248 | { |
1249 | return &__get_cpu_var(cpu_hw_events); | 1249 | return &__get_cpu_var(cpu_hw_events); |
1250 | } | 1250 | } |
1251 | 1251 | ||
1252 | static void __init cpu_pmu_init(struct arm_pmu *armpmu) | 1252 | static void __init cpu_pmu_init(struct arm_pmu *armpmu) |
1253 | { | 1253 | { |
1254 | int cpu; | 1254 | int cpu; |
1255 | for_each_possible_cpu(cpu) { | 1255 | for_each_possible_cpu(cpu) { |
1256 | struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); | 1256 | struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); |
1257 | events->events = per_cpu(hw_events, cpu); | 1257 | events->events = per_cpu(hw_events, cpu); |
1258 | events->used_mask = per_cpu(used_mask, cpu); | 1258 | events->used_mask = per_cpu(used_mask, cpu); |
1259 | raw_spin_lock_init(&events->pmu_lock); | 1259 | raw_spin_lock_init(&events->pmu_lock); |
1260 | } | 1260 | } |
1261 | armpmu->get_hw_events = armpmu_get_cpu_events; | 1261 | armpmu->get_hw_events = armpmu_get_cpu_events; |
1262 | } | 1262 | } |
1263 | 1263 | ||
1264 | static int __init init_hw_perf_events(void) | 1264 | static int __init init_hw_perf_events(void) |
1265 | { | 1265 | { |
1266 | u64 dfr = read_cpuid(ID_AA64DFR0_EL1); | 1266 | u64 dfr = read_cpuid(ID_AA64DFR0_EL1); |
1267 | 1267 | ||
1268 | switch ((dfr >> 8) & 0xf) { | 1268 | switch ((dfr >> 8) & 0xf) { |
1269 | case 0x1: /* PMUv3 */ | 1269 | case 0x1: /* PMUv3 */ |
1270 | cpu_pmu = armv8_pmuv3_pmu_init(); | 1270 | cpu_pmu = armv8_pmuv3_pmu_init(); |
1271 | break; | 1271 | break; |
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | if (cpu_pmu) { | 1274 | if (cpu_pmu) { |
1275 | pr_info("enabled with %s PMU driver, %d counters available\n", | 1275 | pr_info("enabled with %s PMU driver, %d counters available\n", |
1276 | cpu_pmu->name, cpu_pmu->num_events); | 1276 | cpu_pmu->name, cpu_pmu->num_events); |
1277 | cpu_pmu_init(cpu_pmu); | 1277 | cpu_pmu_init(cpu_pmu); |
1278 | armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW); | 1278 | armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW); |
1279 | } else { | 1279 | } else { |
1280 | pr_info("no hardware support available\n"); | 1280 | pr_info("no hardware support available\n"); |
1281 | } | 1281 | } |
1282 | 1282 | ||
1283 | return 0; | 1283 | return 0; |
1284 | } | 1284 | } |
1285 | early_initcall(init_hw_perf_events); | 1285 | early_initcall(init_hw_perf_events); |
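
The switch above keys off the PMUVer field of ID_AA64DFR0_EL1, bits 11:8; value 0x1 indicates a PMUv3-compatible unit. A sketch of the field extraction with a hypothetical register value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t dfr = 0x10305106; /* hypothetical ID_AA64DFR0_EL1 value */
        unsigned pmuver = (dfr >> 8) & 0xf;

        if (pmuver == 0x1)
            printf("PMUv3 present\n");
        else
            printf("no supported PMU (PMUVer=%#x)\n", pmuver);
        return 0;
    }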
1286 | 1286 | ||
1287 | /* | 1287 | /* |
1288 | * Callchain handling code. | 1288 | * Callchain handling code. |
1289 | */ | 1289 | */ |
1290 | struct frame_tail { | 1290 | struct frame_tail { |
1291 | struct frame_tail __user *fp; | 1291 | struct frame_tail __user *fp; |
1292 | unsigned long lr; | 1292 | unsigned long lr; |
1293 | } __attribute__((packed)); | 1293 | } __attribute__((packed)); |
1294 | 1294 | ||
1295 | /* | 1295 | /* |
1296 | * Get the return address for a single stackframe and return a pointer to the | 1296 | * Get the return address for a single stackframe and return a pointer to the |
1297 | * next frame tail. | 1297 | * next frame tail. |
1298 | */ | 1298 | */ |
1299 | static struct frame_tail __user * | 1299 | static struct frame_tail __user * |
1300 | user_backtrace(struct frame_tail __user *tail, | 1300 | user_backtrace(struct frame_tail __user *tail, |
1301 | struct perf_callchain_entry *entry) | 1301 | struct perf_callchain_entry *entry) |
1302 | { | 1302 | { |
1303 | struct frame_tail buftail; | 1303 | struct frame_tail buftail; |
1304 | unsigned long err; | 1304 | unsigned long err; |
1305 | 1305 | ||
1306 | /* Also check accessibility of one struct frame_tail beyond */ | 1306 | /* Also check accessibility of one struct frame_tail beyond */ |
1307 | if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) | 1307 | if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) |
1308 | return NULL; | 1308 | return NULL; |
1309 | 1309 | ||
1310 | pagefault_disable(); | 1310 | pagefault_disable(); |
1311 | err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); | 1311 | err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); |
1312 | pagefault_enable(); | 1312 | pagefault_enable(); |
1313 | 1313 | ||
1314 | if (err) | 1314 | if (err) |
1315 | return NULL; | 1315 | return NULL; |
1316 | 1316 | ||
1317 | perf_callchain_store(entry, buftail.lr); | 1317 | perf_callchain_store(entry, buftail.lr); |
1318 | 1318 | ||
1319 | /* | 1319 | /* |
1320 | * Frame pointers should strictly progress back up the stack | 1320 | * Frame pointers should strictly progress back up the stack |
1321 | * (towards higher addresses). | 1321 | * (towards higher addresses). |
1322 | */ | 1322 | */ |
1323 | if (tail >= buftail.fp) | 1323 | if (tail >= buftail.fp) |
1324 | return NULL; | 1324 | return NULL; |
1325 | 1325 | ||
1326 | return buftail.fp; | 1326 | return buftail.fp; |
1327 | } | 1327 | } |
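
Each AArch64 frame record is an {fp, lr} pair stored at the frame pointer, and valid frames must move towards higher addresses. A self-contained model of the walk over a fabricated chain (no user-memory access; the return addresses are made up and purely illustrative):

    #include <stdio.h>

    struct frame_tail {
        struct frame_tail *fp;
        unsigned long lr;
    };

    int main(void)
    {
        /* three chained frame records; array layout guarantees ascending addresses */
        struct frame_tail frames[3] = {
            { &frames[1], 0x400330 },
            { &frames[2], 0x4004a8 },
            { NULL,       0x4005c0 },
        };

        for (struct frame_tail *tail = &frames[0]; tail; tail = tail->fp) {
            printf("return address %#lx\n", tail->lr);
            if (!tail->fp || tail >= tail->fp)
                break; /* frames must strictly progress up the stack */
        }
        return 0;
    }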
1328 | 1328 | ||
1329 | void perf_callchain_user(struct perf_callchain_entry *entry, | 1329 | void perf_callchain_user(struct perf_callchain_entry *entry, |
1330 | struct pt_regs *regs) | 1330 | struct pt_regs *regs) |
1331 | { | 1331 | { |
1332 | struct frame_tail __user *tail; | 1332 | struct frame_tail __user *tail; |
1333 | 1333 | ||
1334 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
1335 | /* We don't support guest OS callchains yet */ ||
1336 | return; | ||
1337 | } | ||
1338 | |||
1334 | tail = (struct frame_tail __user *)regs->regs[29]; | 1339 | tail = (struct frame_tail __user *)regs->regs[29]; |
1335 | 1340 | ||
1336 | while (entry->nr < PERF_MAX_STACK_DEPTH && | 1341 | while (entry->nr < PERF_MAX_STACK_DEPTH && |
1337 | tail && !((unsigned long)tail & 0xf)) | 1342 | tail && !((unsigned long)tail & 0xf)) |
1338 | tail = user_backtrace(tail, entry); | 1343 | tail = user_backtrace(tail, entry); |
1339 | } | 1344 | } |
1340 | 1345 | ||
1341 | /* | 1346 | /* |
1342 | * Gets called by walk_stackframe() for every stackframe. This will be called | 1347 | * Gets called by walk_stackframe() for every stackframe. This will be called |
1343 | * whilst unwinding the stackframe and is like a subroutine return so we use | 1348 | * whilst unwinding the stackframe and is like a subroutine return so we use |
1344 | * the PC. | 1349 | * the PC. |
1345 | */ | 1350 | */ |
1346 | static int callchain_trace(struct stackframe *frame, void *data) | 1351 | static int callchain_trace(struct stackframe *frame, void *data) |
1347 | { | 1352 | { |
1348 | struct perf_callchain_entry *entry = data; | 1353 | struct perf_callchain_entry *entry = data; |
1349 | perf_callchain_store(entry, frame->pc); | 1354 | perf_callchain_store(entry, frame->pc); |
1350 | return 0; | 1355 | return 0; |
1351 | } | 1356 | } |
1352 | 1357 | ||
1353 | void perf_callchain_kernel(struct perf_callchain_entry *entry, | 1358 | void perf_callchain_kernel(struct perf_callchain_entry *entry, |
1354 | struct pt_regs *regs) | 1359 | struct pt_regs *regs) |
1355 | { | 1360 | { |
1356 | struct stackframe frame; | 1361 | struct stackframe frame; |
1357 | 1362 | ||
1363 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
1364 | /* We don't support guest OS callchains yet */ ||
1365 | return; | ||
1366 | } | ||
1367 | |||
1358 | frame.fp = regs->regs[29]; | 1368 | frame.fp = regs->regs[29]; |
1359 | frame.sp = regs->sp; | 1369 | frame.sp = regs->sp; |
1360 | frame.pc = regs->pc; | 1370 | frame.pc = regs->pc; |
1361 | walk_stackframe(&frame, callchain_trace, entry); | 1371 | walk_stackframe(&frame, callchain_trace, entry); |
1372 | } | ||
1373 | |||
1374 | unsigned long perf_instruction_pointer(struct pt_regs *regs) | ||
1375 | { | ||
1376 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) | ||
1377 | return perf_guest_cbs->get_guest_ip(); | ||
1378 | |||
1379 | return instruction_pointer(regs); | ||
1380 | } | ||
1381 | |||
1382 | unsigned long perf_misc_flags(struct pt_regs *regs) | ||
1383 | { | ||
1384 | int misc = 0; | ||
1385 | |||
1386 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
1387 | if (perf_guest_cbs->is_user_mode()) | ||
1388 | misc |= PERF_RECORD_MISC_GUEST_USER; | ||
1389 | else | ||
1390 | misc |= PERF_RECORD_MISC_GUEST_KERNEL; | ||
1391 | } else { | ||
1392 | if (user_mode(regs)) | ||
1393 | misc |= PERF_RECORD_MISC_USER; | ||
1394 | else | ||
1395 | misc |= PERF_RECORD_MISC_KERNEL; | ||
1396 | } | ||
1397 | |||
1398 | return misc; | ||
1362 | } | 1399 | } |
1363 | 1400 |
arch/arm64/kernel/process.c
1 | /* | 1 | /* |
2 | * Based on arch/arm/kernel/process.c | 2 | * Based on arch/arm/kernel/process.c |
3 | * | 3 | * |
4 | * Original Copyright (C) 1995 Linus Torvalds | 4 | * Original Copyright (C) 1995 Linus Torvalds |
5 | * Copyright (C) 1996-2000 Russell King - Converted to ARM. | 5 | * Copyright (C) 1996-2000 Russell King - Converted to ARM. |
6 | * Copyright (C) 2012 ARM Ltd. | 6 | * Copyright (C) 2012 ARM Ltd. |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | * | 11 | * |
12 | * This program is distributed in the hope that it will be useful, | 12 | * This program is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 | * GNU General Public License for more details. | 15 | * GNU General Public License for more details. |
16 | * | 16 | * |
17 | * You should have received a copy of the GNU General Public License | 17 | * You should have received a copy of the GNU General Public License |
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <stdarg.h> | 21 | #include <stdarg.h> |
22 | 22 | ||
23 | #include <linux/export.h> | 23 | #include <linux/export.h> |
24 | #include <linux/sched.h> | 24 | #include <linux/sched.h> |
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/mm.h> | 26 | #include <linux/mm.h> |
27 | #include <linux/stddef.h> | 27 | #include <linux/stddef.h> |
28 | #include <linux/unistd.h> | 28 | #include <linux/unistd.h> |
29 | #include <linux/user.h> | 29 | #include <linux/user.h> |
30 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
31 | #include <linux/reboot.h> | 31 | #include <linux/reboot.h> |
32 | #include <linux/interrupt.h> | 32 | #include <linux/interrupt.h> |
33 | #include <linux/kallsyms.h> | 33 | #include <linux/kallsyms.h> |
34 | #include <linux/init.h> | 34 | #include <linux/init.h> |
35 | #include <linux/cpu.h> | 35 | #include <linux/cpu.h> |
36 | #include <linux/elfcore.h> | 36 | #include <linux/elfcore.h> |
37 | #include <linux/pm.h> | 37 | #include <linux/pm.h> |
38 | #include <linux/tick.h> | 38 | #include <linux/tick.h> |
39 | #include <linux/utsname.h> | 39 | #include <linux/utsname.h> |
40 | #include <linux/uaccess.h> | 40 | #include <linux/uaccess.h> |
41 | #include <linux/random.h> | 41 | #include <linux/random.h> |
42 | #include <linux/hw_breakpoint.h> | 42 | #include <linux/hw_breakpoint.h> |
43 | #include <linux/personality.h> | 43 | #include <linux/personality.h> |
44 | #include <linux/notifier.h> | 44 | #include <linux/notifier.h> |
45 | 45 | ||
46 | #include <asm/compat.h> | 46 | #include <asm/compat.h> |
47 | #include <asm/cacheflush.h> | 47 | #include <asm/cacheflush.h> |
48 | #include <asm/fpsimd.h> | ||
49 | #include <asm/mmu_context.h> | ||
48 | #include <asm/processor.h> | 50 | #include <asm/processor.h> |
49 | #include <asm/stacktrace.h> | 51 | #include <asm/stacktrace.h> |
50 | #include <asm/fpsimd.h> | ||
51 | 52 | ||
52 | static void setup_restart(void) | 53 | static void setup_restart(void) |
53 | { | 54 | { |
54 | /* | 55 | /* |
55 | * Tell the mm system that we are going to reboot - | 56 | * Tell the mm system that we are going to reboot - |
56 | * we may need it to insert some 1:1 mappings so that | 57 | * we may need it to insert some 1:1 mappings so that |
57 | * soft boot works. | 58 | * soft boot works. |
58 | */ | 59 | */ |
59 | setup_mm_for_reboot(); | 60 | setup_mm_for_reboot(); |
60 | 61 | ||
61 | /* Clean and invalidate caches */ | 62 | /* Clean and invalidate caches */ |
62 | flush_cache_all(); | 63 | flush_cache_all(); |
63 | 64 | ||
64 | /* Turn D-cache off */ | 65 | /* Turn D-cache off */ |
65 | cpu_cache_off(); | 66 | cpu_cache_off(); |
66 | 67 | ||
67 | /* Push out any further dirty data, and ensure cache is empty */ | 68 | /* Push out any further dirty data, and ensure cache is empty */ |
68 | flush_cache_all(); | 69 | flush_cache_all(); |
69 | } | 70 | } |
70 | 71 | ||
71 | void soft_restart(unsigned long addr) | 72 | void soft_restart(unsigned long addr) |
72 | { | 73 | { |
73 | setup_restart(); | 74 | setup_restart(); |
74 | cpu_reset(addr); | 75 | cpu_reset(addr); |
75 | } | 76 | } |
76 | 77 | ||
77 | /* | 78 | /* |
78 | * Function pointers to optional machine specific functions | 79 | * Function pointers to optional machine specific functions |
79 | */ | 80 | */ |
80 | void (*pm_power_off)(void); | 81 | void (*pm_power_off)(void); |
81 | EXPORT_SYMBOL_GPL(pm_power_off); | 82 | EXPORT_SYMBOL_GPL(pm_power_off); |
82 | 83 | ||
83 | void (*pm_restart)(const char *cmd); | 84 | void (*pm_restart)(const char *cmd); |
84 | EXPORT_SYMBOL_GPL(pm_restart); | 85 | EXPORT_SYMBOL_GPL(pm_restart); |
85 | 86 | ||
86 | 87 | ||
87 | /* | 88 | /* |
88 | * This is our default idle handler. | 89 | * This is our default idle handler. |
89 | */ | 90 | */ |
90 | static void default_idle(void) | 91 | static void default_idle(void) |
91 | { | 92 | { |
92 | /* | 93 | /* |
93 | * This should do all the clock switching and wait for interrupt | 94 | * This should do all the clock switching and wait for interrupt |
94 | * tricks | 95 | * tricks |
95 | */ | 96 | */ |
96 | cpu_do_idle(); | 97 | cpu_do_idle(); |
97 | local_irq_enable(); | 98 | local_irq_enable(); |
98 | } | 99 | } |
99 | 100 | ||
100 | /* | 101 | /* |
101 | * The idle thread. | 102 | * The idle thread. |
102 | * We always respect 'hlt_counter' to prevent low power idle. | 103 | * We always respect 'hlt_counter' to prevent low power idle. |
103 | */ | 104 | */ |
104 | void cpu_idle(void) | 105 | void cpu_idle(void) |
105 | { | 106 | { |
106 | local_fiq_enable(); | 107 | local_fiq_enable(); |
107 | 108 | ||
108 | /* endless idle loop with no priority at all */ | 109 | /* endless idle loop with no priority at all */ |
109 | while (1) { | 110 | while (1) { |
110 | tick_nohz_idle_enter(); | 111 | tick_nohz_idle_enter(); |
111 | rcu_idle_enter(); | 112 | rcu_idle_enter(); |
112 | while (!need_resched()) { | 113 | while (!need_resched()) { |
113 | /* | 114 | /* |
114 | * We need to disable interrupts here to ensure | 115 | * We need to disable interrupts here to ensure |
115 | * we don't miss a wakeup call. | 116 | * we don't miss a wakeup call. |
116 | */ | 117 | */ |
117 | local_irq_disable(); | 118 | local_irq_disable(); |
118 | if (!need_resched()) { | 119 | if (!need_resched()) { |
119 | stop_critical_timings(); | 120 | stop_critical_timings(); |
120 | default_idle(); | 121 | default_idle(); |
121 | start_critical_timings(); | 122 | start_critical_timings(); |
122 | /* | 123 | /* |
123 | * default_idle functions should always return | 124 | * default_idle functions should always return |
124 | * with IRQs enabled. | 125 | * with IRQs enabled. |
125 | */ | 126 | */ |
126 | WARN_ON(irqs_disabled()); | 127 | WARN_ON(irqs_disabled()); |
127 | } else { | 128 | } else { |
128 | local_irq_enable(); | 129 | local_irq_enable(); |
129 | } | 130 | } |
130 | } | 131 | } |
131 | rcu_idle_exit(); | 132 | rcu_idle_exit(); |
132 | tick_nohz_idle_exit(); | 133 | tick_nohz_idle_exit(); |
133 | schedule_preempt_disabled(); | 134 | schedule_preempt_disabled(); |
134 | } | 135 | } |
135 | } | 136 | } |
136 | 137 | ||
137 | void machine_shutdown(void) | 138 | void machine_shutdown(void) |
138 | { | 139 | { |
139 | #ifdef CONFIG_SMP | 140 | #ifdef CONFIG_SMP |
140 | smp_send_stop(); | 141 | smp_send_stop(); |
141 | #endif | 142 | #endif |
142 | } | 143 | } |
143 | 144 | ||
144 | void machine_halt(void) | 145 | void machine_halt(void) |
145 | { | 146 | { |
146 | machine_shutdown(); | 147 | machine_shutdown(); |
147 | while (1); | 148 | while (1); |
148 | } | 149 | } |
149 | 150 | ||
150 | void machine_power_off(void) | 151 | void machine_power_off(void) |
151 | { | 152 | { |
152 | machine_shutdown(); | 153 | machine_shutdown(); |
153 | if (pm_power_off) | 154 | if (pm_power_off) |
154 | pm_power_off(); | 155 | pm_power_off(); |
155 | } | 156 | } |
156 | 157 | ||
157 | void machine_restart(char *cmd) | 158 | void machine_restart(char *cmd) |
158 | { | 159 | { |
159 | machine_shutdown(); | 160 | machine_shutdown(); |
160 | 161 | ||
161 | /* Disable interrupts first */ | 162 | /* Disable interrupts first */ |
162 | local_irq_disable(); | 163 | local_irq_disable(); |
163 | local_fiq_disable(); | 164 | local_fiq_disable(); |
164 | 165 | ||
165 | /* Now call the architecture specific reboot code. */ | 166 | /* Now call the architecture specific reboot code. */ |
166 | if (pm_restart) | 167 | if (pm_restart) |
167 | pm_restart(cmd); | 168 | pm_restart(cmd); |
168 | 169 | ||
169 | /* | 170 | /* |
170 | * Whoops - the architecture was unable to reboot. | 171 | * Whoops - the architecture was unable to reboot. |
171 | */ | 172 | */ |
172 | printk("Reboot failed -- System halted\n"); | 173 | printk("Reboot failed -- System halted\n"); |
173 | while (1); | 174 | while (1); |
174 | } | 175 | } |
175 | 176 | ||
176 | void __show_regs(struct pt_regs *regs) | 177 | void __show_regs(struct pt_regs *regs) |
177 | { | 178 | { |
178 | int i; | 179 | int i; |
179 | 180 | ||
180 | printk("CPU: %d %s (%s %.*s)\n", | 181 | printk("CPU: %d %s (%s %.*s)\n", |
181 | raw_smp_processor_id(), print_tainted(), | 182 | raw_smp_processor_id(), print_tainted(), |
182 | init_utsname()->release, | 183 | init_utsname()->release, |
183 | (int)strcspn(init_utsname()->version, " "), | 184 | (int)strcspn(init_utsname()->version, " "), |
184 | init_utsname()->version); | 185 | init_utsname()->version); |
185 | print_symbol("PC is at %s\n", instruction_pointer(regs)); | 186 | print_symbol("PC is at %s\n", instruction_pointer(regs)); |
186 | print_symbol("LR is at %s\n", regs->regs[30]); | 187 | print_symbol("LR is at %s\n", regs->regs[30]); |
187 | printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n", | 188 | printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n", |
188 | regs->pc, regs->regs[30], regs->pstate); | 189 | regs->pc, regs->regs[30], regs->pstate); |
189 | printk("sp : %016llx\n", regs->sp); | 190 | printk("sp : %016llx\n", regs->sp); |
190 | for (i = 29; i >= 0; i--) { | 191 | for (i = 29; i >= 0; i--) { |
191 | printk("x%-2d: %016llx ", i, regs->regs[i]); | 192 | printk("x%-2d: %016llx ", i, regs->regs[i]); |
192 | if (i % 2 == 0) | 193 | if (i % 2 == 0) |
193 | printk("\n"); | 194 | printk("\n"); |
194 | } | 195 | } |
195 | printk("\n"); | 196 | printk("\n"); |
196 | } | 197 | } |
197 | 198 | ||
198 | void show_regs(struct pt_regs * regs) | 199 | void show_regs(struct pt_regs * regs) |
199 | { | 200 | { |
200 | printk("\n"); | 201 | printk("\n"); |
201 | printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm); | 202 | printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm); |
202 | __show_regs(regs); | 203 | __show_regs(regs); |
203 | } | 204 | } |
204 | 205 | ||
205 | /* | 206 | /* |
206 | * Free current thread data structures etc.. | 207 | * Free current thread data structures etc.. |
207 | */ | 208 | */ |
208 | void exit_thread(void) | 209 | void exit_thread(void) |
209 | { | 210 | { |
210 | } | 211 | } |
211 | 212 | ||
212 | void flush_thread(void) | 213 | void flush_thread(void) |
213 | { | 214 | { |
214 | fpsimd_flush_thread(); | 215 | fpsimd_flush_thread(); |
215 | flush_ptrace_hw_breakpoint(current); | 216 | flush_ptrace_hw_breakpoint(current); |
216 | } | 217 | } |
217 | 218 | ||
218 | void release_thread(struct task_struct *dead_task) | 219 | void release_thread(struct task_struct *dead_task) |
219 | { | 220 | { |
220 | } | 221 | } |
221 | 222 | ||
222 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) | 223 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
223 | { | 224 | { |
224 | fpsimd_save_state(¤t->thread.fpsimd_state); | 225 | fpsimd_save_state(¤t->thread.fpsimd_state); |
225 | *dst = *src; | 226 | *dst = *src; |
226 | return 0; | 227 | return 0; |
227 | } | 228 | } |
228 | 229 | ||
229 | asmlinkage void ret_from_fork(void) asm("ret_from_fork"); | 230 | asmlinkage void ret_from_fork(void) asm("ret_from_fork"); |
230 | 231 | ||
231 | int copy_thread(unsigned long clone_flags, unsigned long stack_start, | 232 | int copy_thread(unsigned long clone_flags, unsigned long stack_start, |
232 | unsigned long stk_sz, struct task_struct *p) | 233 | unsigned long stk_sz, struct task_struct *p) |
233 | { | 234 | { |
234 | struct pt_regs *childregs = task_pt_regs(p); | 235 | struct pt_regs *childregs = task_pt_regs(p); |
235 | unsigned long tls = p->thread.tp_value; | 236 | unsigned long tls = p->thread.tp_value; |
236 | 237 | ||
237 | memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); | 238 | memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); |
238 | 239 | ||
239 | if (likely(!(p->flags & PF_KTHREAD))) { | 240 | if (likely(!(p->flags & PF_KTHREAD))) { |
240 | *childregs = *current_pt_regs(); | 241 | *childregs = *current_pt_regs(); |
241 | childregs->regs[0] = 0; | 242 | childregs->regs[0] = 0; |
242 | if (is_compat_thread(task_thread_info(p))) { | 243 | if (is_compat_thread(task_thread_info(p))) { |
243 | if (stack_start) | 244 | if (stack_start) |
244 | childregs->compat_sp = stack_start; | 245 | childregs->compat_sp = stack_start; |
245 | } else { | 246 | } else { |
246 | /* | 247 | /* |
247 | * Read the current TLS pointer from tpidr_el0 as it may be | 248 | * Read the current TLS pointer from tpidr_el0 as it may be |
248 | * out-of-sync with the saved value. | 249 | * out-of-sync with the saved value. |
249 | */ | 250 | */ |
250 | asm("mrs %0, tpidr_el0" : "=r" (tls)); | 251 | asm("mrs %0, tpidr_el0" : "=r" (tls)); |
251 | if (stack_start) { | 252 | if (stack_start) { |
252 | /* 16-byte aligned stack mandatory on AArch64 */ | 253 | /* 16-byte aligned stack mandatory on AArch64 */ |
253 | if (stack_start & 15) | 254 | if (stack_start & 15) |
254 | return -EINVAL; | 255 | return -EINVAL; |
255 | childregs->sp = stack_start; | 256 | childregs->sp = stack_start; |
256 | } | 257 | } |
257 | } | 258 | } |
258 | /* | 259 | /* |
259 | * If a TLS pointer was passed to clone (4th argument), use it | 260 | * If a TLS pointer was passed to clone (4th argument), use it |
260 | * for the new thread. | 261 | * for the new thread. |
261 | */ | 262 | */ |
262 | if (clone_flags & CLONE_SETTLS) | 263 | if (clone_flags & CLONE_SETTLS) |
263 | tls = childregs->regs[3]; | 264 | tls = childregs->regs[3]; |
264 | } else { | 265 | } else { |
265 | memset(childregs, 0, sizeof(struct pt_regs)); | 266 | memset(childregs, 0, sizeof(struct pt_regs)); |
266 | childregs->pstate = PSR_MODE_EL1h; | 267 | childregs->pstate = PSR_MODE_EL1h; |
267 | p->thread.cpu_context.x19 = stack_start; | 268 | p->thread.cpu_context.x19 = stack_start; |
268 | p->thread.cpu_context.x20 = stk_sz; | 269 | p->thread.cpu_context.x20 = stk_sz; |
269 | } | 270 | } |
270 | p->thread.cpu_context.pc = (unsigned long)ret_from_fork; | 271 | p->thread.cpu_context.pc = (unsigned long)ret_from_fork; |
271 | p->thread.cpu_context.sp = (unsigned long)childregs; | 272 | p->thread.cpu_context.sp = (unsigned long)childregs; |
272 | p->thread.tp_value = tls; | 273 | p->thread.tp_value = tls; |
273 | 274 | ||
274 | ptrace_hw_copy_thread(p); | 275 | ptrace_hw_copy_thread(p); |
275 | 276 | ||
276 | return 0; | 277 | return 0; |
277 | } | 278 | } |
278 | 279 | ||
279 | static void tls_thread_switch(struct task_struct *next) | 280 | static void tls_thread_switch(struct task_struct *next) |
280 | { | 281 | { |
281 | unsigned long tpidr, tpidrro; | 282 | unsigned long tpidr, tpidrro; |
282 | 283 | ||
283 | if (!is_compat_task()) { | 284 | if (!is_compat_task()) { |
284 | asm("mrs %0, tpidr_el0" : "=r" (tpidr)); | 285 | asm("mrs %0, tpidr_el0" : "=r" (tpidr)); |
285 | current->thread.tp_value = tpidr; | 286 | current->thread.tp_value = tpidr; |
286 | } | 287 | } |
287 | 288 | ||
288 | if (is_compat_thread(task_thread_info(next))) { | 289 | if (is_compat_thread(task_thread_info(next))) { |
289 | tpidr = 0; | 290 | tpidr = 0; |
290 | tpidrro = next->thread.tp_value; | 291 | tpidrro = next->thread.tp_value; |
291 | } else { | 292 | } else { |
292 | tpidr = next->thread.tp_value; | 293 | tpidr = next->thread.tp_value; |
293 | tpidrro = 0; | 294 | tpidrro = 0; |
294 | } | 295 | } |
295 | 296 | ||
296 | asm( | 297 | asm( |
297 | " msr tpidr_el0, %0\n" | 298 | " msr tpidr_el0, %0\n" |
298 | " msr tpidrro_el0, %1" | 299 | " msr tpidrro_el0, %1" |
299 | : : "r" (tpidr), "r" (tpidrro)); | 300 | : : "r" (tpidr), "r" (tpidrro)); |
300 | } | 301 | } |
301 | 302 | ||
302 | /* | 303 | /* |
303 | * Thread switching. | 304 | * Thread switching. |
304 | */ | 305 | */ |
305 | struct task_struct *__switch_to(struct task_struct *prev, | 306 | struct task_struct *__switch_to(struct task_struct *prev, |
306 | struct task_struct *next) | 307 | struct task_struct *next) |
307 | { | 308 | { |
308 | struct task_struct *last; | 309 | struct task_struct *last; |
309 | 310 | ||
310 | fpsimd_thread_switch(next); | 311 | fpsimd_thread_switch(next); |
311 | tls_thread_switch(next); | 312 | tls_thread_switch(next); |
312 | hw_breakpoint_thread_switch(next); | 313 | hw_breakpoint_thread_switch(next); |
313 | 314 | ||
314 | /* the actual thread switch */ | 315 | /* the actual thread switch */ |
315 | last = cpu_switch_to(prev, next); | 316 | last = cpu_switch_to(prev, next); |
316 | 317 | ||
318 | contextidr_thread_switch(next); | ||
317 | return last; | 319 | return last; |
318 | } | 320 | } |
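The contextidr_thread_switch() call added after cpu_switch_to() comes from the CONTEXTIDR patch in this series (the hook lives in asm/mmu_context.h, also touched by this merge). When CONFIG_PID_IN_CONTEXTIDR is enabled it publishes the incoming task's PID to external trace tools, and compiles to a no-op otherwise; a sketch of the enabled variant:

	/* Sketch, assuming CONFIG_PID_IN_CONTEXTIDR=y: write the incoming
	 * task's PID to CONTEXTIDR_EL1 for trace hardware; the isb makes
	 * the new value visible before the task runs. */
	static inline void contextidr_thread_switch(struct task_struct *next)
	{
		asm(
		"	msr	contextidr_el1, %0\n"
		"	isb"
		:
		: "r" (task_pid_nr(next)));
	}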
319 | 321 | ||
320 | unsigned long get_wchan(struct task_struct *p) | 322 | unsigned long get_wchan(struct task_struct *p) |
321 | { | 323 | { |
322 | struct stackframe frame; | 324 | struct stackframe frame; |
323 | int count = 0; | 325 | int count = 0; |
324 | if (!p || p == current || p->state == TASK_RUNNING) | 326 | if (!p || p == current || p->state == TASK_RUNNING) |
325 | return 0; | 327 | return 0; |
326 | 328 | ||
327 | frame.fp = thread_saved_fp(p); | 329 | frame.fp = thread_saved_fp(p); |
328 | frame.sp = thread_saved_sp(p); | 330 | frame.sp = thread_saved_sp(p); |
329 | frame.pc = thread_saved_pc(p); | 331 | frame.pc = thread_saved_pc(p); |
330 | do { | 332 | do { |
331 | int ret = unwind_frame(&frame); | 333 | int ret = unwind_frame(&frame); |
332 | if (ret < 0) | 334 | if (ret < 0) |
333 | return 0; | 335 | return 0; |
334 | if (!in_sched_functions(frame.pc)) | 336 | if (!in_sched_functions(frame.pc)) |
335 | return frame.pc; | 337 | return frame.pc; |
336 | } while (count++ < 16); | 338 | } while (count++ < 16); |
337 | return 0; | 339 | return 0; |
338 | } | 340 | } |
339 | 341 | ||
340 | unsigned long arch_align_stack(unsigned long sp) | 342 | unsigned long arch_align_stack(unsigned long sp) |
341 | { | 343 | { |
342 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) | 344 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) |
343 | sp -= get_random_int() & ~PAGE_MASK; | 345 | sp -= get_random_int() & ~PAGE_MASK; |
344 | return sp & ~0xf; | 346 | return sp & ~0xf; |
345 | } | 347 | } |
346 | 348 | ||
347 | static unsigned long randomize_base(unsigned long base) | 349 | static unsigned long randomize_base(unsigned long base) |
348 | { | 350 | { |
349 | unsigned long range_end = base + (STACK_RND_MASK << PAGE_SHIFT) + 1; | 351 | unsigned long range_end = base + (STACK_RND_MASK << PAGE_SHIFT) + 1; |
350 | return randomize_range(base, range_end, 0) ? : base; | 352 | return randomize_range(base, range_end, 0) ? : base; |
351 | } | 353 | } |
352 | 354 | ||
353 | unsigned long arch_randomize_brk(struct mm_struct *mm) | 355 | unsigned long arch_randomize_brk(struct mm_struct *mm) |
354 | { | 356 | { |
355 | return randomize_base(mm->brk); | 357 | return randomize_base(mm->brk); |
356 | } | 358 | } |
357 | 359 | ||
358 | unsigned long randomize_et_dyn(unsigned long base) | 360 | unsigned long randomize_et_dyn(unsigned long base) |
359 | { | 361 | { |
360 | return randomize_base(base); | 362 | return randomize_base(base); |
361 | } | 363 | } |
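Worked example for the two helpers above (illustrative values, 4KB pages so ~PAGE_MASK == 0xfff): arch_align_stack() called with sp = 0x7fffffff000 and get_random_int() = 0x1234 subtracts 0x1234 & 0xfff = 0x234 to get 0x7ffffffedcc, then clears the bottom four bits for AArch64's mandatory 16-byte stack alignment, returning 0x7ffffffedc0. randomize_base() picks a uniform address in [base, base + (STACK_RND_MASK << PAGE_SHIFT)], falling back to base itself if randomize_range() returns 0.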
arch/arm64/kernel/psci.c
File was created | 1 | /* | |
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * Copyright (C) 2013 ARM Limited | ||
12 | * | ||
13 | * Author: Will Deacon <will.deacon@arm.com> | ||
14 | */ | ||
15 | |||
16 | #define pr_fmt(fmt) "psci: " fmt | ||
17 | |||
18 | #include <linux/init.h> | ||
19 | #include <linux/of.h> | ||
20 | |||
21 | #include <asm/compiler.h> | ||
22 | #include <asm/errno.h> | ||
23 | #include <asm/psci.h> | ||
24 | |||
25 | struct psci_operations psci_ops; | ||
26 | |||
27 | static int (*invoke_psci_fn)(u64, u64, u64, u64); | ||
28 | |||
29 | enum psci_function { | ||
30 | PSCI_FN_CPU_SUSPEND, | ||
31 | PSCI_FN_CPU_ON, | ||
32 | PSCI_FN_CPU_OFF, | ||
33 | PSCI_FN_MIGRATE, | ||
34 | PSCI_FN_MAX, | ||
35 | }; | ||
36 | |||
37 | static u32 psci_function_id[PSCI_FN_MAX]; | ||
38 | |||
39 | #define PSCI_RET_SUCCESS 0 | ||
40 | #define PSCI_RET_EOPNOTSUPP -1 | ||
41 | #define PSCI_RET_EINVAL -2 | ||
42 | #define PSCI_RET_EPERM -3 | ||
43 | |||
44 | static int psci_to_linux_errno(int errno) | ||
45 | { | ||
46 | switch (errno) { | ||
47 | case PSCI_RET_SUCCESS: | ||
48 | return 0; | ||
49 | case PSCI_RET_EOPNOTSUPP: | ||
50 | return -EOPNOTSUPP; | ||
51 | case PSCI_RET_EINVAL: | ||
52 | return -EINVAL; | ||
53 | case PSCI_RET_EPERM: | ||
54 | return -EPERM; | ||
55 | } | ||
56 | |||
57 | return -EINVAL; | ||
58 | } | ||
59 | |||
60 | #define PSCI_POWER_STATE_ID_MASK 0xffff | ||
61 | #define PSCI_POWER_STATE_ID_SHIFT 0 | ||
62 | #define PSCI_POWER_STATE_TYPE_MASK 0x1 | ||
63 | #define PSCI_POWER_STATE_TYPE_SHIFT 16 | ||
64 | #define PSCI_POWER_STATE_AFFL_MASK 0x3 | ||
65 | #define PSCI_POWER_STATE_AFFL_SHIFT 24 | ||
66 | |||
67 | static u32 psci_power_state_pack(struct psci_power_state state) | ||
68 | { | ||
69 | return ((state.id & PSCI_POWER_STATE_ID_MASK) | ||
70 | << PSCI_POWER_STATE_ID_SHIFT) | | ||
71 | ((state.type & PSCI_POWER_STATE_TYPE_MASK) | ||
72 | << PSCI_POWER_STATE_TYPE_SHIFT) | | ||
73 | ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK) | ||
74 | << PSCI_POWER_STATE_AFFL_SHIFT); | ||
75 | } | ||
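Worked example of the packing (field values illustrative): a psci_power_state of { id = 1, type = 1, affinity_level = 2 } becomes (1 << 0) | (1 << 16) | (2 << 24) = 0x02010001, i.e. the single u32 handed to the firmware by psci_cpu_suspend() and psci_cpu_off() below.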
76 | |||
77 | /* | ||
78 | * The following two functions are invoked via the invoke_psci_fn pointer | ||
79 | * and will not be inlined, allowing us to piggyback on the AAPCS. | ||
80 | */ | ||
81 | static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, | ||
82 | u64 arg2) | ||
83 | { | ||
84 | asm volatile( | ||
85 | __asmeq("%0", "x0") | ||
86 | __asmeq("%1", "x1") | ||
87 | __asmeq("%2", "x2") | ||
88 | __asmeq("%3", "x3") | ||
89 | "hvc #0\n" | ||
90 | : "+r" (function_id) | ||
91 | : "r" (arg0), "r" (arg1), "r" (arg2)); | ||
92 | |||
93 | return function_id; | ||
94 | } | ||
95 | |||
96 | static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, | ||
97 | u64 arg2) | ||
98 | { | ||
99 | asm volatile( | ||
100 | __asmeq("%0", "x0") | ||
101 | __asmeq("%1", "x1") | ||
102 | __asmeq("%2", "x2") | ||
103 | __asmeq("%3", "x3") | ||
104 | "smc #0\n" | ||
105 | : "+r" (function_id) | ||
106 | : "r" (arg0), "r" (arg1), "r" (arg2)); | ||
107 | |||
108 | return function_id; | ||
109 | } | ||
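The __asmeq() markers (from asm/compiler.h, included above) are assemble-time assertions, not executable code; the ARM definition is roughly the sketch below. They abort the build if GCC failed to allocate an operand to the named register, which is what lets these wrappers rely on the AAPCS alone to place function_id and arg0..arg2 in x0-x3 before the hvc/smc traps into the firmware:

	/* Sketch: emit an assembler error (.err) unless operand x was
	 * allocated to register y, i.e. unless the strings match (.ifnc
	 * triggers when they differ). */
	#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"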
110 | |||
111 | static int psci_cpu_suspend(struct psci_power_state state, | ||
112 | unsigned long entry_point) | ||
113 | { | ||
114 | int err; | ||
115 | u32 fn, power_state; | ||
116 | |||
117 | fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; | ||
118 | power_state = psci_power_state_pack(state); | ||
119 | err = invoke_psci_fn(fn, power_state, entry_point, 0); | ||
120 | return psci_to_linux_errno(err); | ||
121 | } | ||
122 | |||
123 | static int psci_cpu_off(struct psci_power_state state) | ||
124 | { | ||
125 | int err; | ||
126 | u32 fn, power_state; | ||
127 | |||
128 | fn = psci_function_id[PSCI_FN_CPU_OFF]; | ||
129 | power_state = psci_power_state_pack(state); | ||
130 | err = invoke_psci_fn(fn, power_state, 0, 0); | ||
131 | return psci_to_linux_errno(err); | ||
132 | } | ||
133 | |||
134 | static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) | ||
135 | { | ||
136 | int err; | ||
137 | u32 fn; | ||
138 | |||
139 | fn = psci_function_id[PSCI_FN_CPU_ON]; | ||
140 | err = invoke_psci_fn(fn, cpuid, entry_point, 0); | ||
141 | return psci_to_linux_errno(err); | ||
142 | } | ||
143 | |||
144 | static int psci_migrate(unsigned long cpuid) | ||
145 | { | ||
146 | int err; | ||
147 | u32 fn; | ||
148 | |||
149 | fn = psci_function_id[PSCI_FN_MIGRATE]; | ||
150 | err = invoke_psci_fn(fn, cpuid, 0, 0); | ||
151 | return psci_to_linux_errno(err); | ||
152 | } | ||
153 | |||
154 | static const struct of_device_id psci_of_match[] __initconst = { | ||
155 | { .compatible = "arm,psci", }, | ||
156 | {}, | ||
157 | }; | ||
158 | |||
159 | int __init psci_init(void) | ||
160 | { | ||
161 | struct device_node *np; | ||
162 | const char *method; | ||
163 | u32 id; | ||
164 | int err = 0; | ||
165 | |||
166 | np = of_find_matching_node(NULL, psci_of_match); | ||
167 | if (!np) | ||
168 | return -ENODEV; | ||
169 | |||
170 | pr_info("probing function IDs from device-tree\n"); | ||
171 | |||
172 | if (of_property_read_string(np, "method", &method)) { | ||
173 | pr_warning("missing \"method\" property\n"); | ||
174 | err = -ENXIO; | ||
175 | goto out_put_node; | ||
176 | } | ||
177 | |||
178 | if (!strcmp("hvc", method)) { | ||
179 | invoke_psci_fn = __invoke_psci_fn_hvc; | ||
180 | } else if (!strcmp("smc", method)) { | ||
181 | invoke_psci_fn = __invoke_psci_fn_smc; | ||
182 | } else { | ||
183 | pr_warning("invalid \"method\" property: %s\n", method); | ||
184 | err = -EINVAL; | ||
185 | goto out_put_node; | ||
186 | } | ||
187 | |||
188 | if (!of_property_read_u32(np, "cpu_suspend", &id)) { | ||
189 | psci_function_id[PSCI_FN_CPU_SUSPEND] = id; | ||
190 | psci_ops.cpu_suspend = psci_cpu_suspend; | ||
191 | } | ||
192 | |||
193 | if (!of_property_read_u32(np, "cpu_off", &id)) { | ||
194 | psci_function_id[PSCI_FN_CPU_OFF] = id; | ||
195 | psci_ops.cpu_off = psci_cpu_off; | ||
196 | } | ||
197 | |||
198 | if (!of_property_read_u32(np, "cpu_on", &id)) { | ||
199 | psci_function_id[PSCI_FN_CPU_ON] = id; | ||
200 | psci_ops.cpu_on = psci_cpu_on; | ||
201 | } | ||
202 | |||
203 | if (!of_property_read_u32(np, "migrate", &id)) { | ||
204 | psci_function_id[PSCI_FN_MIGRATE] = id; | ||
205 | psci_ops.migrate = psci_migrate; | ||
206 | } | ||
207 | |||
208 | out_put_node: | ||
209 | of_node_put(np); | ||
210 | return err; | ||
211 | } | ||
212 |
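For reference, a device-tree node accepted by this probe looks roughly like the following (DT source; "method" must be "smc" or "hvc", and the function ID values here are purely illustrative; real ones are assigned by the platform firmware):

	psci {
		compatible = "arm,psci";
		method = "smc";
		cpu_suspend = <0x95c10000>;
		cpu_off = <0x95c10001>;
		cpu_on = <0x95c10002>;
		migrate = <0x95c10003>;
	};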
arch/arm64/kernel/setup.c
1 | /* | 1 | /* |
2 | * Based on arch/arm/kernel/setup.c | 2 | * Based on arch/arm/kernel/setup.c |
3 | * | 3 | * |
4 | * Copyright (C) 1995-2001 Russell King | 4 | * Copyright (C) 1995-2001 Russell King |
5 | * Copyright (C) 2012 ARM Ltd. | 5 | * Copyright (C) 2012 ARM Ltd. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/export.h> | 20 | #include <linux/export.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/stddef.h> | 22 | #include <linux/stddef.h> |
23 | #include <linux/ioport.h> | 23 | #include <linux/ioport.h> |
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/utsname.h> | 25 | #include <linux/utsname.h> |
26 | #include <linux/initrd.h> | 26 | #include <linux/initrd.h> |
27 | #include <linux/console.h> | 27 | #include <linux/console.h> |
28 | #include <linux/bootmem.h> | 28 | #include <linux/bootmem.h> |
29 | #include <linux/seq_file.h> | 29 | #include <linux/seq_file.h> |
30 | #include <linux/screen_info.h> | 30 | #include <linux/screen_info.h> |
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | #include <linux/kexec.h> | 32 | #include <linux/kexec.h> |
33 | #include <linux/crash_dump.h> | 33 | #include <linux/crash_dump.h> |
34 | #include <linux/root_dev.h> | 34 | #include <linux/root_dev.h> |
35 | #include <linux/cpu.h> | 35 | #include <linux/cpu.h> |
36 | #include <linux/interrupt.h> | 36 | #include <linux/interrupt.h> |
37 | #include <linux/smp.h> | 37 | #include <linux/smp.h> |
38 | #include <linux/fs.h> | 38 | #include <linux/fs.h> |
39 | #include <linux/proc_fs.h> | 39 | #include <linux/proc_fs.h> |
40 | #include <linux/memblock.h> | 40 | #include <linux/memblock.h> |
41 | #include <linux/of_fdt.h> | 41 | #include <linux/of_fdt.h> |
42 | #include <linux/of_platform.h> | ||
42 | 43 | ||
43 | #include <asm/cputype.h> | 44 | #include <asm/cputype.h> |
44 | #include <asm/elf.h> | 45 | #include <asm/elf.h> |
45 | #include <asm/cputable.h> | 46 | #include <asm/cputable.h> |
46 | #include <asm/sections.h> | 47 | #include <asm/sections.h> |
47 | #include <asm/setup.h> | 48 | #include <asm/setup.h> |
48 | #include <asm/cacheflush.h> | 49 | #include <asm/cacheflush.h> |
49 | #include <asm/tlbflush.h> | 50 | #include <asm/tlbflush.h> |
50 | #include <asm/traps.h> | 51 | #include <asm/traps.h> |
51 | #include <asm/memblock.h> | 52 | #include <asm/memblock.h> |
53 | #include <asm/psci.h> | ||
52 | 54 | ||
53 | unsigned int processor_id; | 55 | unsigned int processor_id; |
54 | EXPORT_SYMBOL(processor_id); | 56 | EXPORT_SYMBOL(processor_id); |
55 | 57 | ||
56 | unsigned int elf_hwcap __read_mostly; | 58 | unsigned int elf_hwcap __read_mostly; |
57 | EXPORT_SYMBOL_GPL(elf_hwcap); | 59 | EXPORT_SYMBOL_GPL(elf_hwcap); |
58 | 60 | ||
59 | static const char *cpu_name; | 61 | static const char *cpu_name; |
60 | static const char *machine_name; | 62 | static const char *machine_name; |
61 | phys_addr_t __fdt_pointer __initdata; | 63 | phys_addr_t __fdt_pointer __initdata; |
62 | 64 | ||
63 | /* | 65 | /* |
64 | * Standard memory resources | 66 | * Standard memory resources |
65 | */ | 67 | */ |
66 | static struct resource mem_res[] = { | 68 | static struct resource mem_res[] = { |
67 | { | 69 | { |
68 | .name = "Kernel code", | 70 | .name = "Kernel code", |
69 | .start = 0, | 71 | .start = 0, |
70 | .end = 0, | 72 | .end = 0, |
71 | .flags = IORESOURCE_MEM | 73 | .flags = IORESOURCE_MEM |
72 | }, | 74 | }, |
73 | { | 75 | { |
74 | .name = "Kernel data", | 76 | .name = "Kernel data", |
75 | .start = 0, | 77 | .start = 0, |
76 | .end = 0, | 78 | .end = 0, |
77 | .flags = IORESOURCE_MEM | 79 | .flags = IORESOURCE_MEM |
78 | } | 80 | } |
79 | }; | 81 | }; |
80 | 82 | ||
81 | #define kernel_code mem_res[0] | 83 | #define kernel_code mem_res[0] |
82 | #define kernel_data mem_res[1] | 84 | #define kernel_data mem_res[1] |
83 | 85 | ||
84 | void __init early_print(const char *str, ...) | 86 | void __init early_print(const char *str, ...) |
85 | { | 87 | { |
86 | char buf[256]; | 88 | char buf[256]; |
87 | va_list ap; | 89 | va_list ap; |
88 | 90 | ||
89 | va_start(ap, str); | 91 | va_start(ap, str); |
90 | vsnprintf(buf, sizeof(buf), str, ap); | 92 | vsnprintf(buf, sizeof(buf), str, ap); |
91 | va_end(ap); | 93 | va_end(ap); |
92 | 94 | ||
93 | printk("%s", buf); | 95 | printk("%s", buf); |
94 | } | 96 | } |
95 | 97 | ||
96 | static void __init setup_processor(void) | 98 | static void __init setup_processor(void) |
97 | { | 99 | { |
98 | struct cpu_info *cpu_info; | 100 | struct cpu_info *cpu_info; |
99 | 101 | ||
100 | /* | 102 | /* |
101 | * locate processor in the list of supported processor | 103 | * locate processor in the list of supported processor |
102 | * types. The linker builds this table for us from the | 104 | * types. The linker builds this table for us from the |
103 | * entries in arch/arm64/kernel/cputable.c | 105 | * entries in arch/arm64/kernel/cputable.c |
104 | */ | 106 | */ |
105 | cpu_info = lookup_processor_type(read_cpuid_id()); | 107 | cpu_info = lookup_processor_type(read_cpuid_id()); |
106 | if (!cpu_info) { | 108 | if (!cpu_info) { |
107 | printk("CPU configuration botched (ID %08x), unable to continue.\n", | 109 | printk("CPU configuration botched (ID %08x), unable to continue.\n", |
108 | read_cpuid_id()); | 110 | read_cpuid_id()); |
109 | while (1); | 111 | while (1); |
110 | } | 112 | } |
111 | 113 | ||
112 | cpu_name = cpu_info->cpu_name; | 114 | cpu_name = cpu_info->cpu_name; |
113 | 115 | ||
114 | printk("CPU: %s [%08x] revision %d\n", | 116 | printk("CPU: %s [%08x] revision %d\n", |
115 | cpu_name, read_cpuid_id(), read_cpuid_id() & 15); | 117 | cpu_name, read_cpuid_id(), read_cpuid_id() & 15); |
116 | 118 | ||
117 | sprintf(init_utsname()->machine, "aarch64"); | 119 | sprintf(init_utsname()->machine, "aarch64"); |
118 | elf_hwcap = 0; | 120 | elf_hwcap = 0; |
119 | } | 121 | } |
120 | 122 | ||
121 | static void __init setup_machine_fdt(phys_addr_t dt_phys) | 123 | static void __init setup_machine_fdt(phys_addr_t dt_phys) |
122 | { | 124 | { |
123 | struct boot_param_header *devtree; | 125 | struct boot_param_header *devtree; |
124 | unsigned long dt_root; | 126 | unsigned long dt_root; |
125 | 127 | ||
126 | /* Check we have a non-NULL DT pointer */ | 128 | /* Check we have a non-NULL DT pointer */ |
127 | if (!dt_phys) { | 129 | if (!dt_phys) { |
128 | early_print("\n" | 130 | early_print("\n" |
129 | "Error: NULL or invalid device tree blob\n" | 131 | "Error: NULL or invalid device tree blob\n" |
130 | "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n" | 132 | "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n" |
131 | "\nPlease check your bootloader.\n"); | 133 | "\nPlease check your bootloader.\n"); |
132 | 134 | ||
133 | while (true) | 135 | while (true) |
134 | cpu_relax(); | 136 | cpu_relax(); |
135 | 137 | ||
136 | } | 138 | } |
137 | 139 | ||
138 | devtree = phys_to_virt(dt_phys); | 140 | devtree = phys_to_virt(dt_phys); |
139 | 141 | ||
140 | /* Check device tree validity */ | 142 | /* Check device tree validity */ |
141 | if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) { | 143 | if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) { |
142 | early_print("\n" | 144 | early_print("\n" |
143 | "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n" | 145 | "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n" |
144 | "Expected 0x%x, found 0x%x\n" | 146 | "Expected 0x%x, found 0x%x\n" |
145 | "\nPlease check your bootloader.\n", | 147 | "\nPlease check your bootloader.\n", |
146 | dt_phys, devtree, OF_DT_HEADER, | 148 | dt_phys, devtree, OF_DT_HEADER, |
147 | be32_to_cpu(devtree->magic)); | 149 | be32_to_cpu(devtree->magic)); |
148 | 150 | ||
149 | while (true) | 151 | while (true) |
150 | cpu_relax(); | 152 | cpu_relax(); |
151 | } | 153 | } |
152 | 154 | ||
153 | initial_boot_params = devtree; | 155 | initial_boot_params = devtree; |
154 | dt_root = of_get_flat_dt_root(); | 156 | dt_root = of_get_flat_dt_root(); |
155 | 157 | ||
156 | machine_name = of_get_flat_dt_prop(dt_root, "model", NULL); | 158 | machine_name = of_get_flat_dt_prop(dt_root, "model", NULL); |
157 | if (!machine_name) | 159 | if (!machine_name) |
158 | machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL); | 160 | machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL); |
159 | if (!machine_name) | 161 | if (!machine_name) |
160 | machine_name = "<unknown>"; | 162 | machine_name = "<unknown>"; |
161 | pr_info("Machine: %s\n", machine_name); | 163 | pr_info("Machine: %s\n", machine_name); |
162 | 164 | ||
163 | /* Retrieve various information from the /chosen node */ | 165 | /* Retrieve various information from the /chosen node */ |
164 | of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line); | 166 | of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line); |
165 | /* Initialize {size,address}-cells info */ | 167 | /* Initialize {size,address}-cells info */ |
166 | of_scan_flat_dt(early_init_dt_scan_root, NULL); | 168 | of_scan_flat_dt(early_init_dt_scan_root, NULL); |
167 | /* Setup memory, calling early_init_dt_add_memory_arch */ | 169 | /* Setup memory, calling early_init_dt_add_memory_arch */ |
168 | of_scan_flat_dt(early_init_dt_scan_memory, NULL); | 170 | of_scan_flat_dt(early_init_dt_scan_memory, NULL); |
169 | } | 171 | } |
170 | 172 | ||
171 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) | 173 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) |
172 | { | 174 | { |
173 | base &= PAGE_MASK; | 175 | base &= PAGE_MASK; |
174 | size &= PAGE_MASK; | 176 | size &= PAGE_MASK; |
175 | if (base + size < PHYS_OFFSET) { | 177 | if (base + size < PHYS_OFFSET) { |
176 | pr_warning("Ignoring memory block 0x%llx - 0x%llx\n", | 178 | pr_warning("Ignoring memory block 0x%llx - 0x%llx\n", |
177 | base, base + size); | 179 | base, base + size); |
178 | return; | 180 | return; |
179 | } | 181 | } |
180 | if (base < PHYS_OFFSET) { | 182 | if (base < PHYS_OFFSET) { |
181 | pr_warning("Ignoring memory range 0x%llx - 0x%llx\n", | 183 | pr_warning("Ignoring memory range 0x%llx - 0x%llx\n", |
182 | base, PHYS_OFFSET); | 184 | base, PHYS_OFFSET); |
183 | size -= PHYS_OFFSET - base; | 185 | size -= PHYS_OFFSET - base; |
184 | base = PHYS_OFFSET; | 186 | base = PHYS_OFFSET; |
185 | } | 187 | } |
186 | memblock_add(base, size); | 188 | memblock_add(base, size); |
187 | } | 189 | } |
188 | 190 | ||
189 | void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) | 191 | void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) |
190 | { | 192 | { |
191 | return __va(memblock_alloc(size, align)); | 193 | return __va(memblock_alloc(size, align)); |
192 | } | 194 | } |
193 | 195 | ||
194 | /* | 196 | /* |
195 | * Limit the memory size that was specified via FDT. | 197 | * Limit the memory size that was specified via FDT. |
196 | */ | 198 | */ |
197 | static int __init early_mem(char *p) | 199 | static int __init early_mem(char *p) |
198 | { | 200 | { |
199 | phys_addr_t limit; | 201 | phys_addr_t limit; |
200 | 202 | ||
201 | if (!p) | 203 | if (!p) |
202 | return 1; | 204 | return 1; |
203 | 205 | ||
204 | limit = memparse(p, &p) & PAGE_MASK; | 206 | limit = memparse(p, &p) & PAGE_MASK; |
205 | pr_notice("Memory limited to %lldMB\n", limit >> 20); | 207 | pr_notice("Memory limited to %lldMB\n", limit >> 20); |
206 | 208 | ||
207 | memblock_enforce_memory_limit(limit); | 209 | memblock_enforce_memory_limit(limit); |
208 | 210 | ||
209 | return 0; | 211 | return 0; |
210 | } | 212 | } |
211 | early_param("mem", early_mem); | 213 | early_param("mem", early_mem); |
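For example, booting with mem=512M on the kernel command line gives limit = 0x20000000, prints "Memory limited to 512MB", and trims memblock so the kernel manages only the first 512MB of the RAM described by the FDT.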
212 | 214 | ||
213 | static void __init request_standard_resources(void) | 215 | static void __init request_standard_resources(void) |
214 | { | 216 | { |
215 | struct memblock_region *region; | 217 | struct memblock_region *region; |
216 | struct resource *res; | 218 | struct resource *res; |
217 | 219 | ||
218 | kernel_code.start = virt_to_phys(_text); | 220 | kernel_code.start = virt_to_phys(_text); |
219 | kernel_code.end = virt_to_phys(_etext - 1); | 221 | kernel_code.end = virt_to_phys(_etext - 1); |
220 | kernel_data.start = virt_to_phys(_sdata); | 222 | kernel_data.start = virt_to_phys(_sdata); |
221 | kernel_data.end = virt_to_phys(_end - 1); | 223 | kernel_data.end = virt_to_phys(_end - 1); |
222 | 224 | ||
223 | for_each_memblock(memory, region) { | 225 | for_each_memblock(memory, region) { |
224 | res = alloc_bootmem_low(sizeof(*res)); | 226 | res = alloc_bootmem_low(sizeof(*res)); |
225 | res->name = "System RAM"; | 227 | res->name = "System RAM"; |
226 | res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); | 228 | res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); |
227 | res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; | 229 | res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; |
228 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 230 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
229 | 231 | ||
230 | request_resource(&iomem_resource, res); | 232 | request_resource(&iomem_resource, res); |
231 | 233 | ||
232 | if (kernel_code.start >= res->start && | 234 | if (kernel_code.start >= res->start && |
233 | kernel_code.end <= res->end) | 235 | kernel_code.end <= res->end) |
234 | request_resource(res, &kernel_code); | 236 | request_resource(res, &kernel_code); |
235 | if (kernel_data.start >= res->start && | 237 | if (kernel_data.start >= res->start && |
236 | kernel_data.end <= res->end) | 238 | kernel_data.end <= res->end) |
237 | request_resource(res, &kernel_data); | 239 | request_resource(res, &kernel_data); |
238 | } | 240 | } |
239 | } | 241 | } |
240 | 242 | ||
241 | void __init setup_arch(char **cmdline_p) | 243 | void __init setup_arch(char **cmdline_p) |
242 | { | 244 | { |
243 | setup_processor(); | 245 | setup_processor(); |
244 | 246 | ||
245 | setup_machine_fdt(__fdt_pointer); | 247 | setup_machine_fdt(__fdt_pointer); |
246 | 248 | ||
247 | init_mm.start_code = (unsigned long) _text; | 249 | init_mm.start_code = (unsigned long) _text; |
248 | init_mm.end_code = (unsigned long) _etext; | 250 | init_mm.end_code = (unsigned long) _etext; |
249 | init_mm.end_data = (unsigned long) _edata; | 251 | init_mm.end_data = (unsigned long) _edata; |
250 | init_mm.brk = (unsigned long) _end; | 252 | init_mm.brk = (unsigned long) _end; |
251 | 253 | ||
252 | *cmdline_p = boot_command_line; | 254 | *cmdline_p = boot_command_line; |
253 | 255 | ||
254 | parse_early_param(); | 256 | parse_early_param(); |
255 | 257 | ||
256 | arm64_memblock_init(); | 258 | arm64_memblock_init(); |
257 | 259 | ||
258 | paging_init(); | 260 | paging_init(); |
259 | request_standard_resources(); | 261 | request_standard_resources(); |
260 | 262 | ||
261 | unflatten_device_tree(); | 263 | unflatten_device_tree(); |
262 | 264 | ||
265 | psci_init(); | ||
266 | |||
263 | #ifdef CONFIG_SMP | 267 | #ifdef CONFIG_SMP |
264 | smp_init_cpus(); | 268 | smp_init_cpus(); |
265 | #endif | 269 | #endif |
266 | 270 | ||
267 | #ifdef CONFIG_VT | 271 | #ifdef CONFIG_VT |
268 | #if defined(CONFIG_VGA_CONSOLE) | 272 | #if defined(CONFIG_VGA_CONSOLE) |
269 | conswitchp = &vga_con; | 273 | conswitchp = &vga_con; |
270 | #elif defined(CONFIG_DUMMY_CONSOLE) | 274 | #elif defined(CONFIG_DUMMY_CONSOLE) |
271 | conswitchp = &dummy_con; | 275 | conswitchp = &dummy_con; |
272 | #endif | 276 | #endif |
273 | #endif | 277 | #endif |
274 | } | 278 | } |
275 | 279 | ||
276 | static DEFINE_PER_CPU(struct cpu, cpu_data); | 280 | static DEFINE_PER_CPU(struct cpu, cpu_data); |
277 | 281 | ||
278 | static int __init topology_init(void) | 282 | static int __init topology_init(void) |
279 | { | 283 | { |
280 | int i; | 284 | int i; |
281 | 285 | ||
282 | for_each_possible_cpu(i) { | 286 | for_each_possible_cpu(i) { |
283 | struct cpu *cpu = &per_cpu(cpu_data, i); | 287 | struct cpu *cpu = &per_cpu(cpu_data, i); |
284 | cpu->hotpluggable = 1; | 288 | cpu->hotpluggable = 1; |
285 | register_cpu(cpu, i); | 289 | register_cpu(cpu, i); |
286 | } | 290 | } |
287 | 291 | ||
288 | return 0; | 292 | return 0; |
289 | } | 293 | } |
290 | subsys_initcall(topology_init); | 294 | subsys_initcall(topology_init); |
295 | |||
296 | static int __init arm64_device_probe(void) | ||
297 | { | ||
298 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | ||
299 | return 0; | ||
300 | } | ||
301 | device_initcall(arm64_device_probe); | ||
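Since of_platform_populate() now runs for every DT-based arm64 platform, SoC drivers need no board-file glue: any top-level DT node (or child of a "simple-bus" node) with a matching compatible string becomes a platform device automatically. A minimal sketch of a driver that would bind to one (all names hypothetical):

	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		/* the device itself was created by of_platform_populate() */
		dev_info(&pdev->dev, "probed via DT\n");
		return 0;
	}

	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "acme,foo" },	/* hypothetical compatible string */
		{ /* sentinel */ }
	};

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.driver	= {
			.name		= "acme-foo",
			.of_match_table	= foo_of_match,
		},
	};
	module_platform_driver(foo_driver);
	MODULE_LICENSE("GPL");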
291 | 302 | ||
292 | static const char *hwcap_str[] = { | 303 | static const char *hwcap_str[] = { |
293 | "fp", | 304 | "fp", |
294 | "asimd", | 305 | "asimd", |
295 | NULL | 306 | NULL |
296 | }; | 307 | }; |
297 | 308 | ||
298 | static int c_show(struct seq_file *m, void *v) | 309 | static int c_show(struct seq_file *m, void *v) |
299 | { | 310 | { |
300 | int i; | 311 | int i; |
301 | 312 | ||
302 | seq_printf(m, "Processor\t: %s rev %d (%s)\n", | 313 | seq_printf(m, "Processor\t: %s rev %d (%s)\n", |
303 | cpu_name, read_cpuid_id() & 15, ELF_PLATFORM); | 314 | cpu_name, read_cpuid_id() & 15, ELF_PLATFORM); |
304 | 315 | ||
305 | for_each_online_cpu(i) { | 316 | for_each_online_cpu(i) { |
306 | /* | 317 | /* |
307 | * glibc reads /proc/cpuinfo to determine the number of | 318 | * glibc reads /proc/cpuinfo to determine the number of |
308 | * online processors, looking for lines beginning with | 319 | * online processors, looking for lines beginning with |
309 | * "processor". Give glibc what it expects. | 320 | * "processor". Give glibc what it expects. |
310 | */ | 321 | */ |
311 | #ifdef CONFIG_SMP | 322 | #ifdef CONFIG_SMP |
312 | seq_printf(m, "processor\t: %d\n", i); | 323 | seq_printf(m, "processor\t: %d\n", i); |
313 | #endif | 324 | #endif |
314 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n", | 325 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n", |
315 | loops_per_jiffy / (500000UL/HZ), | 326 | loops_per_jiffy / (500000UL/HZ), |
316 | loops_per_jiffy / (5000UL/HZ) % 100); | 327 | loops_per_jiffy / (5000UL/HZ) % 100); |
317 | } | 328 | } |
318 | 329 | ||
319 | /* dump out the processor features */ | 330 | /* dump out the processor features */ |
320 | seq_puts(m, "Features\t: "); | 331 | seq_puts(m, "Features\t: "); |
321 | 332 | ||
322 | for (i = 0; hwcap_str[i]; i++) | 333 | for (i = 0; hwcap_str[i]; i++) |
323 | if (elf_hwcap & (1 << i)) | 334 | if (elf_hwcap & (1 << i)) |
324 | seq_printf(m, "%s ", hwcap_str[i]); | 335 | seq_printf(m, "%s ", hwcap_str[i]); |
325 | 336 | ||
326 | seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); | 337 | seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); |
327 | seq_printf(m, "CPU architecture: AArch64\n"); | 338 | seq_printf(m, "CPU architecture: AArch64\n"); |
328 | seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15); | 339 | seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15); |
329 | seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff); | 340 | seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff); |
330 | seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); | 341 | seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); |
331 | 342 | ||
332 | seq_puts(m, "\n"); | 343 | seq_puts(m, "\n"); |
333 | 344 | ||
334 | seq_printf(m, "Hardware\t: %s\n", machine_name); | 345 | seq_printf(m, "Hardware\t: %s\n", machine_name); |
335 | 346 | ||
336 | return 0; | 347 | return 0; |
337 | } | 348 | } |
338 | 349 | ||
339 | static void *c_start(struct seq_file *m, loff_t *pos) | 350 | static void *c_start(struct seq_file *m, loff_t *pos) |
340 | { | 351 | { |
341 | return *pos < 1 ? (void *)1 : NULL; | 352 | return *pos < 1 ? (void *)1 : NULL; |
342 | } | 353 | } |
343 | 354 | ||
344 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | 355 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) |
345 | { | 356 | { |
346 | ++*pos; | 357 | ++*pos; |
347 | return NULL; | 358 | return NULL; |
348 | } | 359 | } |
349 | 360 | ||
350 | static void c_stop(struct seq_file *m, void *v) | 361 | static void c_stop(struct seq_file *m, void *v) |
351 | { | 362 | { |
352 | } | 363 | } |
353 | 364 | ||
354 | const struct seq_operations cpuinfo_op = { | 365 | const struct seq_operations cpuinfo_op = { |
355 | .start = c_start, | 366 | .start = c_start, |
356 | .next = c_next, | 367 | .next = c_next, |
357 | .stop = c_stop, | 368 | .stop = c_stop, |
358 | .show = c_show | 369 | .show = c_show |
359 | }; | 370 | }; |
360 | 371 |
arch/arm64/kernel/signal32.c
1 | /* | 1 | /* |
2 | * Based on arch/arm/kernel/signal.c | 2 | * Based on arch/arm/kernel/signal.c |
3 | * | 3 | * |
4 | * Copyright (C) 1995-2009 Russell King | 4 | * Copyright (C) 1995-2009 Russell King |
5 | * Copyright (C) 2012 ARM Ltd. | 5 | * Copyright (C) 2012 ARM Ltd. |
6 | * Modified by Will Deacon <will.deacon@arm.com> | 6 | * Modified by Will Deacon <will.deacon@arm.com> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | * | 11 | * |
12 | * This program is distributed in the hope that it will be useful, | 12 | * This program is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 | * GNU General Public License for more details. | 15 | * GNU General Public License for more details. |
16 | * | 16 | * |
17 | * You should have received a copy of the GNU General Public License | 17 | * You should have received a copy of the GNU General Public License |
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/compat.h> | 21 | #include <linux/compat.h> |
22 | #include <linux/signal.h> | 22 | #include <linux/signal.h> |
23 | #include <linux/syscalls.h> | 23 | #include <linux/syscalls.h> |
24 | #include <linux/ratelimit.h> | 24 | #include <linux/ratelimit.h> |
25 | 25 | ||
26 | #include <asm/fpsimd.h> | 26 | #include <asm/fpsimd.h> |
27 | #include <asm/signal32.h> | 27 | #include <asm/signal32.h> |
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | #include <asm/unistd32.h> | 29 | #include <asm/unistd32.h> |
30 | 30 | ||
31 | struct compat_sigaction { | 31 | struct compat_sigaction { |
32 | compat_uptr_t sa_handler; | 32 | compat_uptr_t sa_handler; |
33 | compat_ulong_t sa_flags; | 33 | compat_ulong_t sa_flags; |
34 | compat_uptr_t sa_restorer; | 34 | compat_uptr_t sa_restorer; |
35 | compat_sigset_t sa_mask; | 35 | compat_sigset_t sa_mask; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | struct compat_old_sigaction { | 38 | struct compat_old_sigaction { |
39 | compat_uptr_t sa_handler; | 39 | compat_uptr_t sa_handler; |
40 | compat_old_sigset_t sa_mask; | 40 | compat_old_sigset_t sa_mask; |
41 | compat_ulong_t sa_flags; | 41 | compat_ulong_t sa_flags; |
42 | compat_uptr_t sa_restorer; | 42 | compat_uptr_t sa_restorer; |
43 | }; | 43 | }; |
44 | 44 | ||
45 | typedef struct compat_sigaltstack { | 45 | typedef struct compat_sigaltstack { |
46 | compat_uptr_t ss_sp; | 46 | compat_uptr_t ss_sp; |
47 | int ss_flags; | 47 | int ss_flags; |
48 | compat_size_t ss_size; | 48 | compat_size_t ss_size; |
49 | } compat_stack_t; | 49 | } compat_stack_t; |
50 | 50 | ||
51 | struct compat_sigcontext { | 51 | struct compat_sigcontext { |
52 | /* We always set these two fields to 0 */ | 52 | /* We always set these two fields to 0 */ |
53 | compat_ulong_t trap_no; | 53 | compat_ulong_t trap_no; |
54 | compat_ulong_t error_code; | 54 | compat_ulong_t error_code; |
55 | 55 | ||
56 | compat_ulong_t oldmask; | 56 | compat_ulong_t oldmask; |
57 | compat_ulong_t arm_r0; | 57 | compat_ulong_t arm_r0; |
58 | compat_ulong_t arm_r1; | 58 | compat_ulong_t arm_r1; |
59 | compat_ulong_t arm_r2; | 59 | compat_ulong_t arm_r2; |
60 | compat_ulong_t arm_r3; | 60 | compat_ulong_t arm_r3; |
61 | compat_ulong_t arm_r4; | 61 | compat_ulong_t arm_r4; |
62 | compat_ulong_t arm_r5; | 62 | compat_ulong_t arm_r5; |
63 | compat_ulong_t arm_r6; | 63 | compat_ulong_t arm_r6; |
64 | compat_ulong_t arm_r7; | 64 | compat_ulong_t arm_r7; |
65 | compat_ulong_t arm_r8; | 65 | compat_ulong_t arm_r8; |
66 | compat_ulong_t arm_r9; | 66 | compat_ulong_t arm_r9; |
67 | compat_ulong_t arm_r10; | 67 | compat_ulong_t arm_r10; |
68 | compat_ulong_t arm_fp; | 68 | compat_ulong_t arm_fp; |
69 | compat_ulong_t arm_ip; | 69 | compat_ulong_t arm_ip; |
70 | compat_ulong_t arm_sp; | 70 | compat_ulong_t arm_sp; |
71 | compat_ulong_t arm_lr; | 71 | compat_ulong_t arm_lr; |
72 | compat_ulong_t arm_pc; | 72 | compat_ulong_t arm_pc; |
73 | compat_ulong_t arm_cpsr; | 73 | compat_ulong_t arm_cpsr; |
74 | compat_ulong_t fault_address; | 74 | compat_ulong_t fault_address; |
75 | }; | 75 | }; |
76 | 76 | ||
77 | struct compat_ucontext { | 77 | struct compat_ucontext { |
78 | compat_ulong_t uc_flags; | 78 | compat_ulong_t uc_flags; |
79 | struct compat_ucontext *uc_link; | 79 | compat_uptr_t uc_link; |
80 | compat_stack_t uc_stack; | 80 | compat_stack_t uc_stack; |
81 | struct compat_sigcontext uc_mcontext; | 81 | struct compat_sigcontext uc_mcontext; |
82 | compat_sigset_t uc_sigmask; | 82 | compat_sigset_t uc_sigmask; |
83 | int __unused[32 - (sizeof (compat_sigset_t) / sizeof (int))]; | 83 | int __unused[32 - (sizeof (compat_sigset_t) / sizeof (int))]; |
84 | compat_ulong_t uc_regspace[128] __attribute__((__aligned__(8))); | 84 | compat_ulong_t uc_regspace[128] __attribute__((__aligned__(8))); |
85 | }; | 85 | }; |
86 | 86 | ||
87 | struct compat_vfp_sigframe { | 87 | struct compat_vfp_sigframe { |
88 | compat_ulong_t magic; | 88 | compat_ulong_t magic; |
89 | compat_ulong_t size; | 89 | compat_ulong_t size; |
90 | struct compat_user_vfp { | 90 | struct compat_user_vfp { |
91 | compat_u64 fpregs[32]; | 91 | compat_u64 fpregs[32]; |
92 | compat_ulong_t fpscr; | 92 | compat_ulong_t fpscr; |
93 | } ufp; | 93 | } ufp; |
94 | struct compat_user_vfp_exc { | 94 | struct compat_user_vfp_exc { |
95 | compat_ulong_t fpexc; | 95 | compat_ulong_t fpexc; |
96 | compat_ulong_t fpinst; | 96 | compat_ulong_t fpinst; |
97 | compat_ulong_t fpinst2; | 97 | compat_ulong_t fpinst2; |
98 | } ufp_exc; | 98 | } ufp_exc; |
99 | } __attribute__((__aligned__(8))); | 99 | } __attribute__((__aligned__(8))); |
100 | 100 | ||
101 | #define VFP_MAGIC 0x56465001 | 101 | #define VFP_MAGIC 0x56465001 |
102 | #define VFP_STORAGE_SIZE sizeof(struct compat_vfp_sigframe) | 102 | #define VFP_STORAGE_SIZE sizeof(struct compat_vfp_sigframe) |
103 | 103 | ||
104 | struct compat_aux_sigframe { | 104 | struct compat_aux_sigframe { |
105 | struct compat_vfp_sigframe vfp; | 105 | struct compat_vfp_sigframe vfp; |
106 | 106 | ||
107 | /* Something that isn't a valid magic number for any coprocessor. */ | 107 | /* Something that isn't a valid magic number for any coprocessor. */ |
108 | unsigned long end_magic; | 108 | unsigned long end_magic; |
109 | } __attribute__((__aligned__(8))); | 109 | } __attribute__((__aligned__(8))); |
110 | 110 | ||
111 | struct compat_sigframe { | 111 | struct compat_sigframe { |
112 | struct compat_ucontext uc; | 112 | struct compat_ucontext uc; |
113 | compat_ulong_t retcode[2]; | 113 | compat_ulong_t retcode[2]; |
114 | }; | 114 | }; |
115 | 115 | ||
116 | struct compat_rt_sigframe { | 116 | struct compat_rt_sigframe { |
117 | struct compat_siginfo info; | 117 | struct compat_siginfo info; |
118 | struct compat_sigframe sig; | 118 | struct compat_sigframe sig; |
119 | }; | 119 | }; |
120 | 120 | ||
121 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 121 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
122 | 122 | ||
123 | /* | 123 | /* |
124 | * For ARM syscalls, the syscall number has to be loaded into r7. | 124 | * For ARM syscalls, the syscall number has to be loaded into r7. |
125 | * We do not support an OABI userspace. | 125 | * We do not support an OABI userspace. |
126 | */ | 126 | */ |
127 | #define MOV_R7_NR_SIGRETURN (0xe3a07000 | __NR_compat_sigreturn) | 127 | #define MOV_R7_NR_SIGRETURN (0xe3a07000 | __NR_compat_sigreturn) |
128 | #define SVC_SYS_SIGRETURN (0xef000000 | __NR_compat_sigreturn) | 128 | #define SVC_SYS_SIGRETURN (0xef000000 | __NR_compat_sigreturn) |
129 | #define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | __NR_compat_rt_sigreturn) | 129 | #define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | __NR_compat_rt_sigreturn) |
130 | #define SVC_SYS_RT_SIGRETURN (0xef000000 | __NR_compat_rt_sigreturn) | 130 | #define SVC_SYS_RT_SIGRETURN (0xef000000 | __NR_compat_rt_sigreturn) |
131 | 131 | ||
132 | /* | 132 | /* |
133 | * For Thumb syscalls, we also pass the syscall number via r7. We therefore | 133 | * For Thumb syscalls, we also pass the syscall number via r7. We therefore |
134 | * need two 16-bit instructions. | 134 | * need two 16-bit instructions. |
135 | */ | 135 | */ |
136 | #define SVC_THUMB_SIGRETURN (((0xdf00 | __NR_compat_sigreturn) << 16) | \ | 136 | #define SVC_THUMB_SIGRETURN (((0xdf00 | __NR_compat_sigreturn) << 16) | \ |
137 | 0x2700 | __NR_compat_sigreturn) | 137 | 0x2700 | __NR_compat_sigreturn) |
138 | #define SVC_THUMB_RT_SIGRETURN (((0xdf00 | __NR_compat_rt_sigreturn) << 16) | \ | 138 | #define SVC_THUMB_RT_SIGRETURN (((0xdf00 | __NR_compat_rt_sigreturn) << 16) | \ |
139 | 0x2700 | __NR_compat_rt_sigreturn) | 139 | 0x2700 | __NR_compat_rt_sigreturn) |
140 | 140 | ||
141 | const compat_ulong_t aarch32_sigret_code[6] = { | 141 | const compat_ulong_t aarch32_sigret_code[6] = { |
142 | /* | 142 | /* |
143 | * AArch32 sigreturn code. | 143 | * AArch32 sigreturn code. |
144 | * We don't construct an OABI SWI - instead we just set the imm24 field | 144 | * We don't construct an OABI SWI - instead we just set the imm24 field |
145 | * to the EABI syscall number so that we create a sane disassembly. | 145 | * to the EABI syscall number so that we create a sane disassembly. |
146 | */ | 146 | */ |
147 | MOV_R7_NR_SIGRETURN, SVC_SYS_SIGRETURN, SVC_THUMB_SIGRETURN, | 147 | MOV_R7_NR_SIGRETURN, SVC_SYS_SIGRETURN, SVC_THUMB_SIGRETURN, |
148 | MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN, | 148 | MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN, |
149 | }; | 149 | }; |
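Plugging in the usual ARM EABI numbers (assumed here, not visible in this hunk: __NR_compat_sigreturn = 119 = 0x77, __NR_compat_rt_sigreturn = 173 = 0xad), the array evaluates to 0xe3a07077 (mov r7, #119), 0xef000077 (svc 0x77), 0xdf772777 (Thumb: movs r7, #0x77 in the low halfword, which executes first on a little-endian store, then svc 0x77), followed by the corresponding rt_ triple with 0xad in place of 0x77.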
150 | 150 | ||
151 | static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set) | 151 | static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set) |
152 | { | 152 | { |
153 | compat_sigset_t cset; | 153 | compat_sigset_t cset; |
154 | 154 | ||
155 | cset.sig[0] = set->sig[0] & 0xffffffffull; | 155 | cset.sig[0] = set->sig[0] & 0xffffffffull; |
156 | cset.sig[1] = set->sig[0] >> 32; | 156 | cset.sig[1] = set->sig[0] >> 32; |
157 | 157 | ||
158 | return copy_to_user(uset, &cset, sizeof(*uset)); | 158 | return copy_to_user(uset, &cset, sizeof(*uset)); |
159 | } | 159 | } |
160 | 160 | ||
161 | static inline int get_sigset_t(sigset_t *set, | 161 | static inline int get_sigset_t(sigset_t *set, |
162 | const compat_sigset_t __user *uset) | 162 | const compat_sigset_t __user *uset) |
163 | { | 163 | { |
164 | compat_sigset_t s32; | 164 | compat_sigset_t s32; |
165 | 165 | ||
166 | if (copy_from_user(&s32, uset, sizeof(*uset))) | 166 | if (copy_from_user(&s32, uset, sizeof(*uset))) |
167 | return -EFAULT; | 167 | return -EFAULT; |
168 | 168 | ||
169 | set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); | 169 | set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); |
170 | return 0; | 170 | return 0; |
171 | } | 171 | } |
172 | 172 | ||
173 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) | 173 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) |
174 | { | 174 | { |
175 | int err; | 175 | int err; |
176 | 176 | ||
177 | if (!access_ok(VERIFY_WRITE, to, sizeof(*to))) | 177 | if (!access_ok(VERIFY_WRITE, to, sizeof(*to))) |
178 | return -EFAULT; | 178 | return -EFAULT; |
179 | 179 | ||
180 | /* If you change siginfo_t structure, please be sure | 180 | /* If you change siginfo_t structure, please be sure |
181 | * this code is fixed accordingly. | 181 | * this code is fixed accordingly. |
182 | * It should never copy any pad contained in the structure | 182 | * It should never copy any pad contained in the structure |
183 | * to avoid security leaks, but must copy the generic | 183 | * to avoid security leaks, but must copy the generic |
184 | * 3 ints plus the relevant union member. | 184 | * 3 ints plus the relevant union member. |
185 | * This routine must convert siginfo from 64bit to 32bit as well | 185 | * This routine must convert siginfo from 64bit to 32bit as well |
186 | * at the same time. | 186 | * at the same time. |
187 | */ | 187 | */ |
188 | err = __put_user(from->si_signo, &to->si_signo); | 188 | err = __put_user(from->si_signo, &to->si_signo); |
189 | err |= __put_user(from->si_errno, &to->si_errno); | 189 | err |= __put_user(from->si_errno, &to->si_errno); |
190 | err |= __put_user((short)from->si_code, &to->si_code); | 190 | err |= __put_user((short)from->si_code, &to->si_code); |
191 | if (from->si_code < 0) | 191 | if (from->si_code < 0) |
192 | err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, | 192 | err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, |
193 | SI_PAD_SIZE); | 193 | SI_PAD_SIZE); |
194 | else switch (from->si_code & __SI_MASK) { | 194 | else switch (from->si_code & __SI_MASK) { |
195 | case __SI_KILL: | 195 | case __SI_KILL: |
196 | err |= __put_user(from->si_pid, &to->si_pid); | 196 | err |= __put_user(from->si_pid, &to->si_pid); |
197 | err |= __put_user(from->si_uid, &to->si_uid); | 197 | err |= __put_user(from->si_uid, &to->si_uid); |
198 | break; | 198 | break; |
199 | case __SI_TIMER: | 199 | case __SI_TIMER: |
200 | err |= __put_user(from->si_tid, &to->si_tid); | 200 | err |= __put_user(from->si_tid, &to->si_tid); |
201 | err |= __put_user(from->si_overrun, &to->si_overrun); | 201 | err |= __put_user(from->si_overrun, &to->si_overrun); |
202 | err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, | 202 | err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, |
203 | &to->si_ptr); | 203 | &to->si_ptr); |
204 | break; | 204 | break; |
205 | case __SI_POLL: | 205 | case __SI_POLL: |
206 | err |= __put_user(from->si_band, &to->si_band); | 206 | err |= __put_user(from->si_band, &to->si_band); |
207 | err |= __put_user(from->si_fd, &to->si_fd); | 207 | err |= __put_user(from->si_fd, &to->si_fd); |
208 | break; | 208 | break; |
209 | case __SI_FAULT: | 209 | case __SI_FAULT: |
210 | err |= __put_user((compat_uptr_t)(unsigned long)from->si_addr, | 210 | err |= __put_user((compat_uptr_t)(unsigned long)from->si_addr, |
211 | &to->si_addr); | 211 | &to->si_addr); |
212 | #ifdef BUS_MCEERR_AO | 212 | #ifdef BUS_MCEERR_AO |
213 | /* | 213 | /* |
214 | * Other callers might not initialize the si_lsb field, | 214 | * Other callers might not initialize the si_lsb field, |
215 | * so check explicitly for the right codes here. | 215 | * so check explicitly for the right codes here. |
216 | */ | 216 | */ |
217 | if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) | 217 | if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) |
218 | err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); | 218 | err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); |
219 | #endif | 219 | #endif |
220 | break; | 220 | break; |
221 | case __SI_CHLD: | 221 | case __SI_CHLD: |
222 | err |= __put_user(from->si_pid, &to->si_pid); | 222 | err |= __put_user(from->si_pid, &to->si_pid); |
223 | err |= __put_user(from->si_uid, &to->si_uid); | 223 | err |= __put_user(from->si_uid, &to->si_uid); |
224 | err |= __put_user(from->si_status, &to->si_status); | 224 | err |= __put_user(from->si_status, &to->si_status); |
225 | err |= __put_user(from->si_utime, &to->si_utime); | 225 | err |= __put_user(from->si_utime, &to->si_utime); |
226 | err |= __put_user(from->si_stime, &to->si_stime); | 226 | err |= __put_user(from->si_stime, &to->si_stime); |
227 | break; | 227 | break; |
228 | case __SI_RT: /* This is not generated by the kernel as of now. */ | 228 | case __SI_RT: /* This is not generated by the kernel as of now. */ |
229 | case __SI_MESGQ: /* But this is */ | 229 | case __SI_MESGQ: /* But this is */ |
230 | err |= __put_user(from->si_pid, &to->si_pid); | 230 | err |= __put_user(from->si_pid, &to->si_pid); |
231 | err |= __put_user(from->si_uid, &to->si_uid); | 231 | err |= __put_user(from->si_uid, &to->si_uid); |
232 | err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); | 232 | err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); |
233 | break; | 233 | break; |
234 | default: /* this is just in case for now ... */ | 234 | default: /* this is just in case for now ... */ |
235 | err |= __put_user(from->si_pid, &to->si_pid); | 235 | err |= __put_user(from->si_pid, &to->si_pid); |
236 | err |= __put_user(from->si_uid, &to->si_uid); | 236 | err |= __put_user(from->si_uid, &to->si_uid); |
237 | break; | 237 | break; |
238 | } | 238 | } |
239 | return err; | 239 | return err; |
240 | } | 240 | } |
241 | 241 | ||
242 | int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) | 242 | int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) |
243 | { | 243 | { |
244 | memset(to, 0, sizeof *to); | 244 | memset(to, 0, sizeof *to); |
245 | 245 | ||
246 | if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) || | 246 | if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) || |
247 | copy_from_user(to->_sifields._pad, | 247 | copy_from_user(to->_sifields._pad, |
248 | from->_sifields._pad, SI_PAD_SIZE)) | 248 | from->_sifields._pad, SI_PAD_SIZE)) |
249 | return -EFAULT; | 249 | return -EFAULT; |
250 | 250 | ||
251 | return 0; | 251 | return 0; |
252 | } | 252 | } |
253 | 253 | ||
254 | /* | 254 | /* |
255 | * VFP save/restore code. | 255 | * VFP save/restore code. |
256 | */ | 256 | */ |
257 | static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) | 257 | static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) |
258 | { | 258 | { |
259 | struct fpsimd_state *fpsimd = ¤t->thread.fpsimd_state; | 259 | struct fpsimd_state *fpsimd = ¤t->thread.fpsimd_state; |
260 | compat_ulong_t magic = VFP_MAGIC; | 260 | compat_ulong_t magic = VFP_MAGIC; |
261 | compat_ulong_t size = VFP_STORAGE_SIZE; | 261 | compat_ulong_t size = VFP_STORAGE_SIZE; |
262 | compat_ulong_t fpscr, fpexc; | 262 | compat_ulong_t fpscr, fpexc; |
263 | int err = 0; | 263 | int err = 0; |
264 | 264 | ||
265 | /* | 265 | /* |
266 | * Save the hardware registers to the fpsimd_state structure. | 266 | * Save the hardware registers to the fpsimd_state structure. |
267 | * Note that this also saves V16-31, which aren't visible | 267 | * Note that this also saves V16-31, which aren't visible |
268 | * in AArch32. | 268 | * in AArch32. |
269 | */ | 269 | */ |
270 | fpsimd_save_state(fpsimd); | 270 | fpsimd_save_state(fpsimd); |
271 | 271 | ||
272 | /* Place structure header on the stack */ | 272 | /* Place structure header on the stack */ |
273 | __put_user_error(magic, &frame->magic, err); | 273 | __put_user_error(magic, &frame->magic, err); |
274 | __put_user_error(size, &frame->size, err); | 274 | __put_user_error(size, &frame->size, err); |
275 | 275 | ||
276 | /* | 276 | /* |
277 | * Now copy the FP registers. Since the registers are packed, | 277 | * Now copy the FP registers. Since the registers are packed, |
278 | * we can copy the prefix we want (V0-V15) as it is. | 278 | * we can copy the prefix we want (V0-V15) as it is. |
279 | * FIXME: Won't work if big endian. | 279 | * FIXME: Won't work if big endian. |
280 | */ | 280 | */ |
281 | err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs, | 281 | err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs, |
282 | sizeof(frame->ufp.fpregs)); | 282 | sizeof(frame->ufp.fpregs)); |
283 | 283 | ||
284 | /* Create an AArch32 fpscr from the fpsr and the fpcr. */ | 284 | /* Create an AArch32 fpscr from the fpsr and the fpcr. */ |
285 | fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) | | 285 | fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) | |
286 | (fpsimd->fpcr & VFP_FPSCR_CTRL_MASK); | 286 | (fpsimd->fpcr & VFP_FPSCR_CTRL_MASK); |
287 | __put_user_error(fpscr, &frame->ufp.fpscr, err); | 287 | __put_user_error(fpscr, &frame->ufp.fpscr, err); |
288 | 288 | ||
289 | /* | 289 | /* |
290 | * The exception registers aren't available, so we fake up a | 290 | * The exception registers aren't available, so we fake up a |
291 | * basic FPEXC and zero everything else. | 291 | * basic FPEXC and zero everything else. |
292 | */ | 292 | */ |
293 | fpexc = (1 << 30); | 293 | fpexc = (1 << 30); |
294 | __put_user_error(fpexc, &frame->ufp_exc.fpexc, err); | 294 | __put_user_error(fpexc, &frame->ufp_exc.fpexc, err); |
295 | __put_user_error(0, &frame->ufp_exc.fpinst, err); | 295 | __put_user_error(0, &frame->ufp_exc.fpinst, err); |
296 | __put_user_error(0, &frame->ufp_exc.fpinst2, err); | 296 | __put_user_error(0, &frame->ufp_exc.fpinst2, err); |
297 | 297 | ||
298 | return err ? -EFAULT : 0; | 298 | return err ? -EFAULT : 0; |
299 | } | 299 | } |
300 | 300 | ||
301 | static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) | 301 | static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) |
302 | { | 302 | { |
303 | struct fpsimd_state fpsimd; | 303 | struct fpsimd_state fpsimd; |
304 | compat_ulong_t magic = VFP_MAGIC; | 304 | compat_ulong_t magic = VFP_MAGIC; |
305 | compat_ulong_t size = VFP_STORAGE_SIZE; | 305 | compat_ulong_t size = VFP_STORAGE_SIZE; |
306 | compat_ulong_t fpscr; | 306 | compat_ulong_t fpscr; |
307 | int err = 0; | 307 | int err = 0; |
308 | 308 | ||
309 | __get_user_error(magic, &frame->magic, err); | 309 | __get_user_error(magic, &frame->magic, err); |
310 | __get_user_error(size, &frame->size, err); | 310 | __get_user_error(size, &frame->size, err); |
311 | 311 | ||
312 | if (err) | 312 | if (err) |
313 | return -EFAULT; | 313 | return -EFAULT; |
314 | if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) | 314 | if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) |
315 | return -EINVAL; | 315 | return -EINVAL; |
316 | 316 | ||
317 | /* | 317 | /* |
318 | * Copy the FP registers into the start of the fpsimd_state. | 318 | * Copy the FP registers into the start of the fpsimd_state. |
319 | * FIXME: Won't work if big endian. | 319 | * FIXME: Won't work if big endian. |
320 | */ | 320 | */ |
321 | err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs, | 321 | err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs, |
322 | sizeof(frame->ufp.fpregs)); | 322 | sizeof(frame->ufp.fpregs)); |
323 | 323 | ||
324 | /* Extract the fpsr and the fpcr from the fpscr */ | 324 | /* Extract the fpsr and the fpcr from the fpscr */ |
325 | __get_user_error(fpscr, &frame->ufp.fpscr, err); | 325 | __get_user_error(fpscr, &frame->ufp.fpscr, err); |
326 | fpsimd.fpsr = fpscr & VFP_FPSCR_STAT_MASK; | 326 | fpsimd.fpsr = fpscr & VFP_FPSCR_STAT_MASK; |
327 | fpsimd.fpcr = fpscr & VFP_FPSCR_CTRL_MASK; | 327 | fpsimd.fpcr = fpscr & VFP_FPSCR_CTRL_MASK; |
328 | 328 | ||
329 | /* | 329 | /* |
330 | * We don't need to touch the exception register, so | 330 | * We don't need to touch the exception register, so |
331 | * reload the hardware state. | 331 | * reload the hardware state. |
332 | */ | 332 | */ |
333 | if (!err) { | 333 | if (!err) { |
334 | preempt_disable(); | 334 | preempt_disable(); |
335 | fpsimd_load_state(&fpsimd); | 335 | fpsimd_load_state(&fpsimd); |
336 | preempt_enable(); | 336 | preempt_enable(); |
337 | } | 337 | } |
338 | 338 | ||
339 | return err ? -EFAULT : 0; | 339 | return err ? -EFAULT : 0; |
340 | } | 340 | } |
341 | 341 | ||
342 | /* | 342 | /* |
343 | * atomically swap in the new signal mask, and wait for a signal. | 343 | * atomically swap in the new signal mask, and wait for a signal. |
344 | */ | 344 | */ |
345 | asmlinkage int compat_sys_sigsuspend(int restart, compat_ulong_t oldmask, | 345 | asmlinkage int compat_sys_sigsuspend(int restart, compat_ulong_t oldmask, |
346 | compat_old_sigset_t mask) | 346 | compat_old_sigset_t mask) |
347 | { | 347 | { |
348 | sigset_t blocked; | 348 | sigset_t blocked; |
349 | 349 | ||
350 | siginitset(¤t->blocked, mask); | 350 | siginitset(¤t->blocked, mask); |
351 | return sigsuspend(&blocked); | 351 | return sigsuspend(&blocked); |
352 | } | 352 | } |
353 | 353 | ||
354 | asmlinkage int compat_sys_sigaction(int sig, | 354 | asmlinkage int compat_sys_sigaction(int sig, |
355 | const struct compat_old_sigaction __user *act, | 355 | const struct compat_old_sigaction __user *act, |
356 | struct compat_old_sigaction __user *oact) | 356 | struct compat_old_sigaction __user *oact) |
357 | { | 357 | { |
358 | struct k_sigaction new_ka, old_ka; | 358 | struct k_sigaction new_ka, old_ka; |
359 | int ret; | 359 | int ret; |
360 | compat_old_sigset_t mask; | 360 | compat_old_sigset_t mask; |
361 | compat_uptr_t handler, restorer; | 361 | compat_uptr_t handler, restorer; |
362 | 362 | ||
363 | if (act) { | 363 | if (act) { |
364 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | 364 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || |
365 | __get_user(handler, &act->sa_handler) || | 365 | __get_user(handler, &act->sa_handler) || |
366 | __get_user(restorer, &act->sa_restorer) || | 366 | __get_user(restorer, &act->sa_restorer) || |
367 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || | 367 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || |
368 | __get_user(mask, &act->sa_mask)) | 368 | __get_user(mask, &act->sa_mask)) |
369 | return -EFAULT; | 369 | return -EFAULT; |
370 | 370 | ||
371 | new_ka.sa.sa_handler = compat_ptr(handler); | 371 | new_ka.sa.sa_handler = compat_ptr(handler); |
372 | new_ka.sa.sa_restorer = compat_ptr(restorer); | 372 | new_ka.sa.sa_restorer = compat_ptr(restorer); |
373 | siginitset(&new_ka.sa.sa_mask, mask); | 373 | siginitset(&new_ka.sa.sa_mask, mask); |
374 | } | 374 | } |
375 | 375 | ||
376 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | 376 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); |
377 | 377 | ||
378 | if (!ret && oact) { | 378 | if (!ret && oact) { |
379 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | 379 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || |
380 | __put_user(ptr_to_compat(old_ka.sa.sa_handler), | 380 | __put_user(ptr_to_compat(old_ka.sa.sa_handler), |
381 | &oact->sa_handler) || | 381 | &oact->sa_handler) || |
382 | __put_user(ptr_to_compat(old_ka.sa.sa_restorer), | 382 | __put_user(ptr_to_compat(old_ka.sa.sa_restorer), |
383 | &oact->sa_restorer) || | 383 | &oact->sa_restorer) || |
384 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || | 384 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || |
385 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | 385 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) |
386 | return -EFAULT; | 386 | return -EFAULT; |
387 | } | 387 | } |
388 | 388 | ||
389 | return ret; | 389 | return ret; |
390 | } | 390 | } |
391 | 391 | ||
392 | asmlinkage int compat_sys_rt_sigaction(int sig, | 392 | asmlinkage int compat_sys_rt_sigaction(int sig, |
393 | const struct compat_sigaction __user *act, | 393 | const struct compat_sigaction __user *act, |
394 | struct compat_sigaction __user *oact, | 394 | struct compat_sigaction __user *oact, |
395 | compat_size_t sigsetsize) | 395 | compat_size_t sigsetsize) |
396 | { | 396 | { |
397 | struct k_sigaction new_ka, old_ka; | 397 | struct k_sigaction new_ka, old_ka; |
398 | int ret; | 398 | int ret; |
399 | 399 | ||
400 | /* XXX: Don't preclude handling different sized sigset_t's. */ | 400 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
401 | if (sigsetsize != sizeof(compat_sigset_t)) | 401 | if (sigsetsize != sizeof(compat_sigset_t)) |
402 | return -EINVAL; | 402 | return -EINVAL; |
403 | 403 | ||
404 | if (act) { | 404 | if (act) { |
405 | compat_uptr_t handler, restorer; | 405 | compat_uptr_t handler, restorer; |
406 | 406 | ||
407 | ret = get_user(handler, &act->sa_handler); | 407 | ret = get_user(handler, &act->sa_handler); |
408 | new_ka.sa.sa_handler = compat_ptr(handler); | 408 | new_ka.sa.sa_handler = compat_ptr(handler); |
409 | ret |= get_user(restorer, &act->sa_restorer); | 409 | ret |= get_user(restorer, &act->sa_restorer); |
410 | new_ka.sa.sa_restorer = compat_ptr(restorer); | 410 | new_ka.sa.sa_restorer = compat_ptr(restorer); |
411 | ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask); | 411 | ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask); |
412 | ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); | 412 | ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); |
413 | if (ret) | 413 | if (ret) |
414 | return -EFAULT; | 414 | return -EFAULT; |
415 | } | 415 | } |
416 | 416 | ||
417 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | 417 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); |
418 | if (!ret && oact) { | 418 | if (!ret && oact) { |
419 | ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler); | 419 | ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler); |
420 | ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask); | 420 | ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask); |
421 | ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | 421 | ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); |
422 | } | 422 | } |
423 | return ret; | 423 | return ret; |
424 | } | 424 | } |
425 | 425 | ||
426 | int compat_do_sigaltstack(compat_uptr_t compat_uss, compat_uptr_t compat_uoss, | 426 | int compat_do_sigaltstack(compat_uptr_t compat_uss, compat_uptr_t compat_uoss, |
427 | compat_ulong_t sp) | 427 | compat_ulong_t sp) |
428 | { | 428 | { |
429 | compat_stack_t __user *newstack = compat_ptr(compat_uss); | 429 | compat_stack_t __user *newstack = compat_ptr(compat_uss); |
430 | compat_stack_t __user *oldstack = compat_ptr(compat_uoss); | 430 | compat_stack_t __user *oldstack = compat_ptr(compat_uoss); |
431 | compat_uptr_t ss_sp; | 431 | compat_uptr_t ss_sp; |
432 | int ret; | 432 | int ret; |
433 | mm_segment_t old_fs; | 433 | mm_segment_t old_fs; |
434 | stack_t uss, uoss; | 434 | stack_t uss, uoss; |
435 | 435 | ||
436 | /* Marshall the compat new stack into a stack_t */ | 436 | /* Marshall the compat new stack into a stack_t */ |
437 | if (newstack) { | 437 | if (newstack) { |
438 | if (get_user(ss_sp, &newstack->ss_sp) || | 438 | if (get_user(ss_sp, &newstack->ss_sp) || |
439 | __get_user(uss.ss_flags, &newstack->ss_flags) || | 439 | __get_user(uss.ss_flags, &newstack->ss_flags) || |
440 | __get_user(uss.ss_size, &newstack->ss_size)) | 440 | __get_user(uss.ss_size, &newstack->ss_size)) |
441 | return -EFAULT; | 441 | return -EFAULT; |
442 | uss.ss_sp = compat_ptr(ss_sp); | 442 | uss.ss_sp = compat_ptr(ss_sp); |
443 | } | 443 | } |
444 | 444 | ||
445 | old_fs = get_fs(); | 445 | old_fs = get_fs(); |
446 | set_fs(KERNEL_DS); | 446 | set_fs(KERNEL_DS); |
447 | /* The __user pointer casts are valid because of the set_fs() */ | 447 | /* The __user pointer casts are valid because of the set_fs() */ |
448 | ret = do_sigaltstack( | 448 | ret = do_sigaltstack( |
449 | newstack ? (stack_t __user *) &uss : NULL, | 449 | newstack ? (stack_t __user *) &uss : NULL, |
450 | oldstack ? (stack_t __user *) &uoss : NULL, | 450 | oldstack ? (stack_t __user *) &uoss : NULL, |
451 | (unsigned long)sp); | 451 | (unsigned long)sp); |
452 | set_fs(old_fs); | 452 | set_fs(old_fs); |
453 | 453 | ||
454 | /* Convert the old stack_t into a compat stack. */ | 454 | /* Convert the old stack_t into a compat stack. */ |
455 | if (!ret && oldstack && | 455 | if (!ret && oldstack && |
456 | (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) || | 456 | (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) || |
457 | __put_user(uoss.ss_flags, &oldstack->ss_flags) || | 457 | __put_user(uoss.ss_flags, &oldstack->ss_flags) || |
458 | __put_user(uoss.ss_size, &oldstack->ss_size))) | 458 | __put_user(uoss.ss_size, &oldstack->ss_size))) |
459 | return -EFAULT; | 459 | return -EFAULT; |
460 | return ret; | 460 | return ret; |
461 | } | 461 | } |
462 | 462 | ||
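The get_fs()/set_fs() dance above is the recurring compat pattern in this
file: widen the 32-bit userspace arguments into native structures on the
kernel stack, lift the address-space limit so the native helper's __user
checks accept those kernel pointers, call it, and restore the limit. A sketch
of the shape of the pattern only; compat_thing_t, native_thing,
widen_from_user() and do_native_call() are illustrative placeholders, not
kernel APIs:

    long compat_wrapper(compat_thing_t __user *uptr)
    {
            struct native_thing n;          /* native-width copy on the kernel stack */
            mm_segment_t old_fs;
            long ret;

            if (widen_from_user(&n, uptr))  /* 32-bit -> native conversion */
                    return -EFAULT;

            old_fs = get_fs();
            set_fs(KERNEL_DS);              /* __user checks now accept &n */
            ret = do_native_call((struct native_thing __user *)&n);
            set_fs(old_fs);                 /* always restore the previous limit */

            return ret;
    }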
463 | static int compat_restore_sigframe(struct pt_regs *regs, | 463 | static int compat_restore_sigframe(struct pt_regs *regs, |
464 | struct compat_sigframe __user *sf) | 464 | struct compat_sigframe __user *sf) |
465 | { | 465 | { |
466 | int err; | 466 | int err; |
467 | sigset_t set; | 467 | sigset_t set; |
468 | struct compat_aux_sigframe __user *aux; | 468 | struct compat_aux_sigframe __user *aux; |
469 | 469 | ||
470 | err = get_sigset_t(&set, &sf->uc.uc_sigmask); | 470 | err = get_sigset_t(&set, &sf->uc.uc_sigmask); |
471 | if (err == 0) { | 471 | if (err == 0) { |
472 | sigdelsetmask(&set, ~_BLOCKABLE); | 472 | sigdelsetmask(&set, ~_BLOCKABLE); |
473 | set_current_blocked(&set); | 473 | set_current_blocked(&set); |
474 | } | 474 | } |
475 | 475 | ||
476 | __get_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err); | 476 | __get_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err); |
477 | __get_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err); | 477 | __get_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err); |
478 | __get_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err); | 478 | __get_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err); |
479 | __get_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err); | 479 | __get_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err); |
480 | __get_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err); | 480 | __get_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err); |
481 | __get_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err); | 481 | __get_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err); |
482 | __get_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err); | 482 | __get_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err); |
483 | __get_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err); | 483 | __get_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err); |
484 | __get_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err); | 484 | __get_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err); |
485 | __get_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err); | 485 | __get_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err); |
486 | __get_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err); | 486 | __get_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err); |
487 | __get_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err); | 487 | __get_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err); |
488 | __get_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err); | 488 | __get_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err); |
489 | __get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err); | 489 | __get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err); |
490 | __get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err); | 490 | __get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err); |
491 | __get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err); | 491 | __get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err); |
492 | __get_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err); | 492 | __get_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err); |
493 | 493 | ||
494 | /* | 494 | /* |
495 | * Avoid compat_sys_sigreturn() restarting. | 495 | * Avoid compat_sys_sigreturn() restarting. |
496 | */ | 496 | */ |
497 | regs->syscallno = ~0UL; | 497 | regs->syscallno = ~0UL; |
498 | 498 | ||
499 | err |= !valid_user_regs(&regs->user_regs); | 499 | err |= !valid_user_regs(&regs->user_regs); |
500 | 500 | ||
501 | aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace; | 501 | aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace; |
502 | if (err == 0) | 502 | if (err == 0) |
503 | err |= compat_restore_vfp_context(&aux->vfp); | 503 | err |= compat_restore_vfp_context(&aux->vfp); |
504 | 504 | ||
505 | return err; | 505 | return err; |
506 | } | 506 | } |
507 | 507 | ||
508 | asmlinkage int compat_sys_sigreturn(struct pt_regs *regs) | 508 | asmlinkage int compat_sys_sigreturn(struct pt_regs *regs) |
509 | { | 509 | { |
510 | struct compat_sigframe __user *frame; | 510 | struct compat_sigframe __user *frame; |
511 | 511 | ||
512 | /* Always make any pending restarted system calls return -EINTR */ | 512 | /* Always make any pending restarted system calls return -EINTR */ |
513 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | 513 | current_thread_info()->restart_block.fn = do_no_restart_syscall; |
514 | 514 | ||
515 | /* | 515 | /* |
516 | * Since we stacked the signal on a 64-bit boundary, | 516 | * Since we stacked the signal on a 64-bit boundary, |
517 | * then 'sp' should be word aligned here. If it's | 517 | * then 'sp' should be word aligned here. If it's |
518 | * not, then the user is trying to mess with us. | 518 | * not, then the user is trying to mess with us. |
519 | */ | 519 | */ |
520 | if (regs->compat_sp & 7) | 520 | if (regs->compat_sp & 7) |
521 | goto badframe; | 521 | goto badframe; |
522 | 522 | ||
523 | frame = (struct compat_sigframe __user *)regs->compat_sp; | 523 | frame = (struct compat_sigframe __user *)regs->compat_sp; |
524 | 524 | ||
525 | if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) | 525 | if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) |
526 | goto badframe; | 526 | goto badframe; |
527 | 527 | ||
528 | if (compat_restore_sigframe(regs, frame)) | 528 | if (compat_restore_sigframe(regs, frame)) |
529 | goto badframe; | 529 | goto badframe; |
530 | 530 | ||
531 | return regs->regs[0]; | 531 | return regs->regs[0]; |
532 | 532 | ||
533 | badframe: | 533 | badframe: |
534 | if (show_unhandled_signals) | 534 | if (show_unhandled_signals) |
535 | pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n", | 535 | pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n", |
536 | current->comm, task_pid_nr(current), __func__, | 536 | current->comm, task_pid_nr(current), __func__, |
537 | regs->pc, regs->sp); | 537 | regs->pc, regs->sp); |
538 | force_sig(SIGSEGV, current); | 538 | force_sig(SIGSEGV, current); |
539 | return 0; | 539 | return 0; |
540 | } | 540 | } |
541 | 541 | ||
542 | asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs) | 542 | asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs) |
543 | { | 543 | { |
544 | struct compat_rt_sigframe __user *frame; | 544 | struct compat_rt_sigframe __user *frame; |
545 | 545 | ||
546 | /* Always make any pending restarted system calls return -EINTR */ | 546 | /* Always make any pending restarted system calls return -EINTR */ |
547 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | 547 | current_thread_info()->restart_block.fn = do_no_restart_syscall; |
548 | 548 | ||
549 | /* | 549 | /* |
550 | * Since we stacked the signal on a 64-bit boundary, | 550 | * Since we stacked the signal on a 64-bit boundary, |
551 | * then 'sp' should be word aligned here. If it's | 551 | * then 'sp' should be word aligned here. If it's |
552 | * not, then the user is trying to mess with us. | 552 | * not, then the user is trying to mess with us. |
553 | */ | 553 | */ |
554 | if (regs->compat_sp & 7) | 554 | if (regs->compat_sp & 7) |
555 | goto badframe; | 555 | goto badframe; |
556 | 556 | ||
557 | frame = (struct compat_rt_sigframe __user *)regs->compat_sp; | 557 | frame = (struct compat_rt_sigframe __user *)regs->compat_sp; |
558 | 558 | ||
559 | if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) | 559 | if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) |
560 | goto badframe; | 560 | goto badframe; |
561 | 561 | ||
562 | if (compat_restore_sigframe(regs, &frame->sig)) | 562 | if (compat_restore_sigframe(regs, &frame->sig)) |
563 | goto badframe; | 563 | goto badframe; |
564 | 564 | ||
565 | if (compat_do_sigaltstack(ptr_to_compat(&frame->sig.uc.uc_stack), | 565 | if (compat_do_sigaltstack(ptr_to_compat(&frame->sig.uc.uc_stack), |
566 | ptr_to_compat((void __user *)NULL), | 566 | ptr_to_compat((void __user *)NULL), |
567 | regs->compat_sp) == -EFAULT) | 567 | regs->compat_sp) == -EFAULT) |
568 | goto badframe; | 568 | goto badframe; |
569 | 569 | ||
570 | return regs->regs[0]; | 570 | return regs->regs[0]; |
571 | 571 | ||
572 | badframe: | 572 | badframe: |
573 | if (show_unhandled_signals) | 573 | if (show_unhandled_signals) |
574 | pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n", | 574 | pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n", |
575 | current->comm, task_pid_nr(current), __func__, | 575 | current->comm, task_pid_nr(current), __func__, |
576 | regs->pc, regs->sp); | 576 | regs->pc, regs->sp); |
577 | force_sig(SIGSEGV, current); | 577 | force_sig(SIGSEGV, current); |
578 | return 0; | 578 | return 0; |
579 | } | 579 | } |
580 | 580 | ||
581 | static void __user *compat_get_sigframe(struct k_sigaction *ka, | 581 | static void __user *compat_get_sigframe(struct k_sigaction *ka, |
582 | struct pt_regs *regs, | 582 | struct pt_regs *regs, |
583 | int framesize) | 583 | int framesize) |
584 | { | 584 | { |
585 | compat_ulong_t sp = regs->compat_sp; | 585 | compat_ulong_t sp = regs->compat_sp; |
586 | void __user *frame; | 586 | void __user *frame; |
587 | 587 | ||
588 | /* | 588 | /* |
589 | * This is the X/Open sanctioned signal stack switching. | 589 | * This is the X/Open sanctioned signal stack switching. |
590 | */ | 590 | */ |
591 | if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) | 591 | if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) |
592 | sp = current->sas_ss_sp + current->sas_ss_size; | 592 | sp = current->sas_ss_sp + current->sas_ss_size; |
593 | 593 | ||
594 | /* | 594 | /* |
595 | * ATPCS B01 mandates 8-byte alignment | 595 | * ATPCS B01 mandates 8-byte alignment |
596 | */ | 596 | */ |
597 | frame = compat_ptr((compat_uptr_t)((sp - framesize) & ~7)); | 597 | frame = compat_ptr((compat_uptr_t)((sp - framesize) & ~7)); |
598 | 598 | ||
599 | /* | 599 | /* |
600 | * Check that we can actually write to the signal frame. | 600 | * Check that we can actually write to the signal frame. |
601 | */ | 601 | */ |
602 | if (!access_ok(VERIFY_WRITE, frame, framesize)) | 602 | if (!access_ok(VERIFY_WRITE, frame, framesize)) |
603 | frame = NULL; | 603 | frame = NULL; |
604 | 604 | ||
605 | return frame; | 605 | return frame; |
606 | } | 606 | } |
607 | 607 | ||
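The "(sp - framesize) & ~7" step above rounds the frame base down to an 8-byte
boundary, which is exactly why the sigreturn paths earlier in this file may
treat any frame with "compat_sp & 7" non-zero as tampered with. A worked
example with arbitrary numbers:

    unsigned int sp        = 0xbeffefed;    /* misaligned user stack pointer */
    unsigned int framesize = 0x2a0;
    unsigned int frame     = (sp - framesize) & ~7u;
    /* sp - framesize = 0xbeffed4d; clearing the low three bits gives
     * 0xbeffed48, so (frame & 7) == 0 and the sigreturn check passes. */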
608 | static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, | 608 | static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, |
609 | compat_ulong_t __user *rc, void __user *frame, | 609 | compat_ulong_t __user *rc, void __user *frame, |
610 | int usig) | 610 | int usig) |
611 | { | 611 | { |
612 | compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler); | 612 | compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler); |
613 | compat_ulong_t retcode; | 613 | compat_ulong_t retcode; |
614 | compat_ulong_t spsr = regs->pstate & ~PSR_f; | 614 | compat_ulong_t spsr = regs->pstate & ~PSR_f; |
615 | int thumb; | 615 | int thumb; |
616 | 616 | ||
617 | /* Check if the handler is written for ARM or Thumb */ | 617 | /* Check if the handler is written for ARM or Thumb */ |
618 | thumb = handler & 1; | 618 | thumb = handler & 1; |
619 | 619 | ||
620 | if (thumb) { | 620 | if (thumb) { |
621 | spsr |= COMPAT_PSR_T_BIT; | 621 | spsr |= COMPAT_PSR_T_BIT; |
622 | spsr &= ~COMPAT_PSR_IT_MASK; | 622 | spsr &= ~COMPAT_PSR_IT_MASK; |
623 | } else { | 623 | } else { |
624 | spsr &= ~COMPAT_PSR_T_BIT; | 624 | spsr &= ~COMPAT_PSR_T_BIT; |
625 | } | 625 | } |
626 | 626 | ||
627 | if (ka->sa.sa_flags & SA_RESTORER) { | 627 | if (ka->sa.sa_flags & SA_RESTORER) { |
628 | retcode = ptr_to_compat(ka->sa.sa_restorer); | 628 | retcode = ptr_to_compat(ka->sa.sa_restorer); |
629 | } else { | 629 | } else { |
630 | /* Set up sigreturn pointer */ | 630 | /* Set up sigreturn pointer */ |
631 | unsigned int idx = thumb << 1; | 631 | unsigned int idx = thumb << 1; |
632 | 632 | ||
633 | if (ka->sa.sa_flags & SA_SIGINFO) | 633 | if (ka->sa.sa_flags & SA_SIGINFO) |
634 | idx += 3; | 634 | idx += 3; |
635 | 635 | ||
636 | retcode = AARCH32_VECTORS_BASE + | 636 | retcode = AARCH32_VECTORS_BASE + |
637 | AARCH32_KERN_SIGRET_CODE_OFFSET + | 637 | AARCH32_KERN_SIGRET_CODE_OFFSET + |
638 | (idx << 2) + thumb; | 638 | (idx << 2) + thumb; |
639 | } | 639 | } |
640 | 640 | ||
641 | regs->regs[0] = usig; | 641 | regs->regs[0] = usig; |
642 | regs->compat_sp = ptr_to_compat(frame); | 642 | regs->compat_sp = ptr_to_compat(frame); |
643 | regs->compat_lr = retcode; | 643 | regs->compat_lr = retcode; |
644 | regs->pc = handler; | 644 | regs->pc = handler; |
645 | regs->pstate = spsr; | 645 | regs->pstate = spsr; |
646 | } | 646 | } |
647 | 647 | ||
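The retcode computation above picks one of the kernel-provided sigreturn
trampolines in the AArch32 vectors page: bit 1 of idx selects Thumb over ARM,
the "+ 3" moves from the plain sigreturn group to the rt_sigreturn group, and
the final "+ thumb" sets bit 0 of the address so the trampoline runs in Thumb
state. Worked through for a Thumb handler registered with SA_SIGINFO (the two
AARCH32_* constants are defined elsewhere in this series):

    int thumb = 1;
    unsigned int idx = thumb << 1;          /* idx = 2: Thumb entry        */
    idx += 3;                               /* idx = 5: rt_sigreturn group */
    /* retcode = AARCH32_VECTORS_BASE + AARCH32_KERN_SIGRET_CODE_OFFSET
     *         + (5 << 2) + 1
     * = byte offset 20 into the trampoline table, with bit 0 set for a
     * Thumb-state return. */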
648 | static int compat_setup_sigframe(struct compat_sigframe __user *sf, | 648 | static int compat_setup_sigframe(struct compat_sigframe __user *sf, |
649 | struct pt_regs *regs, sigset_t *set) | 649 | struct pt_regs *regs, sigset_t *set) |
650 | { | 650 | { |
651 | struct compat_aux_sigframe __user *aux; | 651 | struct compat_aux_sigframe __user *aux; |
652 | int err = 0; | 652 | int err = 0; |
653 | 653 | ||
654 | __put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err); | 654 | __put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err); |
655 | __put_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err); | 655 | __put_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err); |
656 | __put_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err); | 656 | __put_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err); |
657 | __put_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err); | 657 | __put_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err); |
658 | __put_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err); | 658 | __put_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err); |
659 | __put_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err); | 659 | __put_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err); |
660 | __put_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err); | 660 | __put_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err); |
661 | __put_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err); | 661 | __put_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err); |
662 | __put_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err); | 662 | __put_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err); |
663 | __put_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err); | 663 | __put_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err); |
664 | __put_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err); | 664 | __put_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err); |
665 | __put_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err); | 665 | __put_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err); |
666 | __put_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err); | 666 | __put_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err); |
667 | __put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err); | 667 | __put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err); |
668 | __put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err); | 668 | __put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err); |
669 | __put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err); | 669 | __put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err); |
670 | __put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err); | 670 | __put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err); |
671 | 671 | ||
672 | __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err); | 672 | __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err); |
673 | __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.error_code, err); | 673 | __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.error_code, err); |
674 | __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); | 674 | __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); |
675 | __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); | 675 | __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); |
676 | 676 | ||
677 | err |= put_sigset_t(&sf->uc.uc_sigmask, set); | 677 | err |= put_sigset_t(&sf->uc.uc_sigmask, set); |
678 | 678 | ||
679 | aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace; | 679 | aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace; |
680 | 680 | ||
681 | if (err == 0) | 681 | if (err == 0) |
682 | err |= compat_preserve_vfp_context(&aux->vfp); | 682 | err |= compat_preserve_vfp_context(&aux->vfp); |
683 | __put_user_error(0, &aux->end_magic, err); | 683 | __put_user_error(0, &aux->end_magic, err); |
684 | 684 | ||
685 | return err; | 685 | return err; |
686 | } | 686 | } |
687 | 687 | ||
688 | /* | 688 | /* |
689 | * 32-bit signal handling routines called from signal.c | 689 | * 32-bit signal handling routines called from signal.c |
690 | */ | 690 | */ |
691 | int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, | 691 | int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, |
692 | sigset_t *set, struct pt_regs *regs) | 692 | sigset_t *set, struct pt_regs *regs) |
693 | { | 693 | { |
694 | struct compat_rt_sigframe __user *frame; | 694 | struct compat_rt_sigframe __user *frame; |
695 | compat_stack_t stack; | 695 | compat_stack_t stack; |
696 | int err = 0; | 696 | int err = 0; |
697 | 697 | ||
698 | frame = compat_get_sigframe(ka, regs, sizeof(*frame)); | 698 | frame = compat_get_sigframe(ka, regs, sizeof(*frame)); |
699 | 699 | ||
700 | if (!frame) | 700 | if (!frame) |
701 | return 1; | 701 | return 1; |
702 | 702 | ||
703 | err |= copy_siginfo_to_user32(&frame->info, info); | 703 | err |= copy_siginfo_to_user32(&frame->info, info); |
704 | 704 | ||
705 | __put_user_error(0, &frame->sig.uc.uc_flags, err); | 705 | __put_user_error(0, &frame->sig.uc.uc_flags, err); |
706 | __put_user_error(NULL, &frame->sig.uc.uc_link, err); | 706 | __put_user_error(0, &frame->sig.uc.uc_link, err); |
707 | 707 | ||
708 | memset(&stack, 0, sizeof(stack)); | 708 | memset(&stack, 0, sizeof(stack)); |
709 | stack.ss_sp = (compat_uptr_t)current->sas_ss_sp; | 709 | stack.ss_sp = (compat_uptr_t)current->sas_ss_sp; |
710 | stack.ss_flags = sas_ss_flags(regs->compat_sp); | 710 | stack.ss_flags = sas_ss_flags(regs->compat_sp); |
711 | stack.ss_size = current->sas_ss_size; | 711 | stack.ss_size = current->sas_ss_size; |
712 | err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack)); | 712 | err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack)); |
713 | 713 | ||
714 | err |= compat_setup_sigframe(&frame->sig, regs, set); | 714 | err |= compat_setup_sigframe(&frame->sig, regs, set); |
715 | 715 | ||
716 | if (err == 0) { | 716 | if (err == 0) { |
717 | compat_setup_return(regs, ka, frame->sig.retcode, frame, usig); | 717 | compat_setup_return(regs, ka, frame->sig.retcode, frame, usig); |
718 | regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info; | 718 | regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info; |
719 | regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc; | 719 | regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc; |
720 | } | 720 | } |
721 | 721 | ||
722 | return err; | 722 | return err; |
723 | } | 723 | } |
724 | 724 | ||
725 | int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, | 725 | int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, |
726 | struct pt_regs *regs) | 726 | struct pt_regs *regs) |
727 | { | 727 | { |
728 | struct compat_sigframe __user *frame; | 728 | struct compat_sigframe __user *frame; |
729 | int err = 0; | 729 | int err = 0; |
730 | 730 | ||
731 | frame = compat_get_sigframe(ka, regs, sizeof(*frame)); | 731 | frame = compat_get_sigframe(ka, regs, sizeof(*frame)); |
732 | 732 | ||
733 | if (!frame) | 733 | if (!frame) |
734 | return 1; | 734 | return 1; |
735 | 735 | ||
736 | __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err); | 736 | __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err); |
737 | 737 | ||
738 | err |= compat_setup_sigframe(frame, regs, set); | 738 | err |= compat_setup_sigframe(frame, regs, set); |
739 | if (err == 0) | 739 | if (err == 0) |
740 | compat_setup_return(regs, ka, frame->retcode, frame, usig); | 740 | compat_setup_return(regs, ka, frame->retcode, frame, usig); |
741 | 741 | ||
742 | return err; | 742 | return err; |
743 | } | 743 | } |
744 | 744 | ||
745 | /* | 745 | /* |
746 | * RT signals don't have generic compat wrappers. | 746 | * RT signals don't have generic compat wrappers. |
747 | * See arch/powerpc/kernel/signal_32.c | 747 | * See arch/powerpc/kernel/signal_32.c |
748 | */ | 748 | */ |
749 | asmlinkage int compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, | 749 | asmlinkage int compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, |
750 | compat_sigset_t __user *oset, | 750 | compat_sigset_t __user *oset, |
751 | compat_size_t sigsetsize) | 751 | compat_size_t sigsetsize) |
752 | { | 752 | { |
753 | sigset_t s; | 753 | sigset_t s; |
754 | sigset_t __user *up; | 754 | sigset_t __user *up; |
755 | int ret; | 755 | int ret; |
756 | mm_segment_t old_fs = get_fs(); | 756 | mm_segment_t old_fs = get_fs(); |
757 | 757 | ||
758 | if (set) { | 758 | if (set) { |
759 | if (get_sigset_t(&s, set)) | 759 | if (get_sigset_t(&s, set)) |
760 | return -EFAULT; | 760 | return -EFAULT; |
761 | } | 761 | } |
762 | 762 | ||
763 | set_fs(KERNEL_DS); | 763 | set_fs(KERNEL_DS); |
764 | /* This is valid because of the set_fs() */ | 764 | /* This is valid because of the set_fs() */ |
765 | up = (sigset_t __user *) &s; | 765 | up = (sigset_t __user *) &s; |
766 | ret = sys_rt_sigprocmask(how, set ? up : NULL, oset ? up : NULL, | 766 | ret = sys_rt_sigprocmask(how, set ? up : NULL, oset ? up : NULL, |
767 | sigsetsize); | 767 | sigsetsize); |
768 | set_fs(old_fs); | 768 | set_fs(old_fs); |
769 | if (ret) | 769 | if (ret) |
770 | return ret; | 770 | return ret; |
771 | if (oset) { | 771 | if (oset) { |
772 | if (put_sigset_t(oset, &s)) | 772 | if (put_sigset_t(oset, &s)) |
773 | return -EFAULT; | 773 | return -EFAULT; |
774 | } | 774 | } |
775 | return 0; | 775 | return 0; |
776 | } | 776 | } |
777 | 777 | ||
778 | asmlinkage int compat_sys_rt_sigpending(compat_sigset_t __user *set, | 778 | asmlinkage int compat_sys_rt_sigpending(compat_sigset_t __user *set, |
779 | compat_size_t sigsetsize) | 779 | compat_size_t sigsetsize) |
780 | { | 780 | { |
781 | sigset_t s; | 781 | sigset_t s; |
782 | int ret; | 782 | int ret; |
783 | mm_segment_t old_fs = get_fs(); | 783 | mm_segment_t old_fs = get_fs(); |
784 | 784 | ||
785 | set_fs(KERNEL_DS); | 785 | set_fs(KERNEL_DS); |
786 | /* The __user pointer cast is valid because of the set_fs() */ | 786 | /* The __user pointer cast is valid because of the set_fs() */ |
787 | ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); | 787 | ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); |
788 | set_fs(old_fs); | 788 | set_fs(old_fs); |
789 | if (!ret) { | 789 | if (!ret) { |
790 | if (put_sigset_t(set, &s)) | 790 | if (put_sigset_t(set, &s)) |
791 | return -EFAULT; | 791 | return -EFAULT; |
792 | } | 792 | } |
793 | return ret; | 793 | return ret; |
794 | } | 794 | } |
795 | 795 | ||
796 | asmlinkage int compat_sys_rt_sigqueueinfo(int pid, int sig, | 796 | asmlinkage int compat_sys_rt_sigqueueinfo(int pid, int sig, |
797 | compat_siginfo_t __user *uinfo) | 797 | compat_siginfo_t __user *uinfo) |
798 | { | 798 | { |
799 | siginfo_t info; | 799 | siginfo_t info; |
800 | int ret; | 800 | int ret; |
801 | mm_segment_t old_fs = get_fs(); | 801 | mm_segment_t old_fs = get_fs(); |
802 | 802 | ||
803 | ret = copy_siginfo_from_user32(&info, uinfo); | 803 | ret = copy_siginfo_from_user32(&info, uinfo); |
804 | if (unlikely(ret)) | 804 | if (unlikely(ret)) |
805 | return ret; | 805 | return ret; |
806 | 806 | ||
807 | set_fs (KERNEL_DS); | 807 | set_fs (KERNEL_DS); |
808 | /* The __user pointer cast is valid because of the set_fs() */ | 808 | /* The __user pointer cast is valid because of the set_fs() */ |
809 | ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info); | 809 | ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info); |
810 | set_fs (old_fs); | 810 | set_fs (old_fs); |
811 | return ret; | 811 | return ret; |
812 | } | 812 | } |
813 | 813 | ||
814 | void compat_setup_restart_syscall(struct pt_regs *regs) | 814 | void compat_setup_restart_syscall(struct pt_regs *regs) |
815 | { | 815 | { |
816 | regs->regs[7] = __NR_compat_restart_syscall; | 816 | regs->regs[7] = __NR_compat_restart_syscall; |
817 | } | 817 | } |
818 | 818 |
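compat_setup_restart_syscall() works because the AArch32 EABI passes the
system call number in r7: loading __NR_compat_restart_syscall there means
that when the signal-delivery code also rewinds the PC to the trapping svc
instruction, the re-executed trap dispatches to restart_syscall() rather than
to the original call. Schematically (an illustrative outline, not kernel
code):

    /* 1. task blocks in syscall N; a signal with SA_RESTART arrives
     * 2. kernel: regs->regs[7] = __NR_compat_restart_syscall, and the
     *    signal-delivery code steps regs->pc back over the svc instruction
     * 3. the handler runs, then returns through the sigreturn trampoline
     * 4. the svc re-executes with r7 = restart_syscall, resuming syscall N */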
arch/arm64/kernel/smp.c
1 | /* | 1 | /* |
2 | * SMP initialisation and IPI support | 2 | * SMP initialisation and IPI support |
3 | * Based on arch/arm/kernel/smp.c | 3 | * Based on arch/arm/kernel/smp.c |
4 | * | 4 | * |
5 | * Copyright (C) 2012 ARM Ltd. | 5 | * Copyright (C) 2012 ARM Ltd. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
23 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/cache.h> | 25 | #include <linux/cache.h> |
26 | #include <linux/profile.h> | 26 | #include <linux/profile.h> |
27 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
28 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
29 | #include <linux/err.h> | 29 | #include <linux/err.h> |
30 | #include <linux/cpu.h> | 30 | #include <linux/cpu.h> |
31 | #include <linux/smp.h> | 31 | #include <linux/smp.h> |
32 | #include <linux/seq_file.h> | 32 | #include <linux/seq_file.h> |
33 | #include <linux/irq.h> | 33 | #include <linux/irq.h> |
34 | #include <linux/percpu.h> | 34 | #include <linux/percpu.h> |
35 | #include <linux/clockchips.h> | 35 | #include <linux/clockchips.h> |
36 | #include <linux/completion.h> | 36 | #include <linux/completion.h> |
37 | #include <linux/of.h> | 37 | #include <linux/of.h> |
38 | 38 | ||
39 | #include <asm/atomic.h> | 39 | #include <asm/atomic.h> |
40 | #include <asm/cacheflush.h> | 40 | #include <asm/cacheflush.h> |
41 | #include <asm/cputype.h> | 41 | #include <asm/cputype.h> |
42 | #include <asm/mmu_context.h> | 42 | #include <asm/mmu_context.h> |
43 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
44 | #include <asm/pgalloc.h> | 44 | #include <asm/pgalloc.h> |
45 | #include <asm/processor.h> | 45 | #include <asm/processor.h> |
46 | #include <asm/sections.h> | 46 | #include <asm/sections.h> |
47 | #include <asm/tlbflush.h> | 47 | #include <asm/tlbflush.h> |
48 | #include <asm/ptrace.h> | 48 | #include <asm/ptrace.h> |
49 | 49 | ||
50 | /* | 50 | /* |
51 | * as from 2.5, kernels no longer have an init_tasks structure | 51 | * as from 2.5, kernels no longer have an init_tasks structure |
52 | * so we need some other way of telling a new secondary core | 52 | * so we need some other way of telling a new secondary core |
53 | * where to place its SVC stack | 53 | * where to place its SVC stack |
54 | */ | 54 | */ |
55 | struct secondary_data secondary_data; | 55 | struct secondary_data secondary_data; |
56 | volatile unsigned long secondary_holding_pen_release = -1; | 56 | volatile unsigned long secondary_holding_pen_release = -1; |
57 | 57 | ||
58 | enum ipi_msg_type { | 58 | enum ipi_msg_type { |
59 | IPI_RESCHEDULE, | 59 | IPI_RESCHEDULE, |
60 | IPI_CALL_FUNC, | 60 | IPI_CALL_FUNC, |
61 | IPI_CALL_FUNC_SINGLE, | 61 | IPI_CALL_FUNC_SINGLE, |
62 | IPI_CPU_STOP, | 62 | IPI_CPU_STOP, |
63 | }; | 63 | }; |
64 | 64 | ||
65 | static DEFINE_RAW_SPINLOCK(boot_lock); | 65 | static DEFINE_RAW_SPINLOCK(boot_lock); |
66 | 66 | ||
67 | /* | 67 | /* |
68 | * Write secondary_holding_pen_release in a way that is guaranteed to be | 68 | * Write secondary_holding_pen_release in a way that is guaranteed to be |
69 | * visible to all observers, irrespective of whether they're taking part | 69 | * visible to all observers, irrespective of whether they're taking part |
70 | * in coherency or not. This is necessary for the hotplug code to work | 70 | * in coherency or not. This is necessary for the hotplug code to work |
71 | * reliably. | 71 | * reliably. |
72 | */ | 72 | */ |
73 | static void __cpuinit write_pen_release(int val) | 73 | static void __cpuinit write_pen_release(int val) |
74 | { | 74 | { |
75 | void *start = (void *)&secondary_holding_pen_release; | 75 | void *start = (void *)&secondary_holding_pen_release; |
76 | unsigned long size = sizeof(secondary_holding_pen_release); | 76 | unsigned long size = sizeof(secondary_holding_pen_release); |
77 | 77 | ||
78 | secondary_holding_pen_release = val; | 78 | secondary_holding_pen_release = val; |
79 | __flush_dcache_area(start, size); | 79 | __flush_dcache_area(start, size); |
80 | } | 80 | } |
81 | 81 | ||
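write_pen_release() is the boot CPU's half of the spin-table handshake:
boot_secondary() below writes the target CPU number, issues sev(), and then
polls for the value to flip back to -1. The other half runs on the parked
secondary, which spins comparing the pen against its own ID. An
assumption-level C outline of the secondary's side (the real path is assembly
in head.S; wfe() stands in for the wait-for-event instruction):

    void secondary_pen_wait_sketch(unsigned long my_cpu)
    {
            while (secondary_holding_pen_release != my_cpu)
                    wfe();                  /* sleep until the boot CPU's sev() */

            /* secondary_start_kernel() later calls write_pen_release(-1),
             * which is what releases boot_secondary()'s timeout loop. */
            secondary_start_kernel();
    }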
82 | /* | 82 | /* |
83 | * Boot a secondary CPU, and assign it the specified idle task. | 83 | * Boot a secondary CPU, and assign it the specified idle task. |
84 | * This also gives us the initial stack to use for this CPU. | 84 | * This also gives us the initial stack to use for this CPU. |
85 | */ | 85 | */ |
86 | static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) | 86 | static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) |
87 | { | 87 | { |
88 | unsigned long timeout; | 88 | unsigned long timeout; |
89 | 89 | ||
90 | /* | 90 | /* |
91 | * Set synchronisation state between this boot processor | 91 | * Set synchronisation state between this boot processor |
92 | * and the secondary one | 92 | * and the secondary one |
93 | */ | 93 | */ |
94 | raw_spin_lock(&boot_lock); | 94 | raw_spin_lock(&boot_lock); |
95 | 95 | ||
96 | /* | 96 | /* |
97 | * Update the pen release flag. | 97 | * Update the pen release flag. |
98 | */ | 98 | */ |
99 | write_pen_release(cpu); | 99 | write_pen_release(cpu); |
100 | 100 | ||
101 | /* | 101 | /* |
102 | * Send an event, causing the secondaries to read pen_release. | 102 | * Send an event, causing the secondaries to read pen_release. |
103 | */ | 103 | */ |
104 | sev(); | 104 | sev(); |
105 | 105 | ||
106 | timeout = jiffies + (1 * HZ); | 106 | timeout = jiffies + (1 * HZ); |
107 | while (time_before(jiffies, timeout)) { | 107 | while (time_before(jiffies, timeout)) { |
108 | if (secondary_holding_pen_release == -1UL) | 108 | if (secondary_holding_pen_release == -1UL) |
109 | break; | 109 | break; |
110 | udelay(10); | 110 | udelay(10); |
111 | } | 111 | } |
112 | 112 | ||
113 | /* | 113 | /* |
114 | * Now the secondary core is starting up, let it run its | 114 | * Now the secondary core is starting up, let it run its |
115 | * calibrations, then wait for it to finish | 115 | * calibrations, then wait for it to finish |
116 | */ | 116 | */ |
117 | raw_spin_unlock(&boot_lock); | 117 | raw_spin_unlock(&boot_lock); |
118 | 118 | ||
119 | return secondary_holding_pen_release != -1 ? -ENOSYS : 0; | 119 | return secondary_holding_pen_release != -1 ? -ENOSYS : 0; |
120 | } | 120 | } |
121 | 121 | ||
122 | static DECLARE_COMPLETION(cpu_running); | 122 | static DECLARE_COMPLETION(cpu_running); |
123 | 123 | ||
124 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) | 124 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) |
125 | { | 125 | { |
126 | int ret; | 126 | int ret; |
127 | 127 | ||
128 | /* | 128 | /* |
129 | * We need to tell the secondary core where to find its stack and the | 129 | * We need to tell the secondary core where to find its stack and the |
130 | * page tables. | 130 | * page tables. |
131 | */ | 131 | */ |
132 | secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; | 132 | secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; |
133 | __flush_dcache_area(&secondary_data, sizeof(secondary_data)); | 133 | __flush_dcache_area(&secondary_data, sizeof(secondary_data)); |
134 | 134 | ||
135 | /* | 135 | /* |
136 | * Now bring the CPU into our world. | 136 | * Now bring the CPU into our world. |
137 | */ | 137 | */ |
138 | ret = boot_secondary(cpu, idle); | 138 | ret = boot_secondary(cpu, idle); |
139 | if (ret == 0) { | 139 | if (ret == 0) { |
140 | /* | 140 | /* |
141 | * CPU was successfully started, wait for it to come online or | 141 | * CPU was successfully started, wait for it to come online or |
142 | * time out. | 142 | * time out. |
143 | */ | 143 | */ |
144 | wait_for_completion_timeout(&cpu_running, | 144 | wait_for_completion_timeout(&cpu_running, |
145 | msecs_to_jiffies(1000)); | 145 | msecs_to_jiffies(1000)); |
146 | 146 | ||
147 | if (!cpu_online(cpu)) { | 147 | if (!cpu_online(cpu)) { |
148 | pr_crit("CPU%u: failed to come online\n", cpu); | 148 | pr_crit("CPU%u: failed to come online\n", cpu); |
149 | ret = -EIO; | 149 | ret = -EIO; |
150 | } | 150 | } |
151 | } else { | 151 | } else { |
152 | pr_err("CPU%u: failed to boot: %d\n", cpu, ret); | 152 | pr_err("CPU%u: failed to boot: %d\n", cpu, ret); |
153 | } | 153 | } |
154 | 154 | ||
155 | secondary_data.stack = NULL; | 155 | secondary_data.stack = NULL; |
156 | 156 | ||
157 | return ret; | 157 | return ret; |
158 | } | 158 | } |
159 | 159 | ||
160 | /* | 160 | /* |
161 | * This is the secondary CPU boot entry. We're using this CPU's | 161 | * This is the secondary CPU boot entry. We're using this CPU's |
162 | * idle thread stack, but a set of temporary page tables. | 162 | * idle thread stack, but a set of temporary page tables. |
163 | */ | 163 | */ |
164 | asmlinkage void __cpuinit secondary_start_kernel(void) | 164 | asmlinkage void __cpuinit secondary_start_kernel(void) |
165 | { | 165 | { |
166 | struct mm_struct *mm = &init_mm; | 166 | struct mm_struct *mm = &init_mm; |
167 | unsigned int cpu = smp_processor_id(); | 167 | unsigned int cpu = smp_processor_id(); |
168 | 168 | ||
169 | printk("CPU%u: Booted secondary processor\n", cpu); | 169 | printk("CPU%u: Booted secondary processor\n", cpu); |
170 | 170 | ||
171 | /* | 171 | /* |
172 | * All kernel threads share the same mm context; grab a | 172 | * All kernel threads share the same mm context; grab a |
173 | * reference and switch to it. | 173 | * reference and switch to it. |
174 | */ | 174 | */ |
175 | atomic_inc(&mm->mm_count); | 175 | atomic_inc(&mm->mm_count); |
176 | current->active_mm = mm; | 176 | current->active_mm = mm; |
177 | cpumask_set_cpu(cpu, mm_cpumask(mm)); | 177 | cpumask_set_cpu(cpu, mm_cpumask(mm)); |
178 | 178 | ||
179 | /* | 179 | /* |
180 | * TTBR0 is only used for the identity mapping at this stage. Make it | 180 | * TTBR0 is only used for the identity mapping at this stage. Make it |
181 | * point to zero page to avoid speculatively fetching new entries. | 181 | * point to zero page to avoid speculatively fetching new entries. |
182 | */ | 182 | */ |
183 | cpu_set_reserved_ttbr0(); | 183 | cpu_set_reserved_ttbr0(); |
184 | flush_tlb_all(); | 184 | flush_tlb_all(); |
185 | 185 | ||
186 | preempt_disable(); | 186 | preempt_disable(); |
187 | trace_hardirqs_off(); | 187 | trace_hardirqs_off(); |
188 | 188 | ||
189 | /* | 189 | /* |
190 | * Let the primary processor know we're out of the | 190 | * Let the primary processor know we're out of the |
191 | * pen, then head off into the C entry point | 191 | * pen, then head off into the C entry point |
192 | */ | 192 | */ |
193 | write_pen_release(-1); | 193 | write_pen_release(-1); |
194 | 194 | ||
195 | /* | 195 | /* |
196 | * Synchronise with the boot thread. | 196 | * Synchronise with the boot thread. |
197 | */ | 197 | */ |
198 | raw_spin_lock(&boot_lock); | 198 | raw_spin_lock(&boot_lock); |
199 | raw_spin_unlock(&boot_lock); | 199 | raw_spin_unlock(&boot_lock); |
200 | 200 | ||
201 | /* | 201 | /* |
202 | * Enable local interrupts. | 202 | * Enable local interrupts. |
203 | */ | 203 | */ |
204 | notify_cpu_starting(cpu); | 204 | notify_cpu_starting(cpu); |
205 | local_irq_enable(); | 205 | local_irq_enable(); |
206 | local_fiq_enable(); | 206 | local_fiq_enable(); |
207 | 207 | ||
208 | /* | 208 | /* |
209 | * OK, now it's safe to let the boot CPU continue. Wait for | 209 | * OK, now it's safe to let the boot CPU continue. Wait for |
210 | * the CPU migration code to notice that the CPU is online | 210 | * the CPU migration code to notice that the CPU is online |
211 | * before we continue. | 211 | * before we continue. |
212 | */ | 212 | */ |
213 | set_cpu_online(cpu, true); | 213 | set_cpu_online(cpu, true); |
214 | complete(&cpu_running); | 214 | complete(&cpu_running); |
215 | 215 | ||
216 | /* | 216 | /* |
217 | * OK, it's off to the idle thread for us | 217 | * OK, it's off to the idle thread for us |
218 | */ | 218 | */ |
219 | cpu_idle(); | 219 | cpu_idle(); |
220 | } | 220 | } |
221 | 221 | ||
222 | void __init smp_cpus_done(unsigned int max_cpus) | 222 | void __init smp_cpus_done(unsigned int max_cpus) |
223 | { | 223 | { |
224 | unsigned long bogosum = loops_per_jiffy * num_online_cpus(); | 224 | unsigned long bogosum = loops_per_jiffy * num_online_cpus(); |
225 | 225 | ||
226 | pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n", | 226 | pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n", |
227 | num_online_cpus(), bogosum / (500000/HZ), | 227 | num_online_cpus(), bogosum / (500000/HZ), |
228 | (bogosum / (5000/HZ)) % 100); | 228 | (bogosum / (5000/HZ)) % 100); |
229 | } | 229 | } |
230 | 230 | ||
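The BogoMIPS arithmetic above amounts to loops_per_jiffy * HZ / 500000 per
CPU, printed with two decimal places. Worked through with assumed numbers,
HZ = 100 and loops_per_jiffy = 24640 on each of two online CPUs:

    /* bogosum = 24640 * 2 = 49280
     * integer part: 49280 / (500000/100)       = 49280 / 5000 = 9
     * fraction:     (49280 / (5000/100)) % 100 = 985 % 100    = 85
     * -> "SMP: Total of 2 processors activated (9.85 BogoMIPS)." */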
231 | void __init smp_prepare_boot_cpu(void) | 231 | void __init smp_prepare_boot_cpu(void) |
232 | { | 232 | { |
233 | } | 233 | } |
234 | 234 | ||
235 | static void (*smp_cross_call)(const struct cpumask *, unsigned int); | 235 | static void (*smp_cross_call)(const struct cpumask *, unsigned int); |
236 | static phys_addr_t cpu_release_addr[NR_CPUS]; | ||
237 | 236 | ||
237 | static const struct smp_enable_ops *enable_ops[] __initconst = { | ||
238 | &smp_spin_table_ops, | ||
239 | &smp_psci_ops, | ||
240 | NULL, | ||
241 | }; | ||
242 | |||
243 | static const struct smp_enable_ops *smp_enable_ops[NR_CPUS]; | ||
244 | |||
245 | static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name) | ||
246 | { | ||
247 | const struct smp_enable_ops **ops = enable_ops; | ||
248 | |||
249 | while (*ops) { | ||
250 | if (!strcmp(name, (*ops)->name)) | ||
251 | return *ops; | ||
252 | |||
253 | ops++; | ||
254 | } | ||
255 | |||
256 | return NULL; | ||
257 | } | ||
258 | |||
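The table that smp_get_enable_ops() walks is NULL-terminated, so the loop has
to dereference each slot (*ops) before testing it and comparing its name. The
ops structure itself is added to asm/smp.h elsewhere in this commit; its rough
shape is below (treat the field list as a sketch, not a verbatim header
quote):

    struct smp_enable_ops {
            const char      *name;                  /* matches DT "enable-method" */
            int             (*init_cpu)(struct device_node *, int);
                                                    /* parse per-cpu DT properties */
            int             (*prepare_cpu)(int);    /* make the CPU ready to boot */
    };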
238 | /* | 259 | /* |
239 | * Enumerate the possible CPU set from the device tree. | 260 | * Enumerate the possible CPU set from the device tree. |
240 | */ | 261 | */ |
241 | void __init smp_init_cpus(void) | 262 | void __init smp_init_cpus(void) |
242 | { | 263 | { |
243 | const char *enable_method; | 264 | const char *enable_method; |
244 | struct device_node *dn = NULL; | 265 | struct device_node *dn = NULL; |
245 | int cpu = 0; | 266 | int cpu = 0; |
246 | 267 | ||
247 | while ((dn = of_find_node_by_type(dn, "cpu"))) { | 268 | while ((dn = of_find_node_by_type(dn, "cpu"))) { |
248 | if (cpu >= NR_CPUS) | 269 | if (cpu >= NR_CPUS) |
249 | goto next; | 270 | goto next; |
250 | 271 | ||
251 | /* | 272 | /* |
252 | * We currently support only the "spin-table" enable-method. | 273 | * We currently support only the "spin-table" enable-method. |
253 | */ | 274 | */ |
254 | enable_method = of_get_property(dn, "enable-method", NULL); | 275 | enable_method = of_get_property(dn, "enable-method", NULL); |
255 | if (!enable_method || strcmp(enable_method, "spin-table")) { | 276 | if (!enable_method) { |
256 | pr_err("CPU %d: missing or invalid enable-method property: %s\n", | 277 | pr_err("CPU %d: missing enable-method property\n", cpu); |
257 | cpu, enable_method); | ||
258 | goto next; | 278 | goto next; |
259 | } | 279 | } |
260 | 280 | ||
261 | /* | 281 | smp_enable_ops[cpu] = smp_get_enable_ops(enable_method); |
262 | * Determine the address from which the CPU is polling. | 282 | |
263 | */ | 283 | if (!smp_enable_ops[cpu]) { |
264 | if (of_property_read_u64(dn, "cpu-release-addr", | 284 | pr_err("CPU %d: invalid enable-method property: %s\n", |
265 | &cpu_release_addr[cpu])) { | 285 | cpu, enable_method); |
266 | pr_err("CPU %d: missing or invalid cpu-release-addr property\n", | ||
267 | cpu); | ||
268 | goto next; | 286 | goto next; |
269 | } | 287 | } |
270 | 288 | ||
289 | if (smp_enable_ops[cpu]->init_cpu(dn, cpu)) | ||
290 | goto next; | ||
291 | |||
271 | set_cpu_possible(cpu, true); | 292 | set_cpu_possible(cpu, true); |
272 | next: | 293 | next: |
273 | cpu++; | 294 | cpu++; |
274 | } | 295 | } |
275 | 296 | ||
276 | /* sanity check */ | 297 | /* sanity check */ |
277 | if (cpu > NR_CPUS) | 298 | if (cpu > NR_CPUS) |
278 | pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n", | 299 | pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n", |
279 | cpu, NR_CPUS); | 300 | cpu, NR_CPUS); |
280 | } | 301 | } |
281 | 302 | ||
282 | void __init smp_prepare_cpus(unsigned int max_cpus) | 303 | void __init smp_prepare_cpus(unsigned int max_cpus) |
283 | { | 304 | { |
284 | int cpu; | 305 | int cpu, err; |
285 | void **release_addr; | ||
286 | unsigned int ncores = num_possible_cpus(); | 306 | unsigned int ncores = num_possible_cpus(); |
287 | 307 | ||
288 | /* | 308 | /* |
289 | * are we trying to boot more cores than exist? | 309 | * are we trying to boot more cores than exist? |
290 | */ | 310 | */ |
291 | if (max_cpus > ncores) | 311 | if (max_cpus > ncores) |
292 | max_cpus = ncores; | 312 | max_cpus = ncores; |
293 | 313 | ||
314 | /* Don't bother if we're effectively UP */ | ||
315 | if (max_cpus <= 1) | ||
316 | return; | ||
317 | |||
294 | /* | 318 | /* |
295 | * Initialise the present map (which describes the set of CPUs | 319 | * Initialise the present map (which describes the set of CPUs |
296 | * actually populated at the present time) and release the | 320 | * actually populated at the present time) and release the |
297 | * secondaries from the bootloader. | 321 | * secondaries from the bootloader. |
322 | * | ||
323 | * Make sure we online at most (max_cpus - 1) additional CPUs. | ||
298 | */ | 324 | */ |
325 | max_cpus--; | ||
299 | for_each_possible_cpu(cpu) { | 326 | for_each_possible_cpu(cpu) { |
300 | if (max_cpus == 0) | 327 | if (max_cpus == 0) |
301 | break; | 328 | break; |
302 | 329 | ||
303 | if (!cpu_release_addr[cpu]) | 330 | if (cpu == smp_processor_id()) |
304 | continue; | 331 | continue; |
305 | 332 | ||
306 | release_addr = __va(cpu_release_addr[cpu]); | 333 | if (!smp_enable_ops[cpu]) |
307 | release_addr[0] = (void *)__pa(secondary_holding_pen); | 334 | continue; |
308 | __flush_dcache_area(release_addr, sizeof(release_addr[0])); | ||
309 | 335 | ||
336 | err = smp_enable_ops[cpu]->prepare_cpu(cpu); | ||
337 | if (err) | ||
338 | continue; | ||
339 | |||
310 | set_cpu_present(cpu, true); | 340 | set_cpu_present(cpu, true); |
311 | max_cpus--; | 341 | max_cpus--; |
312 | } | 342 | } |
313 | |||
314 | /* | ||
315 | * Send an event to wake up the secondaries. | ||
316 | */ | ||
317 | sev(); | ||
318 | } | 343 | } |
319 | 344 | ||
320 | 345 | ||
321 | void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) | 346 | void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) |
322 | { | 347 | { |
323 | smp_cross_call = fn; | 348 | smp_cross_call = fn; |
324 | } | 349 | } |
325 | 350 | ||
326 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | 351 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
327 | { | 352 | { |
328 | smp_cross_call(mask, IPI_CALL_FUNC); | 353 | smp_cross_call(mask, IPI_CALL_FUNC); |
329 | } | 354 | } |
330 | 355 | ||
331 | void arch_send_call_function_single_ipi(int cpu) | 356 | void arch_send_call_function_single_ipi(int cpu) |
332 | { | 357 | { |
333 | smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); | 358 | smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); |
334 | } | 359 | } |
335 | 360 | ||
336 | static const char *ipi_types[NR_IPI] = { | 361 | static const char *ipi_types[NR_IPI] = { |
337 | #define S(x,s) [x - IPI_RESCHEDULE] = s | 362 | #define S(x,s) [x - IPI_RESCHEDULE] = s |
338 | S(IPI_RESCHEDULE, "Rescheduling interrupts"), | 363 | S(IPI_RESCHEDULE, "Rescheduling interrupts"), |
339 | S(IPI_CALL_FUNC, "Function call interrupts"), | 364 | S(IPI_CALL_FUNC, "Function call interrupts"), |
340 | S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), | 365 | S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), |
341 | S(IPI_CPU_STOP, "CPU stop interrupts"), | 366 | S(IPI_CPU_STOP, "CPU stop interrupts"), |
342 | }; | 367 | }; |
343 | 368 | ||
344 | void show_ipi_list(struct seq_file *p, int prec) | 369 | void show_ipi_list(struct seq_file *p, int prec) |
345 | { | 370 | { |
346 | unsigned int cpu, i; | 371 | unsigned int cpu, i; |
347 | 372 | ||
348 | for (i = 0; i < NR_IPI; i++) { | 373 | for (i = 0; i < NR_IPI; i++) { |
349 | seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE, | 374 | seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE, |
350 | prec >= 4 ? " " : ""); | 375 | prec >= 4 ? " " : ""); |
351 | for_each_present_cpu(cpu) | 376 | for_each_present_cpu(cpu) |
352 | seq_printf(p, "%10u ", | 377 | seq_printf(p, "%10u ", |
353 | __get_irq_stat(cpu, ipi_irqs[i])); | 378 | __get_irq_stat(cpu, ipi_irqs[i])); |
354 | seq_printf(p, " %s\n", ipi_types[i]); | 379 | seq_printf(p, " %s\n", ipi_types[i]); |
355 | } | 380 | } |
356 | } | 381 | } |
357 | 382 | ||
358 | u64 smp_irq_stat_cpu(unsigned int cpu) | 383 | u64 smp_irq_stat_cpu(unsigned int cpu) |
359 | { | 384 | { |
360 | u64 sum = 0; | 385 | u64 sum = 0; |
361 | int i; | 386 | int i; |
362 | 387 | ||
363 | for (i = 0; i < NR_IPI; i++) | 388 | for (i = 0; i < NR_IPI; i++) |
364 | sum += __get_irq_stat(cpu, ipi_irqs[i]); | 389 | sum += __get_irq_stat(cpu, ipi_irqs[i]); |
365 | 390 | ||
366 | return sum; | 391 | return sum; |
367 | } | 392 | } |
368 | 393 | ||
369 | static DEFINE_RAW_SPINLOCK(stop_lock); | 394 | static DEFINE_RAW_SPINLOCK(stop_lock); |
370 | 395 | ||
371 | /* | 396 | /* |
372 | * ipi_cpu_stop - handle IPI from smp_send_stop() | 397 | * ipi_cpu_stop - handle IPI from smp_send_stop() |
373 | */ | 398 | */ |
374 | static void ipi_cpu_stop(unsigned int cpu) | 399 | static void ipi_cpu_stop(unsigned int cpu) |
375 | { | 400 | { |
376 | if (system_state == SYSTEM_BOOTING || | 401 | if (system_state == SYSTEM_BOOTING || |
377 | system_state == SYSTEM_RUNNING) { | 402 | system_state == SYSTEM_RUNNING) { |
378 | raw_spin_lock(&stop_lock); | 403 | raw_spin_lock(&stop_lock); |
379 | pr_crit("CPU%u: stopping\n", cpu); | 404 | pr_crit("CPU%u: stopping\n", cpu); |
380 | dump_stack(); | 405 | dump_stack(); |
381 | raw_spin_unlock(&stop_lock); | 406 | raw_spin_unlock(&stop_lock); |
382 | } | 407 | } |
383 | 408 | ||
384 | set_cpu_online(cpu, false); | 409 | set_cpu_online(cpu, false); |
385 | 410 | ||
386 | local_fiq_disable(); | 411 | local_fiq_disable(); |
387 | local_irq_disable(); | 412 | local_irq_disable(); |
388 | 413 | ||
389 | while (1) | 414 | while (1) |
390 | cpu_relax(); | 415 | cpu_relax(); |
391 | } | 416 | } |
392 | 417 | ||
393 | /* | 418 | /* |
394 | * Main handler for inter-processor interrupts | 419 | * Main handler for inter-processor interrupts |
395 | */ | 420 | */ |
396 | void handle_IPI(int ipinr, struct pt_regs *regs) | 421 | void handle_IPI(int ipinr, struct pt_regs *regs) |
397 | { | 422 | { |
398 | unsigned int cpu = smp_processor_id(); | 423 | unsigned int cpu = smp_processor_id(); |
399 | struct pt_regs *old_regs = set_irq_regs(regs); | 424 | struct pt_regs *old_regs = set_irq_regs(regs); |
400 | 425 | ||
401 | if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI) | 426 | if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI) |
402 | __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]); | 427 | __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]); |
403 | 428 | ||
404 | switch (ipinr) { | 429 | switch (ipinr) { |
405 | case IPI_RESCHEDULE: | 430 | case IPI_RESCHEDULE: |
406 | scheduler_ipi(); | 431 | scheduler_ipi(); |
407 | break; | 432 | break; |
408 | 433 | ||
409 | case IPI_CALL_FUNC: | 434 | case IPI_CALL_FUNC: |
410 | irq_enter(); | 435 | irq_enter(); |
411 | generic_smp_call_function_interrupt(); | 436 | generic_smp_call_function_interrupt(); |
412 | irq_exit(); | 437 | irq_exit(); |
413 | break; | 438 | break; |
414 | 439 | ||
415 | case IPI_CALL_FUNC_SINGLE: | 440 | case IPI_CALL_FUNC_SINGLE: |
416 | irq_enter(); | 441 | irq_enter(); |
417 | generic_smp_call_function_single_interrupt(); | 442 | generic_smp_call_function_single_interrupt(); |
418 | irq_exit(); | 443 | irq_exit(); |
419 | break; | 444 | break; |
420 | 445 | ||
421 | case IPI_CPU_STOP: | 446 | case IPI_CPU_STOP: |
422 | irq_enter(); | 447 | irq_enter(); |
423 | ipi_cpu_stop(cpu); | 448 | ipi_cpu_stop(cpu); |
424 | irq_exit(); | 449 | irq_exit(); |
425 | break; | 450 | break; |
426 | 451 | ||
427 | default: | 452 | default: |
428 | pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); | 453 | pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); |
429 | break; | 454 | break; |
430 | } | 455 | } |
431 | set_irq_regs(old_regs); | 456 | set_irq_regs(old_regs); |
432 | } | 457 | } |
433 | 458 | ||
434 | void smp_send_reschedule(int cpu) | 459 | void smp_send_reschedule(int cpu) |
435 | { | 460 | { |
436 | smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); | 461 | smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); |
437 | } | 462 | } |
438 | 463 | ||
439 | void smp_send_stop(void) | 464 | void smp_send_stop(void) |
440 | { | 465 | { |
441 | unsigned long timeout; | 466 | unsigned long timeout; |
442 | 467 | ||
443 | if (num_online_cpus() > 1) { | 468 | if (num_online_cpus() > 1) { |
444 | cpumask_t mask; | 469 | cpumask_t mask; |
445 | 470 | ||
446 | cpumask_copy(&mask, cpu_online_mask); | 471 | cpumask_copy(&mask, cpu_online_mask); |
447 | cpu_clear(smp_processor_id(), mask); | 472 | cpu_clear(smp_processor_id(), mask); |
448 | 473 | ||
449 | smp_cross_call(&mask, IPI_CPU_STOP); | 474 | smp_cross_call(&mask, IPI_CPU_STOP); |
450 | } | 475 | } |
451 | 476 | ||
452 | /* Wait up to one second for other CPUs to stop */ | 477 | /* Wait up to one second for other CPUs to stop */ |
453 | timeout = USEC_PER_SEC; | 478 | timeout = USEC_PER_SEC; |
454 | while (num_online_cpus() > 1 && timeout--) | 479 | while (num_online_cpus() > 1 && timeout--) |
455 | udelay(1); | 480 | udelay(1); |
456 | 481 | ||
457 | if (num_online_cpus() > 1) | 482 | if (num_online_cpus() > 1) |
458 | pr_warning("SMP: failed to stop secondary CPUs\n"); | 483 | pr_warning("SMP: failed to stop secondary CPUs\n"); |
459 | } | 484 | } |
arch/arm64/kernel/smp_psci.c
File was created | 1 | /* | |
2 | * PSCI SMP initialisation | ||
3 | * | ||
4 | * Copyright (C) 2013 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/smp.h> | ||
22 | |||
23 | #include <asm/psci.h> | ||
24 | |||
25 | static int __init smp_psci_init_cpu(struct device_node *dn, int cpu) | ||
26 | { | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | static int __init smp_psci_prepare_cpu(int cpu) | ||
31 | { | ||
32 | int err; | ||
33 | |||
34 | if (!psci_ops.cpu_on) { | ||
35 | pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu); | ||
36 | return -ENODEV; | ||
37 | } | ||
38 | |||
39 | err = psci_ops.cpu_on(cpu, __pa(secondary_holding_pen)); | ||
40 | if (err) { | ||
41 | pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err); | ||
42 | return err; | ||
43 | } | ||
44 | |||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | const struct smp_enable_ops smp_psci_ops __initconst = { | ||
49 | .name = "psci", | ||
50 | .init_cpu = smp_psci_init_cpu, | ||
51 | .prepare_cpu = smp_psci_prepare_cpu, | ||
52 | }; | ||
53 |
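Both new files in this merge plug into the enable-method abstraction introduced by the "rework the SMP code to be enabling method agnostic" patch. The struct itself lives in arch/arm64/include/asm/smp.h (listed among the changed files but not shown in this excerpt); the following is only a sketch of its apparent shape, inferred from the two implementations rather than copied from that header:

        /*
         * Sketch only; the authoritative definition is in asm/smp.h.
         * struct device_node comes from <linux/of.h>.
         */
        struct smp_enable_ops {
        	const char	*name;	/* matched against the DT "enable-method" string */
        	int		(*init_cpu)(struct device_node *dn, int cpu);	/* parse per-cpu DT properties */
        	int		(*prepare_cpu)(int cpu);	/* release the CPU from its holding pen */
        };

Presumably the core SMP code selects one ops structure per CPU while parsing the device tree and calls prepare_cpu() from the CPU bring-up path, so a new booting protocol only needs to provide another instance of this structure.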
arch/arm64/kernel/smp_spin_table.c
File was created | 1 | /* | |
2 | * Spin Table SMP initialisation | ||
3 | * | ||
4 | * Copyright (C) 2013 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/smp.h> | ||
22 | |||
23 | #include <asm/cacheflush.h> | ||
24 | |||
25 | static phys_addr_t cpu_release_addr[NR_CPUS]; | ||
26 | |||
27 | static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu) | ||
28 | { | ||
29 | /* | ||
30 | * Determine the address from which the CPU is polling. | ||
31 | */ | ||
32 | if (of_property_read_u64(dn, "cpu-release-addr", | ||
33 | &cpu_release_addr[cpu])) { | ||
34 | pr_err("CPU %d: missing or invalid cpu-release-addr property\n", | ||
35 | cpu); | ||
36 | |||
37 | return -1; | ||
38 | } | ||
39 | |||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | static int __init smp_spin_table_prepare_cpu(int cpu) | ||
44 | { | ||
45 | void **release_addr; | ||
46 | |||
47 | if (!cpu_release_addr[cpu]) | ||
48 | return -ENODEV; | ||
49 | |||
50 | release_addr = __va(cpu_release_addr[cpu]); | ||
51 | release_addr[0] = (void *)__pa(secondary_holding_pen); | ||
52 | __flush_dcache_area(release_addr, sizeof(release_addr[0])); | ||
53 | |||
54 | /* | ||
55 | * Send an event to wake up the secondary CPU. | ||
56 | */ | ||
57 | sev(); | ||
58 | |||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | const struct smp_enable_ops smp_spin_table_ops __initconst = { | ||
63 | .name = "spin-table", | ||
64 | .init_cpu = smp_spin_table_init_cpu, | ||
65 | .prepare_cpu = smp_spin_table_prepare_cpu, | ||
66 | }; | ||
67 |
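For context, smp_spin_table_prepare_cpu() above implements only the boot-CPU half of the spin-table protocol: publish the physical address of secondary_holding_pen at the release address, clean that line to the point of coherency, then issue sev(). The waiting half runs in firmware or a boot-wrapper, not in this kernel; a hypothetical C rendering of that loop, for illustration only:

        /*
         * Hypothetical secondary-side loop (in practice firmware code,
         * usually assembly, running with the MMU off). wfe() is assumed
         * to be the usual arm64 wait-for-event barrier macro.
         */
        static void secondary_spin(volatile u64 *release_addr)
        {
        	u64 entry;

        	while ((entry = *release_addr) == 0)
        		wfe();			/* woken by the boot CPU's sev() */

        	((void (*)(void))entry)();	/* jump to secondary_holding_pen */
        }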
arch/arm64/mm/mmu.c
1 | /* | 1 | /* |
2 | * Based on arch/arm/mm/mmu.c | 2 | * Based on arch/arm/mm/mmu.c |
3 | * | 3 | * |
4 | * Copyright (C) 1995-2005 Russell King | 4 | * Copyright (C) 1995-2005 Russell King |
5 | * Copyright (C) 2012 ARM Ltd. | 5 | * Copyright (C) 2012 ARM Ltd. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/export.h> | 20 | #include <linux/export.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/mman.h> | 24 | #include <linux/mman.h> |
25 | #include <linux/nodemask.h> | 25 | #include <linux/nodemask.h> |
26 | #include <linux/memblock.h> | 26 | #include <linux/memblock.h> |
27 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
28 | #include <linux/io.h> | ||
28 | 29 | ||
29 | #include <asm/cputype.h> | 30 | #include <asm/cputype.h> |
30 | #include <asm/sections.h> | 31 | #include <asm/sections.h> |
31 | #include <asm/setup.h> | 32 | #include <asm/setup.h> |
32 | #include <asm/sizes.h> | 33 | #include <asm/sizes.h> |
33 | #include <asm/tlb.h> | 34 | #include <asm/tlb.h> |
34 | #include <asm/mmu_context.h> | 35 | #include <asm/mmu_context.h> |
35 | 36 | ||
36 | #include "mm.h" | 37 | #include "mm.h" |
37 | 38 | ||
38 | /* | 39 | /* |
39 | * Empty_zero_page is a special page that is used for zero-initialized data | 40 | * Empty_zero_page is a special page that is used for zero-initialized data |
40 | * and COW. | 41 | * and COW. |
41 | */ | 42 | */ |
42 | struct page *empty_zero_page; | 43 | struct page *empty_zero_page; |
43 | EXPORT_SYMBOL(empty_zero_page); | 44 | EXPORT_SYMBOL(empty_zero_page); |
44 | 45 | ||
45 | pgprot_t pgprot_default; | 46 | pgprot_t pgprot_default; |
46 | EXPORT_SYMBOL(pgprot_default); | 47 | EXPORT_SYMBOL(pgprot_default); |
47 | 48 | ||
48 | static pmdval_t prot_sect_kernel; | 49 | static pmdval_t prot_sect_kernel; |
49 | 50 | ||
50 | struct cachepolicy { | 51 | struct cachepolicy { |
51 | const char policy[16]; | 52 | const char policy[16]; |
52 | u64 mair; | 53 | u64 mair; |
53 | u64 tcr; | 54 | u64 tcr; |
54 | }; | 55 | }; |
55 | 56 | ||
56 | static struct cachepolicy cache_policies[] __initdata = { | 57 | static struct cachepolicy cache_policies[] __initdata = { |
57 | { | 58 | { |
58 | .policy = "uncached", | 59 | .policy = "uncached", |
59 | .mair = 0x44, /* inner, outer non-cacheable */ | 60 | .mair = 0x44, /* inner, outer non-cacheable */ |
60 | .tcr = TCR_IRGN_NC | TCR_ORGN_NC, | 61 | .tcr = TCR_IRGN_NC | TCR_ORGN_NC, |
61 | }, { | 62 | }, { |
62 | .policy = "writethrough", | 63 | .policy = "writethrough", |
63 | .mair = 0xaa, /* inner, outer write-through, read-allocate */ | 64 | .mair = 0xaa, /* inner, outer write-through, read-allocate */ |
64 | .tcr = TCR_IRGN_WT | TCR_ORGN_WT, | 65 | .tcr = TCR_IRGN_WT | TCR_ORGN_WT, |
65 | }, { | 66 | }, { |
66 | .policy = "writeback", | 67 | .policy = "writeback", |
67 | .mair = 0xee, /* inner, outer write-back, read-allocate */ | 68 | .mair = 0xee, /* inner, outer write-back, read-allocate */ |
68 | .tcr = TCR_IRGN_WBnWA | TCR_ORGN_WBnWA, | 69 | .tcr = TCR_IRGN_WBnWA | TCR_ORGN_WBnWA, |
69 | } | 70 | } |
70 | }; | 71 | }; |
71 | 72 | ||
72 | /* | 73 | /* |
73 | * These are useful for identifying cache coherency problems by allowing the | 74 | * These are useful for identifying cache coherency problems by allowing the |
74 | * cache or the cache and writebuffer to be turned off. It changes the Normal | 75 | * cache or the cache and writebuffer to be turned off. It changes the Normal |
75 | * memory caching attributes in the MAIR_EL1 register. | 76 | * memory caching attributes in the MAIR_EL1 register. |
76 | */ | 77 | */ |
77 | static int __init early_cachepolicy(char *p) | 78 | static int __init early_cachepolicy(char *p) |
78 | { | 79 | { |
79 | int i; | 80 | int i; |
80 | u64 tmp; | 81 | u64 tmp; |
81 | 82 | ||
82 | for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { | 83 | for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { |
83 | int len = strlen(cache_policies[i].policy); | 84 | int len = strlen(cache_policies[i].policy); |
84 | 85 | ||
85 | if (memcmp(p, cache_policies[i].policy, len) == 0) | 86 | if (memcmp(p, cache_policies[i].policy, len) == 0) |
86 | break; | 87 | break; |
87 | } | 88 | } |
88 | if (i == ARRAY_SIZE(cache_policies)) { | 89 | if (i == ARRAY_SIZE(cache_policies)) { |
89 | pr_err("ERROR: unknown or unsupported cache policy: %s\n", p); | 90 | pr_err("ERROR: unknown or unsupported cache policy: %s\n", p); |
90 | return 0; | 91 | return 0; |
91 | } | 92 | } |
92 | 93 | ||
93 | flush_cache_all(); | 94 | flush_cache_all(); |
94 | 95 | ||
95 | /* | 96 | /* |
96 | * Modify MT_NORMAL attributes in MAIR_EL1. | 97 | * Modify MT_NORMAL attributes in MAIR_EL1. |
97 | */ | 98 | */ |
98 | asm volatile( | 99 | asm volatile( |
99 | " mrs %0, mair_el1\n" | 100 | " mrs %0, mair_el1\n" |
100 | " bfi %0, %1, #%2, #8\n" | 101 | " bfi %0, %1, #%2, #8\n" |
101 | " msr mair_el1, %0\n" | 102 | " msr mair_el1, %0\n" |
102 | " isb\n" | 103 | " isb\n" |
103 | : "=&r" (tmp) | 104 | : "=&r" (tmp) |
104 | : "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8)); | 105 | : "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8)); |
105 | 106 | ||
106 | /* | 107 | /* |
107 | * Modify TCR PTW cacheability attributes. | 108 | * Modify TCR PTW cacheability attributes. |
108 | */ | 109 | */ |
109 | asm volatile( | 110 | asm volatile( |
110 | " mrs %0, tcr_el1\n" | 111 | " mrs %0, tcr_el1\n" |
111 | " bic %0, %0, %2\n" | 112 | " bic %0, %0, %2\n" |
112 | " orr %0, %0, %1\n" | 113 | " orr %0, %0, %1\n" |
113 | " msr tcr_el1, %0\n" | 114 | " msr tcr_el1, %0\n" |
114 | " isb\n" | 115 | " isb\n" |
115 | : "=&r" (tmp) | 116 | : "=&r" (tmp) |
116 | : "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK)); | 117 | : "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK)); |
117 | 118 | ||
118 | flush_cache_all(); | 119 | flush_cache_all(); |
119 | 120 | ||
120 | return 0; | 121 | return 0; |
121 | } | 122 | } |
122 | early_param("cachepolicy", early_cachepolicy); | 123 | early_param("cachepolicy", early_cachepolicy); |
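For reference, the early_param() registration above means the policy is chosen on the kernel command line, e.g.:

        cachepolicy=writethrough

with "uncached", "writethrough" and "writeback" as the valid values from the cache_policies[] table; the handler then rewrites the MT_NORMAL attribute byte in MAIR_EL1 and the IRGN/ORGN page-table-walk attributes in TCR_EL1 accordingly.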
123 | 124 | ||
124 | /* | 125 | /* |
125 | * Adjust the PMD section entries according to the CPU in use. | 126 | * Adjust the PMD section entries according to the CPU in use. |
126 | */ | 127 | */ |
127 | static void __init init_mem_pgprot(void) | 128 | static void __init init_mem_pgprot(void) |
128 | { | 129 | { |
129 | pteval_t default_pgprot; | 130 | pteval_t default_pgprot; |
130 | int i; | 131 | int i; |
131 | 132 | ||
132 | default_pgprot = PTE_ATTRINDX(MT_NORMAL); | 133 | default_pgprot = PTE_ATTRINDX(MT_NORMAL); |
133 | prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL); | 134 | prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL); |
134 | 135 | ||
135 | #ifdef CONFIG_SMP | 136 | #ifdef CONFIG_SMP |
136 | /* | 137 | /* |
137 | * Mark memory with the "shared" attribute for SMP systems | 138 | * Mark memory with the "shared" attribute for SMP systems |
138 | */ | 139 | */ |
139 | default_pgprot |= PTE_SHARED; | 140 | default_pgprot |= PTE_SHARED; |
140 | prot_sect_kernel |= PMD_SECT_S; | 141 | prot_sect_kernel |= PMD_SECT_S; |
141 | #endif | 142 | #endif |
142 | 143 | ||
143 | for (i = 0; i < 16; i++) { | 144 | for (i = 0; i < 16; i++) { |
144 | unsigned long v = pgprot_val(protection_map[i]); | 145 | unsigned long v = pgprot_val(protection_map[i]); |
145 | protection_map[i] = __pgprot(v | default_pgprot); | 146 | protection_map[i] = __pgprot(v | default_pgprot); |
146 | } | 147 | } |
147 | 148 | ||
148 | pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot); | 149 | pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot); |
149 | } | 150 | } |
150 | 151 | ||
151 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | 152 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
152 | unsigned long size, pgprot_t vma_prot) | 153 | unsigned long size, pgprot_t vma_prot) |
153 | { | 154 | { |
154 | if (!pfn_valid(pfn)) | 155 | if (!pfn_valid(pfn)) |
155 | return pgprot_noncached(vma_prot); | 156 | return pgprot_noncached(vma_prot); |
156 | else if (file->f_flags & O_SYNC) | 157 | else if (file->f_flags & O_SYNC) |
157 | return pgprot_writecombine(vma_prot); | 158 | return pgprot_writecombine(vma_prot); |
158 | return vma_prot; | 159 | return vma_prot; |
159 | } | 160 | } |
160 | EXPORT_SYMBOL(phys_mem_access_prot); | 161 | EXPORT_SYMBOL(phys_mem_access_prot); |
161 | 162 | ||
162 | static void __init *early_alloc(unsigned long sz) | 163 | static void __init *early_alloc(unsigned long sz) |
163 | { | 164 | { |
164 | void *ptr = __va(memblock_alloc(sz, sz)); | 165 | void *ptr = __va(memblock_alloc(sz, sz)); |
165 | memset(ptr, 0, sz); | 166 | memset(ptr, 0, sz); |
166 | return ptr; | 167 | return ptr; |
167 | } | 168 | } |
168 | 169 | ||
169 | static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, | 170 | static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, |
170 | unsigned long end, unsigned long pfn) | 171 | unsigned long end, unsigned long pfn) |
171 | { | 172 | { |
172 | pte_t *pte; | 173 | pte_t *pte; |
173 | 174 | ||
174 | if (pmd_none(*pmd)) { | 175 | if (pmd_none(*pmd)) { |
175 | pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t)); | 176 | pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t)); |
176 | __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE); | 177 | __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE); |
177 | } | 178 | } |
178 | BUG_ON(pmd_bad(*pmd)); | 179 | BUG_ON(pmd_bad(*pmd)); |
179 | 180 | ||
180 | pte = pte_offset_kernel(pmd, addr); | 181 | pte = pte_offset_kernel(pmd, addr); |
181 | do { | 182 | do { |
182 | set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); | 183 | set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); |
183 | pfn++; | 184 | pfn++; |
184 | } while (pte++, addr += PAGE_SIZE, addr != end); | 185 | } while (pte++, addr += PAGE_SIZE, addr != end); |
185 | } | 186 | } |
186 | 187 | ||
187 | static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, | 188 | static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, |
188 | unsigned long end, phys_addr_t phys) | 189 | unsigned long end, phys_addr_t phys) |
189 | { | 190 | { |
190 | pmd_t *pmd; | 191 | pmd_t *pmd; |
191 | unsigned long next; | 192 | unsigned long next; |
192 | 193 | ||
193 | /* | 194 | /* |
194 | * Check for initial section mappings in the pgd/pud and remove them. | 195 | * Check for initial section mappings in the pgd/pud and remove them. |
195 | */ | 196 | */ |
196 | if (pud_none(*pud) || pud_bad(*pud)) { | 197 | if (pud_none(*pud) || pud_bad(*pud)) { |
197 | pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t)); | 198 | pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t)); |
198 | pud_populate(&init_mm, pud, pmd); | 199 | pud_populate(&init_mm, pud, pmd); |
199 | } | 200 | } |
200 | 201 | ||
201 | pmd = pmd_offset(pud, addr); | 202 | pmd = pmd_offset(pud, addr); |
202 | do { | 203 | do { |
203 | next = pmd_addr_end(addr, end); | 204 | next = pmd_addr_end(addr, end); |
204 | /* try section mapping first */ | 205 | /* try section mapping first */ |
205 | if (((addr | next | phys) & ~SECTION_MASK) == 0) | 206 | if (((addr | next | phys) & ~SECTION_MASK) == 0) |
206 | set_pmd(pmd, __pmd(phys | prot_sect_kernel)); | 207 | set_pmd(pmd, __pmd(phys | prot_sect_kernel)); |
207 | else | 208 | else |
208 | alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys)); | 209 | alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys)); |
209 | phys += next - addr; | 210 | phys += next - addr; |
210 | } while (pmd++, addr = next, addr != end); | 211 | } while (pmd++, addr = next, addr != end); |
211 | } | 212 | } |
212 | 213 | ||
213 | static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | 214 | static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, |
214 | unsigned long end, unsigned long phys) | 215 | unsigned long end, unsigned long phys) |
215 | { | 216 | { |
216 | pud_t *pud = pud_offset(pgd, addr); | 217 | pud_t *pud = pud_offset(pgd, addr); |
217 | unsigned long next; | 218 | unsigned long next; |
218 | 219 | ||
219 | do { | 220 | do { |
220 | next = pud_addr_end(addr, end); | 221 | next = pud_addr_end(addr, end); |
221 | alloc_init_pmd(pud, addr, next, phys); | 222 | alloc_init_pmd(pud, addr, next, phys); |
222 | phys += next - addr; | 223 | phys += next - addr; |
223 | } while (pud++, addr = next, addr != end); | 224 | } while (pud++, addr = next, addr != end); |
224 | } | 225 | } |
225 | 226 | ||
226 | /* | 227 | /* |
227 | * Create the page directory entries and any necessary page tables for the | 228 | * Create the page directory entries and any necessary page tables for the |
228 | * mapping specified by 'phys', 'virt' and 'size'. | 229 | * mapping specified by 'phys', 'virt' and 'size'. |
229 | */ | 230 | */ |
230 | static void __init create_mapping(phys_addr_t phys, unsigned long virt, | 231 | static void __init create_mapping(phys_addr_t phys, unsigned long virt, |
231 | phys_addr_t size) | 232 | phys_addr_t size) |
232 | { | 233 | { |
233 | unsigned long addr, length, end, next; | 234 | unsigned long addr, length, end, next; |
234 | pgd_t *pgd; | 235 | pgd_t *pgd; |
235 | 236 | ||
236 | if (virt < VMALLOC_START) { | 237 | if (virt < VMALLOC_START) { |
237 | pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n", | 238 | pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n", |
238 | phys, virt); | 239 | phys, virt); |
239 | return; | 240 | return; |
240 | } | 241 | } |
241 | 242 | ||
242 | addr = virt & PAGE_MASK; | 243 | addr = virt & PAGE_MASK; |
243 | length = PAGE_ALIGN(size + (virt & ~PAGE_MASK)); | 244 | length = PAGE_ALIGN(size + (virt & ~PAGE_MASK)); |
244 | 245 | ||
245 | pgd = pgd_offset_k(addr); | 246 | pgd = pgd_offset_k(addr); |
246 | end = addr + length; | 247 | end = addr + length; |
247 | do { | 248 | do { |
248 | next = pgd_addr_end(addr, end); | 249 | next = pgd_addr_end(addr, end); |
249 | alloc_init_pud(pgd, addr, next, phys); | 250 | alloc_init_pud(pgd, addr, next, phys); |
250 | phys += next - addr; | 251 | phys += next - addr; |
251 | } while (pgd++, addr = next, addr != end); | 252 | } while (pgd++, addr = next, addr != end); |
252 | } | 253 | } |
254 | |||
255 | #ifdef CONFIG_EARLY_PRINTK | ||
256 | /* | ||
257 | * Create an early I/O mapping using the pgd/pmd entries already populated | ||
258 | * in head.S as this function is called too early to allocate any memory. The | ||
259 | * mapping size is 2MB with 4KB pages or 64KB with 64KB pages. | ||
260 | */ | ||
261 | void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt) | ||
262 | { | ||
263 | unsigned long size, mask; | ||
264 | bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES); | ||
265 | pgd_t *pgd; | ||
266 | pud_t *pud; | ||
267 | pmd_t *pmd; | ||
268 | pte_t *pte; | ||
269 | |||
270 | /* | ||
271 | * With !ARM64_64K_PAGES there are no early pte entries, so section | ||
272 | * (pmd) mappings are used instead. | ||
273 | */ | ||
274 | size = page64k ? PAGE_SIZE : SECTION_SIZE; | ||
275 | mask = ~(size - 1); | ||
276 | |||
277 | pgd = pgd_offset_k(virt); | ||
278 | pud = pud_offset(pgd, virt); | ||
279 | if (pud_none(*pud)) | ||
280 | return NULL; | ||
281 | pmd = pmd_offset(pud, virt); | ||
282 | |||
283 | if (page64k) { | ||
284 | if (pmd_none(*pmd)) | ||
285 | return NULL; | ||
286 | pte = pte_offset_kernel(pmd, virt); | ||
287 | set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE)); | ||
288 | } else { | ||
289 | set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE)); | ||
290 | } | ||
291 | |||
292 | return (void __iomem *)((virt & mask) + (phys & ~mask)); | ||
293 | } | ||
294 | #endif | ||
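For context, early_io_map() exists to back the simple earlyprintk support added elsewhere in this merge (arch/arm64/kernel/early_printk.c, listed among the changed files but not shown here). A minimal sketch of how a caller might map its UART through it; the fixed virtual slot EARLYCON_IOBASE is an assumption of this sketch, not something visible in this hunk:

        static void __iomem *early_base;

        static int __init map_early_uart(phys_addr_t paddr)
        {
        	/*
        	 * EARLYCON_IOBASE (assumed): a virtual address already covered
        	 * by the head.S page tables; early_io_map() rewrites the
        	 * existing pmd/pte in place instead of allocating new tables.
        	 */
        	early_base = early_io_map(paddr, EARLYCON_IOBASE);
        	return early_base ? 0 : -ENOMEM;
        }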
253 | 295 | ||
254 | static void __init map_mem(void) | 296 | static void __init map_mem(void) |
255 | { | 297 | { |
256 | struct memblock_region *reg; | 298 | struct memblock_region *reg; |
257 | 299 | ||
258 | /* map all the memory banks */ | 300 | /* map all the memory banks */ |
259 | for_each_memblock(memory, reg) { | 301 | for_each_memblock(memory, reg) { |
260 | phys_addr_t start = reg->base; | 302 | phys_addr_t start = reg->base; |
261 | phys_addr_t end = start + reg->size; | 303 | phys_addr_t end = start + reg->size; |
262 | 304 | ||
263 | if (start >= end) | 305 | if (start >= end) |
264 | break; | 306 | break; |
265 | 307 | ||
266 | create_mapping(start, __phys_to_virt(start), end - start); | 308 | create_mapping(start, __phys_to_virt(start), end - start); |
267 | } | 309 | } |
268 | } | 310 | } |
269 | 311 | ||
270 | /* | 312 | /* |
271 | * paging_init() sets up the page tables, initialises the zone memory | 313 | * paging_init() sets up the page tables, initialises the zone memory |
272 | * maps and sets up the zero page. | 314 | * maps and sets up the zero page. |
273 | */ | 315 | */ |
274 | void __init paging_init(void) | 316 | void __init paging_init(void) |
275 | { | 317 | { |
276 | void *zero_page; | 318 | void *zero_page; |
277 | 319 | ||
278 | /* | 320 | /* |
279 | * Maximum PGDIR_SIZE addressable via the initial direct kernel | 321 | * Maximum PGDIR_SIZE addressable via the initial direct kernel |
280 | * mapping in swapper_pg_dir. | 322 | * mapping in swapper_pg_dir. |
281 | */ | 323 | */ |
282 | memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE); | 324 | memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE); |
283 | 325 | ||
284 | init_mem_pgprot(); | 326 | init_mem_pgprot(); |
285 | map_mem(); | 327 | map_mem(); |
286 | 328 | ||
287 | /* | 329 | /* |
288 | * Finally flush the caches and tlb to ensure that we're in a | 330 | * Finally flush the caches and tlb to ensure that we're in a |
289 | * consistent state. | 331 | * consistent state. |
290 | */ | 332 | */ |
291 | flush_cache_all(); | 333 | flush_cache_all(); |
292 | flush_tlb_all(); | 334 | flush_tlb_all(); |
293 | 335 | ||
294 | /* allocate the zero page. */ | 336 | /* allocate the zero page. */ |
295 | zero_page = early_alloc(PAGE_SIZE); | 337 | zero_page = early_alloc(PAGE_SIZE); |
296 | 338 | ||
297 | bootmem_init(); | 339 | bootmem_init(); |
298 | 340 | ||
299 | empty_zero_page = virt_to_page(zero_page); | 341 | empty_zero_page = virt_to_page(zero_page); |
300 | __flush_dcache_page(empty_zero_page); | 342 | __flush_dcache_page(empty_zero_page); |
301 | 343 | ||
302 | /* | 344 | /* |
303 | * TTBR0 is only used for the identity mapping at this stage. Make it | 345 | * TTBR0 is only used for the identity mapping at this stage. Make it |
304 | * point to zero page to avoid speculatively fetching new entries. | 346 | * point to zero page to avoid speculatively fetching new entries. |
305 | */ | 347 | */ |
306 | cpu_set_reserved_ttbr0(); | 348 | cpu_set_reserved_ttbr0(); |
307 | flush_tlb_all(); | 349 | flush_tlb_all(); |
308 | } | 350 | } |
309 | 351 | ||
310 | /* | 352 | /* |
311 | * Enable the identity mapping to allow the MMU disabling. | 353 | * Enable the identity mapping to allow the MMU disabling. |
312 | */ | 354 | */ |
313 | void setup_mm_for_reboot(void) | 355 | void setup_mm_for_reboot(void) |
314 | { | 356 | { |
315 | cpu_switch_mm(idmap_pg_dir, &init_mm); | 357 | cpu_switch_mm(idmap_pg_dir, &init_mm); |
316 | flush_tlb_all(); | 358 | flush_tlb_all(); |
317 | } | 359 | } |
318 | 360 | ||
319 | /* | 361 | /* |
320 | * Check whether a kernel address is valid (derived from arch/x86/). | 362 | * Check whether a kernel address is valid (derived from arch/x86/). |
321 | */ | 363 | */ |
322 | int kern_addr_valid(unsigned long addr) | 364 | int kern_addr_valid(unsigned long addr) |
323 | { | 365 | { |
324 | pgd_t *pgd; | 366 | pgd_t *pgd; |
325 | pud_t *pud; | 367 | pud_t *pud; |
326 | pmd_t *pmd; | 368 | pmd_t *pmd; |
327 | pte_t *pte; | 369 | pte_t *pte; |
328 | 370 | ||
329 | if ((((long)addr) >> VA_BITS) != -1UL) | 371 | if ((((long)addr) >> VA_BITS) != -1UL) |
330 | return 0; | 372 | return 0; |
331 | 373 | ||
332 | pgd = pgd_offset_k(addr); | 374 | pgd = pgd_offset_k(addr); |
333 | if (pgd_none(*pgd)) | 375 | if (pgd_none(*pgd)) |
334 | return 0; | 376 | return 0; |
335 | 377 | ||
336 | pud = pud_offset(pgd, addr); | 378 | pud = pud_offset(pgd, addr); |
337 | if (pud_none(*pud)) | 379 | if (pud_none(*pud)) |
338 | return 0; | 380 | return 0; |
339 | 381 | ||
340 | pmd = pmd_offset(pud, addr); | 382 | pmd = pmd_offset(pud, addr); |
341 | if (pmd_none(*pmd)) | 383 | if (pmd_none(*pmd)) |
342 | return 0; | 384 | return 0; |
343 | 385 | ||
344 | pte = pte_offset_kernel(pmd, addr); | 386 | pte = pte_offset_kernel(pmd, addr); |
345 | if (pte_none(*pte)) | 387 | if (pte_none(*pte)) |
346 | return 0; | 388 | return 0; |
347 | 389 | ||
348 | return pfn_valid(pte_pfn(*pte)); | 390 | return pfn_valid(pte_pfn(*pte)); |
349 | } | 391 | } |
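The first check in kern_addr_valid() relies on arithmetic (sign-extending) right shift: in a kernel address, bits 63 down to VA_BITS-1 are all ones, so ((long)addr) >> VA_BITS yields -1. For example, with VA_BITS = 39, (long)0xffffffc000000000 >> 39 == -1 and the table walk proceeds, while a user address such as 0x0000007fffffffff shifts to 0 and is rejected immediately.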
350 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | 392 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
351 | #ifdef CONFIG_ARM64_64K_PAGES | 393 | #ifdef CONFIG_ARM64_64K_PAGES |
352 | int __meminit vmemmap_populate(struct page *start_page, | 394 | int __meminit vmemmap_populate(struct page *start_page, |
353 | unsigned long size, int node) | 395 | unsigned long size, int node) |
354 | { | 396 | { |
355 | return vmemmap_populate_basepages(start_page, size, node); | 397 | return vmemmap_populate_basepages(start_page, size, node); |
356 | } | 398 | } |
357 | #else /* !CONFIG_ARM64_64K_PAGES */ | 399 | #else /* !CONFIG_ARM64_64K_PAGES */ |
358 | int __meminit vmemmap_populate(struct page *start_page, | 400 | int __meminit vmemmap_populate(struct page *start_page, |
359 | unsigned long size, int node) | 401 | unsigned long size, int node) |
360 | { | 402 | { |
361 | unsigned long addr = (unsigned long)start_page; | 403 | unsigned long addr = (unsigned long)start_page; |
362 | unsigned long end = (unsigned long)(start_page + size); | 404 | unsigned long end = (unsigned long)(start_page + size); |
363 | unsigned long next; | 405 | unsigned long next; |
364 | pgd_t *pgd; | 406 | pgd_t *pgd; |
365 | pud_t *pud; | 407 | pud_t *pud; |
366 | pmd_t *pmd; | 408 | pmd_t *pmd; |
367 | 409 | ||
368 | do { | 410 | do { |
369 | next = pmd_addr_end(addr, end); | 411 | next = pmd_addr_end(addr, end); |
370 | 412 | ||
371 | pgd = vmemmap_pgd_populate(addr, node); | 413 | pgd = vmemmap_pgd_populate(addr, node); |
372 | if (!pgd) | 414 | if (!pgd) |
373 | return -ENOMEM; | 415 | return -ENOMEM; |
374 | 416 | ||
375 | pud = vmemmap_pud_populate(pgd, addr, node); | 417 | pud = vmemmap_pud_populate(pgd, addr, node); |
376 | if (!pud) | 418 | if (!pud) |
377 | return -ENOMEM; | 419 | return -ENOMEM; |
378 | 420 | ||
379 | pmd = pmd_offset(pud, addr); | 421 | pmd = pmd_offset(pud, addr); |
380 | if (pmd_none(*pmd)) { | 422 | if (pmd_none(*pmd)) { |
381 | void *p = NULL; | 423 | void *p = NULL; |
382 | 424 | ||
383 | p = vmemmap_alloc_block_buf(PMD_SIZE, node); | 425 | p = vmemmap_alloc_block_buf(PMD_SIZE, node); |
384 | if (!p) | 426 | if (!p) |
385 | return -ENOMEM; | 427 | return -ENOMEM; |
386 | 428 | ||
387 | set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel)); | 429 | set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel)); |
388 | } else | 430 | } else |
389 | vmemmap_verify((pte_t *)pmd, node, addr, next); | 431 | vmemmap_verify((pte_t *)pmd, node, addr, next); |
390 | } while (addr = next, addr != end); | 432 | } while (addr = next, addr != end); |
391 | 433 | ||
392 | return 0; | 434 | return 0; |
393 | } | 435 | } |
394 | #endif /* CONFIG_ARM64_64K_PAGES */ | 436 | #endif /* CONFIG_ARM64_64K_PAGES */ |
395 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ | 437 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ |
396 | 438 |
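A closing note on the vmemmap_populate() variants above: the !CONFIG_ARM64_64K_PAGES version maps the vmemmap with 2MB (PMD_SIZE) sections allocated via vmemmap_alloc_block_buf(). Assuming a typical (not guaranteed) sizeof(struct page) of 64 bytes, one such section holds 2MB / 64B = 32768 page structs and therefore describes 32768 * 4KB = 128MB of physical memory, which is why the section mapping pays off there; the 64KB-page configuration simply falls back to base-page mappings via vmemmap_populate_basepages().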